Merge branch 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied...
author    Linus Torvalds <torvalds@linux-foundation.org>
          Wed, 17 Jun 2009 04:20:39 +0000 (21:20 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Wed, 17 Jun 2009 04:20:39 +0000 (21:20 -0700)
* 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6:
  drm/radeon: switch to using late_initcall
  radeon legacy chips: tv dac bg/dac adj updates
  drm/radeon: introduce kernel modesetting for radeon hardware
  drm: Add the TTM GPU memory manager subsystem.
  drm: Memory fragmentation from lost alignment blocks
  drm/radeon: fix mobility flags on new PCI IDs.

75 files changed:
drivers/gpu/drm/Kconfig
drivers/gpu/drm/Makefile
drivers/gpu/drm/drm_mm.c
drivers/gpu/drm/radeon/Kconfig [new file with mode: 0644]
drivers/gpu/drm/radeon/Makefile
drivers/gpu/drm/radeon/ObjectID.h [new file with mode: 0644]
drivers/gpu/drm/radeon/atom-bits.h [new file with mode: 0644]
drivers/gpu/drm/radeon/atom-names.h [new file with mode: 0644]
drivers/gpu/drm/radeon/atom-types.h [new file with mode: 0644]
drivers/gpu/drm/radeon/atom.c [new file with mode: 0644]
drivers/gpu/drm/radeon/atom.h [new file with mode: 0644]
drivers/gpu/drm/radeon/atombios.h [new file with mode: 0644]
drivers/gpu/drm/radeon/atombios_crtc.c [new file with mode: 0644]
drivers/gpu/drm/radeon/r100.c [new file with mode: 0644]
drivers/gpu/drm/radeon/r300.c [new file with mode: 0644]
drivers/gpu/drm/radeon/r300_reg.h
drivers/gpu/drm/radeon/r420.c [new file with mode: 0644]
drivers/gpu/drm/radeon/r500_reg.h [new file with mode: 0644]
drivers/gpu/drm/radeon/r520.c [new file with mode: 0644]
drivers/gpu/drm/radeon/r600.c [new file with mode: 0644]
drivers/gpu/drm/radeon/r600_reg.h [new file with mode: 0644]
drivers/gpu/drm/radeon/radeon.h [new file with mode: 0644]
drivers/gpu/drm/radeon/radeon_agp.c [new file with mode: 0644]
drivers/gpu/drm/radeon/radeon_asic.h [new file with mode: 0644]
drivers/gpu/drm/radeon/radeon_atombios.c [new file with mode: 0644]
drivers/gpu/drm/radeon/radeon_benchmark.c [new file with mode: 0644]
drivers/gpu/drm/radeon/radeon_bios.c [new file with mode: 0644]
drivers/gpu/drm/radeon/radeon_clocks.c [new file with mode: 0644]
drivers/gpu/drm/radeon/radeon_combios.c [new file with mode: 0644]
drivers/gpu/drm/radeon/radeon_connectors.c [new file with mode: 0644]
drivers/gpu/drm/radeon/radeon_cs.c [new file with mode: 0644]
drivers/gpu/drm/radeon/radeon_cursor.c [new file with mode: 0644]
drivers/gpu/drm/radeon/radeon_device.c [new file with mode: 0644]
drivers/gpu/drm/radeon/radeon_display.c [new file with mode: 0644]
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_encoders.c [new file with mode: 0644]
drivers/gpu/drm/radeon/radeon_fb.c [new file with mode: 0644]
drivers/gpu/drm/radeon/radeon_fence.c [new file with mode: 0644]
drivers/gpu/drm/radeon/radeon_fixed.h [new file with mode: 0644]
drivers/gpu/drm/radeon/radeon_gart.c [new file with mode: 0644]
drivers/gpu/drm/radeon/radeon_gem.c [new file with mode: 0644]
drivers/gpu/drm/radeon/radeon_i2c.c [new file with mode: 0644]
drivers/gpu/drm/radeon/radeon_irq_kms.c [new file with mode: 0644]
drivers/gpu/drm/radeon/radeon_kms.c [new file with mode: 0644]
drivers/gpu/drm/radeon/radeon_legacy_crtc.c [new file with mode: 0644]
drivers/gpu/drm/radeon/radeon_legacy_encoders.c [new file with mode: 0644]
drivers/gpu/drm/radeon/radeon_mode.h [new file with mode: 0644]
drivers/gpu/drm/radeon/radeon_object.c [new file with mode: 0644]
drivers/gpu/drm/radeon/radeon_object.h [new file with mode: 0644]
drivers/gpu/drm/radeon/radeon_reg.h [new file with mode: 0644]
drivers/gpu/drm/radeon/radeon_ring.c [new file with mode: 0644]
drivers/gpu/drm/radeon/radeon_ttm.c [new file with mode: 0644]
drivers/gpu/drm/radeon/rs400.c [new file with mode: 0644]
drivers/gpu/drm/radeon/rs600.c [new file with mode: 0644]
drivers/gpu/drm/radeon/rs690.c [new file with mode: 0644]
drivers/gpu/drm/radeon/rs780.c [new file with mode: 0644]
drivers/gpu/drm/radeon/rv515.c [new file with mode: 0644]
drivers/gpu/drm/radeon/rv770.c [new file with mode: 0644]
drivers/gpu/drm/ttm/Makefile [new file with mode: 0644]
drivers/gpu/drm/ttm/ttm_agp_backend.c [new file with mode: 0644]
drivers/gpu/drm/ttm/ttm_bo.c [new file with mode: 0644]
drivers/gpu/drm/ttm/ttm_bo_util.c [new file with mode: 0644]
drivers/gpu/drm/ttm/ttm_bo_vm.c [new file with mode: 0644]
drivers/gpu/drm/ttm/ttm_global.c [new file with mode: 0644]
drivers/gpu/drm/ttm/ttm_memory.c [new file with mode: 0644]
drivers/gpu/drm/ttm/ttm_module.c [new file with mode: 0644]
drivers/gpu/drm/ttm/ttm_tt.c [new file with mode: 0644]
drivers/staging/Kconfig
include/drm/drm_pciids.h
include/drm/radeon_drm.h
include/drm/ttm/ttm_bo_api.h [new file with mode: 0644]
include/drm/ttm/ttm_bo_driver.h [new file with mode: 0644]
include/drm/ttm/ttm_memory.h [new file with mode: 0644]
include/drm/ttm/ttm_module.h [new file with mode: 0644]
include/drm/ttm/ttm_placement.h [new file with mode: 0644]

index f5d46e7..c961fe4 100644 (file)
@@ -18,6 +18,14 @@ menuconfig DRM
          details.  You should also select and configure AGP
          (/dev/agpgart) support.
 
+config DRM_TTM
+       tristate
+       depends on DRM
+       help
+         GPU memory management subsystem for devices with multiple
+         GPU memory types. It will be enabled automatically if a device
+         driver uses it.
+
 config DRM_TDFX
        tristate "3dfx Banshee/Voodoo3+"
        depends on DRM && PCI
@@ -36,6 +44,11 @@ config DRM_R128
 config DRM_RADEON
        tristate "ATI Radeon"
        depends on DRM && PCI
+       select FB_CFB_FILLRECT
+       select FB_CFB_COPYAREA
+       select FB_CFB_IMAGEBLIT
+       select FB
+       select FRAMEBUFFER_CONSOLE if !EMBEDDED
        help
          Choose this option if you have an ATI Radeon graphics card.  There
          are both PCI and AGP versions.  You don't need to choose this to
index 4ec5061..4e89ab0 100644 (file)
@@ -26,4 +26,4 @@ obj-$(CONFIG_DRM_I915)  += i915/
 obj-$(CONFIG_DRM_SIS)   += sis/
 obj-$(CONFIG_DRM_SAVAGE)+= savage/
 obj-$(CONFIG_DRM_VIA)  +=via/
-
+obj-$(CONFIG_DRM_TTM)  += ttm/
index 7819fd9..a912a0f 100644 (file)
@@ -188,36 +188,34 @@ static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
 
 
 
-struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
-                               unsigned long size, unsigned alignment)
+struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *node,
+                                    unsigned long size, unsigned alignment)
 {
 
        struct drm_mm_node *align_splitoff = NULL;
-       struct drm_mm_node *child;
        unsigned tmp = 0;
 
        if (alignment)
-               tmp = parent->start % alignment;
+               tmp = node->start % alignment;
 
        if (tmp) {
                align_splitoff =
-                   drm_mm_split_at_start(parent, alignment - tmp, 0);
+                   drm_mm_split_at_start(node, alignment - tmp, 0);
                if (unlikely(align_splitoff == NULL))
                        return NULL;
        }
 
-       if (parent->size == size) {
-               list_del_init(&parent->fl_entry);
-               parent->free = 0;
-               return parent;
+       if (node->size == size) {
+               list_del_init(&node->fl_entry);
+               node->free = 0;
        } else {
-               child = drm_mm_split_at_start(parent, size, 0);
+               node = drm_mm_split_at_start(node, size, 0);
        }
 
        if (align_splitoff)
                drm_mm_put_block(align_splitoff);
 
-       return child;
+       return node;
 }
 
 EXPORT_SYMBOL(drm_mm_get_block);
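The hunk above is the "Memory fragmentation from lost alignment blocks" fix from the shortlog: the early return on an exact-size match is gone, so the fragment split off the front of a block to satisfy the alignment always reaches drm_mm_put_block and rejoins the free list instead of being leaked. A minimal standalone sketch of that idea — plain user-space C with hypothetical names, not the drm_mm API:

/*
 * Standalone sketch (not kernel code) of the idea behind the drm_mm change
 * above: when an allocation needs alignment, the bytes skipped at the front
 * of a free block are split into their own node and handed back to the
 * free list instead of being silently lost.  Names are hypothetical.
 */
#include <stdio.h>

struct node {
	unsigned long start;
	unsigned long size;
	int free;
};

/* Carve 'size' bytes off the front of 'n' and return them as a new node. */
static struct node split_front(struct node *n, unsigned long size)
{
	struct node front = { n->start, size, 1 };

	n->start += size;
	n->size  -= size;
	return front;
}

int main(void)
{
	struct node blk = { 12, 100, 1 };      /* free block at offset 12    */
	unsigned long size = 64, align = 16;
	unsigned long skew = blk.start % align;
	struct node pad = { 0, 0, 1 };

	if (skew)                              /* leading alignment fragment */
		pad = split_front(&blk, align - skew);

	struct node alloc = split_front(&blk, size);
	alloc.free = 0;

	/* 'pad' goes back on the free list rather than leaking. */
	printf("pad   : start=%lu size=%lu (returned to free list)\n",
	       pad.start, pad.size);
	printf("alloc : start=%lu size=%lu\n", alloc.start, alloc.size);
	printf("rest  : start=%lu size=%lu (still free)\n", blk.start, blk.size);
	return 0;
}

Run as written, the 4-byte padding node at offset 12 stays accounted for even though the allocation itself starts at the aligned offset 16.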
diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig
new file mode 100644 (file)
index 0000000..2168d67
--- /dev/null
@@ -0,0 +1,34 @@
+config DRM_RADEON_KMS
+       bool "Enable modesetting on radeon by default"
+       depends on DRM_RADEON
+       select DRM_TTM
+       help
+         Choose this option if you want kernel modesetting enabled by default,
+         and you have a new enough userspace to support this. Running old
+         userspaces with this enabled will cause pain.
+
+         When kernel modesetting is enabled, the legacy ioctls of the
+         radeon/drm driver are treated as invalid: an error message is
+         printed in the log and they return failure.
+
+         KMS-enabled userspace uses a new API to talk to the radeon/drm
+         driver. The new API provides functions to create/destroy/share/mmap
+         buffer objects, which are then managed by the kernel memory manager
+         (here TTM). To submit commands to the GPU, userspace provides a
+         buffer holding the command stream along with a list of the buffer
+         objects used by that command stream. The kernel radeon driver then
+         places the buffers in GPU-accessible memory and updates the command
+         stream to reflect the final position of each buffer.
+
+         The kernel also performs security checks on the command stream
+         provided by the user: the goal is to catch and forbid any illegal
+         use of the GPU, such as DMA into random system memory or into
+         memory not owned by the process supplying the command stream. This
+         part of the code is still incomplete, which is why this patch is
+         proposed as a staging driver addition; future security checks might
+         forbid the current experimental userspace from running.
+
+         This code supports the following hardware: R1XX, R2XX, R3XX,
+         R4XX and R5XX (Radeon up to X1950). Work is underway to provide
+         support for R6XX, R7XX and newer hardware (Radeon HD2XXX to
+         HD4XXX).
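To make the submission model described in this help text concrete: the command stream names buffers indirectly, and only after the kernel has placed each buffer object does it patch the stream with real GPU addresses. The sketch below is a deliberately simplified, hypothetical user-space model of that relocation step, not the radeon CS ioctl interface:

/*
 * Conceptual sketch of the command-submission model described in the help
 * text above: userspace hands in a command stream plus the list of buffer
 * objects it references; the kernel picks a GPU address for each buffer and
 * rewrites (relocates) the stream accordingly.  Everything here is
 * hypothetical and simplified -- it is not the radeon CS interface.
 */
#include <stdint.h>
#include <stdio.h>

struct bo {                /* one buffer object named by the command stream */
	uint32_t handle;
	uint32_t gpu_addr; /* chosen by the kernel when the BO is placed     */
};

/* Replace every "handle" operand in the stream with the BO's GPU address. */
static void relocate(uint32_t *cs, unsigned ncmd,
		     const struct bo *bos, unsigned nbo)
{
	for (unsigned i = 0; i < ncmd; i += 2)         /* {opcode, operand}  */
		for (unsigned j = 0; j < nbo; j++)
			if (cs[i + 1] == bos[j].handle)
				cs[i + 1] = bos[j].gpu_addr;
}

int main(void)
{
	struct bo bos[] = { { 1, 0 }, { 2, 0 } };
	uint32_t cs[] = { 0x10, 1,    /* e.g. "bind color buffer <handle 1>"  */
			  0x20, 2 };  /* e.g. "bind vertex buffer <handle 2>" */

	/* "Placement": the kernel-side memory manager assigns GPU addresses. */
	bos[0].gpu_addr = 0x00100000;
	bos[1].gpu_addr = 0x00200000;

	relocate(cs, 4, bos, 2);

	for (unsigned i = 0; i < 4; i += 2)
		printf("cmd 0x%02X -> 0x%08X\n",
		       (unsigned)cs[i], (unsigned)cs[i + 1]);
	return 0;
}

The security checking mentioned above would sit in the same pass: a reference that is not in the supplied buffer object list is exactly the kind of access the kernel refuses to relocate.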
index 52ce439..5fae1e0 100644 (file)
@@ -3,7 +3,17 @@
 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
 
 ccflags-y := -Iinclude/drm
-radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o r600_cp.o
+radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \
+       radeon_irq.o r300_cmdbuf.o r600_cp.o
+
+radeon-$(CONFIG_DRM_RADEON_KMS) += radeon_device.o radeon_kms.o \
+       radeon_atombios.o radeon_agp.o atombios_crtc.o radeon_combios.o \
+       atom.o radeon_fence.o radeon_ttm.o radeon_object.o radeon_gart.o \
+       radeon_legacy_crtc.o radeon_legacy_encoders.o radeon_connectors.o \
+       radeon_encoders.o radeon_display.o radeon_cursor.o radeon_i2c.o \
+       radeon_clocks.o radeon_fb.o radeon_gem.o radeon_ring.o radeon_irq_kms.o \
+       radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \
+       rs400.o rs600.o rs690.o rv515.o r520.o r600.o rs780.o rv770.o
 
 radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
 
diff --git a/drivers/gpu/drm/radeon/ObjectID.h b/drivers/gpu/drm/radeon/ObjectID.h
new file mode 100644 (file)
index 0000000..6d0183c
--- /dev/null
@@ -0,0 +1,578 @@
+/*
+* Copyright 2006-2007 Advanced Micro Devices, Inc.
+*
+* Permission is hereby granted, free of charge, to any person obtaining a
+* copy of this software and associated documentation files (the "Software"),
+* to deal in the Software without restriction, including without limitation
+* the rights to use, copy, modify, merge, publish, distribute, sublicense,
+* and/or sell copies of the Software, and to permit persons to whom the
+* Software is furnished to do so, subject to the following conditions:
+*
+* The above copyright notice and this permission notice shall be included in
+* all copies or substantial portions of the Software.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+* OTHER DEALINGS IN THE SOFTWARE.
+*/
+/* based on stg/asic_reg/drivers/inc/asic_reg/ObjectID.h ver 23 */
+
+#ifndef _OBJECTID_H
+#define _OBJECTID_H
+
+#if defined(_X86_)
+#pragma pack(1)
+#endif
+
+/****************************************************/
+/* Graphics Object Type Definition                  */
+/****************************************************/
+#define GRAPH_OBJECT_TYPE_NONE                    0x0
+#define GRAPH_OBJECT_TYPE_GPU                     0x1
+#define GRAPH_OBJECT_TYPE_ENCODER                 0x2
+#define GRAPH_OBJECT_TYPE_CONNECTOR               0x3
+#define GRAPH_OBJECT_TYPE_ROUTER                  0x4
+/* deleted */
+
+/****************************************************/
+/* Encoder Object ID Definition                     */
+/****************************************************/
+#define ENCODER_OBJECT_ID_NONE                    0x00
+
+/* Radeon Class Display Hardware */
+#define ENCODER_OBJECT_ID_INTERNAL_LVDS           0x01
+#define ENCODER_OBJECT_ID_INTERNAL_TMDS1          0x02
+#define ENCODER_OBJECT_ID_INTERNAL_TMDS2          0x03
+#define ENCODER_OBJECT_ID_INTERNAL_DAC1           0x04
+#define ENCODER_OBJECT_ID_INTERNAL_DAC2           0x05 /* TV/CV DAC */
+#define ENCODER_OBJECT_ID_INTERNAL_SDVOA          0x06
+#define ENCODER_OBJECT_ID_INTERNAL_SDVOB          0x07
+
+/* External Third Party Encoders */
+#define ENCODER_OBJECT_ID_SI170B                  0x08
+#define ENCODER_OBJECT_ID_CH7303                  0x09
+#define ENCODER_OBJECT_ID_CH7301                  0x0A
+#define ENCODER_OBJECT_ID_INTERNAL_DVO1           0x0B /* This belongs to Radeon Class Display Hardware */
+#define ENCODER_OBJECT_ID_EXTERNAL_SDVOA          0x0C
+#define ENCODER_OBJECT_ID_EXTERNAL_SDVOB          0x0D
+#define ENCODER_OBJECT_ID_TITFP513                0x0E
+#define ENCODER_OBJECT_ID_INTERNAL_LVTM1          0x0F /* not used for Radeon */
+#define ENCODER_OBJECT_ID_VT1623                  0x10
+#define ENCODER_OBJECT_ID_HDMI_SI1930             0x11
+#define ENCODER_OBJECT_ID_HDMI_INTERNAL           0x12
+/* Kaleidoscope (KLDSCP) Class Display Hardware (internal) */
+#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1   0x13
+#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1    0x14
+#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1    0x15
+#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2    0x16 /* Shared with CV/TV and CRT */
+#define ENCODER_OBJECT_ID_SI178                   0x17 /* External TMDS (dual link, no HDCP.) */
+#define ENCODER_OBJECT_ID_MVPU_FPGA               0x18 /* MVPU FPGA chip */
+#define ENCODER_OBJECT_ID_INTERNAL_DDI            0x19
+#define ENCODER_OBJECT_ID_VT1625                  0x1A
+#define ENCODER_OBJECT_ID_HDMI_SI1932             0x1B
+#define ENCODER_OBJECT_ID_DP_AN9801               0x1C
+#define ENCODER_OBJECT_ID_DP_DP501                0x1D
+#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY         0x1E
+#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA   0x1F
+#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY1        0x20
+#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY2        0x21
+
+#define ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO    0xFF
+
+/****************************************************/
+/* Connector Object ID Definition                   */
+/****************************************************/
+#define CONNECTOR_OBJECT_ID_NONE                  0x00
+#define CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I     0x01
+#define CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I       0x02
+#define CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D     0x03
+#define CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D       0x04
+#define CONNECTOR_OBJECT_ID_VGA                   0x05
+#define CONNECTOR_OBJECT_ID_COMPOSITE             0x06
+#define CONNECTOR_OBJECT_ID_SVIDEO                0x07
+#define CONNECTOR_OBJECT_ID_YPbPr                 0x08
+#define CONNECTOR_OBJECT_ID_D_CONNECTOR           0x09
+#define CONNECTOR_OBJECT_ID_9PIN_DIN              0x0A /* Supports both CV & TV */
+#define CONNECTOR_OBJECT_ID_SCART                 0x0B
+#define CONNECTOR_OBJECT_ID_HDMI_TYPE_A           0x0C
+#define CONNECTOR_OBJECT_ID_HDMI_TYPE_B           0x0D
+#define CONNECTOR_OBJECT_ID_LVDS                  0x0E
+#define CONNECTOR_OBJECT_ID_7PIN_DIN              0x0F
+#define CONNECTOR_OBJECT_ID_PCIE_CONNECTOR        0x10
+#define CONNECTOR_OBJECT_ID_CROSSFIRE             0x11
+#define CONNECTOR_OBJECT_ID_HARDCODE_DVI          0x12
+#define CONNECTOR_OBJECT_ID_DISPLAYPORT           0x13
+
+/* deleted */
+
+/****************************************************/
+/* Router Object ID Definition                      */
+/****************************************************/
+#define ROUTER_OBJECT_ID_NONE                     0x00
+#define ROUTER_OBJECT_ID_I2C_EXTENDER_CNTL        0x01
+
+/****************************************************/
+/* Graphics Object ENUM ID Definition               */
+/****************************************************/
+#define GRAPH_OBJECT_ENUM_ID1                     0x01
+#define GRAPH_OBJECT_ENUM_ID2                     0x02
+#define GRAPH_OBJECT_ENUM_ID3                     0x03
+#define GRAPH_OBJECT_ENUM_ID4                     0x04
+#define GRAPH_OBJECT_ENUM_ID5                     0x05
+#define GRAPH_OBJECT_ENUM_ID6                     0x06
+
+/****************************************************/
+/* Graphics Object ID Bit definition                */
+/****************************************************/
+#define OBJECT_ID_MASK                            0x00FF
+#define ENUM_ID_MASK                              0x0700
+#define RESERVED1_ID_MASK                         0x0800
+#define OBJECT_TYPE_MASK                          0x7000
+#define RESERVED2_ID_MASK                         0x8000
+
+#define OBJECT_ID_SHIFT                           0x00
+#define ENUM_ID_SHIFT                             0x08
+#define OBJECT_TYPE_SHIFT                         0x0C
+
+/****************************************************/
+/* Graphics Object family definition                */
+/****************************************************/
+#define CONSTRUCTOBJECTFAMILYID(GRAPHICS_OBJECT_TYPE, GRAPHICS_OBJECT_ID) \
+       (GRAPHICS_OBJECT_TYPE << OBJECT_TYPE_SHIFT | \
+        GRAPHICS_OBJECT_ID   << OBJECT_ID_SHIFT)
+/****************************************************/
+/* GPU Object ID definition - Shared with BIOS      */
+/****************************************************/
+#define GPU_ENUM_ID1   (GRAPH_OBJECT_TYPE_GPU << OBJECT_TYPE_SHIFT |\
+                        GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT)
+
+/****************************************************/
+/* Encoder Object ID definition - Shared with BIOS  */
+/****************************************************/
+/*
+#define ENCODER_INTERNAL_LVDS_ENUM_ID1        0x2101
+#define ENCODER_INTERNAL_TMDS1_ENUM_ID1       0x2102
+#define ENCODER_INTERNAL_TMDS2_ENUM_ID1       0x2103
+#define ENCODER_INTERNAL_DAC1_ENUM_ID1        0x2104
+#define ENCODER_INTERNAL_DAC2_ENUM_ID1        0x2105
+#define ENCODER_INTERNAL_SDVOA_ENUM_ID1       0x2106
+#define ENCODER_INTERNAL_SDVOB_ENUM_ID1       0x2107
+#define ENCODER_SIL170B_ENUM_ID1              0x2108
+#define ENCODER_CH7303_ENUM_ID1               0x2109
+#define ENCODER_CH7301_ENUM_ID1               0x210A
+#define ENCODER_INTERNAL_DVO1_ENUM_ID1        0x210B
+#define ENCODER_EXTERNAL_SDVOA_ENUM_ID1       0x210C
+#define ENCODER_EXTERNAL_SDVOB_ENUM_ID1       0x210D
+#define ENCODER_TITFP513_ENUM_ID1             0x210E
+#define ENCODER_INTERNAL_LVTM1_ENUM_ID1       0x210F
+#define ENCODER_VT1623_ENUM_ID1               0x2110
+#define ENCODER_HDMI_SI1930_ENUM_ID1          0x2111
+#define ENCODER_HDMI_INTERNAL_ENUM_ID1        0x2112
+#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1   0x2113
+#define ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1    0x2114
+#define ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1    0x2115
+#define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1    0x2116
+#define ENCODER_SI178_ENUM_ID1                   0x2117
+#define ENCODER_MVPU_FPGA_ENUM_ID1               0x2118
+#define ENCODER_INTERNAL_DDI_ENUM_ID1            0x2119
+#define ENCODER_VT1625_ENUM_ID1                  0x211A
+#define ENCODER_HDMI_SI1932_ENUM_ID1             0x211B
+#define ENCODER_ENCODER_DP_AN9801_ENUM_ID1       0x211C
+#define ENCODER_DP_DP501_ENUM_ID1                0x211D
+#define ENCODER_INTERNAL_UNIPHY_ENUM_ID1         0x211E
+*/
+#define ENCODER_INTERNAL_LVDS_ENUM_ID1 \
+       (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+        ENCODER_OBJECT_ID_INTERNAL_LVDS << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_TMDS1_ENUM_ID1 \
+       (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+        ENCODER_OBJECT_ID_INTERNAL_TMDS1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_TMDS2_ENUM_ID1 \
+       (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+        ENCODER_OBJECT_ID_INTERNAL_TMDS2 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_DAC1_ENUM_ID1 \
+       (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+        ENCODER_OBJECT_ID_INTERNAL_DAC1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_DAC2_ENUM_ID1 \
+       (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+        ENCODER_OBJECT_ID_INTERNAL_DAC2 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_SDVOA_ENUM_ID1 \
+       (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+        ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_SDVOA_ENUM_ID2 \
+       (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+        ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_SDVOB_ENUM_ID1 \
+       (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+        ENCODER_OBJECT_ID_INTERNAL_SDVOB << OBJECT_ID_SHIFT)
+
+#define ENCODER_SIL170B_ENUM_ID1 \
+       (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+        ENCODER_OBJECT_ID_SI170B << OBJECT_ID_SHIFT)
+
+#define ENCODER_CH7303_ENUM_ID1 \
+       (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+        ENCODER_OBJECT_ID_CH7303 << OBJECT_ID_SHIFT)
+
+#define ENCODER_CH7301_ENUM_ID1 \
+       (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+        ENCODER_OBJECT_ID_CH7301 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_DVO1_ENUM_ID1 \
+       (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+        ENCODER_OBJECT_ID_INTERNAL_DVO1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_EXTERNAL_SDVOA_ENUM_ID1 \
+       (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+        ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT)
+
+#define ENCODER_EXTERNAL_SDVOA_ENUM_ID2 \
+       (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+        ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT)
+
+#define ENCODER_EXTERNAL_SDVOB_ENUM_ID1 \
+       (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+        ENCODER_OBJECT_ID_EXTERNAL_SDVOB << OBJECT_ID_SHIFT)
+
+#define ENCODER_TITFP513_ENUM_ID1 \
+       (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+        ENCODER_OBJECT_ID_TITFP513 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_LVTM1_ENUM_ID1 \
+       (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+        ENCODER_OBJECT_ID_INTERNAL_LVTM1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_VT1623_ENUM_ID1 \
+       (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+        ENCODER_OBJECT_ID_VT1623 << OBJECT_ID_SHIFT)
+
+#define ENCODER_HDMI_SI1930_ENUM_ID1 \
+       (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+        ENCODER_OBJECT_ID_HDMI_SI1930 << OBJECT_ID_SHIFT)
+
+#define ENCODER_HDMI_INTERNAL_ENUM_ID1 \
+       (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+        ENCODER_OBJECT_ID_HDMI_INTERNAL << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1 \
+       (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+        ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID2 \
+       (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+        ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1 \
+       (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+        ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1 \
+       (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+        ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1 \
+       (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+        ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2 << OBJECT_ID_SHIFT) /* Shared with CV/TV and CRT */
+
+#define ENCODER_SI178_ENUM_ID1  \
+       (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+        ENCODER_OBJECT_ID_SI178 << OBJECT_ID_SHIFT)
+
+#define ENCODER_MVPU_FPGA_ENUM_ID1 \
+       (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+        ENCODER_OBJECT_ID_MVPU_FPGA << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_DDI_ENUM_ID1 \
+       (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+        ENCODER_OBJECT_ID_INTERNAL_DDI << OBJECT_ID_SHIFT)
+
+#define ENCODER_VT1625_ENUM_ID1 \
+       (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+        ENCODER_OBJECT_ID_VT1625 << OBJECT_ID_SHIFT)
+
+#define ENCODER_HDMI_SI1932_ENUM_ID1 \
+       (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+        ENCODER_OBJECT_ID_HDMI_SI1932 << OBJECT_ID_SHIFT)
+
+#define ENCODER_DP_DP501_ENUM_ID1 \
+       (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+        ENCODER_OBJECT_ID_DP_DP501 << OBJECT_ID_SHIFT)
+
+#define ENCODER_DP_AN9801_ENUM_ID1 \
+       (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+        ENCODER_OBJECT_ID_DP_AN9801 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY_ENUM_ID1 \
+       (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+        ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY_ENUM_ID2 \
+       (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+        ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_KLDSCP_LVTMA_ENUM_ID1 \
+       (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+        ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID1 \
+       (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+        ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID2 \
+       (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+        ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID1 \
+       (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+        ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID2 \
+       (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+        ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT)
+
+#define ENCODER_GENERAL_EXTERNAL_DVO_ENUM_ID1 \
+       (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+        ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO << OBJECT_ID_SHIFT)
+
+/****************************************************/
+/* Connector Object ID definition - Shared with BIOS */
+/****************************************************/
+/*
+#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID1        0x3101
+#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID1          0x3102
+#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID1        0x3103
+#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1          0x3104
+#define CONNECTOR_VGA_ENUM_ID1                      0x3105
+#define CONNECTOR_COMPOSITE_ENUM_ID1                0x3106
+#define CONNECTOR_SVIDEO_ENUM_ID1                   0x3107
+#define CONNECTOR_YPbPr_ENUM_ID1                    0x3108
+#define CONNECTOR_D_CONNECTORE_ENUM_ID1             0x3109
+#define CONNECTOR_9PIN_DIN_ENUM_ID1                 0x310A
+#define CONNECTOR_SCART_ENUM_ID1                    0x310B
+#define CONNECTOR_HDMI_TYPE_A_ENUM_ID1              0x310C
+#define CONNECTOR_HDMI_TYPE_B_ENUM_ID1              0x310D
+#define CONNECTOR_LVDS_ENUM_ID1                     0x310E
+#define CONNECTOR_7PIN_DIN_ENUM_ID1                 0x310F
+#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID1           0x3110
+*/
+#define CONNECTOR_LVDS_ENUM_ID1 \
+       (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+        CONNECTOR_OBJECT_ID_LVDS << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID1 \
+       (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+        CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID2 \
+       (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+        CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID1 \
+       (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+        CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID2 \
+       (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+        CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID1 \
+       (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+        CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID2 \
+       (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+        CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1 \
+       (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+        CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_VGA_ENUM_ID1 \
+       (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+        CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_VGA_ENUM_ID2 \
+       (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+        CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_COMPOSITE_ENUM_ID1 \
+       (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+        CONNECTOR_OBJECT_ID_COMPOSITE << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SVIDEO_ENUM_ID1 \
+       (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+        CONNECTOR_OBJECT_ID_SVIDEO << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_YPbPr_ENUM_ID1 \
+       (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+        CONNECTOR_OBJECT_ID_YPbPr << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_D_CONNECTOR_ENUM_ID1 \
+       (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+        CONNECTOR_OBJECT_ID_D_CONNECTOR << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_9PIN_DIN_ENUM_ID1 \
+       (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+        CONNECTOR_OBJECT_ID_9PIN_DIN << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SCART_ENUM_ID1 \
+       (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+        CONNECTOR_OBJECT_ID_SCART << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HDMI_TYPE_A_ENUM_ID1 \
+       (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+        CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HDMI_TYPE_B_ENUM_ID1 \
+       (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+        CONNECTOR_OBJECT_ID_HDMI_TYPE_B << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_7PIN_DIN_ENUM_ID1 \
+       (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+        CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID1 \
+       (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+        CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID2 \
+       (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+        CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_CROSSFIRE_ENUM_ID1 \
+       (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+        CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_CROSSFIRE_ENUM_ID2 \
+       (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+        CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HARDCODE_DVI_ENUM_ID1 \
+       (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+        CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HARDCODE_DVI_ENUM_ID2 \
+       (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+        CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID1 \
+       (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+        CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID2 \
+       (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+        CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID3 \
+       (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
+        CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID4 \
+       (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
+        CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+/****************************************************/
+/* Router Object ID definition - Shared with BIOS   */
+/****************************************************/
+#define ROUTER_I2C_EXTENDER_CNTL_ENUM_ID1 \
+       (GRAPH_OBJECT_TYPE_ROUTER << OBJECT_TYPE_SHIFT |\
+        GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+        ROUTER_OBJECT_ID_I2C_EXTENDER_CNTL << OBJECT_ID_SHIFT)
+
+/* deleted */
+
+/****************************************************/
+/* Object Cap definition - Shared with BIOS         */
+/****************************************************/
+#define GRAPHICS_OBJECT_CAP_I2C                 0x00000001L
+#define GRAPHICS_OBJECT_CAP_TABLE_ID            0x00000002L
+
+#define GRAPHICS_OBJECT_I2CCOMMAND_TABLE_ID                   0x01
+#define GRAPHICS_OBJECT_HOTPLUGDETECTIONINTERUPT_TABLE_ID     0x02
+#define GRAPHICS_OBJECT_ENCODER_OUTPUT_PROTECTION_TABLE_ID    0x03
+
+#if defined(_X86_)
+#pragma pack()
+#endif
+
+#endif /* _OBJECTID_H */
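As a quick sanity check on the bit layout above (OBJECT_ID in bits 7:0, ENUM_ID in bits 10:8, OBJECT_TYPE in bits 14:12), composing the fields reproduces the 16-bit values in the commented legacy table — for example ENCODER_INTERNAL_LVDS_ENUM_ID1 comes out as 0x2101. A tiny standalone check, not part of the header:

/*
 * Standalone check of how OBJECT_TYPE / ENUM_ID / OBJECT_ID compose into a
 * 16-bit object ID.  The expected value 0x2101 comes from the commented
 * legacy table in ObjectID.h above.
 */
#include <stdio.h>

#define OBJECT_ID_SHIFT    0x00
#define ENUM_ID_SHIFT      0x08
#define OBJECT_TYPE_SHIFT  0x0C

int main(void)
{
	unsigned type    = 0x2;  /* GRAPH_OBJECT_TYPE_ENCODER       */
	unsigned enum_id = 0x01; /* GRAPH_OBJECT_ENUM_ID1           */
	unsigned obj_id  = 0x01; /* ENCODER_OBJECT_ID_INTERNAL_LVDS */

	unsigned id = (type << OBJECT_TYPE_SHIFT) |
		      (enum_id << ENUM_ID_SHIFT) |
		      (obj_id << OBJECT_ID_SHIFT);

	printf("ENCODER_INTERNAL_LVDS_ENUM_ID1 = 0x%04X\n", id); /* 0x2101 */
	return 0;
}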
diff --git a/drivers/gpu/drm/radeon/atom-bits.h b/drivers/gpu/drm/radeon/atom-bits.h
new file mode 100644 (file)
index 0000000..e8fae5c
--- /dev/null
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Stanislaw Skowronek
+ */
+
+#ifndef ATOM_BITS_H
+#define ATOM_BITS_H
+
+static inline uint8_t get_u8(void *bios, int ptr)
+{
+    return ((unsigned char *)bios)[ptr];
+}
+#define U8(ptr) get_u8(ctx->ctx->bios, (ptr))
+#define CU8(ptr) get_u8(ctx->bios, (ptr))
+static inline uint16_t get_u16(void *bios, int ptr)
+{
+    return get_u8(bios, ptr)|(((uint16_t)get_u8(bios, ptr+1))<<8);
+}
+#define U16(ptr) get_u16(ctx->ctx->bios, (ptr))
+#define CU16(ptr) get_u16(ctx->bios, (ptr))
+static inline uint32_t get_u32(void *bios, int ptr)
+{
+    return get_u16(bios, ptr)|(((uint32_t)get_u16(bios, ptr+2))<<16);
+}
+#define U32(ptr) get_u32(ctx->ctx->bios, (ptr))
+#define CU32(ptr) get_u32(ctx->bios, (ptr))
+#define CSTR(ptr) (((char *)(ctx->bios))+(ptr))
+
+#endif
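The helpers above assemble multi-byte fields from the AtomBIOS image one byte at a time, so values are read little-endian regardless of host byte order; the U*/CU* macros only differ in which context pointer supplies the bios image. A self-contained demo of the same readers:

/*
 * Standalone demo of the byte readers above: get_u16()/get_u32() assemble
 * little-endian values out of individual BIOS image bytes, so table offsets
 * come out the same on any host endianness.
 */
#include <stdint.h>
#include <stdio.h>

static inline uint8_t get_u8(void *bios, int ptr)
{
	return ((unsigned char *)bios)[ptr];
}

static inline uint16_t get_u16(void *bios, int ptr)
{
	return get_u8(bios, ptr) | (((uint16_t)get_u8(bios, ptr + 1)) << 8);
}

static inline uint32_t get_u32(void *bios, int ptr)
{
	return get_u16(bios, ptr) | (((uint32_t)get_u16(bios, ptr + 2)) << 16);
}

int main(void)
{
	unsigned char bios[] = { 0x78, 0x56, 0x34, 0x12 };

	printf("u16 at 0 = 0x%04X\n", (unsigned)get_u16(bios, 0)); /* 0x5678     */
	printf("u32 at 0 = 0x%08X\n", (unsigned)get_u32(bios, 0)); /* 0x12345678 */
	return 0;
}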
diff --git a/drivers/gpu/drm/radeon/atom-names.h b/drivers/gpu/drm/radeon/atom-names.h
new file mode 100644 (file)
index 0000000..6f907a5
--- /dev/null
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Stanislaw Skowronek
+ */
+
+#ifndef ATOM_NAMES_H
+#define ATOM_NAMES_H
+
+#include "atom.h"
+
+#ifdef ATOM_DEBUG
+
+#define ATOM_OP_NAMES_CNT 123
+static char *atom_op_names[ATOM_OP_NAMES_CNT] = {
+"RESERVED", "MOVE_REG", "MOVE_PS", "MOVE_WS", "MOVE_FB", "MOVE_PLL",
+"MOVE_MC", "AND_REG", "AND_PS", "AND_WS", "AND_FB", "AND_PLL", "AND_MC",
+"OR_REG", "OR_PS", "OR_WS", "OR_FB", "OR_PLL", "OR_MC", "SHIFT_LEFT_REG",
+"SHIFT_LEFT_PS", "SHIFT_LEFT_WS", "SHIFT_LEFT_FB", "SHIFT_LEFT_PLL",
+"SHIFT_LEFT_MC", "SHIFT_RIGHT_REG", "SHIFT_RIGHT_PS", "SHIFT_RIGHT_WS",
+"SHIFT_RIGHT_FB", "SHIFT_RIGHT_PLL", "SHIFT_RIGHT_MC", "MUL_REG",
+"MUL_PS", "MUL_WS", "MUL_FB", "MUL_PLL", "MUL_MC", "DIV_REG", "DIV_PS",
+"DIV_WS", "DIV_FB", "DIV_PLL", "DIV_MC", "ADD_REG", "ADD_PS", "ADD_WS",
+"ADD_FB", "ADD_PLL", "ADD_MC", "SUB_REG", "SUB_PS", "SUB_WS", "SUB_FB",
+"SUB_PLL", "SUB_MC", "SET_ATI_PORT", "SET_PCI_PORT", "SET_SYS_IO_PORT",
+"SET_REG_BLOCK", "SET_FB_BASE", "COMPARE_REG", "COMPARE_PS",
+"COMPARE_WS", "COMPARE_FB", "COMPARE_PLL", "COMPARE_MC", "SWITCH",
+"JUMP", "JUMP_EQUAL", "JUMP_BELOW", "JUMP_ABOVE", "JUMP_BELOW_OR_EQUAL",
+"JUMP_ABOVE_OR_EQUAL", "JUMP_NOT_EQUAL", "TEST_REG", "TEST_PS", "TEST_WS",
+"TEST_FB", "TEST_PLL", "TEST_MC", "DELAY_MILLISEC", "DELAY_MICROSEC",
+"CALL_TABLE", "REPEAT", "CLEAR_REG", "CLEAR_PS", "CLEAR_WS", "CLEAR_FB",
+"CLEAR_PLL", "CLEAR_MC", "NOP", "EOT", "MASK_REG", "MASK_PS", "MASK_WS",
+"MASK_FB", "MASK_PLL", "MASK_MC", "POST_CARD", "BEEP", "SAVE_REG",
+"RESTORE_REG", "SET_DATA_BLOCK", "XOR_REG", "XOR_PS", "XOR_WS", "XOR_FB",
+"XOR_PLL", "XOR_MC", "SHL_REG", "SHL_PS", "SHL_WS", "SHL_FB", "SHL_PLL",
+"SHL_MC", "SHR_REG", "SHR_PS", "SHR_WS", "SHR_FB", "SHR_PLL", "SHR_MC",
+"DEBUG", "CTB_DS",
+};
+
+#define ATOM_TABLE_NAMES_CNT 74
+static char *atom_table_names[ATOM_TABLE_NAMES_CNT] = {
+"ASIC_Init", "GetDisplaySurfaceSize", "ASIC_RegistersInit",
+"VRAM_BlockVenderDetection", "SetClocksRatio", "MemoryControllerInit",
+"GPIO_PinInit", "MemoryParamAdjust", "DVOEncoderControl",
+"GPIOPinControl", "SetEngineClock", "SetMemoryClock", "SetPixelClock",
+"DynamicClockGating", "ResetMemoryDLL", "ResetMemoryDevice",
+"MemoryPLLInit", "EnableMemorySelfRefresh", "AdjustMemoryController",
+"EnableASIC_StaticPwrMgt", "ASIC_StaticPwrMgtStatusChange",
+"DAC_LoadDetection", "TMDS2EncoderControl", "LCD1OutputControl",
+"DAC1EncoderControl", "DAC2EncoderControl", "DVOOutputControl",
+"CV1OutputControl", "SetCRTC_DPM_State", "TVEncoderControl",
+"TMDS1EncoderControl", "LVDSEncoderControl", "TV1OutputControl",
+"EnableScaler", "BlankCRTC", "EnableCRTC", "GetPixelClock",
+"EnableVGA_Render", "EnableVGA_Access", "SetCRTC_Timing",
+"SetCRTC_OverScan", "SetCRTC_Replication", "SelectCRTC_Source",
+"EnableGraphSurfaces", "UpdateCRTC_DoubleBufferRegisters",
+"LUT_AutoFill", "EnableHW_IconCursor", "GetMemoryClock",
+"GetEngineClock", "SetCRTC_UsingDTDTiming", "TVBootUpStdPinDetection",
+"DFP2OutputControl", "VRAM_BlockDetectionByStrap", "MemoryCleanUp",
+"ReadEDIDFromHWAssistedI2C", "WriteOneByteToHWAssistedI2C",
+"ReadHWAssistedI2CStatus", "SpeedFanControl", "PowerConnectorDetection",
+"MC_Synchronization", "ComputeMemoryEnginePLL", "MemoryRefreshConversion",
+"VRAM_GetCurrentInfoBlock", "DynamicMemorySettings", "MemoryTraining",
+"EnableLVDS_SS", "DFP1OutputControl", "SetVoltage", "CRT1OutputControl",
+"CRT2OutputControl", "SetupHWAssistedI2CStatus", "ClockSource",
+"MemoryDeviceInit", "EnableYUV",
+};
+
+#define ATOM_IO_NAMES_CNT 5
+static char *atom_io_names[ATOM_IO_NAMES_CNT] = {
+"MM", "PLL", "MC", "PCIE", "PCIE PORT",
+};
+
+#else
+
+#define ATOM_OP_NAMES_CNT 0
+#define ATOM_TABLE_NAMES_CNT 0
+#define ATOM_IO_NAMES_CNT 0
+
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/radeon/atom-types.h b/drivers/gpu/drm/radeon/atom-types.h
new file mode 100644 (file)
index 0000000..1125b86
--- /dev/null
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Dave Airlie
+ */
+
+#ifndef ATOM_TYPES_H
+#define ATOM_TYPES_H
+
+/* sync atom types to kernel types */
+
+typedef uint16_t USHORT;
+typedef uint32_t ULONG;
+typedef uint8_t UCHAR;
+
+
+#ifndef ATOM_BIG_ENDIAN
+#if defined(__BIG_ENDIAN)
+#define ATOM_BIG_ENDIAN 1
+#else
+#define ATOM_BIG_ENDIAN 0
+#endif
+#endif
+#endif
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
new file mode 100644 (file)
index 0000000..901befe
--- /dev/null
@@ -0,0 +1,1215 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Stanislaw Skowronek
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+
+#define ATOM_DEBUG
+
+#include "atom.h"
+#include "atom-names.h"
+#include "atom-bits.h"
+
+#define ATOM_COND_ABOVE                0
+#define ATOM_COND_ABOVEOREQUAL 1
+#define ATOM_COND_ALWAYS       2
+#define ATOM_COND_BELOW                3
+#define ATOM_COND_BELOWOREQUAL 4
+#define ATOM_COND_EQUAL                5
+#define ATOM_COND_NOTEQUAL     6
+
+#define ATOM_PORT_ATI  0
+#define ATOM_PORT_PCI  1
+#define ATOM_PORT_SYSIO        2
+
+#define ATOM_UNIT_MICROSEC     0
+#define ATOM_UNIT_MILLISEC     1
+
+#define PLL_INDEX      2
+#define PLL_DATA       3
+
+typedef struct {
+       struct atom_context *ctx;
+
+       uint32_t *ps, *ws;
+       int ps_shift;
+       uint16_t start;
+} atom_exec_context;
+
+int atom_debug = 0;
+void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params);
+
+static uint32_t atom_arg_mask[8] =
+    { 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000,
+0xFF000000 };
+static int atom_arg_shift[8] = { 0, 0, 8, 16, 0, 8, 16, 24 };
+
+static int atom_dst_to_src[8][4] = {
+       /* translate destination alignment field to the source alignment encoding */
+       {0, 0, 0, 0},
+       {1, 2, 3, 0},
+       {1, 2, 3, 0},
+       {1, 2, 3, 0},
+       {4, 5, 6, 7},
+       {4, 5, 6, 7},
+       {4, 5, 6, 7},
+       {4, 5, 6, 7},
+};
+static int atom_def_dst[8] = { 0, 0, 1, 2, 0, 1, 2, 3 };
+
+static int debug_depth = 0;
+#ifdef ATOM_DEBUG
+static void debug_print_spaces(int n)
+{
+       while (n--)
+               printk("   ");
+}
+
+#define DEBUG(...) do if (atom_debug) { printk(KERN_DEBUG __VA_ARGS__); } while (0)
+#define SDEBUG(...) do if (atom_debug) { printk(KERN_DEBUG); debug_print_spaces(debug_depth); printk(__VA_ARGS__); } while (0)
+#else
+#define DEBUG(...) do { } while (0)
+#define SDEBUG(...) do { } while (0)
+#endif
+
+static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
+                                uint32_t index, uint32_t data)
+{
+       uint32_t temp = 0xCDCDCDCD;
+       while (1)
+               switch (CU8(base)) {
+               case ATOM_IIO_NOP:
+                       base++;
+                       break;
+               case ATOM_IIO_READ:
+                       temp = ctx->card->reg_read(ctx->card, CU16(base + 1));
+                       base += 3;
+                       break;
+               case ATOM_IIO_WRITE:
+                       ctx->card->reg_write(ctx->card, CU16(base + 1), temp);
+                       base += 3;
+                       break;
+               case ATOM_IIO_CLEAR:
+                       temp &=
+                           ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
+                             CU8(base + 2));
+                       base += 3;
+                       break;
+               case ATOM_IIO_SET:
+                       temp |=
+                           (0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base +
+                                                                       2);
+                       base += 3;
+                       break;
+               case ATOM_IIO_MOVE_INDEX:
+                       temp &=
+                           ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
+                             CU8(base + 2));
+                       temp |=
+                           ((index >> CU8(base + 2)) &
+                            (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base +
+                                                                         3);
+                       base += 4;
+                       break;
+               case ATOM_IIO_MOVE_DATA:
+                       temp &=
+                           ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
+                             CU8(base + 2));
+                       temp |=
+                           ((data >> CU8(base + 2)) &
+                            (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base +
+                                                                         3);
+                       base += 4;
+                       break;
+               case ATOM_IIO_MOVE_ATTR:
+                       temp &=
+                           ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
+                             CU8(base + 2));
+                       temp |=
+                           ((ctx->
+                             io_attr >> CU8(base + 2)) & (0xFFFFFFFF >> (32 -
+                                                                         CU8
+                                                                         (base
+                                                                          +
+                                                                          1))))
+                           << CU8(base + 3);
+                       base += 4;
+                       break;
+               case ATOM_IIO_END:
+                       return temp;
+               default:
+                       printk(KERN_INFO "Unknown IIO opcode.\n");
+                       return 0;
+               }
+}
+
+static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
+                                int *ptr, uint32_t *saved, int print)
+{
+       uint32_t idx, val = 0xCDCDCDCD, align, arg;
+       struct atom_context *gctx = ctx->ctx;
+       arg = attr & 7;
+       align = (attr >> 3) & 7;
+       switch (arg) {
+       case ATOM_ARG_REG:
+               idx = U16(*ptr);
+               (*ptr) += 2;
+               if (print)
+                       DEBUG("REG[0x%04X]", idx);
+               idx += gctx->reg_block;
+               switch (gctx->io_mode) {
+               case ATOM_IO_MM:
+                       val = gctx->card->reg_read(gctx->card, idx);
+                       break;
+               case ATOM_IO_PCI:
+                       printk(KERN_INFO
+                              "PCI registers are not implemented.\n");
+                       return 0;
+               case ATOM_IO_SYSIO:
+                       printk(KERN_INFO
+                              "SYSIO registers are not implemented.\n");
+                       return 0;
+               default:
+                       if (!(gctx->io_mode & 0x80)) {
+                               printk(KERN_INFO "Bad IO mode.\n");
+                               return 0;
+                       }
+                       if (!gctx->iio[gctx->io_mode & 0x7F]) {
+                               printk(KERN_INFO
+                                      "Undefined indirect IO read method %d.\n",
+                                      gctx->io_mode & 0x7F);
+                               return 0;
+                       }
+                       val =
+                           atom_iio_execute(gctx,
+                                            gctx->iio[gctx->io_mode & 0x7F],
+                                            idx, 0);
+               }
+               break;
+       case ATOM_ARG_PS:
+               idx = U8(*ptr);
+               (*ptr)++;
+               val = le32_to_cpu(ctx->ps[idx]);
+               if (print)
+                       DEBUG("PS[0x%02X,0x%04X]", idx, val);
+               break;
+       case ATOM_ARG_WS:
+               idx = U8(*ptr);
+               (*ptr)++;
+               if (print)
+                       DEBUG("WS[0x%02X]", idx);
+               switch (idx) {
+               case ATOM_WS_QUOTIENT:
+                       val = gctx->divmul[0];
+                       break;
+               case ATOM_WS_REMAINDER:
+                       val = gctx->divmul[1];
+                       break;
+               case ATOM_WS_DATAPTR:
+                       val = gctx->data_block;
+                       break;
+               case ATOM_WS_SHIFT:
+                       val = gctx->shift;
+                       break;
+               case ATOM_WS_OR_MASK:
+                       val = 1 << gctx->shift;
+                       break;
+               case ATOM_WS_AND_MASK:
+                       val = ~(1 << gctx->shift);
+                       break;
+               case ATOM_WS_FB_WINDOW:
+                       val = gctx->fb_base;
+                       break;
+               case ATOM_WS_ATTRIBUTES:
+                       val = gctx->io_attr;
+                       break;
+               default:
+                       val = ctx->ws[idx];
+               }
+               break;
+       case ATOM_ARG_ID:
+               idx = U16(*ptr);
+               (*ptr) += 2;
+               if (print) {
+                       if (gctx->data_block)
+                               DEBUG("ID[0x%04X+%04X]", idx, gctx->data_block);
+                       else
+                               DEBUG("ID[0x%04X]", idx);
+               }
+               val = U32(idx + gctx->data_block);
+               break;
+       case ATOM_ARG_FB:
+               idx = U8(*ptr);
+               (*ptr)++;
+               if (print)
+                       DEBUG("FB[0x%02X]", idx);
+               printk(KERN_INFO "FB access is not implemented.\n");
+               return 0;
+       case ATOM_ARG_IMM:
+               switch (align) {
+               case ATOM_SRC_DWORD:
+                       val = U32(*ptr);
+                       (*ptr) += 4;
+                       if (print)
+                               DEBUG("IMM 0x%08X\n", val);
+                       return val;
+               case ATOM_SRC_WORD0:
+               case ATOM_SRC_WORD8:
+               case ATOM_SRC_WORD16:
+                       val = U16(*ptr);
+                       (*ptr) += 2;
+                       if (print)
+                               DEBUG("IMM 0x%04X\n", val);
+                       return val;
+               case ATOM_SRC_BYTE0:
+               case ATOM_SRC_BYTE8:
+               case ATOM_SRC_BYTE16:
+               case ATOM_SRC_BYTE24:
+                       val = U8(*ptr);
+                       (*ptr)++;
+                       if (print)
+                               DEBUG("IMM 0x%02X\n", val);
+                       return val;
+               }
+               return 0;
+       case ATOM_ARG_PLL:
+               idx = U8(*ptr);
+               (*ptr)++;
+               if (print)
+                       DEBUG("PLL[0x%02X]", idx);
+               val = gctx->card->pll_read(gctx->card, idx);
+               break;
+       case ATOM_ARG_MC:
+               idx = U8(*ptr);
+               (*ptr)++;
+               if (print)
+                       DEBUG("MC[0x%02X]", idx);
+               val = gctx->card->mc_read(gctx->card, idx);
+               break;
+       }
+       if (saved)
+               *saved = val;
+       val &= atom_arg_mask[align];
+       val >>= atom_arg_shift[align];
+       if (print)
+               switch (align) {
+               case ATOM_SRC_DWORD:
+                       DEBUG(".[31:0] -> 0x%08X\n", val);
+                       break;
+               case ATOM_SRC_WORD0:
+                       DEBUG(".[15:0] -> 0x%04X\n", val);
+                       break;
+               case ATOM_SRC_WORD8:
+                       DEBUG(".[23:8] -> 0x%04X\n", val);
+                       break;
+               case ATOM_SRC_WORD16:
+                       DEBUG(".[31:16] -> 0x%04X\n", val);
+                       break;
+               case ATOM_SRC_BYTE0:
+                       DEBUG(".[7:0] -> 0x%02X\n", val);
+                       break;
+               case ATOM_SRC_BYTE8:
+                       DEBUG(".[15:8] -> 0x%02X\n", val);
+                       break;
+               case ATOM_SRC_BYTE16:
+                       DEBUG(".[23:16] -> 0x%02X\n", val);
+                       break;
+               case ATOM_SRC_BYTE24:
+                       DEBUG(".[31:24] -> 0x%02X\n", val);
+                       break;
+               }
+       return val;
+}
+
+static void atom_skip_src_int(atom_exec_context *ctx, uint8_t attr, int *ptr)
+{
+       uint32_t align = (attr >> 3) & 7, arg = attr & 7;
+       switch (arg) {
+       case ATOM_ARG_REG:
+       case ATOM_ARG_ID:
+               (*ptr) += 2;
+               break;
+       case ATOM_ARG_PLL:
+       case ATOM_ARG_MC:
+       case ATOM_ARG_PS:
+       case ATOM_ARG_WS:
+       case ATOM_ARG_FB:
+               (*ptr)++;
+               break;
+       case ATOM_ARG_IMM:
+               switch (align) {
+               case ATOM_SRC_DWORD:
+                       (*ptr) += 4;
+                       return;
+               case ATOM_SRC_WORD0:
+               case ATOM_SRC_WORD8:
+               case ATOM_SRC_WORD16:
+                       (*ptr) += 2;
+                       return;
+               case ATOM_SRC_BYTE0:
+               case ATOM_SRC_BYTE8:
+               case ATOM_SRC_BYTE16:
+               case ATOM_SRC_BYTE24:
+                       (*ptr)++;
+                       return;
+               }
+               return;
+       }
+}
+
+static uint32_t atom_get_src(atom_exec_context *ctx, uint8_t attr, int *ptr)
+{
+       return atom_get_src_int(ctx, attr, ptr, NULL, 1);
+}
+
+static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr,
+                            int *ptr, uint32_t *saved, int print)
+{
+       return atom_get_src_int(ctx,
+                               arg | atom_dst_to_src[(attr >> 3) &
+                                                     7][(attr >> 6) & 3] << 3,
+                               ptr, saved, print);
+}
+
+static void atom_skip_dst(atom_exec_context *ctx, int arg, uint8_t attr, int *ptr)
+{
+       atom_skip_src_int(ctx,
+                         arg | atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) &
+                                                                3] << 3, ptr);
+}
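For reference, the attribute byte these helpers consume packs three fields: bits 0-2 select the operand kind (REG, PS, WS, FB, ID, IMM, PLL, MC), bits 3-5 the source alignment, and bits 6-7 a destination-size selector that atom_get_dst()/atom_skip_dst() translate back to a source alignment via atom_dst_to_src[][]. A minimal decoding sketch; the value 0x2B is a made-up example, not taken from any BIOS:

        uint8_t attr  = 0x2B;            /* hypothetical attribute byte              */
        int arg       = attr & 7;        /* 3 -> ATOM_ARG_FB                         */
        int align     = (attr >> 3) & 7; /* 5 -> ATOM_SRC_BYTE8, i.e. bits [15:8]    */
        int dst_align = (attr >> 6) & 3; /* 0 -> index into atom_dst_to_src[align][] */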
+
+static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
+                        int *ptr, uint32_t val, uint32_t saved)
+{
+       uint32_t align =
+           atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3], old_val =
+           val, idx;
+       struct atom_context *gctx = ctx->ctx;
+       old_val &= atom_arg_mask[align] >> atom_arg_shift[align];
+       val <<= atom_arg_shift[align];
+       val &= atom_arg_mask[align];
+       saved &= ~atom_arg_mask[align];
+       val |= saved;
+       switch (arg) {
+       case ATOM_ARG_REG:
+               idx = U16(*ptr);
+               (*ptr) += 2;
+               DEBUG("REG[0x%04X]", idx);
+               idx += gctx->reg_block;
+               switch (gctx->io_mode) {
+               case ATOM_IO_MM:
+                       if (idx == 0)
+                               gctx->card->reg_write(gctx->card, idx,
+                                                     val << 2);
+                       else
+                               gctx->card->reg_write(gctx->card, idx, val);
+                       break;
+               case ATOM_IO_PCI:
+                       printk(KERN_INFO
+                              "PCI registers are not implemented.\n");
+                       return;
+               case ATOM_IO_SYSIO:
+                       printk(KERN_INFO
+                              "SYSIO registers are not implemented.\n");
+                       return;
+               default:
+                       if (!(gctx->io_mode & 0x80)) {
+                               printk(KERN_INFO "Bad IO mode.\n");
+                               return;
+                       }
+                       if (!gctx->iio[gctx->io_mode & 0xFF]) {
+                               printk(KERN_INFO
+                                      "Undefined indirect IO write method %d.\n",
+                                      gctx->io_mode & 0x7F);
+                               return;
+                       }
+                       atom_iio_execute(gctx, gctx->iio[gctx->io_mode & 0xFF],
+                                        idx, val);
+               }
+               break;
+       case ATOM_ARG_PS:
+               idx = U8(*ptr);
+               (*ptr)++;
+               DEBUG("PS[0x%02X]", idx);
+               ctx->ps[idx] = cpu_to_le32(val);
+               break;
+       case ATOM_ARG_WS:
+               idx = U8(*ptr);
+               (*ptr)++;
+               DEBUG("WS[0x%02X]", idx);
+               switch (idx) {
+               case ATOM_WS_QUOTIENT:
+                       gctx->divmul[0] = val;
+                       break;
+               case ATOM_WS_REMAINDER:
+                       gctx->divmul[1] = val;
+                       break;
+               case ATOM_WS_DATAPTR:
+                       gctx->data_block = val;
+                       break;
+               case ATOM_WS_SHIFT:
+                       gctx->shift = val;
+                       break;
+               case ATOM_WS_OR_MASK:
+               case ATOM_WS_AND_MASK:
+                       break;
+               case ATOM_WS_FB_WINDOW:
+                       gctx->fb_base = val;
+                       break;
+               case ATOM_WS_ATTRIBUTES:
+                       gctx->io_attr = val;
+                       break;
+               default:
+                       ctx->ws[idx] = val;
+               }
+               break;
+       case ATOM_ARG_FB:
+               idx = U8(*ptr);
+               (*ptr)++;
+               DEBUG("FB[0x%02X]", idx);
+               printk(KERN_INFO "FB access is not implemented.\n");
+               return;
+       case ATOM_ARG_PLL:
+               idx = U8(*ptr);
+               (*ptr)++;
+               DEBUG("PLL[0x%02X]", idx);
+               gctx->card->pll_write(gctx->card, idx, val);
+               break;
+       case ATOM_ARG_MC:
+               idx = U8(*ptr);
+               (*ptr)++;
+               DEBUG("MC[0x%02X]", idx);
+               gctx->card->mc_write(gctx->card, idx, val);
+               return;
+       }
+       switch (align) {
+       case ATOM_SRC_DWORD:
+               DEBUG(".[31:0] <- 0x%08X\n", old_val);
+               break;
+       case ATOM_SRC_WORD0:
+               DEBUG(".[15:0] <- 0x%04X\n", old_val);
+               break;
+       case ATOM_SRC_WORD8:
+               DEBUG(".[23:8] <- 0x%04X\n", old_val);
+               break;
+       case ATOM_SRC_WORD16:
+               DEBUG(".[31:16] <- 0x%04X\n", old_val);
+               break;
+       case ATOM_SRC_BYTE0:
+               DEBUG(".[7:0] <- 0x%02X\n", old_val);
+               break;
+       case ATOM_SRC_BYTE8:
+               DEBUG(".[15:8] <- 0x%02X\n", old_val);
+               break;
+       case ATOM_SRC_BYTE16:
+               DEBUG(".[23:16] <- 0x%02X\n", old_val);
+               break;
+       case ATOM_SRC_BYTE24:
+               DEBUG(".[31:24] <- 0x%02X\n", old_val);
+               break;
+       }
+}
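atom_put_dst() is a read-modify-write: only the bit-field selected by the alignment is replaced, while the rest of the previously read value ("saved") is preserved. A worked example, assuming align is ATOM_SRC_BYTE8 so that atom_arg_mask/atom_arg_shift select bits [15:8]:

        uint32_t saved = 0x12345678;    /* full value returned through atom_get_dst() */
        uint32_t val   = 0xAB;          /* new value for the [15:8] field             */

        val   <<= 8;                    /* shift into position        -> 0x0000AB00   */
        val    &= 0x0000FF00;           /* clip to the field mask                     */
        saved  &= ~0x0000FF00;          /* drop the old field         -> 0x12340078   */
        val    |= saved;                /* merged result written back -> 0x1234AB78   */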
+
+static void atom_op_add(atom_exec_context *ctx, int *ptr, int arg)
+{
+       uint8_t attr = U8((*ptr)++);
+       uint32_t dst, src, saved;
+       int dptr = *ptr;
+       SDEBUG("   dst: ");
+       dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+       SDEBUG("   src: ");
+       src = atom_get_src(ctx, attr, ptr);
+       dst += src;
+       SDEBUG("   dst: ");
+       atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_and(atom_exec_context *ctx, int *ptr, int arg)
+{
+       uint8_t attr = U8((*ptr)++);
+       uint32_t dst, src, saved;
+       int dptr = *ptr;
+       SDEBUG("   dst: ");
+       dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+       SDEBUG("   src: ");
+       src = atom_get_src(ctx, attr, ptr);
+       dst &= src;
+       SDEBUG("   dst: ");
+       atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_beep(atom_exec_context *ctx, int *ptr, int arg)
+{
+       printk("ATOM BIOS beeped!\n");
+}
+
+static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
+{
+       int idx = U8((*ptr)++);
+       if (idx < ATOM_TABLE_NAMES_CNT)
+               SDEBUG("   table: %d (%s)\n", idx, atom_table_names[idx]);
+       else
+               SDEBUG("   table: %d\n", idx);
+       if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
+               atom_execute_table(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
+}
+
+static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg)
+{
+       uint8_t attr = U8((*ptr)++);
+       uint32_t saved;
+       int dptr = *ptr;
+       attr &= 0x38;
+       attr |= atom_def_dst[attr >> 3] << 6;
+       atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
+       SDEBUG("   dst: ");
+       atom_put_dst(ctx, arg, attr, &dptr, 0, saved);
+}
+
+static void atom_op_compare(atom_exec_context *ctx, int *ptr, int arg)
+{
+       uint8_t attr = U8((*ptr)++);
+       uint32_t dst, src;
+       SDEBUG("   src1: ");
+       dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
+       SDEBUG("   src2: ");
+       src = atom_get_src(ctx, attr, ptr);
+       ctx->ctx->cs_equal = (dst == src);
+       ctx->ctx->cs_above = (dst > src);
+       SDEBUG("   result: %s %s\n", ctx->ctx->cs_equal ? "EQ" : "NE",
+              ctx->ctx->cs_above ? "GT" : "LE");
+}
+
+static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
+{
+       uint8_t count = U8((*ptr)++);
+       SDEBUG("   count: %d\n", count);
+       if (arg == ATOM_UNIT_MICROSEC)
+               schedule_timeout_uninterruptible(usecs_to_jiffies(count));
+       else
+               schedule_timeout_uninterruptible(msecs_to_jiffies(count));
+}
+
+static void atom_op_div(atom_exec_context *ctx, int *ptr, int arg)
+{
+       uint8_t attr = U8((*ptr)++);
+       uint32_t dst, src;
+       SDEBUG("   src1: ");
+       dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
+       SDEBUG("   src2: ");
+       src = atom_get_src(ctx, attr, ptr);
+       if (src != 0) {
+               ctx->ctx->divmul[0] = dst / src;
+               ctx->ctx->divmul[1] = dst % src;
+       } else {
+               ctx->ctx->divmul[0] = 0;
+               ctx->ctx->divmul[1] = 0;
+       }
+}
+
+static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg)
+{
+       /* functionally, a nop */
+}
+
+static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
+{
+       int execute = 0, target = U16(*ptr);
+       (*ptr) += 2;
+       switch (arg) {
+       case ATOM_COND_ABOVE:
+               execute = ctx->ctx->cs_above;
+               break;
+       case ATOM_COND_ABOVEOREQUAL:
+               execute = ctx->ctx->cs_above || ctx->ctx->cs_equal;
+               break;
+       case ATOM_COND_ALWAYS:
+               execute = 1;
+               break;
+       case ATOM_COND_BELOW:
+               execute = !(ctx->ctx->cs_above || ctx->ctx->cs_equal);
+               break;
+       case ATOM_COND_BELOWOREQUAL:
+               execute = !ctx->ctx->cs_above;
+               break;
+       case ATOM_COND_EQUAL:
+               execute = ctx->ctx->cs_equal;
+               break;
+       case ATOM_COND_NOTEQUAL:
+               execute = !ctx->ctx->cs_equal;
+               break;
+       }
+       if (arg != ATOM_COND_ALWAYS)
+               SDEBUG("   taken: %s\n", execute ? "yes" : "no");
+       SDEBUG("   target: 0x%04X\n", target);
+       if (execute)
+               *ptr = ctx->start + target;
+}
+
+static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
+{
+       uint8_t attr = U8((*ptr)++);
+       uint32_t dst, src1, src2, saved;
+       int dptr = *ptr;
+       SDEBUG("   dst: ");
+       dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+       SDEBUG("   src1: ");
+       src1 = atom_get_src(ctx, attr, ptr);
+       SDEBUG("   src2: ");
+       src2 = atom_get_src(ctx, attr, ptr);
+       dst &= src1;
+       dst |= src2;
+       SDEBUG("   dst: ");
+       atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_move(atom_exec_context *ctx, int *ptr, int arg)
+{
+       uint8_t attr = U8((*ptr)++);
+       uint32_t src, saved;
+       int dptr = *ptr;
+       if (((attr >> 3) & 7) != ATOM_SRC_DWORD)
+               atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
+       else {
+               atom_skip_dst(ctx, arg, attr, ptr);
+               saved = 0xCDCDCDCD;
+       }
+       SDEBUG("   src: ");
+       src = atom_get_src(ctx, attr, ptr);
+       SDEBUG("   dst: ");
+       atom_put_dst(ctx, arg, attr, &dptr, src, saved);
+}
+
+static void atom_op_mul(atom_exec_context *ctx, int *ptr, int arg)
+{
+       uint8_t attr = U8((*ptr)++);
+       uint32_t dst, src;
+       SDEBUG("   src1: ");
+       dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
+       SDEBUG("   src2: ");
+       src = atom_get_src(ctx, attr, ptr);
+       ctx->ctx->divmul[0] = dst * src;
+}
+
+static void atom_op_nop(atom_exec_context *ctx, int *ptr, int arg)
+{
+       /* nothing */
+}
+
+static void atom_op_or(atom_exec_context *ctx, int *ptr, int arg)
+{
+       uint8_t attr = U8((*ptr)++);
+       uint32_t dst, src, saved;
+       int dptr = *ptr;
+       SDEBUG("   dst: ");
+       dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+       SDEBUG("   src: ");
+       src = atom_get_src(ctx, attr, ptr);
+       dst |= src;
+       SDEBUG("   dst: ");
+       atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_postcard(atom_exec_context *ctx, int *ptr, int arg)
+{
+       uint8_t val = U8((*ptr)++);
+       SDEBUG("POST card output: 0x%02X\n", val);
+}
+
+static void atom_op_repeat(atom_exec_context *ctx, int *ptr, int arg)
+{
+       printk(KERN_INFO "unimplemented!\n");
+}
+
+static void atom_op_restorereg(atom_exec_context *ctx, int *ptr, int arg)
+{
+       printk(KERN_INFO "unimplemented!\n");
+}
+
+static void atom_op_savereg(atom_exec_context *ctx, int *ptr, int arg)
+{
+       printk(KERN_INFO "unimplemented!\n");
+}
+
+static void atom_op_setdatablock(atom_exec_context *ctx, int *ptr, int arg)
+{
+       int idx = U8(*ptr);
+       (*ptr)++;
+       SDEBUG("   block: %d\n", idx);
+       if (!idx)
+               ctx->ctx->data_block = 0;
+       else if (idx == 255)
+               ctx->ctx->data_block = ctx->start;
+       else
+               ctx->ctx->data_block = U16(ctx->ctx->data_table + 4 + 2 * idx);
+       SDEBUG("   base: 0x%04X\n", ctx->ctx->data_block);
+}
+
+static void atom_op_setfbbase(atom_exec_context *ctx, int *ptr, int arg)
+{
+       uint8_t attr = U8((*ptr)++);
+       SDEBUG("   fb_base: ");
+       ctx->ctx->fb_base = atom_get_src(ctx, attr, ptr);
+}
+
+static void atom_op_setport(atom_exec_context *ctx, int *ptr, int arg)
+{
+       int port;
+       switch (arg) {
+       case ATOM_PORT_ATI:
+               port = U16(*ptr);
+               if (port < ATOM_IO_NAMES_CNT)
+                       SDEBUG("   port: %d (%s)\n", port, atom_io_names[port]);
+               else
+                       SDEBUG("   port: %d\n", port);
+               if (!port)
+                       ctx->ctx->io_mode = ATOM_IO_MM;
+               else
+                       ctx->ctx->io_mode = ATOM_IO_IIO | port;
+               (*ptr) += 2;
+               break;
+       case ATOM_PORT_PCI:
+               ctx->ctx->io_mode = ATOM_IO_PCI;
+               (*ptr)++;
+               break;
+       case ATOM_PORT_SYSIO:
+               ctx->ctx->io_mode = ATOM_IO_SYSIO;
+               (*ptr)++;
+               break;
+       }
+}
+
+static void atom_op_setregblock(atom_exec_context *ctx, int *ptr, int arg)
+{
+       ctx->ctx->reg_block = U16(*ptr);
+       (*ptr) += 2;
+       SDEBUG("   base: 0x%04X\n", ctx->ctx->reg_block);
+}
+
+static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
+{
+       uint8_t attr = U8((*ptr)++), shift;
+       uint32_t saved, dst;
+       int dptr = *ptr;
+       attr &= 0x38;
+       attr |= atom_def_dst[attr >> 3] << 6;
+       SDEBUG("   dst: ");
+       dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+       shift = U8((*ptr)++);
+       SDEBUG("   shift: %d\n", shift);
+       dst <<= shift;
+       SDEBUG("   dst: ");
+       atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
+{
+       uint8_t attr = U8((*ptr)++), shift;
+       uint32_t saved, dst;
+       int dptr = *ptr;
+       attr &= 0x38;
+       attr |= atom_def_dst[attr >> 3] << 6;
+       SDEBUG("   dst: ");
+       dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+       shift = U8((*ptr)++);
+       SDEBUG("   shift: %d\n", shift);
+       dst >>= shift;
+       SDEBUG("   dst: ");
+       atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_sub(atom_exec_context *ctx, int *ptr, int arg)
+{
+       uint8_t attr = U8((*ptr)++);
+       uint32_t dst, src, saved;
+       int dptr = *ptr;
+       SDEBUG("   dst: ");
+       dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+       SDEBUG("   src: ");
+       src = atom_get_src(ctx, attr, ptr);
+       dst -= src;
+       SDEBUG("   dst: ");
+       atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_switch(atom_exec_context *ctx, int *ptr, int arg)
+{
+       uint8_t attr = U8((*ptr)++);
+       uint32_t src, val, target;
+       SDEBUG("   switch: ");
+       src = atom_get_src(ctx, attr, ptr);
+       while (U16(*ptr) != ATOM_CASE_END)
+               if (U8(*ptr) == ATOM_CASE_MAGIC) {
+                       (*ptr)++;
+                       SDEBUG("   case: ");
+                       val =
+                           atom_get_src(ctx, (attr & 0x38) | ATOM_ARG_IMM,
+                                        ptr);
+                       target = U16(*ptr);
+                       if (val == src) {
+                               SDEBUG("   target: %04X\n", target);
+                               *ptr = ctx->start + target;
+                               return;
+                       }
+                       (*ptr) += 2;
+               } else {
+                       printk(KERN_INFO "Bad case.\n");
+                       return;
+               }
+       (*ptr) += 2;
+}
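The SWITCH construct is encoded as a list of cases, each introduced by ATOM_CASE_MAGIC (0x63) and followed by an immediate in the source alignment plus a 16-bit little-endian jump target, the whole list terminated by ATOM_CASE_END (0x5A5A). An illustrative byte stream (made up for this sketch), assuming a byte-wide source alignment:

        static const uint8_t example_switch_body[] = {
                0x63, 0x01, 0x34, 0x12,  /* case 0x01 -> jump to table start + 0x1234 */
                0x63, 0x02, 0x78, 0x56,  /* case 0x02 -> jump to table start + 0x5678 */
                0x5A, 0x5A               /* ATOM_CASE_END: fall through past the list */
        };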
+
+static void atom_op_test(atom_exec_context *ctx, int *ptr, int arg)
+{
+       uint8_t attr = U8((*ptr)++);
+       uint32_t dst, src;
+       SDEBUG("   src1: ");
+       dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
+       SDEBUG("   src2: ");
+       src = atom_get_src(ctx, attr, ptr);
+       ctx->ctx->cs_equal = ((dst & src) == 0);
+       SDEBUG("   result: %s\n", ctx->ctx->cs_equal ? "EQ" : "NE");
+}
+
+static void atom_op_xor(atom_exec_context *ctx, int *ptr, int arg)
+{
+       uint8_t attr = U8((*ptr)++);
+       uint32_t dst, src, saved;
+       int dptr = *ptr;
+       SDEBUG("   dst: ");
+       dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+       SDEBUG("   src: ");
+       src = atom_get_src(ctx, attr, ptr);
+       dst ^= src;
+       SDEBUG("   dst: ");
+       atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_debug(atom_exec_context *ctx, int *ptr, int arg)
+{
+       printk(KERN_INFO "unimplemented!\n");
+}
+
+static struct {
+       void (*func) (atom_exec_context *, int *, int);
+       int arg;
+} opcode_table[ATOM_OP_CNT] = {
+       { NULL, 0 },
+       { atom_op_move, ATOM_ARG_REG },
+       { atom_op_move, ATOM_ARG_PS },
+       { atom_op_move, ATOM_ARG_WS },
+       { atom_op_move, ATOM_ARG_FB },
+       { atom_op_move, ATOM_ARG_PLL },
+       { atom_op_move, ATOM_ARG_MC },
+       { atom_op_and, ATOM_ARG_REG },
+       { atom_op_and, ATOM_ARG_PS },
+       { atom_op_and, ATOM_ARG_WS },
+       { atom_op_and, ATOM_ARG_FB },
+       { atom_op_and, ATOM_ARG_PLL },
+       { atom_op_and, ATOM_ARG_MC },
+       { atom_op_or, ATOM_ARG_REG },
+       { atom_op_or, ATOM_ARG_PS },
+       { atom_op_or, ATOM_ARG_WS },
+       { atom_op_or, ATOM_ARG_FB },
+       { atom_op_or, ATOM_ARG_PLL },
+       { atom_op_or, ATOM_ARG_MC },
+       { atom_op_shl, ATOM_ARG_REG },
+       { atom_op_shl, ATOM_ARG_PS },
+       { atom_op_shl, ATOM_ARG_WS },
+       { atom_op_shl, ATOM_ARG_FB },
+       { atom_op_shl, ATOM_ARG_PLL },
+       { atom_op_shl, ATOM_ARG_MC },
+       { atom_op_shr, ATOM_ARG_REG },
+       { atom_op_shr, ATOM_ARG_PS },
+       { atom_op_shr, ATOM_ARG_WS },
+       { atom_op_shr, ATOM_ARG_FB },
+       { atom_op_shr, ATOM_ARG_PLL },
+       { atom_op_shr, ATOM_ARG_MC },
+       { atom_op_mul, ATOM_ARG_REG },
+       { atom_op_mul, ATOM_ARG_PS },
+       { atom_op_mul, ATOM_ARG_WS },
+       { atom_op_mul, ATOM_ARG_FB },
+       { atom_op_mul, ATOM_ARG_PLL },
+       { atom_op_mul, ATOM_ARG_MC },
+       { atom_op_div, ATOM_ARG_REG },
+       { atom_op_div, ATOM_ARG_PS },
+       { atom_op_div, ATOM_ARG_WS },
+       { atom_op_div, ATOM_ARG_FB },
+       { atom_op_div, ATOM_ARG_PLL },
+       { atom_op_div, ATOM_ARG_MC },
+       { atom_op_add, ATOM_ARG_REG },
+       { atom_op_add, ATOM_ARG_PS },
+       { atom_op_add, ATOM_ARG_WS },
+       { atom_op_add, ATOM_ARG_FB },
+       { atom_op_add, ATOM_ARG_PLL },
+       { atom_op_add, ATOM_ARG_MC },
+       { atom_op_sub, ATOM_ARG_REG },
+       { atom_op_sub, ATOM_ARG_PS },
+       { atom_op_sub, ATOM_ARG_WS },
+       { atom_op_sub, ATOM_ARG_FB },
+       { atom_op_sub, ATOM_ARG_PLL },
+       { atom_op_sub, ATOM_ARG_MC },
+       { atom_op_setport, ATOM_PORT_ATI },
+       { atom_op_setport, ATOM_PORT_PCI },
+       { atom_op_setport, ATOM_PORT_SYSIO },
+       { atom_op_setregblock, 0 },
+       { atom_op_setfbbase, 0 },
+       { atom_op_compare, ATOM_ARG_REG },
+       { atom_op_compare, ATOM_ARG_PS },
+       { atom_op_compare, ATOM_ARG_WS },
+       { atom_op_compare, ATOM_ARG_FB },
+       { atom_op_compare, ATOM_ARG_PLL },
+       { atom_op_compare, ATOM_ARG_MC },
+       { atom_op_switch, 0 },
+       { atom_op_jump, ATOM_COND_ALWAYS },
+       { atom_op_jump, ATOM_COND_EQUAL },
+       { atom_op_jump, ATOM_COND_BELOW },
+       { atom_op_jump, ATOM_COND_ABOVE },
+       { atom_op_jump, ATOM_COND_BELOWOREQUAL },
+       { atom_op_jump, ATOM_COND_ABOVEOREQUAL },
+       { atom_op_jump, ATOM_COND_NOTEQUAL },
+       { atom_op_test, ATOM_ARG_REG },
+       { atom_op_test, ATOM_ARG_PS },
+       { atom_op_test, ATOM_ARG_WS },
+       { atom_op_test, ATOM_ARG_FB },
+       { atom_op_test, ATOM_ARG_PLL },
+       { atom_op_test, ATOM_ARG_MC },
+       { atom_op_delay, ATOM_UNIT_MILLISEC },
+       { atom_op_delay, ATOM_UNIT_MICROSEC },
+       { atom_op_calltable, 0 },
+       { atom_op_repeat, 0 },
+       { atom_op_clear, ATOM_ARG_REG },
+       { atom_op_clear, ATOM_ARG_PS },
+       { atom_op_clear, ATOM_ARG_WS },
+       { atom_op_clear, ATOM_ARG_FB },
+       { atom_op_clear, ATOM_ARG_PLL },
+       { atom_op_clear, ATOM_ARG_MC },
+       { atom_op_nop, 0 },
+       { atom_op_eot, 0 },
+       { atom_op_mask, ATOM_ARG_REG },
+       { atom_op_mask, ATOM_ARG_PS },
+       { atom_op_mask, ATOM_ARG_WS },
+       { atom_op_mask, ATOM_ARG_FB },
+       { atom_op_mask, ATOM_ARG_PLL },
+       { atom_op_mask, ATOM_ARG_MC },
+       { atom_op_postcard, 0 },
+       { atom_op_beep, 0 },
+       { atom_op_savereg, 0 },
+       { atom_op_restorereg, 0 },
+       { atom_op_setdatablock, 0 },
+       { atom_op_xor, ATOM_ARG_REG },
+       { atom_op_xor, ATOM_ARG_PS },
+       { atom_op_xor, ATOM_ARG_WS },
+       { atom_op_xor, ATOM_ARG_FB },
+       { atom_op_xor, ATOM_ARG_PLL },
+       { atom_op_xor, ATOM_ARG_MC },
+       { atom_op_shl, ATOM_ARG_REG },
+       { atom_op_shl, ATOM_ARG_PS },
+       { atom_op_shl, ATOM_ARG_WS },
+       { atom_op_shl, ATOM_ARG_FB },
+       { atom_op_shl, ATOM_ARG_PLL },
+       { atom_op_shl, ATOM_ARG_MC },
+       { atom_op_shr, ATOM_ARG_REG },
+       { atom_op_shr, ATOM_ARG_PS },
+       { atom_op_shr, ATOM_ARG_WS },
+       { atom_op_shr, ATOM_ARG_FB },
+       { atom_op_shr, ATOM_ARG_PLL },
+       { atom_op_shr, ATOM_ARG_MC },
+       { atom_op_debug, 0 },
+};
+
+void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
+{
+       int base = CU16(ctx->cmd_table + 4 + 2 * index);
+       int len, ws, ps, ptr;
+       unsigned char op;
+       atom_exec_context ectx;
+
+       if (!base)
+               return;
+
+       len = CU16(base + ATOM_CT_SIZE_PTR);
+       ws = CU8(base + ATOM_CT_WS_PTR);
+       ps = CU8(base + ATOM_CT_PS_PTR) & ATOM_CT_PS_MASK;
+       ptr = base + ATOM_CT_CODE_PTR;
+
+       SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps);
+
+       /* reset reg block */
+       ctx->reg_block = 0;
+       ectx.ctx = ctx;
+       ectx.ps_shift = ps / 4;
+       ectx.start = base;
+       ectx.ps = params;
+       if (ws)
+               ectx.ws = kzalloc(4 * ws, GFP_KERNEL);
+       else
+               ectx.ws = NULL;
+
+       debug_depth++;
+       while (1) {
+               op = CU8(ptr++);
+               if (op < ATOM_OP_NAMES_CNT)
+                       SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1);
+               else
+                       SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1);
+
+               if (op < ATOM_OP_CNT && op > 0)
+                       opcode_table[op].func(&ectx, &ptr,
+                                             opcode_table[op].arg);
+               else
+                       break;
+
+               if (op == ATOM_OP_EOT)
+                       break;
+       }
+       debug_depth--;
+       SDEBUG("<<\n");
+
+       if (ws)
+               kfree(ectx.ws);
+}
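For orientation, the per-table header that atom_execute_table() reads through the ATOM_CT_* offsets can be pictured as the packed struct below. This is only an illustration of the layout as the interpreter consumes it (the code uses CU8()/CU16(), never such a struct): ws is treated as a count of 32-bit workspace entries and ps as a parameter-space size in bytes.

struct atom_cmd_table_header_sketch {   /* illustrative, not a real kernel type          */
        uint16_t size;                  /* ATOM_CT_SIZE_PTR (0): total table size        */
        uint8_t  format_rev;            /* read by atom_parse_cmd_header()               */
        uint8_t  content_rev;
        uint8_t  ws;                    /* ATOM_CT_WS_PTR (4): 32-bit workspace entries  */
        uint8_t  ps;                    /* ATOM_CT_PS_PTR (5): parameter bytes, low 7 bits */
        uint8_t  code[];                /* ATOM_CT_CODE_PTR (6): bytecode stream         */
} __attribute__((packed));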
+
+static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };
+
+static void atom_index_iio(struct atom_context *ctx, int base)
+{
+       ctx->iio = kzalloc(2 * 256, GFP_KERNEL);
+       while (CU8(base) == ATOM_IIO_START) {
+               ctx->iio[CU8(base + 1)] = base + 2;
+               base += 2;
+               while (CU8(base) != ATOM_IIO_END)
+                       base += atom_iio_len[CU8(base)];
+               base += 3;
+       }
+}
+
+struct atom_context *atom_parse(struct card_info *card, void *bios)
+{
+       int base;
+       struct atom_context *ctx =
+           kzalloc(sizeof(struct atom_context), GFP_KERNEL);
+       char *str;
+       char name[512];
+       int i;
+
+       ctx->card = card;
+       ctx->bios = bios;
+
+       if (CU16(0) != ATOM_BIOS_MAGIC) {
+               printk(KERN_INFO "Invalid BIOS magic.\n");
+               kfree(ctx);
+               return NULL;
+       }
+       if (strncmp
+           (CSTR(ATOM_ATI_MAGIC_PTR), ATOM_ATI_MAGIC,
+            strlen(ATOM_ATI_MAGIC))) {
+               printk(KERN_INFO "Invalid ATI magic.\n");
+               kfree(ctx);
+               return NULL;
+       }
+
+       base = CU16(ATOM_ROM_TABLE_PTR);
+       if (strncmp
+           (CSTR(base + ATOM_ROM_MAGIC_PTR), ATOM_ROM_MAGIC,
+            strlen(ATOM_ROM_MAGIC))) {
+               printk(KERN_INFO "Invalid ATOM magic.\n");
+               kfree(ctx);
+               return NULL;
+       }
+
+       ctx->cmd_table = CU16(base + ATOM_ROM_CMD_PTR);
+       ctx->data_table = CU16(base + ATOM_ROM_DATA_PTR);
+       atom_index_iio(ctx, CU16(ctx->data_table + ATOM_DATA_IIO_PTR) + 4);
+
+       str = CSTR(CU16(base + ATOM_ROM_MSG_PTR));
+       while (*str && ((*str == '\n') || (*str == '\r')))
+               str++;
+       /* name string isn't always 0 terminated */
+       for (i = 0; i < 511; i++) {
+               name[i] = str[i];
+               if (name[i] < '.' || name[i] > 'z') {
+                       name[i] = 0;
+                       break;
+               }
+       }
+       printk(KERN_INFO "ATOM BIOS: %s\n", name);
+
+       return ctx;
+}
+
+int atom_asic_init(struct atom_context *ctx)
+{
+       int hwi = CU16(ctx->data_table + ATOM_DATA_FWI_PTR);
+       uint32_t ps[16];
+       memset(ps, 0, 64);
+
+       ps[0] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFSCLK_PTR));
+       ps[1] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFMCLK_PTR));
+       if (!ps[0] || !ps[1])
+               return 1;
+
+       if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
+               return 1;
+       atom_execute_table(ctx, ATOM_CMD_INIT, ps);
+
+       return 0;
+}
+
+void atom_destroy(struct atom_context *ctx)
+{
+       if (ctx->iio)
+               kfree(ctx->iio);
+       kfree(ctx);
+}
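Putting the pieces together, a caller drives the interpreter through parse/init/execute/destroy. A minimal usage sketch; the function name and error codes are placeholders, and the real setup lives in the radeon KMS code elsewhere in this series:

static int example_atom_bringup(struct card_info *card, void *bios_image)
{
        struct atom_context *actx;

        actx = atom_parse(card, bios_image);    /* validates the BIOS/ATI/ATOM magics */
        if (!actx)
                return -EINVAL;

        if (atom_asic_init(actx)) {             /* non-zero: ASIC_Init table unusable */
                atom_destroy(actx);
                return -EINVAL;
        }

        /* further command tables are run via atom_execute_table(actx, index, params) */
        atom_destroy(actx);
        return 0;
}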
+
+void atom_parse_data_header(struct atom_context *ctx, int index,
+                           uint16_t * size, uint8_t * frev, uint8_t * crev,
+                           uint16_t * data_start)
+{
+       int offset = index * 2 + 4;
+       int idx = CU16(ctx->data_table + offset);
+
+       if (size)
+               *size = CU16(idx);
+       if (frev)
+               *frev = CU8(idx + 2);
+       if (crev)
+               *crev = CU8(idx + 3);
+       *data_start = idx;
+       return;
+}
+
+void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev,
+                          uint8_t * crev)
+{
+       int offset = index * 2 + 4;
+       int idx = CU16(ctx->cmd_table + offset);
+
+       if (frev)
+               *frev = CU8(idx + 2);
+       if (crev)
+               *crev = CU8(idx + 3);
+       return;
+}
diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h
new file mode 100644
index 0000000..e6eb38f
--- /dev/null
+++ b/drivers/gpu/drm/radeon/atom.h
@@ -0,0 +1,149 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Stanislaw Skowronek
+ */
+
+#ifndef ATOM_H
+#define ATOM_H
+
+#include <linux/types.h>
+#include "drmP.h"
+
+#define ATOM_BIOS_MAGIC                0xAA55
+#define ATOM_ATI_MAGIC_PTR     0x30
+#define ATOM_ATI_MAGIC         " 761295520"
+#define ATOM_ROM_TABLE_PTR     0x48
+
+#define ATOM_ROM_MAGIC         "ATOM"
+#define ATOM_ROM_MAGIC_PTR     4
+
+#define ATOM_ROM_MSG_PTR       0x10
+#define ATOM_ROM_CMD_PTR       0x1E
+#define ATOM_ROM_DATA_PTR      0x20
+
+#define ATOM_CMD_INIT          0
+#define ATOM_CMD_SETSCLK       0x0A
+#define ATOM_CMD_SETMCLK       0x0B
+#define ATOM_CMD_SETPCLK       0x0C
+
+#define ATOM_DATA_FWI_PTR      0xC
+#define ATOM_DATA_IIO_PTR      0x32
+
+#define ATOM_FWI_DEFSCLK_PTR   8
+#define ATOM_FWI_DEFMCLK_PTR   0xC
+#define ATOM_FWI_MAXSCLK_PTR   0x24
+#define ATOM_FWI_MAXMCLK_PTR   0x28
+
+#define ATOM_CT_SIZE_PTR       0
+#define ATOM_CT_WS_PTR         4
+#define ATOM_CT_PS_PTR         5
+#define ATOM_CT_PS_MASK                0x7F
+#define ATOM_CT_CODE_PTR       6
+
+#define ATOM_OP_CNT            123
+#define ATOM_OP_EOT            91
+
+#define ATOM_CASE_MAGIC                0x63
+#define ATOM_CASE_END          0x5A5A
+
+#define ATOM_ARG_REG           0
+#define ATOM_ARG_PS            1
+#define ATOM_ARG_WS            2
+#define ATOM_ARG_FB            3
+#define ATOM_ARG_ID            4
+#define ATOM_ARG_IMM           5
+#define ATOM_ARG_PLL           6
+#define ATOM_ARG_MC            7
+
+#define ATOM_SRC_DWORD         0
+#define ATOM_SRC_WORD0         1
+#define ATOM_SRC_WORD8         2
+#define ATOM_SRC_WORD16                3
+#define ATOM_SRC_BYTE0         4
+#define ATOM_SRC_BYTE8         5
+#define ATOM_SRC_BYTE16                6
+#define ATOM_SRC_BYTE24                7
+
+#define ATOM_WS_QUOTIENT       0x40
+#define ATOM_WS_REMAINDER      0x41
+#define ATOM_WS_DATAPTR                0x42
+#define ATOM_WS_SHIFT          0x43
+#define ATOM_WS_OR_MASK                0x44
+#define ATOM_WS_AND_MASK       0x45
+#define ATOM_WS_FB_WINDOW      0x46
+#define ATOM_WS_ATTRIBUTES     0x47
+
+#define ATOM_IIO_NOP           0
+#define ATOM_IIO_START         1
+#define ATOM_IIO_READ          2
+#define ATOM_IIO_WRITE         3
+#define ATOM_IIO_CLEAR         4
+#define ATOM_IIO_SET           5
+#define ATOM_IIO_MOVE_INDEX    6
+#define ATOM_IIO_MOVE_ATTR     7
+#define ATOM_IIO_MOVE_DATA     8
+#define ATOM_IIO_END           9
+
+#define ATOM_IO_MM             0
+#define ATOM_IO_PCI            1
+#define ATOM_IO_SYSIO          2
+#define ATOM_IO_IIO            0x80
+
+struct card_info {
+       struct drm_device *dev;
+       void (*reg_write)(struct card_info *, uint32_t, uint32_t);     /* filled by driver */
+       uint32_t (*reg_read)(struct card_info *, uint32_t);            /* filled by driver */
+       void (*mc_write)(struct card_info *, uint32_t, uint32_t);      /* filled by driver */
+       uint32_t (*mc_read)(struct card_info *, uint32_t);             /* filled by driver */
+       void (*pll_write)(struct card_info *, uint32_t, uint32_t);     /* filled by driver */
+       uint32_t (*pll_read)(struct card_info *, uint32_t);            /* filled by driver */
+};
+
+struct atom_context {
+       struct card_info *card;
+       void *bios;
+       uint32_t cmd_table, data_table;
+       uint16_t *iio;
+
+       uint16_t data_block;
+       uint32_t fb_base;
+       uint32_t divmul[2];
+       uint16_t io_attr;
+       uint16_t reg_block;
+       uint8_t shift;
+       int cs_equal, cs_above;
+       int io_mode;
+};
+
+extern int atom_debug;
+
+struct atom_context *atom_parse(struct card_info *, void *);
+void atom_execute_table(struct atom_context *, int, uint32_t *);
+int atom_asic_init(struct atom_context *);
+void atom_destroy(struct atom_context *);
+void atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size, uint8_t *frev, uint8_t *crev, uint16_t *data_start);
+void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t *frev, uint8_t *crev);
+#include "atom-types.h"
+#include "atombios.h"
+#include "ObjectID.h"
+
+#endif
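The six function pointers in struct card_info marked "filled by driver" are the interpreter's only path to the hardware. A sketch of how a driver might wire them up; the example_* names are placeholders, not the callbacks the radeon driver actually registers:

static void example_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
        /* would forward to the driver's MMIO register write helper */
}

static uint32_t example_reg_read(struct card_info *info, uint32_t reg)
{
        return 0;       /* would forward to the driver's MMIO register read helper */
}

static struct card_info example_card_info = {
        .reg_read  = example_reg_read,
        .reg_write = example_reg_write,
        /* .mc_read/.mc_write/.pll_read/.pll_write are wired up the same way,
         * and .dev points at the driver's struct drm_device */
};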
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
new file mode 100644
index 0000000..cf67928
--- /dev/null
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -0,0 +1,4785 @@
+/*
+ * Copyright 2006-2007 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/****************************************************************************/
+/*Portion I: Definitions  shared between VBIOS and Driver                   */
+/****************************************************************************/
+
+#ifndef _ATOMBIOS_H
+#define _ATOMBIOS_H
+
+#define ATOM_VERSION_MAJOR                   0x00020000
+#define ATOM_VERSION_MINOR                   0x00000002
+
+#define ATOM_HEADER_VERSION (ATOM_VERSION_MAJOR | ATOM_VERSION_MINOR)
+
+/* Endianness must be specified before inclusion:
+ * define ATOM_BIG_ENDIAN to 1 for big endian, 0 for little endian
+ */
+#ifndef ATOM_BIG_ENDIAN
+#error Endian not specified
+#endif
+
+#ifdef _H2INC
+#ifndef ULONG
+typedef unsigned long ULONG;
+#endif
+
+#ifndef UCHAR
+typedef unsigned char UCHAR;
+#endif
+
+#ifndef USHORT
+typedef unsigned short USHORT;
+#endif
+#endif
+
+#define ATOM_DAC_A            0
+#define ATOM_DAC_B            1
+#define ATOM_EXT_DAC          2
+
+#define ATOM_CRTC1            0
+#define ATOM_CRTC2            1
+
+#define ATOM_DIGA             0
+#define ATOM_DIGB             1
+
+#define ATOM_PPLL1            0
+#define ATOM_PPLL2            1
+
+#define ATOM_SCALER1          0
+#define ATOM_SCALER2          1
+
+#define ATOM_SCALER_DISABLE   0
+#define ATOM_SCALER_CENTER    1
+#define ATOM_SCALER_EXPANSION 2
+#define ATOM_SCALER_MULTI_EX  3
+
+#define ATOM_DISABLE          0
+#define ATOM_ENABLE           1
+#define ATOM_LCD_BLOFF                          (ATOM_DISABLE+2)
+#define ATOM_LCD_BLON                           (ATOM_ENABLE+2)
+#define ATOM_LCD_BL_BRIGHTNESS_CONTROL          (ATOM_ENABLE+3)
+#define ATOM_LCD_SELFTEST_START                 (ATOM_DISABLE+5)
+#define ATOM_LCD_SELFTEST_STOP                  (ATOM_ENABLE+5)
+#define ATOM_ENCODER_INIT                       (ATOM_DISABLE+7)
+
+#define ATOM_BLANKING         1
+#define ATOM_BLANKING_OFF     0
+
+#define ATOM_CURSOR1          0
+#define ATOM_CURSOR2          1
+
+#define ATOM_ICON1            0
+#define ATOM_ICON2            1
+
+#define ATOM_CRT1             0
+#define ATOM_CRT2             1
+
+#define ATOM_TV_NTSC          1
+#define ATOM_TV_NTSCJ         2
+#define ATOM_TV_PAL           3
+#define ATOM_TV_PALM          4
+#define ATOM_TV_PALCN         5
+#define ATOM_TV_PALN          6
+#define ATOM_TV_PAL60         7
+#define ATOM_TV_SECAM         8
+#define ATOM_TV_CV            16
+
+#define ATOM_DAC1_PS2         1
+#define ATOM_DAC1_CV          2
+#define ATOM_DAC1_NTSC        3
+#define ATOM_DAC1_PAL         4
+
+#define ATOM_DAC2_PS2         ATOM_DAC1_PS2
+#define ATOM_DAC2_CV          ATOM_DAC1_CV
+#define ATOM_DAC2_NTSC        ATOM_DAC1_NTSC
+#define ATOM_DAC2_PAL         ATOM_DAC1_PAL
+
+#define ATOM_PM_ON            0
+#define ATOM_PM_STANDBY       1
+#define ATOM_PM_SUSPEND       2
+#define ATOM_PM_OFF           3
+
+/* Bit0:{=0:single, =1:dual},
+   Bit1 {=0:666RGB, =1:888RGB},
+   Bit2:3:{Grey level}
+   Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888}*/
+
+#define ATOM_PANEL_MISC_DUAL               0x00000001
+#define ATOM_PANEL_MISC_888RGB             0x00000002
+#define ATOM_PANEL_MISC_GREY_LEVEL         0x0000000C
+#define ATOM_PANEL_MISC_FPDI               0x00000010
+#define ATOM_PANEL_MISC_GREY_LEVEL_SHIFT   2
+#define ATOM_PANEL_MISC_SPATIAL            0x00000020
+#define ATOM_PANEL_MISC_TEMPORAL           0x00000040
+#define ATOM_PANEL_MISC_API_ENABLED        0x00000080
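Following the bit layout described above, the flags and the two-bit grey-level field decode as in this sketch (misc is a hypothetical variable holding the panel misc word):

        uint32_t misc = 0x0000000D;     /* hypothetical panel misc value          */
        int dual      = !!(misc & ATOM_PANEL_MISC_DUAL);     /* 1 -> dual link    */
        int rgb888    = !!(misc & ATOM_PANEL_MISC_888RGB);   /* 0 -> 666RGB       */
        int grey      = (misc & ATOM_PANEL_MISC_GREY_LEVEL) >>
                        ATOM_PANEL_MISC_GREY_LEVEL_SHIFT;    /* bits [3:2] -> 3   */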
+
+#define MEMTYPE_DDR1              "DDR1"
+#define MEMTYPE_DDR2              "DDR2"
+#define MEMTYPE_DDR3              "DDR3"
+#define MEMTYPE_DDR4              "DDR4"
+
+#define ASIC_BUS_TYPE_PCI         "PCI"
+#define ASIC_BUS_TYPE_AGP         "AGP"
+#define ASIC_BUS_TYPE_PCIE        "PCI_EXPRESS"
+
+/* Maximum size of the FireGL flag string */
+
+#define ATOM_FIREGL_FLAG_STRING     "FGL"      /* Flag used to enable FireGL Support */
+#define ATOM_MAX_SIZE_OF_FIREGL_FLAG_STRING  3 /* sizeof( ATOM_FIREGL_FLAG_STRING ) */
+
+#define ATOM_FAKE_DESKTOP_STRING    "DSK"      /* Flag used to enable mobile ASIC on Desktop */
+#define ATOM_MAX_SIZE_OF_FAKE_DESKTOP_STRING  ATOM_MAX_SIZE_OF_FIREGL_FLAG_STRING
+
+#define ATOM_M54T_FLAG_STRING       "M54T"     /* Flag used to enable M54T Support */
+#define ATOM_MAX_SIZE_OF_M54T_FLAG_STRING    4 /* sizeof( ATOM_M54T_FLAG_STRING ) */
+
+#define HW_ASSISTED_I2C_STATUS_FAILURE          2
+#define HW_ASSISTED_I2C_STATUS_SUCCESS          1
+
+#pragma pack(1)                        /* BIOS data must use byte alignment */
+
+/*  Define offset to location of ROM header. */
+
+#define OFFSET_TO_POINTER_TO_ATOM_ROM_HEADER    0x00000048L
+#define OFFSET_TO_ATOM_ROM_IMAGE_SIZE           0x00000002L
+
+#define OFFSET_TO_ATOMBIOS_ASIC_BUS_MEM_TYPE    0x94
+#define MAXSIZE_OF_ATOMBIOS_ASIC_BUS_MEM_TYPE   20     /* including the terminator 0x0! */
+#define OFFSET_TO_GET_ATOMBIOS_STRINGS_NUMBER   0x002f
+#define OFFSET_TO_GET_ATOMBIOS_STRINGS_START    0x006e
+
+/* Common header for all ROM Data tables.
+   Every table pointed to by _ATOM_MASTER_DATA_TABLE has this common header,
+   and the pointer actually points to this header. */
+
+typedef struct _ATOM_COMMON_TABLE_HEADER {
+       USHORT usStructureSize;
+       UCHAR ucTableFormatRevision;    /* Change it when the Parser is not backward compatible */
+       UCHAR ucTableContentRevision;   /* Change it only when the table needs to change but the firmware
+                                          image can't be updated, while the Driver needs to carry the new table! */
+} ATOM_COMMON_TABLE_HEADER;
+
+typedef struct _ATOM_ROM_HEADER {
+       ATOM_COMMON_TABLE_HEADER sHeader;
+       UCHAR uaFirmWareSignature[4];   /*Signature to distinguish between Atombios and non-atombios,
+                                          atombios should init it as "ATOM", don't change the position */
+       USHORT usBiosRuntimeSegmentAddress;
+       USHORT usProtectedModeInfoOffset;
+       USHORT usConfigFilenameOffset;
+       USHORT usCRC_BlockOffset;
+       USHORT usBIOS_BootupMessageOffset;
+       USHORT usInt10Offset;
+       USHORT usPciBusDevInitCode;
+       USHORT usIoBaseAddress;
+       USHORT usSubsystemVendorID;
+       USHORT usSubsystemID;
+       USHORT usPCI_InfoOffset;
+       USHORT usMasterCommandTableOffset;      /*Offset for SW to get all command table offsets, Don't change the position */
+       USHORT usMasterDataTableOffset; /*Offset for SW to get all data table offsets, Don't change the position */
+       UCHAR ucExtendedFunctionCode;
+       UCHAR ucReserved;
+} ATOM_ROM_HEADER;
+
+/*==============================Command Table Portion==================================== */
+
+#ifdef UEFI_BUILD
+#define        UTEMP   USHORT
+#define        USHORT  void*
+#endif
+
+typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES {
+       USHORT ASIC_Init;       /* Function Table, used by various SW components,latest version 1.1 */
+       USHORT GetDisplaySurfaceSize;   /* Atomic Table,  Used by Bios when enabling HW ICON */
+       USHORT ASIC_RegistersInit;      /* Atomic Table,  indirectly used by various SW components,called from ASIC_Init */
+       USHORT VRAM_BlockVenderDetection;       /* Atomic Table,  used only by Bios */
+       USHORT DIGxEncoderControl;      /* Only used by Bios */
+       USHORT MemoryControllerInit;    /* Atomic Table,  indirectly used by various SW components,called from ASIC_Init */
+       USHORT EnableCRTCMemReq;        /* Function Table,directly used by various SW components,latest version 2.1 */
+       USHORT MemoryParamAdjust;       /* Atomic Table,  indirectly used by various SW components,called from SetMemoryClock if needed */
+       USHORT DVOEncoderControl;       /* Function Table,directly used by various SW components,latest version 1.2 */
+       USHORT GPIOPinControl;  /* Atomic Table,  only used by Bios */
+       USHORT SetEngineClock;  /*Function Table,directly used by various SW components,latest version 1.1 */
+       USHORT SetMemoryClock;  /* Function Table,directly used by various SW components,latest version 1.1 */
+       USHORT SetPixelClock;   /*Function Table,directly used by various SW components,latest version 1.2 */
+       USHORT DynamicClockGating;      /* Atomic Table,  indirectly used by various SW components,called from ASIC_Init */
+       USHORT ResetMemoryDLL;  /* Atomic Table,  indirectly used by various SW components,called from SetMemoryClock */
+       USHORT ResetMemoryDevice;       /* Atomic Table,  indirectly used by various SW components,called from SetMemoryClock */
+       USHORT MemoryPLLInit;
+       USHORT AdjustDisplayPll;        /* only used by Bios */
+       USHORT AdjustMemoryController;  /* Atomic Table,  indirectly used by various SW components,called from SetMemoryClock */
+       USHORT EnableASIC_StaticPwrMgt; /* Atomic Table,  only used by Bios */
+       USHORT ASIC_StaticPwrMgtStatusChange;   /* Obsolete, only used by Bios */
+       USHORT DAC_LoadDetection;       /* Atomic Table,  directly used by various SW components,latest version 1.2 */
+       USHORT LVTMAEncoderControl;     /* Atomic Table,directly used by various SW components,latest version 1.3 */
+       USHORT LCD1OutputControl;       /* Atomic Table,  directly used by various SW components,latest version 1.1 */
+       USHORT DAC1EncoderControl;      /* Atomic Table,  directly used by various SW components,latest version 1.1 */
+       USHORT DAC2EncoderControl;      /* Atomic Table,  directly used by various SW components,latest version 1.1 */
+       USHORT DVOOutputControl;        /* Atomic Table,  directly used by various SW components,latest version 1.1 */
+       USHORT CV1OutputControl;        /* Atomic Table,  directly used by various SW components,latest version 1.1 */
+       USHORT GetConditionalGoldenSetting;     /* only used by Bios */
+       USHORT TVEncoderControl;        /* Function Table,directly used by various SW components,latest version 1.1 */
+       USHORT TMDSAEncoderControl;     /* Atomic Table,  directly used by various SW components,latest version 1.3 */
+       USHORT LVDSEncoderControl;      /* Atomic Table,  directly used by various SW components,latest version 1.3 */
+       USHORT TV1OutputControl;        /* Atomic Table,  directly used by various SW components,latest version 1.1 */
+       USHORT EnableScaler;    /* Atomic Table,  used only by Bios */
+       USHORT BlankCRTC;       /* Atomic Table,  directly used by various SW components,latest version 1.1 */
+       USHORT EnableCRTC;      /* Atomic Table,  directly used by various SW components,latest version 1.1 */
+       USHORT GetPixelClock;   /* Atomic Table,  directly used by various SW components,latest version 1.1 */
+       USHORT EnableVGA_Render;        /* Function Table,directly used by various SW components,latest version 1.1 */
+       USHORT EnableVGA_Access;        /* Obsolete ,     only used by Bios */
+       USHORT SetCRTC_Timing;  /* Atomic Table,  directly used by various SW components,latest version 1.1 */
+       USHORT SetCRTC_OverScan;        /* Atomic Table,  used by various SW components,latest version 1.1 */
+       USHORT SetCRTC_Replication;     /* Atomic Table,  used only by Bios */
+       USHORT SelectCRTC_Source;       /* Atomic Table,  directly used by various SW components,latest version 1.1 */
+       USHORT EnableGraphSurfaces;     /* Atomic Table,  used only by Bios */
+       USHORT UpdateCRTC_DoubleBufferRegisters;
+       USHORT LUT_AutoFill;    /* Atomic Table,  only used by Bios */
+       USHORT EnableHW_IconCursor;     /* Atomic Table,  only used by Bios */
+       USHORT GetMemoryClock;  /* Atomic Table,  directly used by various SW components,latest version 1.1 */
+       USHORT GetEngineClock;  /* Atomic Table,  directly used by various SW components,latest version 1.1 */
+       USHORT SetCRTC_UsingDTDTiming;  /* Atomic Table,  directly used by various SW components,latest version 1.1 */
+       USHORT ExternalEncoderControl;  /* Atomic Table,  directly used by various SW components,latest version 2.1 */
+       USHORT LVTMAOutputControl;      /* Atomic Table,  directly used by various SW components,latest version 1.1 */
+       USHORT VRAM_BlockDetectionByStrap;      /* Atomic Table,  used only by Bios */
+       USHORT MemoryCleanUp;   /* Atomic Table,  only used by Bios */
+       USHORT ProcessI2cChannelTransaction;    /* Function Table,only used by Bios */
+       USHORT WriteOneByteToHWAssistedI2C;     /* Function Table,indirectly used by various SW components */
+       USHORT ReadHWAssistedI2CStatus; /* Atomic Table,  indirectly used by various SW components */
+       USHORT SpeedFanControl; /* Function Table,indirectly used by various SW components,called from ASIC_Init */
+       USHORT PowerConnectorDetection; /* Atomic Table,  directly used by various SW components,latest version 1.1 */
+       USHORT MC_Synchronization;      /* Atomic Table,  indirectly used by various SW components,called from SetMemoryClock */
+       USHORT ComputeMemoryEnginePLL;  /* Atomic Table,  indirectly used by various SW components,called from SetMemory/EngineClock */
+       USHORT MemoryRefreshConversion; /* Atomic Table,  indirectly used by various SW components,called from SetMemory or SetEngineClock */
+       USHORT VRAM_GetCurrentInfoBlock;        /* Atomic Table,  used only by Bios */
+       USHORT DynamicMemorySettings;   /* Atomic Table,  indirectly used by various SW components,called from SetMemoryClock */
+       USHORT MemoryTraining;  /* Atomic Table,  used only by Bios */
+       USHORT EnableSpreadSpectrumOnPPLL;      /* Atomic Table,  directly used by various SW components,latest version 1.2 */
+       USHORT TMDSAOutputControl;      /* Atomic Table,  directly used by various SW components,latest version 1.1 */
+       USHORT SetVoltage;      /* Function Table,directly and/or indirectly used by various SW components,latest version 1.1 */
+       USHORT DAC1OutputControl;       /* Atomic Table,  directly used by various SW components,latest version 1.1 */
+       USHORT DAC2OutputControl;       /* Atomic Table,  directly used by various SW components,latest version 1.1 */
+       USHORT SetupHWAssistedI2CStatus;        /* Function Table,only used by Bios, obsolete soon.Switch to use "ReadEDIDFromHWAssistedI2C" */
+       USHORT ClockSource;     /* Atomic Table,  indirectly used by various SW components,called from ASIC_Init */
+       USHORT MemoryDeviceInit;        /* Atomic Table,  indirectly used by various SW components,called from SetMemoryClock */
+       USHORT EnableYUV;       /* Atomic Table,  indirectly used by various SW components,called from EnableVGARender */
+       USHORT DIG1EncoderControl;      /* Atomic Table,directly used by various SW components,latest version 1.1 */
+       USHORT DIG2EncoderControl;      /* Atomic Table,directly used by various SW components,latest version 1.1 */
+       USHORT DIG1TransmitterControl;  /* Atomic Table,directly used by various SW components,latest version 1.1 */
+       USHORT DIG2TransmitterControl;  /* Atomic Table,directly used by various SW components,latest version 1.1 */
+       USHORT ProcessAuxChannelTransaction;    /* Function Table,only used by Bios */
+       USHORT DPEncoderService;        /* Function Table,only used by Bios */
+} ATOM_MASTER_LIST_OF_COMMAND_TABLES;
+
+/*  For backward compatible */
+#define ReadEDIDFromHWAssistedI2C                ProcessI2cChannelTransaction
+#define UNIPHYTransmitterControl                                                    DIG1TransmitterControl
+#define LVTMATransmitterControl                                                             DIG2TransmitterControl
+#define SetCRTC_DPM_State                        GetConditionalGoldenSetting
+#define SetUniphyInstance                        ASIC_StaticPwrMgtStatusChange
+
+typedef struct _ATOM_MASTER_COMMAND_TABLE {
+       ATOM_COMMON_TABLE_HEADER sHeader;
+       ATOM_MASTER_LIST_OF_COMMAND_TABLES ListOfCommandTables;
+} ATOM_MASTER_COMMAND_TABLE;
+
+/****************************************************************************/
+/*  Structures used in every command table */
+/****************************************************************************/
+typedef struct _ATOM_TABLE_ATTRIBUTE {
+#if ATOM_BIG_ENDIAN
+       USHORT UpdatedByUtility:1;      /* [15]=Table updated by utility flag */
+       USHORT PS_SizeInBytes:7;        /* [14:8]=Size of parameter space in Bytes (multiple of a dword), */
+       USHORT WS_SizeInBytes:8;        /* [7:0]=Size of workspace in Bytes (in multiple of a dword), */
+#else
+       USHORT WS_SizeInBytes:8;        /* [7:0]=Size of workspace in Bytes (in multiple of a dword), */
+       USHORT PS_SizeInBytes:7;        /* [14:8]=Size of parameter space in Bytes (multiple of a dword), */
+       USHORT UpdatedByUtility:1;      /* [15]=Table updated by utility flag */
+#endif
+} ATOM_TABLE_ATTRIBUTE;
+
+typedef union _ATOM_TABLE_ATTRIBUTE_ACCESS {
+       ATOM_TABLE_ATTRIBUTE sbfAccess;
+       USHORT susAccess;
+} ATOM_TABLE_ATTRIBUTE_ACCESS;
+
+/****************************************************************************/
+/*  Common header for all command tables. */
+/*  Every table pointed by _ATOM_MASTER_COMMAND_TABLE has this common header. */
+/*  And the pointer actually points to this header. */
+/****************************************************************************/
+typedef struct _ATOM_COMMON_ROM_COMMAND_TABLE_HEADER {
+       ATOM_COMMON_TABLE_HEADER CommonHeader;
+       ATOM_TABLE_ATTRIBUTE TableAttribute;
+} ATOM_COMMON_ROM_COMMAND_TABLE_HEADER;
+
+/****************************************************************************/
+/*  Structures used by ComputeMemoryEnginePLLTable */
+/****************************************************************************/
+#define COMPUTE_MEMORY_PLL_PARAM        1
+#define COMPUTE_ENGINE_PLL_PARAM        2
+
+typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS {
+       ULONG ulClock;          /* When returned, it's the re-calculated clock based on the given Fb_div, Post_Div and ref_div */
+       UCHAR ucAction;         /* 0:reserved //1:Memory //2:Engine */
+       UCHAR ucReserved;       /* may expand to return larger Fbdiv later */
+       UCHAR ucFbDiv;          /* return value */
+       UCHAR ucPostDiv;        /* return value */
+} COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS;
+
+typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2 {
+       ULONG ulClock;          /* When returned, [23:0] holds the real clock */
+       UCHAR ucAction;         /* 0:reserved; COMPUTE_MEMORY_PLL_PARAM:Memory; COMPUTE_ENGINE_PLL_PARAM:Engine. It returns ref_div to be written to the register */
+       USHORT usFbDiv;         /* return Feedback value to be written to register */
+       UCHAR ucPostDiv;        /* return post div to be written to register */
+} COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2;
+#define COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION   COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS
+
+#define SET_CLOCK_FREQ_MASK                     0x00FFFFFF     /* Clock change tables only take bit [23:0] as the requested clock value */
+#define USE_NON_BUS_CLOCK_MASK                  0x01000000     /* Applicable to both memory and engine clock change, when set, it uses another clock as the temporary clock (engine uses memory and vice versa) */
+#define USE_MEMORY_SELF_REFRESH_MASK            0x02000000     /* Only applicable to memory clock change, when set, using memory self refresh during clock transition */
+#define SKIP_INTERNAL_MEMORY_PARAMETER_CHANGE   0x04000000     /* Only applicable to memory clock change, when set, the table will skip predefined internal memory parameter change */
+#define FIRST_TIME_CHANGE_CLOCK                                                                        0x08000000      /* Applicable to both memory and engine clock change,when set, it means this is 1st time to change clock after ASIC bootup */
+#define SKIP_SW_PROGRAM_PLL                                                                                    0x10000000      /* Applicable to both memory and engine clock change, when set, it means the table will not program SPLL/MPLL */
+#define USE_SS_ENABLED_PIXEL_CLOCK  USE_NON_BUS_CLOCK_MASK
+
+#define b3USE_NON_BUS_CLOCK_MASK                  0x01 /* Applicable to both memory and engine clock change, when set, it uses another clock as the temporary clock (engine uses memory and vice versa) */
+#define b3USE_MEMORY_SELF_REFRESH                 0x02 /* Only applicable to memory clock change, when set, using memory self refresh during clock transition */
+#define b3SKIP_INTERNAL_MEMORY_PARAMETER_CHANGE   0x04 /* Only applicable to memory clock change, when set, the table will skip predefined internal memory parameter change */
+#define b3FIRST_TIME_CHANGE_CLOCK                                                                      0x08    /* Applicable to both memory and engine clock change,when set, it means this is 1st time to change clock after ASIC bootup */
+#define b3SKIP_SW_PROGRAM_PLL                                                                                  0x10    /* Applicable to both memory and engine clock change, when set, it means the table will not program SPLL/MPLL */
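+
+/* Illustrative sketch, not part of the AtomBIOS interface: per the masks
+ * above, the clock-change tables take the target clock (in 10 kHz units) in
+ * bits [23:0] and behaviour flags in the upper bits.  The helper name is
+ * made up for this example.
+ */
+static inline ULONG example_build_clock_request(ULONG clock_10khz, ULONG flags)
+{
+       /* e.g. flags = USE_MEMORY_SELF_REFRESH_MASK for a memory clock change */
+       return (clock_10khz & SET_CLOCK_FREQ_MASK) | flags;
+}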
+
+typedef struct _ATOM_COMPUTE_CLOCK_FREQ {
+#if ATOM_BIG_ENDIAN
+       ULONG ulComputeClockFlag:8;     /*  =1: COMPUTE_MEMORY_PLL_PARAM, =2: COMPUTE_ENGINE_PLL_PARAM */
+       ULONG ulClockFreq:24;   /*  in unit of 10kHz */
+#else
+       ULONG ulClockFreq:24;   /*  in unit of 10kHz */
+       ULONG ulComputeClockFlag:8;     /*  =1: COMPUTE_MEMORY_PLL_PARAM, =2: COMPUTE_ENGINE_PLL_PARAM */
+#endif
+} ATOM_COMPUTE_CLOCK_FREQ;
+
+typedef struct _ATOM_S_MPLL_FB_DIVIDER {
+       USHORT usFbDivFrac;
+       USHORT usFbDiv;
+} ATOM_S_MPLL_FB_DIVIDER;
+
+typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3 {
+       union {
+               ATOM_COMPUTE_CLOCK_FREQ ulClock;        /* Input Parameter */
+               ATOM_S_MPLL_FB_DIVIDER ulFbDiv; /* Output Parameter */
+       };
+       UCHAR ucRefDiv;         /* Output Parameter */
+       UCHAR ucPostDiv;        /* Output Parameter */
+       UCHAR ucCntlFlag;       /* Output Parameter */
+       UCHAR ucReserved;
+} COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3;
+
+/*  ucCntlFlag */
+#define ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN          1
+#define ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE            2
+#define ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE         4
+
+typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER {
+       ATOM_COMPUTE_CLOCK_FREQ ulClock;
+       ULONG ulReserved[2];
+} DYNAMICE_MEMORY_SETTINGS_PARAMETER;
+
+typedef struct _DYNAMICE_ENGINE_SETTINGS_PARAMETER {
+       ATOM_COMPUTE_CLOCK_FREQ ulClock;
+       ULONG ulMemoryClock;
+       ULONG ulReserved;
+} DYNAMICE_ENGINE_SETTINGS_PARAMETER;
+
+/****************************************************************************/
+/*  Structures used by SetEngineClockTable */
+/****************************************************************************/
+typedef struct _SET_ENGINE_CLOCK_PARAMETERS {
+       ULONG ulTargetEngineClock;      /* In 10Khz unit */
+} SET_ENGINE_CLOCK_PARAMETERS;
+
+typedef struct _SET_ENGINE_CLOCK_PS_ALLOCATION {
+       ULONG ulTargetEngineClock;      /* In 10Khz unit */
+       COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION sReserved;
+} SET_ENGINE_CLOCK_PS_ALLOCATION;
+
+/****************************************************************************/
+/*  Structures used by SetMemoryClockTable */
+/****************************************************************************/
+typedef struct _SET_MEMORY_CLOCK_PARAMETERS {
+       ULONG ulTargetMemoryClock;      /* In 10Khz unit */
+} SET_MEMORY_CLOCK_PARAMETERS;
+
+typedef struct _SET_MEMORY_CLOCK_PS_ALLOCATION {
+       ULONG ulTargetMemoryClock;      /* In 10Khz unit */
+       COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION sReserved;
+} SET_MEMORY_CLOCK_PS_ALLOCATION;
+
+/****************************************************************************/
+/*  Structures used by ASIC_Init.ctb */
+/****************************************************************************/
+typedef struct _ASIC_INIT_PARAMETERS {
+       ULONG ulDefaultEngineClock;     /* In 10Khz unit */
+       ULONG ulDefaultMemoryClock;     /* In 10Khz unit */
+} ASIC_INIT_PARAMETERS;
+
+typedef struct _ASIC_INIT_PS_ALLOCATION {
+       ASIC_INIT_PARAMETERS sASICInitClocks;
+       SET_ENGINE_CLOCK_PS_ALLOCATION sReserved;       /* Caller doesn't need to init this structure */
+} ASIC_INIT_PS_ALLOCATION;
+
+/****************************************************************************/
+/*  Structure used by DynamicClockGatingTable.ctb */
+/****************************************************************************/
+typedef struct _DYNAMIC_CLOCK_GATING_PARAMETERS {
+       UCHAR ucEnable;         /*  ATOM_ENABLE or ATOM_DISABLE */
+       UCHAR ucPadding[3];
+} DYNAMIC_CLOCK_GATING_PARAMETERS;
+#define  DYNAMIC_CLOCK_GATING_PS_ALLOCATION  DYNAMIC_CLOCK_GATING_PARAMETERS
+
+/****************************************************************************/
+/*  Structure used by EnableASIC_StaticPwrMgtTable.ctb */
+/****************************************************************************/
+typedef struct _ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS {
+       UCHAR ucEnable;         /*  ATOM_ENABLE or ATOM_DISABLE */
+       UCHAR ucPadding[3];
+} ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS;
+#define ENABLE_ASIC_STATIC_PWR_MGT_PS_ALLOCATION  ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS
+
+/****************************************************************************/
+/*  Structures used by DAC_LoadDetectionTable.ctb */
+/****************************************************************************/
+typedef struct _DAC_LOAD_DETECTION_PARAMETERS {
+       USHORT usDeviceID;      /* {ATOM_DEVICE_CRTx_SUPPORT,ATOM_DEVICE_TVx_SUPPORT,ATOM_DEVICE_CVx_SUPPORT} */
+       UCHAR ucDacType;        /* {ATOM_DAC_A,ATOM_DAC_B, ATOM_EXT_DAC} */
+       UCHAR ucMisc;           /* Valid only when table revision =1.3 and above */
+} DAC_LOAD_DETECTION_PARAMETERS;
+
+/*  DAC_LOAD_DETECTION_PARAMETERS.ucMisc */
+#define DAC_LOAD_MISC_YPrPb                                            0x01
+
+typedef struct _DAC_LOAD_DETECTION_PS_ALLOCATION {
+       DAC_LOAD_DETECTION_PARAMETERS sDacload;
+       ULONG Reserved[2];      /*  Don't set this one, allocation for EXT DAC */
+} DAC_LOAD_DETECTION_PS_ALLOCATION;
+
+/****************************************************************************/
+/*  Structures used by DAC1EncoderControlTable.ctb and DAC2EncoderControlTable.ctb */
+/****************************************************************************/
+typedef struct _DAC_ENCODER_CONTROL_PARAMETERS {
+       USHORT usPixelClock;    /*  in 10KHz; for bios convenience */
+       UCHAR ucDacStandard;    /*  See definition of ATOM_DACx_xxx, for DCE3.0, bit 7 used as internal flag to indicate DAC2 (==1) or DAC1 (==0) */
+       UCHAR ucAction;         /*  0: turn off encoder */
+       /*  1: setup and turn on encoder */
+       /*  7: ATOM_ENCODER_INIT Initialize DAC */
+} DAC_ENCODER_CONTROL_PARAMETERS;
+
+#define DAC_ENCODER_CONTROL_PS_ALLOCATION  DAC_ENCODER_CONTROL_PARAMETERS
+
+/****************************************************************************/
+/*  Structures used by DIG1EncoderControlTable */
+/*                     DIG2EncoderControlTable */
+/*                     ExternalEncoderControlTable */
+/****************************************************************************/
+typedef struct _DIG_ENCODER_CONTROL_PARAMETERS {
+       USHORT usPixelClock;    /*  in 10KHz; for bios convenience */
+       UCHAR ucConfig;
+       /*  [2] Link Select: */
+       /*  =0: PHY linkA if bfLane<3 */
+       /*  =1: PHY linkB if bfLanes<3 */
+       /*  =0: PHY linkA+B if bfLanes=3 */
+       /*  [3] Transmitter Sel */
+       /*  =0: UNIPHY or PCIEPHY */
+       /*  =1: LVTMA */
+       UCHAR ucAction;         /*  =0: turn off encoder */
+       /*  =1: turn on encoder */
+       UCHAR ucEncoderMode;
+       /*  =0: DP   encoder */
+       /*  =1: LVDS encoder */
+       /*  =2: DVI  encoder */
+       /*  =3: HDMI encoder */
+       /*  =4: SDVO encoder */
+       UCHAR ucLaneNum;        /*  how many lanes to enable */
+       UCHAR ucReserved[2];
+} DIG_ENCODER_CONTROL_PARAMETERS;
+#define DIG_ENCODER_CONTROL_PS_ALLOCATION                        DIG_ENCODER_CONTROL_PARAMETERS
+#define EXTERNAL_ENCODER_CONTROL_PARAMETER                     DIG_ENCODER_CONTROL_PARAMETERS
+
+/* ucConfig */
+#define ATOM_ENCODER_CONFIG_DPLINKRATE_MASK                            0x01
+#define ATOM_ENCODER_CONFIG_DPLINKRATE_1_62GHZ         0x00
+#define ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ         0x01
+#define ATOM_ENCODER_CONFIG_LINK_SEL_MASK                                0x04
+#define ATOM_ENCODER_CONFIG_LINKA                                                                0x00
+#define ATOM_ENCODER_CONFIG_LINKB                                                                0x04
+#define ATOM_ENCODER_CONFIG_LINKA_B                                                      ATOM_TRANSMITTER_CONFIG_LINKA
+#define ATOM_ENCODER_CONFIG_LINKB_A                                                      ATOM_ENCODER_CONFIG_LINKB
+#define ATOM_ENCODER_CONFIG_TRANSMITTER_SEL_MASK       0x08
+#define ATOM_ENCODER_CONFIG_UNIPHY                                                       0x00
+#define ATOM_ENCODER_CONFIG_LVTMA                                                                0x08
+#define ATOM_ENCODER_CONFIG_TRANSMITTER1                                 0x00
+#define ATOM_ENCODER_CONFIG_TRANSMITTER2                                 0x08
+#define ATOM_ENCODER_CONFIG_DIGB                                                                 0x80  /*  VBIOS Internal use, outside SW should set this bit=0 */
+/*  ucAction */
+/*  ATOM_ENABLE:  Enable Encoder */
+/*  ATOM_DISABLE: Disable Encoder */
+
+/* ucEncoderMode */
+#define ATOM_ENCODER_MODE_DP                                                                                   0
+#define ATOM_ENCODER_MODE_LVDS                                                                         1
+#define ATOM_ENCODER_MODE_DVI                                                                                  2
+#define ATOM_ENCODER_MODE_HDMI                                                                         3
+#define ATOM_ENCODER_MODE_SDVO                                                                         4
+#define ATOM_ENCODER_MODE_TV                                                                                   13
+#define ATOM_ENCODER_MODE_CV                                                                                   14
+#define ATOM_ENCODER_MODE_CRT                                                                                  15
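+
+/* Illustrative sketch, not part of the AtomBIOS interface: a hypothetical
+ * caller filling DIG_ENCODER_CONTROL_PARAMETERS to turn on a DP encoder on
+ * PHY link A through UNIPHY, using the ucConfig/ucEncoderMode values defined
+ * above.  The function name and the 4-lane choice are only examples.
+ */
+static inline void example_fill_dig_encoder_dp(DIG_ENCODER_CONTROL_PARAMETERS *args,
+                                              USHORT pixel_clock_10khz)
+{
+       args->usPixelClock = pixel_clock_10khz; /* in 10KHz */
+       args->ucConfig = ATOM_ENCODER_CONFIG_LINKA |
+                        ATOM_ENCODER_CONFIG_UNIPHY |
+                        ATOM_ENCODER_CONFIG_DPLINKRATE_1_62GHZ;
+       args->ucAction = 1;                     /* =1: turn on encoder */
+       args->ucEncoderMode = ATOM_ENCODER_MODE_DP;
+       args->ucLaneNum = 4;                    /* how many lanes to enable */
+}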
+
+typedef struct _ATOM_DIG_ENCODER_CONFIG_V2 {
+#if ATOM_BIG_ENDIAN
+       UCHAR ucReserved1:2;
+       UCHAR ucTransmitterSel:2;       /*  =0: UniphyAB, =1: UniphyCD  =2: UniphyEF */
+       UCHAR ucLinkSel:1;      /*  =0: linkA/C/E =1: linkB/D/F */
+       UCHAR ucReserved:1;
+       UCHAR ucDPLinkRate:1;   /*  =0: 1.62Ghz, =1: 2.7Ghz */
+#else
+       UCHAR ucDPLinkRate:1;   /*  =0: 1.62Ghz, =1: 2.7Ghz */
+       UCHAR ucReserved:1;
+       UCHAR ucLinkSel:1;      /*  =0: linkA/C/E =1: linkB/D/F */
+       UCHAR ucTransmitterSel:2;       /*  =0: UniphyAB, =1: UniphyCD  =2: UniphyEF */
+       UCHAR ucReserved1:2;
+#endif
+} ATOM_DIG_ENCODER_CONFIG_V2;
+
+typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V2 {
+       USHORT usPixelClock;    /*  in 10KHz; for bios convenience */
+       ATOM_DIG_ENCODER_CONFIG_V2 acConfig;
+       UCHAR ucAction;
+       UCHAR ucEncoderMode;
+       /*  =0: DP   encoder */
+       /*  =1: LVDS encoder */
+       /*  =2: DVI  encoder */
+       /*  =3: HDMI encoder */
+       /*  =4: SDVO encoder */
+       UCHAR ucLaneNum;        /*  how many lanes to enable */
+       UCHAR ucReserved[2];
+} DIG_ENCODER_CONTROL_PARAMETERS_V2;
+
+/* ucConfig */
+#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_MASK                         0x01
+#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_1_62GHZ                0x00
+#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_2_70GHZ                0x01
+#define ATOM_ENCODER_CONFIG_V2_LINK_SEL_MASK                             0x04
+#define ATOM_ENCODER_CONFIG_V2_LINKA                                                             0x00
+#define ATOM_ENCODER_CONFIG_V2_LINKB                                                             0x04
+#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER_SEL_MASK      0x18
+#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER1                                0x00
+#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER2                                0x08
+#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER3                                0x10
+
+/****************************************************************************/
+/*  Structures used by UNIPHYTransmitterControlTable */
+/*                     LVTMATransmitterControlTable */
+/*                     DVOOutputControlTable */
+/****************************************************************************/
+typedef struct _ATOM_DP_VS_MODE {
+       UCHAR ucLaneSel;
+       UCHAR ucLaneSet;
+} ATOM_DP_VS_MODE;
+
+typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS {
+       union {
+               USHORT usPixelClock;    /*  in 10KHz; for bios convenience */
+               USHORT usInitInfo;      /*  when init uniphy,lower 8bit is used for connector type defined in objectid.h */
+               ATOM_DP_VS_MODE asMode; /*  DP Voltage swing mode */
+       };
+       UCHAR ucConfig;
+       /*  [0]=0: 4 lane Link, */
+       /*     =1: 8 lane Link ( Dual Links TMDS ) */
+       /*  [1]=0: InCoherent mode */
+       /*     =1: Coherent Mode */
+       /*  [2] Link Select: */
+       /*  =0: PHY linkA   if bfLane<3 */
+       /*  =1: PHY linkB   if bfLanes<3 */
+       /*  =0: PHY linkA+B if bfLanes=3 */
+       /*  [5:4]PCIE lane Sel */
+       /*  =0: lane 0~3 or 0~7 */
+       /*  =1: lane 4~7 */
+       /*  =2: lane 8~11 or 8~15 */
+       /*  =3: lane 12~15 */
+       UCHAR ucAction;         /*  =0: turn off encoder */
+       /*  =1: turn on encoder */
+       UCHAR ucReserved[4];
+} DIG_TRANSMITTER_CONTROL_PARAMETERS;
+
+#define DIG_TRANSMITTER_CONTROL_PS_ALLOCATION          DIG_TRANSMITTER_CONTROL_PARAMETERS
+
+/* ucInitInfo */
+#define ATOM_TRAMITTER_INITINFO_CONNECTOR_MASK 0x00ff
+
+/* ucConfig */
+#define ATOM_TRANSMITTER_CONFIG_8LANE_LINK                     0x01
+#define ATOM_TRANSMITTER_CONFIG_COHERENT                               0x02
+#define ATOM_TRANSMITTER_CONFIG_LINK_SEL_MASK          0x04
+#define ATOM_TRANSMITTER_CONFIG_LINKA                                          0x00
+#define ATOM_TRANSMITTER_CONFIG_LINKB                                          0x04
+#define ATOM_TRANSMITTER_CONFIG_LINKA_B                                        0x00
+#define ATOM_TRANSMITTER_CONFIG_LINKB_A                                        0x04
+
+#define ATOM_TRANSMITTER_CONFIG_ENCODER_SEL_MASK       0x08    /*  only used when ATOM_TRANSMITTER_ACTION_ENABLE */
+#define ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER           0x00    /*  only used when ATOM_TRANSMITTER_ACTION_ENABLE */
+#define ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER           0x08    /*  only used when ATOM_TRANSMITTER_ACTION_ENABLE */
+
+#define ATOM_TRANSMITTER_CONFIG_CLKSRC_MASK                    0x30
+#define ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL                    0x00
+#define ATOM_TRANSMITTER_CONFIG_CLKSRC_PCIE                    0x20
+#define ATOM_TRANSMITTER_CONFIG_CLKSRC_XTALIN          0x30
+#define ATOM_TRANSMITTER_CONFIG_LANE_SEL_MASK          0xc0
+#define ATOM_TRANSMITTER_CONFIG_LANE_0_3                               0x00
+#define ATOM_TRANSMITTER_CONFIG_LANE_0_7                               0x00
+#define ATOM_TRANSMITTER_CONFIG_LANE_4_7                               0x40
+#define ATOM_TRANSMITTER_CONFIG_LANE_8_11                              0x80
+#define ATOM_TRANSMITTER_CONFIG_LANE_8_15                              0x80
+#define ATOM_TRANSMITTER_CONFIG_LANE_12_15                     0xc0
+
+/* ucAction */
+#define ATOM_TRANSMITTER_ACTION_DISABLE                                               0
+#define ATOM_TRANSMITTER_ACTION_ENABLE                                        1
+#define ATOM_TRANSMITTER_ACTION_LCD_BLOFF                                     2
+#define ATOM_TRANSMITTER_ACTION_LCD_BLON                                      3
+#define ATOM_TRANSMITTER_ACTION_BL_BRIGHTNESS_CONTROL  4
+#define ATOM_TRANSMITTER_ACTION_LCD_SELFTEST_START              5
+#define ATOM_TRANSMITTER_ACTION_LCD_SELFTEST_STOP                       6
+#define ATOM_TRANSMITTER_ACTION_INIT                                                  7
+#define ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT        8
+#define ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT                 9
+#define ATOM_TRANSMITTER_ACTION_SETUP                                                 10
+#define ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH           11
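+
+/* Illustrative sketch, not part of the AtomBIOS interface: a hypothetical
+ * caller enabling a coherent, single-link transmitter on PHY link A, fed by
+ * the DIG1 encoder and clocked from the PPLL, using the ucConfig/ucAction
+ * values defined above.  The function name is made up for this example.
+ */
+static inline void example_fill_dig_transmitter_enable(DIG_TRANSMITTER_CONTROL_PARAMETERS *args,
+                                                       USHORT pixel_clock_10khz)
+{
+       args->usPixelClock = pixel_clock_10khz; /* in 10KHz */
+       args->ucConfig = ATOM_TRANSMITTER_CONFIG_COHERENT |
+                        ATOM_TRANSMITTER_CONFIG_LINKA |
+                        ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER |
+                        ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL;
+       args->ucAction = ATOM_TRANSMITTER_ACTION_ENABLE;
+}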
+
+/*  Following are used for DigTransmitterControlTable ver1.2 */
+typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V2 {
+#if ATOM_BIG_ENDIAN
+       UCHAR ucTransmitterSel:2;       /* bit7:6: =0 Dig Transmitter 1 ( Uniphy AB ) */
+       /*         =1 Dig Transmitter 2 ( Uniphy CD ) */
+       /*         =2 Dig Transmitter 3 ( Uniphy EF ) */
+       UCHAR ucReserved:1;
+       UCHAR fDPConnector:1;   /* bit4=0: DP connector  =1: None DP connector */
+       UCHAR ucEncoderSel:1;   /* bit3=0: Data/Clk path source from DIGA( DIG inst0 ). =1: Data/clk path source from DIGB ( DIG inst1 ) */
+       UCHAR ucLinkSel:1;      /* bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E */
+       /*     =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F */
+
+       UCHAR fCoherentMode:1;  /* bit1=1: Coherent Mode ( for DVI/HDMI mode ) */
+       UCHAR fDualLinkConnector:1;     /* bit0=1: Dual Link DVI connector */
+#else
+       UCHAR fDualLinkConnector:1;     /* bit0=1: Dual Link DVI connector */
+       UCHAR fCoherentMode:1;  /* bit1=1: Coherent Mode ( for DVI/HDMI mode ) */
+       UCHAR ucLinkSel:1;      /* bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E */
+       /*     =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F */
+       UCHAR ucEncoderSel:1;   /* bit3=0: Data/Clk path source from DIGA( DIG inst0 ). =1: Data/clk path source from DIGB ( DIG inst1 ) */
+       UCHAR fDPConnector:1;   /* bit4=0: DP connector  =1: None DP connector */
+       UCHAR ucReserved:1;
+       UCHAR ucTransmitterSel:2;       /* bit7:6: =0 Dig Transmitter 1 ( Uniphy AB ) */
+       /*         =1 Dig Transmitter 2 ( Uniphy CD ) */
+       /*         =2 Dig Transmitter 3 ( Uniphy EF ) */
+#endif
+} ATOM_DIG_TRANSMITTER_CONFIG_V2;
+
+/* ucConfig */
+/* Bit0 */
+#define ATOM_TRANSMITTER_CONFIG_V2_DUAL_LINK_CONNECTOR                 0x01
+
+/* Bit1 */
+#define ATOM_TRANSMITTER_CONFIG_V2_COHERENT                                      0x02
+
+/* Bit2 */
+#define ATOM_TRANSMITTER_CONFIG_V2_LINK_SEL_MASK                       0x04
+#define ATOM_TRANSMITTER_CONFIG_V2_LINKA                                   0x00
+#define ATOM_TRANSMITTER_CONFIG_V2_LINKB                                           0x04
+
+/*  Bit3 */
+#define ATOM_TRANSMITTER_CONFIG_V2_ENCODER_SEL_MASK            0x08
+#define ATOM_TRANSMITTER_CONFIG_V2_DIG1_ENCODER                          0x00  /*  only used when ucAction == ATOM_TRANSMITTER_ACTION_ENABLE or ATOM_TRANSMITTER_ACTION_SETUP */
+#define ATOM_TRANSMITTER_CONFIG_V2_DIG2_ENCODER                          0x08  /*  only used when ucAction == ATOM_TRANSMITTER_ACTION_ENABLE or ATOM_TRANSMITTER_ACTION_SETUP */
+
+/*  Bit4 */
+#define ATOM_TRASMITTER_CONFIG_V2_DP_CONNECTOR                         0x10
+
+/*  Bit7:6 */
+#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER_SEL_MASK     0xC0
+#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER1                        0x00    /* AB */
+#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER2                        0x40    /* CD */
+#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER3                        0x80    /* EF */
+
+typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 {
+       union {
+               USHORT usPixelClock;    /*  in 10KHz; for bios convenience */
+               USHORT usInitInfo;      /*  when init uniphy,lower 8bit is used for connector type defined in objectid.h */
+               ATOM_DP_VS_MODE asMode; /*  DP Voltage swing mode */
+       };
+       ATOM_DIG_TRANSMITTER_CONFIG_V2 acConfig;
+       UCHAR ucAction;         /*  define as ATOM_TRANSMITER_ACTION_XXX */
+       UCHAR ucReserved[4];
+} DIG_TRANSMITTER_CONTROL_PARAMETERS_V2;
+
+/****************************************************************************/
+/*  Structures used by DAC1OutputControlTable */
+/*                     DAC2OutputControlTable */
+/*                     LVTMAOutputControlTable  (Before DCE30) */
+/*                     TMDSAOutputControlTable  (Before DCE30) */
+/****************************************************************************/
+typedef struct _DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS {
+       UCHAR ucAction;         /*  Possible input: ATOM_ENABLE || ATOM_DISABLE */
+       /*  When the display is LCD, in addition to above: */
+       /*  ATOM_LCD_BLOFF|| ATOM_LCD_BLON ||ATOM_LCD_BL_BRIGHTNESS_CONTROL||ATOM_LCD_SELFTEST_START|| */
+       /*  ATOM_LCD_SELFTEST_STOP */
+
+       UCHAR aucPadding[3];    /*  padding to DWORD aligned */
+} DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS;
+
+#define DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+
+#define CRT1_OUTPUT_CONTROL_PARAMETERS     DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define CRT1_OUTPUT_CONTROL_PS_ALLOCATION  DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define CRT2_OUTPUT_CONTROL_PARAMETERS     DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define CRT2_OUTPUT_CONTROL_PS_ALLOCATION  DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define CV1_OUTPUT_CONTROL_PARAMETERS      DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define CV1_OUTPUT_CONTROL_PS_ALLOCATION   DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define TV1_OUTPUT_CONTROL_PARAMETERS      DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define TV1_OUTPUT_CONTROL_PS_ALLOCATION   DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define DFP1_OUTPUT_CONTROL_PARAMETERS     DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define DFP1_OUTPUT_CONTROL_PS_ALLOCATION  DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define DFP2_OUTPUT_CONTROL_PARAMETERS     DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define DFP2_OUTPUT_CONTROL_PS_ALLOCATION  DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define LCD1_OUTPUT_CONTROL_PARAMETERS     DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define LCD1_OUTPUT_CONTROL_PS_ALLOCATION  DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define DVO_OUTPUT_CONTROL_PARAMETERS      DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define DVO_OUTPUT_CONTROL_PS_ALLOCATION   DIG_TRANSMITTER_CONTROL_PS_ALLOCATION
+#define DVO_OUTPUT_CONTROL_PARAMETERS_V3        DIG_TRANSMITTER_CONTROL_PARAMETERS
+
+/****************************************************************************/
+/*  Structures used by BlankCRTCTable */
+/****************************************************************************/
+typedef struct _BLANK_CRTC_PARAMETERS {
+       UCHAR ucCRTC;           /*  ATOM_CRTC1 or ATOM_CRTC2 */
+       UCHAR ucBlanking;       /*  ATOM_BLANKING or ATOM_BLANKINGOFF */
+       USHORT usBlackColorRCr;
+       USHORT usBlackColorGY;
+       USHORT usBlackColorBCb;
+} BLANK_CRTC_PARAMETERS;
+#define BLANK_CRTC_PS_ALLOCATION    BLANK_CRTC_PARAMETERS
+
+/****************************************************************************/
+/*  Structures used by EnableCRTCTable */
+/*                     EnableCRTCMemReqTable */
+/*                     UpdateCRTC_DoubleBufferRegistersTable */
+/****************************************************************************/
+typedef struct _ENABLE_CRTC_PARAMETERS {
+       UCHAR ucCRTC;           /*  ATOM_CRTC1 or ATOM_CRTC2 */
+       UCHAR ucEnable;         /*  ATOM_ENABLE or ATOM_DISABLE */
+       UCHAR ucPadding[2];
+} ENABLE_CRTC_PARAMETERS;
+#define ENABLE_CRTC_PS_ALLOCATION   ENABLE_CRTC_PARAMETERS
+
+/****************************************************************************/
+/*  Structures used by SetCRTC_OverScanTable */
+/****************************************************************************/
+typedef struct _SET_CRTC_OVERSCAN_PARAMETERS {
+       USHORT usOverscanRight; /*  right */
+       USHORT usOverscanLeft;  /*  left */
+       USHORT usOverscanBottom;        /*  bottom */
+       USHORT usOverscanTop;   /*  top */
+       UCHAR ucCRTC;           /*  ATOM_CRTC1 or ATOM_CRTC2 */
+       UCHAR ucPadding[3];
+} SET_CRTC_OVERSCAN_PARAMETERS;
+#define SET_CRTC_OVERSCAN_PS_ALLOCATION  SET_CRTC_OVERSCAN_PARAMETERS
+
+/****************************************************************************/
+/*  Structures used by SetCRTC_ReplicationTable */
+/****************************************************************************/
+typedef struct _SET_CRTC_REPLICATION_PARAMETERS {
+       UCHAR ucH_Replication;  /*  horizontal replication */
+       UCHAR ucV_Replication;  /*  vertical replication */
+       UCHAR usCRTC;           /*  ATOM_CRTC1 or ATOM_CRTC2 */
+       UCHAR ucPadding;
+} SET_CRTC_REPLICATION_PARAMETERS;
+#define SET_CRTC_REPLICATION_PS_ALLOCATION  SET_CRTC_REPLICATION_PARAMETERS
+
+/****************************************************************************/
+/*  Structures used by SelectCRTC_SourceTable */
+/****************************************************************************/
+typedef struct _SELECT_CRTC_SOURCE_PARAMETERS {
+       UCHAR ucCRTC;           /*  ATOM_CRTC1 or ATOM_CRTC2 */
+       UCHAR ucDevice;         /*  ATOM_DEVICE_CRT1|ATOM_DEVICE_CRT2|.... */
+       UCHAR ucPadding[2];
+} SELECT_CRTC_SOURCE_PARAMETERS;
+#define SELECT_CRTC_SOURCE_PS_ALLOCATION  SELECT_CRTC_SOURCE_PARAMETERS
+
+typedef struct _SELECT_CRTC_SOURCE_PARAMETERS_V2 {
+       UCHAR ucCRTC;           /*  ATOM_CRTC1 or ATOM_CRTC2 */
+       UCHAR ucEncoderID;      /*  DAC1/DAC2/TVOUT/DIG1/DIG2/DVO */
+       UCHAR ucEncodeMode;     /*  Encoding mode, only valid when using DIG1/DIG2/DVO */
+       UCHAR ucPadding;
+} SELECT_CRTC_SOURCE_PARAMETERS_V2;
+
+/* ucEncoderID */
+/* #define ASIC_INT_DAC1_ENCODER_ID                                              0x00 */
+/* #define ASIC_INT_TV_ENCODER_ID                                                                        0x02 */
+/* #define ASIC_INT_DIG1_ENCODER_ID                                                              0x03 */
+/* #define ASIC_INT_DAC2_ENCODER_ID                                                              0x04 */
+/* #define ASIC_EXT_TV_ENCODER_ID                                                                        0x06 */
+/* #define ASIC_INT_DVO_ENCODER_ID                                                                       0x07 */
+/* #define ASIC_INT_DIG2_ENCODER_ID                                                              0x09 */
+/* #define ASIC_EXT_DIG_ENCODER_ID                                                                       0x05 */
+
+/* ucEncodeMode */
+/* #define ATOM_ENCODER_MODE_DP                                                                          0 */
+/* #define ATOM_ENCODER_MODE_LVDS                                                                        1 */
+/* #define ATOM_ENCODER_MODE_DVI                                                                         2 */
+/* #define ATOM_ENCODER_MODE_HDMI                                                                        3 */
+/* #define ATOM_ENCODER_MODE_SDVO                                                                        4 */
+/* #define ATOM_ENCODER_MODE_TV                                                                          13 */
+/* #define ATOM_ENCODER_MODE_CV                                                                          14 */
+/* #define ATOM_ENCODER_MODE_CRT                                                                         15 */
+
+/****************************************************************************/
+/*  Structures used by SetPixelClockTable */
+/*                     GetPixelClockTable */
+/****************************************************************************/
+/* Major revision=1, Minor revision=1 */
+typedef struct _PIXEL_CLOCK_PARAMETERS {
+       USHORT usPixelClock;    /*  in 10kHz unit; for bios convenience = (RefClk*FB_Div)/(Ref_Div*Post_Div) */
+       /*  0 means disable PPLL */
+       USHORT usRefDiv;        /*  Reference divider */
+       USHORT usFbDiv;         /*  feedback divider */
+       UCHAR ucPostDiv;        /*  post divider */
+       UCHAR ucFracFbDiv;      /*  fractional feedback divider */
+       UCHAR ucPpll;           /*  ATOM_PPLL1 or ATOM_PPL2 */
+       UCHAR ucRefDivSrc;      /*  ATOM_PJITTER or ATO_NONPJITTER */
+       UCHAR ucCRTC;           /*  Which CRTC uses this Ppll */
+       UCHAR ucPadding;
+} PIXEL_CLOCK_PARAMETERS;
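+
+/* Illustrative sketch, not part of the AtomBIOS interface: per the comment
+ * on usPixelClock above, the programmed clock relates to the dividers as
+ * (RefClk*FB_Div)/(Ref_Div*Post_Div).  The reference clock itself comes from
+ * the firmware info table; the helper below ignores the fractional feedback
+ * divider and is only an example.
+ */
+static inline ULONG example_pixel_clock_from_dividers(ULONG ref_clk_10khz,
+                                                      const PIXEL_CLOCK_PARAMETERS *p)
+{
+       return (ref_clk_10khz * p->usFbDiv) / (p->usRefDiv * p->ucPostDiv);
+}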
+
+/* Major revision=1, Minor revision=2, add ucMiscInfo */
+/* ucMiscInfo: */
+#define MISC_FORCE_REPROG_PIXEL_CLOCK 0x1
+#define MISC_DEVICE_INDEX_MASK        0xF0
+#define MISC_DEVICE_INDEX_SHIFT       4
+
+typedef struct _PIXEL_CLOCK_PARAMETERS_V2 {
+       USHORT usPixelClock;    /*  in 10kHz unit; for bios convenience = (RefClk*FB_Div)/(Ref_Div*Post_Div) */
+       /*  0 means disable PPLL */
+       USHORT usRefDiv;        /*  Reference divider */
+       USHORT usFbDiv;         /*  feedback divider */
+       UCHAR ucPostDiv;        /*  post divider */
+       UCHAR ucFracFbDiv;      /*  fractional feedback divider */
+       UCHAR ucPpll;           /*  ATOM_PPLL1 or ATOM_PPL2 */
+       UCHAR ucRefDivSrc;      /*  ATOM_PJITTER or ATO_NONPJITTER */
+       UCHAR ucCRTC;           /*  Which CRTC uses this Ppll */
+       UCHAR ucMiscInfo;       /*  Different bits for different purpose, bit [7:4] as device index, bit[0]=Force prog */
+} PIXEL_CLOCK_PARAMETERS_V2;
+
+/* Major revision=1, Minor revision=3, structure/definition change */
+/* ucEncoderMode: */
+/* ATOM_ENCODER_MODE_DP */
+/* ATOM_ENCODER_MODE_LVDS */
+/* ATOM_ENCODER_MODE_DVI */
+/* ATOM_ENCODER_MODE_HDMI */
+/* ATOM_ENCODER_MODE_SDVO */
+/* ATOM_ENCODER_MODE_TV                                                                          13 */
+/* ATOM_ENCODER_MODE_CV                                                                          14 */
+/* ATOM_ENCODER_MODE_CRT                                                                         15 */
+
+/* ucDVOConfig */
+/* #define DVO_ENCODER_CONFIG_RATE_SEL                                                   0x01 */
+/* #define DVO_ENCODER_CONFIG_DDR_SPEED                                          0x00 */
+/* #define DVO_ENCODER_CONFIG_SDR_SPEED                                          0x01 */
+/* #define DVO_ENCODER_CONFIG_OUTPUT_SEL                                         0x0c */
+/* #define DVO_ENCODER_CONFIG_LOW12BIT                                                   0x00 */
+/* #define DVO_ENCODER_CONFIG_UPPER12BIT                                         0x04 */
+/* #define DVO_ENCODER_CONFIG_24BIT                                                              0x08 */
+
+/* ucMiscInfo: also changed, see below */
+#define PIXEL_CLOCK_MISC_FORCE_PROG_PPLL                                               0x01
+#define PIXEL_CLOCK_MISC_VGA_MODE                                                                              0x02
+#define PIXEL_CLOCK_MISC_CRTC_SEL_MASK                                                 0x04
+#define PIXEL_CLOCK_MISC_CRTC_SEL_CRTC1                                                        0x00
+#define PIXEL_CLOCK_MISC_CRTC_SEL_CRTC2                                                        0x04
+#define PIXEL_CLOCK_MISC_USE_ENGINE_FOR_DISPCLK                        0x08
+
+typedef struct _PIXEL_CLOCK_PARAMETERS_V3 {
+       USHORT usPixelClock;    /*  in 10kHz unit; for bios convenience = (RefClk*FB_Div)/(Ref_Div*Post_Div) */
+       /*  0 means disable PPLL. For VGA PPLL,make sure this value is not 0. */
+       USHORT usRefDiv;        /*  Reference divider */
+       USHORT usFbDiv;         /*  feedback divider */
+       UCHAR ucPostDiv;        /*  post divider */
+       UCHAR ucFracFbDiv;      /*  fractional feedback divider */
+       UCHAR ucPpll;           /*  ATOM_PPLL1 or ATOM_PPL2 */
+       UCHAR ucTransmitterId;  /*  graphic encoder id defined in objectId.h */
+       union {
+               UCHAR ucEncoderMode;    /*  encoder type defined as ATOM_ENCODER_MODE_DP/DVI/HDMI/ */
+               UCHAR ucDVOConfig;      /*  when use DVO, need to know SDR/DDR, 12bit or 24bit */
+       };
+       UCHAR ucMiscInfo;       /*  bit[0]=Force program, bit[1]= set pclk for VGA, b[2]= CRTC sel */
+       /*  bit[3]=0:use PPLL for dispclk source, =1: use engine clock for dispclock source */
+} PIXEL_CLOCK_PARAMETERS_V3;
+
+#define PIXEL_CLOCK_PARAMETERS_LAST                    PIXEL_CLOCK_PARAMETERS_V2
+#define GET_PIXEL_CLOCK_PS_ALLOCATION          PIXEL_CLOCK_PARAMETERS_LAST
+
+/****************************************************************************/
+/*  Structures used by AdjustDisplayPllTable */
+/****************************************************************************/
+typedef struct _ADJUST_DISPLAY_PLL_PARAMETERS {
+       USHORT usPixelClock;
+       UCHAR ucTransmitterID;
+       UCHAR ucEncodeMode;
+       union {
+               UCHAR ucDVOConfig;      /* if DVO, need to pass link rate and output select (low 12bit or 24bit) */
+               UCHAR ucConfig; /* if non-DVO, not defined yet */
+       };
+       UCHAR ucReserved[3];
+} ADJUST_DISPLAY_PLL_PARAMETERS;
+
+#define ADJUST_DISPLAY_CONFIG_SS_ENABLE       0x10
+
+#define ADJUST_DISPLAY_PLL_PS_ALLOCATION                       ADJUST_DISPLAY_PLL_PARAMETERS
+
+/****************************************************************************/
+/*  Structures used by EnableYUVTable */
+/****************************************************************************/
+typedef struct _ENABLE_YUV_PARAMETERS {
+       UCHAR ucEnable;         /*  ATOM_ENABLE:Enable YUV or ATOM_DISABLE:Disable YUV (RGB) */
+       UCHAR ucCRTC;           /*  Which CRTC needs this YUV or RGB format */
+       UCHAR ucPadding[2];
+} ENABLE_YUV_PARAMETERS;
+#define ENABLE_YUV_PS_ALLOCATION ENABLE_YUV_PARAMETERS
+
+/****************************************************************************/
+/*  Structures used by GetMemoryClockTable */
+/****************************************************************************/
+typedef struct _GET_MEMORY_CLOCK_PARAMETERS {
+       ULONG ulReturnMemoryClock;      /*  current memory speed in 10KHz unit */
+} GET_MEMORY_CLOCK_PARAMETERS;
+#define GET_MEMORY_CLOCK_PS_ALLOCATION  GET_MEMORY_CLOCK_PARAMETERS
+
+/****************************************************************************/
+/*  Structures used by GetEngineClockTable */
+/****************************************************************************/
+typedef struct _GET_ENGINE_CLOCK_PARAMETERS {
+       ULONG ulReturnEngineClock;      /*  current engine speed in 10KHz unit */
+} GET_ENGINE_CLOCK_PARAMETERS;
+#define GET_ENGINE_CLOCK_PS_ALLOCATION  GET_ENGINE_CLOCK_PARAMETERS
+
+/****************************************************************************/
+/*  Following Structures and constant may be obsolete */
+/****************************************************************************/
+/* Maximum 8 bytes; the data read in will be placed in the parameter space. */
+/* The read operation is successful when the parameter space is non-zero, otherwise the read operation failed */
+typedef struct _READ_EDID_FROM_HW_I2C_DATA_PARAMETERS {
+       USHORT usPrescale;      /* Ratio between Engine clock and I2C clock */
+       USHORT usVRAMAddress;   /* Address in Frame Buffer where to place the raw EDID */
+       USHORT usStatus;        /* When used as output: lower byte EDID checksum, high byte hardware status */
+       /* When used as input:  lower byte as 'bytes to read'; currently limited to 128 bytes or 1 byte */
+       UCHAR ucSlaveAddr;      /* Read from which slave */
+       UCHAR ucLineNumber;     /* Read from which HW assisted line */
+} READ_EDID_FROM_HW_I2C_DATA_PARAMETERS;
+#define READ_EDID_FROM_HW_I2C_DATA_PS_ALLOCATION  READ_EDID_FROM_HW_I2C_DATA_PARAMETERS
+
+#define  ATOM_WRITE_I2C_FORMAT_PSOFFSET_PSDATABYTE                  0
+#define  ATOM_WRITE_I2C_FORMAT_PSOFFSET_PSTWODATABYTES              1
+#define  ATOM_WRITE_I2C_FORMAT_PSCOUNTER_PSOFFSET_IDDATABLOCK       2
+#define  ATOM_WRITE_I2C_FORMAT_PSCOUNTER_IDOFFSET_PLUS_IDDATABLOCK  3
+#define  ATOM_WRITE_I2C_FORMAT_IDCOUNTER_IDOFFSET_IDDATABLOCK       4
+
+typedef struct _WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS {
+       USHORT usPrescale;      /* Ratio between Engine clock and I2C clock */
+       USHORT usByteOffset;    /* Write to which byte */
+       /* Upper portion of usByteOffset is Format of data */
+       /* 1bytePS+offsetPS */
+       /* 2bytesPS+offsetPS */
+       /* blockID+offsetPS */
+       /* blockID+offsetID */
+       /* blockID+counterID+offsetID */
+       UCHAR ucData;           /* PS data1 */
+       UCHAR ucStatus;         /* Status byte 1=success, 2=failure, Also is used as PS data2 */
+       UCHAR ucSlaveAddr;      /* Write to which slave */
+       UCHAR ucLineNumber;     /* Write from which HW assisted line */
+} WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS;
+
+#define WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION  WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
+
+typedef struct _SET_UP_HW_I2C_DATA_PARAMETERS {
+       USHORT usPrescale;      /* Ratio between Engine clock and I2C clock */
+       UCHAR ucSlaveAddr;      /* Write to which slave */
+       UCHAR ucLineNumber;     /* Write from which HW assisted line */
+} SET_UP_HW_I2C_DATA_PARAMETERS;
+
+/**************************************************************************/
+#define SPEED_FAN_CONTROL_PS_ALLOCATION   WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
+
+/****************************************************************************/
+/*  Structures used by PowerConnectorDetectionTable */
+/****************************************************************************/
+typedef struct _POWER_CONNECTOR_DETECTION_PARAMETERS {
+       UCHAR ucPowerConnectorStatus;   /* Used for return value 0: detected, 1:not detected */
+       UCHAR ucPwrBehaviorId;
+       USHORT usPwrBudget;     /* how much power currently boot to in unit of watt */
+} POWER_CONNECTOR_DETECTION_PARAMETERS;
+
+typedef struct POWER_CONNECTOR_DETECTION_PS_ALLOCATION {
+       UCHAR ucPowerConnectorStatus;   /* Used for return value 0: detected, 1:not detected */
+       UCHAR ucReserved;
+       USHORT usPwrBudget;     /* how much power currently boot to in unit of watt */
+       WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;
+} POWER_CONNECTOR_DETECTION_PS_ALLOCATION;
+
+/****************************LVDS SS Command Table Definitions**********************/
+
+/****************************************************************************/
+/*  Structures used by EnableSpreadSpectrumOnPPLLTable */
+/****************************************************************************/
+typedef struct _ENABLE_LVDS_SS_PARAMETERS {
+       USHORT usSpreadSpectrumPercentage;
+       UCHAR ucSpreadSpectrumType;     /* Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD */
+       UCHAR ucSpreadSpectrumStepSize_Delay;   /* bits3:2 SS_STEP_SIZE; bit 6:4 SS_DELAY */
+       UCHAR ucEnable;         /* ATOM_ENABLE or ATOM_DISABLE */
+       UCHAR ucPadding[3];
+} ENABLE_LVDS_SS_PARAMETERS;
+
+/* ucTableFormatRevision=1,ucTableContentRevision=2 */
+typedef struct _ENABLE_LVDS_SS_PARAMETERS_V2 {
+       USHORT usSpreadSpectrumPercentage;
+       UCHAR ucSpreadSpectrumType;     /* Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD */
+       UCHAR ucSpreadSpectrumStep;     /*  */
+       UCHAR ucEnable;         /* ATOM_ENABLE or ATOM_DISABLE */
+       UCHAR ucSpreadSpectrumDelay;
+       UCHAR ucSpreadSpectrumRange;
+       UCHAR ucPadding;
+} ENABLE_LVDS_SS_PARAMETERS_V2;
+
+/* This new structure is based on ENABLE_LVDS_SS_PARAMETERS but expands to SS on PPLL, so other devices can use SS. */
+typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL {
+       USHORT usSpreadSpectrumPercentage;
+       UCHAR ucSpreadSpectrumType;     /*  Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD */
+       UCHAR ucSpreadSpectrumStep;     /*  */
+       UCHAR ucEnable;         /*  ATOM_ENABLE or ATOM_DISABLE */
+       UCHAR ucSpreadSpectrumDelay;
+       UCHAR ucSpreadSpectrumRange;
+       UCHAR ucPpll;           /*  ATOM_PPLL1/ATOM_PPLL2 */
+} ENABLE_SPREAD_SPECTRUM_ON_PPLL;
+
+#define ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION  ENABLE_SPREAD_SPECTRUM_ON_PPLL
+
+/**************************************************************************/
+
+typedef struct _SET_PIXEL_CLOCK_PS_ALLOCATION {
+       PIXEL_CLOCK_PARAMETERS sPCLKInput;
+       ENABLE_SPREAD_SPECTRUM_ON_PPLL sReserved;       /* Caller doesn't need to init this portion */
+} SET_PIXEL_CLOCK_PS_ALLOCATION;
+
+#define ENABLE_VGA_RENDER_PS_ALLOCATION   SET_PIXEL_CLOCK_PS_ALLOCATION
+
+/****************************************************************************/
+/*  Structures used by ### */
+/****************************************************************************/
+typedef struct _MEMORY_TRAINING_PARAMETERS {
+       ULONG ulTargetMemoryClock;      /* In 10Khz unit */
+} MEMORY_TRAINING_PARAMETERS;
+#define MEMORY_TRAINING_PS_ALLOCATION MEMORY_TRAINING_PARAMETERS
+
+/****************************LVDS and other encoder command table definitions **********************/
+
+/****************************************************************************/
+/*  Structures used by LVDSEncoderControlTable   (Before DCE30) */
+/*                     LVTMAEncoderControlTable  (Before DCE30) */
+/*                     TMDSAEncoderControlTable  (Before DCE30) */
+/****************************************************************************/
+typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS {
+       USHORT usPixelClock;    /*  in 10KHz; for bios convenience */
+       UCHAR ucMisc;           /*  bit0=0: Enable single link */
+       /*      =1: Enable dual link */
+       /*  Bit1=0: 666RGB */
+       /*      =1: 888RGB */
+       UCHAR ucAction;         /*  0: turn off encoder */
+       /*  1: setup and turn on encoder */
+} LVDS_ENCODER_CONTROL_PARAMETERS;
+
+#define LVDS_ENCODER_CONTROL_PS_ALLOCATION  LVDS_ENCODER_CONTROL_PARAMETERS
+
+#define TMDS1_ENCODER_CONTROL_PARAMETERS    LVDS_ENCODER_CONTROL_PARAMETERS
+#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION TMDS1_ENCODER_CONTROL_PARAMETERS
+
+#define TMDS2_ENCODER_CONTROL_PARAMETERS    TMDS1_ENCODER_CONTROL_PARAMETERS
+#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION TMDS2_ENCODER_CONTROL_PARAMETERS
+
+/* ucTableFormatRevision=1,ucTableContentRevision=2 */
+typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS_V2 {
+       USHORT usPixelClock;    /*  in 10KHz; for bios convenience */
+       UCHAR ucMisc;           /*  see PANEL_ENCODER_MISC_xx definitions below */
+       UCHAR ucAction;         /*  0: turn off encoder */
+       /*  1: setup and turn on encoder */
+       UCHAR ucTruncate;       /*  bit0=0: Disable truncate */
+       /*      =1: Enable truncate */
+       /*  bit4=0: 666RGB */
+       /*      =1: 888RGB */
+       UCHAR ucSpatial;        /*  bit0=0: Disable spatial dithering */
+       /*      =1: Enable spatial dithering */
+       /*  bit4=0: 666RGB */
+       /*      =1: 888RGB */
+       UCHAR ucTemporal;       /*  bit0=0: Disable temporal dithering */
+       /*      =1: Enable temporal dithering */
+       /*  bit4=0: 666RGB */
+       /*      =1: 888RGB */
+       /*  bit5=0: Gray level 2 */
+       /*      =1: Gray level 4 */
+       UCHAR ucFRC;            /*  bit4=0: 25FRC_SEL pattern E */
+       /*      =1: 25FRC_SEL pattern F */
+       /*  bit6:5=0: 50FRC_SEL pattern A */
+       /*        =1: 50FRC_SEL pattern B */
+       /*        =2: 50FRC_SEL pattern C */
+       /*        =3: 50FRC_SEL pattern D */
+       /*  bit7=0: 75FRC_SEL pattern E */
+       /*      =1: 75FRC_SEL pattern F */
+} LVDS_ENCODER_CONTROL_PARAMETERS_V2;
+
+#define LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2  LVDS_ENCODER_CONTROL_PARAMETERS_V2
+
+#define TMDS1_ENCODER_CONTROL_PARAMETERS_V2    LVDS_ENCODER_CONTROL_PARAMETERS_V2
+#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION_V2 TMDS1_ENCODER_CONTROL_PARAMETERS_V2
+
+#define TMDS2_ENCODER_CONTROL_PARAMETERS_V2    TMDS1_ENCODER_CONTROL_PARAMETERS_V2
+#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION_V2 TMDS2_ENCODER_CONTROL_PARAMETERS_V2
+
+#define LVDS_ENCODER_CONTROL_PARAMETERS_V3     LVDS_ENCODER_CONTROL_PARAMETERS_V2
+#define LVDS_ENCODER_CONTROL_PS_ALLOCATION_V3  LVDS_ENCODER_CONTROL_PARAMETERS_V3
+
+#define TMDS1_ENCODER_CONTROL_PARAMETERS_V3    LVDS_ENCODER_CONTROL_PARAMETERS_V3
+#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION_V3 TMDS1_ENCODER_CONTROL_PARAMETERS_V3
+
+#define TMDS2_ENCODER_CONTROL_PARAMETERS_V3    LVDS_ENCODER_CONTROL_PARAMETERS_V3
+#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION_V3 TMDS2_ENCODER_CONTROL_PARAMETERS_V3
+
+/****************************************************************************/
+/*  Structures used by ### */
+/****************************************************************************/
+typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS {
+       UCHAR ucEnable;         /*  Enable or Disable External TMDS encoder */
+       UCHAR ucMisc;           /*  Bit0=0:Enable Single link;=1:Enable Dual link;Bit1 {=0:666RGB, =1:888RGB} */
+       UCHAR ucPadding[2];
+} ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS;
+
+typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION {
+       ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS sXTmdsEncoder;
+       WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;     /* Caller doesn't need to init this portion */
+} ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION;
+
+#define ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS_V2  LVDS_ENCODER_CONTROL_PARAMETERS_V2
+
+typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION_V2 {
+       ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS_V2 sXTmdsEncoder;
+       WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;     /* Caller doesn't need to init this portion */
+} ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION_V2;
+
+typedef struct _EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION {
+       DIG_ENCODER_CONTROL_PARAMETERS sDigEncoder;
+       WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;
+} EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION;
+
+/****************************************************************************/
+/*  Structures used by DVOEncoderControlTable */
+/****************************************************************************/
+/* ucTableFormatRevision=1,ucTableContentRevision=3 */
+
+/* ucDVOConfig: */
+#define DVO_ENCODER_CONFIG_RATE_SEL                                                    0x01
+#define DVO_ENCODER_CONFIG_DDR_SPEED                                           0x00
+#define DVO_ENCODER_CONFIG_SDR_SPEED                                           0x01
+#define DVO_ENCODER_CONFIG_OUTPUT_SEL                                          0x0c
+#define DVO_ENCODER_CONFIG_LOW12BIT                                                    0x00
+#define DVO_ENCODER_CONFIG_UPPER12BIT                                          0x04
+#define DVO_ENCODER_CONFIG_24BIT                                                               0x08
+
+typedef struct _DVO_ENCODER_CONTROL_PARAMETERS_V3 {
+       USHORT usPixelClock;
+       UCHAR ucDVOConfig;
+       UCHAR ucAction;         /* ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT */
+       UCHAR ucReseved[4];
+} DVO_ENCODER_CONTROL_PARAMETERS_V3;
+#define DVO_ENCODER_CONTROL_PS_ALLOCATION_V3   DVO_ENCODER_CONTROL_PARAMETERS_V3
+
+/* ucTableFormatRevision=1 */
+/* ucTableContentRevision=3 structure is not changed, but usMisc adds bit 1 as another input for */
+/*  bit1=0: non-coherent mode */
+/*      =1: coherent mode */
+
+/* ========================================================================================== */
+/* Only change is here next time when changing encoder parameter definitions again! */
+#define LVDS_ENCODER_CONTROL_PARAMETERS_LAST     LVDS_ENCODER_CONTROL_PARAMETERS_V3
+#define LVDS_ENCODER_CONTROL_PS_ALLOCATION_LAST  LVDS_ENCODER_CONTROL_PARAMETERS_LAST
+
+#define TMDS1_ENCODER_CONTROL_PARAMETERS_LAST    LVDS_ENCODER_CONTROL_PARAMETERS_V3
+#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION_LAST TMDS1_ENCODER_CONTROL_PARAMETERS_LAST
+
+#define TMDS2_ENCODER_CONTROL_PARAMETERS_LAST    LVDS_ENCODER_CONTROL_PARAMETERS_V3
+#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION_LAST TMDS2_ENCODER_CONTROL_PARAMETERS_LAST
+
+#define DVO_ENCODER_CONTROL_PARAMETERS_LAST      DVO_ENCODER_CONTROL_PARAMETERS
+#define DVO_ENCODER_CONTROL_PS_ALLOCATION_LAST   DVO_ENCODER_CONTROL_PS_ALLOCATION
+
+/* ========================================================================================== */
+#define PANEL_ENCODER_MISC_DUAL                0x01
+#define PANEL_ENCODER_MISC_COHERENT            0x02
+#define        PANEL_ENCODER_MISC_TMDS_LINKB                                    0x04
+#define        PANEL_ENCODER_MISC_HDMI_TYPE                                     0x08
+
+#define PANEL_ENCODER_ACTION_DISABLE           ATOM_DISABLE
+#define PANEL_ENCODER_ACTION_ENABLE            ATOM_ENABLE
+#define PANEL_ENCODER_ACTION_COHERENTSEQ       (ATOM_ENABLE+1)
+
+#define PANEL_ENCODER_TRUNCATE_EN              0x01
+#define PANEL_ENCODER_TRUNCATE_DEPTH           0x10
+#define PANEL_ENCODER_SPATIAL_DITHER_EN        0x01
+#define PANEL_ENCODER_SPATIAL_DITHER_DEPTH     0x10
+#define PANEL_ENCODER_TEMPORAL_DITHER_EN       0x01
+#define PANEL_ENCODER_TEMPORAL_DITHER_DEPTH    0x10
+#define PANEL_ENCODER_TEMPORAL_LEVEL_4         0x20
+#define PANEL_ENCODER_25FRC_MASK               0x10
+#define PANEL_ENCODER_25FRC_E                  0x00
+#define PANEL_ENCODER_25FRC_F                  0x10
+#define PANEL_ENCODER_50FRC_MASK               0x60
+#define PANEL_ENCODER_50FRC_A                  0x00
+#define PANEL_ENCODER_50FRC_B                  0x20
+#define PANEL_ENCODER_50FRC_C                  0x40
+#define PANEL_ENCODER_50FRC_D                  0x60
+#define PANEL_ENCODER_75FRC_MASK               0x80
+#define PANEL_ENCODER_75FRC_E                  0x00
+#define PANEL_ENCODER_75FRC_F                  0x80
+
+/****************************************************************************/
+/*  Structures used by SetVoltageTable */
+/****************************************************************************/
+#define SET_VOLTAGE_TYPE_ASIC_VDDC             1
+#define SET_VOLTAGE_TYPE_ASIC_MVDDC            2
+#define SET_VOLTAGE_TYPE_ASIC_MVDDQ            3
+#define SET_VOLTAGE_TYPE_ASIC_VDDCI            4
+#define SET_VOLTAGE_INIT_MODE                  5
+#define SET_VOLTAGE_GET_MAX_VOLTAGE            6       /* Gets the Max. voltage for the soldered Asic */
+
+#define SET_ASIC_VOLTAGE_MODE_ALL_SOURCE       0x1
+#define SET_ASIC_VOLTAGE_MODE_SOURCE_A         0x2
+#define SET_ASIC_VOLTAGE_MODE_SOURCE_B         0x4
+
+#define        SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE      0x0
+#define        SET_ASIC_VOLTAGE_MODE_GET_GPIOVAL      0x1
+#define        SET_ASIC_VOLTAGE_MODE_GET_GPIOMASK     0x2
+
+typedef struct _SET_VOLTAGE_PARAMETERS {
+       UCHAR ucVoltageType;    /*  To tell which voltage to set up, VDDC/MVDDC/MVDDQ */
+       UCHAR ucVoltageMode;    /*  To set all, to set source A or source B or ... */
+       UCHAR ucVoltageIndex;   /*  An index to tell which voltage level */
+       UCHAR ucReserved;
+} SET_VOLTAGE_PARAMETERS;
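+
+/* Illustrative sketch, not part of the AtomBIOS interface: a hypothetical
+ * caller filling SET_VOLTAGE_PARAMETERS to program a VDDC level on all
+ * sources, using the type/mode values defined above.  The function name is
+ * made up for this example.
+ */
+static inline void example_fill_set_voltage(SET_VOLTAGE_PARAMETERS *args, UCHAR level_index)
+{
+       args->ucVoltageType = SET_VOLTAGE_TYPE_ASIC_VDDC;       /* which voltage to set up */
+       args->ucVoltageMode = SET_ASIC_VOLTAGE_MODE_ALL_SOURCE; /* set all sources */
+       args->ucVoltageIndex = level_index;                     /* which voltage level */
+       args->ucReserved = 0;
+}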
+
+typedef struct _SET_VOLTAGE_PARAMETERS_V2 {
+       UCHAR ucVoltageType;    /*  To tell which voltage to set up, VDDC/MVDDC/MVDDQ */
+       UCHAR ucVoltageMode;    /*  Not used, maybe used for a state machine for different power modes */
+       USHORT usVoltageLevel;  /*  real voltage level */
+} SET_VOLTAGE_PARAMETERS_V2;
+
+typedef struct _SET_VOLTAGE_PS_ALLOCATION {
+       SET_VOLTAGE_PARAMETERS sASICSetVoltage;
+       WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;
+} SET_VOLTAGE_PS_ALLOCATION;
+
+/****************************************************************************/
+/*  Structures used by TVEncoderControlTable */
+/****************************************************************************/
+typedef struct _TV_ENCODER_CONTROL_PARAMETERS {
+       USHORT usPixelClock;    /*  in 10KHz; for bios convenience */
+       UCHAR ucTvStandard;     /*  See definition "ATOM_TV_NTSC ..." */
+       UCHAR ucAction;         /*  0: turn off encoder */
+       /*  1: setup and turn on encoder */
+} TV_ENCODER_CONTROL_PARAMETERS;
+
+typedef struct _TV_ENCODER_CONTROL_PS_ALLOCATION {
+       TV_ENCODER_CONTROL_PARAMETERS sTVEncoder;
+       WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;     /*  Don't set this one */
+} TV_ENCODER_CONTROL_PS_ALLOCATION;
+
+/* ==============================Data Table Portion==================================== */
+
+#ifdef UEFI_BUILD
+#define        UTEMP   USHORT
+#define        USHORT  void*
+#endif
+
+/****************************************************************************/
+/*  Structure used in Data.mtb */
+/****************************************************************************/
+typedef struct _ATOM_MASTER_LIST_OF_DATA_TABLES {
+       USHORT UtilityPipeLine; /*  Offset for the utility to get parser info, don't change this position! */
+       USHORT MultimediaCapabilityInfo;        /*  Only used by MM Lib,latest version 1.1, not configurable from Bios, need to include the table to build Bios */
+       USHORT MultimediaConfigInfo;    /*  Only used by MM Lib,latest version 2.1, not configurable from Bios, need to include the table to build Bios */
+       USHORT StandardVESA_Timing;     /*  Only used by Bios */
+       USHORT FirmwareInfo;    /*  Shared by various SW components,latest version 1.4 */
+       USHORT DAC_Info;        /*  Will be obsolete from R600 */
+       USHORT LVDS_Info;       /*  Shared by various SW components,latest version 1.1 */
+       USHORT TMDS_Info;       /*  Will be obsolete from R600 */
+       USHORT AnalogTV_Info;   /*  Shared by various SW components,latest version 1.1 */
+       USHORT SupportedDevicesInfo;    /*  Will be obsolete from R600 */
+       USHORT GPIO_I2C_Info;   /*  Shared by various SW components,latest version 1.2 will be used from R600 */
+       USHORT VRAM_UsageByFirmware;    /*  Shared by various SW components,latest version 1.3 will be used from R600 */
+       USHORT GPIO_Pin_LUT;    /*  Shared by various SW components,latest version 1.1 */
+       USHORT VESA_ToInternalModeLUT;  /*  Only used by Bios */
+       USHORT ComponentVideoInfo;      /*  Shared by various SW components,latest version 2.1 will be used from R600 */
+       USHORT PowerPlayInfo;   /*  Shared by various SW components,latest version 2.1,new design from R600 */
+       USHORT CompassionateData;       /*  Will be obsolete from R600 */
+       USHORT SaveRestoreInfo; /*  Only used by Bios */
+       USHORT PPLL_SS_Info;    /*  Shared by various SW components,latest version 1.2, used to call SS_Info, change to new name because of int ASIC SS info */
+       USHORT OemInfo;         /*  Defined and used by external SW, should be obsolete soon */
+       USHORT XTMDS_Info;      /*  Will be obsolete from R600 */
+       USHORT MclkSS_Info;     /*  Shared by various SW components,latest version 1.1, only enabled when ext SS chip is used */
+       USHORT Object_Header;   /*  Shared by various SW components,latest version 1.1 */
+       USHORT IndirectIOAccess;        /*  Only used by Bios,this table position can't change at all!! */
+       USHORT MC_InitParameter;        /*  Only used by command table */
+       USHORT ASIC_VDDC_Info;  /*  Will be obsolete from R600 */
+       USHORT ASIC_InternalSS_Info;    /*  New table name from R600, used to be called "ASIC_MVDDC_Info" */
+       USHORT TV_VideoMode;    /*  Only used by command table */
+       USHORT VRAM_Info;       /*  Only used by command table, latest version 1.3 */
+       USHORT MemoryTrainingInfo;      /*  Used for VBIOS and Diag utility for memory training purpose since R600. the new table rev start from 2.1 */
+       USHORT IntegratedSystemInfo;    /*  Shared by various SW components */
+       USHORT ASIC_ProfilingInfo;      /*  New table name from R600, used to be called "ASIC_VDDCI_Info" for pre-R600 */
+       USHORT VoltageObjectInfo;       /*  Shared by various SW components, latest version 1.1 */
+       USHORT PowerSourceInfo; /*  Shared by various SW components, latest version 1.1 */
+} ATOM_MASTER_LIST_OF_DATA_TABLES;
+
+#ifdef UEFI_BUILD
+#define        USHORT  UTEMP
+#endif
+
+typedef struct _ATOM_MASTER_DATA_TABLE {
+       ATOM_COMMON_TABLE_HEADER sHeader;
+       ATOM_MASTER_LIST_OF_DATA_TABLES ListOfDataTables;
+} ATOM_MASTER_DATA_TABLE;
+
+/****************************************************************************/
+/*  Structure used in MultimediaCapabilityInfoTable */
+/****************************************************************************/
+typedef struct _ATOM_MULTIMEDIA_CAPABILITY_INFO {
+       ATOM_COMMON_TABLE_HEADER sHeader;
+       ULONG ulSignature;      /*  HW info table signature string "$ATI" */
+       UCHAR ucI2C_Type;       /*  I2C type (normal GP_IO, ImpactTV GP_IO, Dedicated I2C pin, etc) */
+       UCHAR ucTV_OutInfo;     /*  Type of TV out supported (3:0) and video out crystal frequency (6:4) and TV data port (7) */
+       UCHAR ucVideoPortInfo;  /*  Provides the video port capabilities */
+       UCHAR ucHostPortInfo;   /*  Provides host port configuration information */
+} ATOM_MULTIMEDIA_CAPABILITY_INFO;
+
+/****************************************************************************/
+/*  Structure used in MultimediaConfigInfoTable */
+/****************************************************************************/
+typedef struct _ATOM_MULTIMEDIA_CONFIG_INFO {
+       ATOM_COMMON_TABLE_HEADER sHeader;
+       ULONG ulSignature;      /*  MM info table signature string "$MMT" */
+       UCHAR ucTunerInfo;      /*  Type of tuner installed on the adapter (4:0) and video input for tuner (7:5) */
+       UCHAR ucAudioChipInfo;  /*  List the audio chip type (3:0) product type (4) and OEM revision (7:5) */
+       UCHAR ucProductID;      /*  Defines as OEM ID or ATI board ID dependent on product type setting */
+       UCHAR ucMiscInfo1;      /*  Tuner voltage (1:0) HW teletext support (3:2) FM audio decoder (5:4) reserved (6) audio scrambling (7) */
+       UCHAR ucMiscInfo2;      /*  I2S input config (0) I2S output config (1) I2S Audio Chip (4:2) SPDIF Output Config (5) reserved (7:6) */
+       UCHAR ucMiscInfo3;      /*  Video Decoder Type (3:0) Video In Standard/Crystal (7:4) */
+       UCHAR ucMiscInfo4;      /*  Video Decoder Host Config (2:0) reserved (7:3) */
+       UCHAR ucVideoInput0Info;        /*  Video Input 0 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) */
+       UCHAR ucVideoInput1Info;        /*  Video Input 1 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) */
+       UCHAR ucVideoInput2Info;        /*  Video Input 2 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) */
+       UCHAR ucVideoInput3Info;        /*  Video Input 3 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) */
+       UCHAR ucVideoInput4Info;        /*  Video Input 4 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) */
+} ATOM_MULTIMEDIA_CONFIG_INFO;
+
+/****************************************************************************/
+/*  Structures used in FirmwareInfoTable */
+/****************************************************************************/
+
+/*  usBIOSCapability Definition: */
+/*  Bit 0 = 0: Bios image is not Posted, =1:Bios image is Posted; */
+/*  Bit 1 = 0: Dual CRTC is not supported, =1: Dual CRTC is supported; */
+/*  Bit 2 = 0: Extended Desktop is not supported, =1: Extended Desktop is supported; */
+/*  Others: Reserved */
+#define ATOM_BIOS_INFO_ATOM_FIRMWARE_POSTED         0x0001
+#define ATOM_BIOS_INFO_DUAL_CRTC_SUPPORT            0x0002
+#define ATOM_BIOS_INFO_EXTENDED_DESKTOP_SUPPORT     0x0004
+#define ATOM_BIOS_INFO_MEMORY_CLOCK_SS_SUPPORT      0x0008
+#define ATOM_BIOS_INFO_ENGINE_CLOCK_SS_SUPPORT      0x0010
+#define ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU         0x0020
+#define ATOM_BIOS_INFO_WMI_SUPPORT                  0x0040
+#define ATOM_BIOS_INFO_PPMODE_ASSIGNGED_BY_SYSTEM   0x0080
+#define ATOM_BIOS_INFO_HYPERMEMORY_SUPPORT          0x0100
+#define ATOM_BIOS_INFO_HYPERMEMORY_SIZE_MASK        0x1E00
+#define ATOM_BIOS_INFO_VPOST_WITHOUT_FIRST_MODE_SET 0x2000
+#define ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE  0x4000
+
+#ifndef _H2INC
+
+/* Please don't add to or expand this bitfield structure below; it will be retired soon! */
+typedef struct _ATOM_FIRMWARE_CAPABILITY {
+#if ATOM_BIG_ENDIAN
+       USHORT Reserved:3;
+       USHORT HyperMemory_Size:4;
+       USHORT HyperMemory_Support:1;
+       USHORT PPMode_Assigned:1;
+       USHORT WMI_SUPPORT:1;
+       USHORT GPUControlsBL:1;
+       USHORT EngineClockSS_Support:1;
+       USHORT MemoryClockSS_Support:1;
+       USHORT ExtendedDesktopSupport:1;
+       USHORT DualCRTC_Support:1;
+       USHORT FirmwarePosted:1;
+#else
+       USHORT FirmwarePosted:1;
+       USHORT DualCRTC_Support:1;
+       USHORT ExtendedDesktopSupport:1;
+       USHORT MemoryClockSS_Support:1;
+       USHORT EngineClockSS_Support:1;
+       USHORT GPUControlsBL:1;
+       USHORT WMI_SUPPORT:1;
+       USHORT PPMode_Assigned:1;
+       USHORT HyperMemory_Support:1;
+       USHORT HyperMemory_Size:4;
+       USHORT Reserved:3;
+#endif
+} ATOM_FIRMWARE_CAPABILITY;
+
+typedef union _ATOM_FIRMWARE_CAPABILITY_ACCESS {
+       ATOM_FIRMWARE_CAPABILITY sbfAccess;
+       USHORT susAccess;
+} ATOM_FIRMWARE_CAPABILITY_ACCESS;
+
+#else
+
+typedef union _ATOM_FIRMWARE_CAPABILITY_ACCESS {
+       USHORT susAccess;
+} ATOM_FIRMWARE_CAPABILITY_ACCESS;
+
+#endif
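+
+/*
+ * Illustrative sketch: the capability word can be tested through the raw
+ * USHORT view of the access union using the ATOM_BIOS_INFO_* masks above,
+ * which works whether or not the bitfield view is compiled in. The helper
+ * name is an editorial placeholder.
+ */
+static inline int atom_firmware_cap_has_mclk_ss(ATOM_FIRMWARE_CAPABILITY_ACCESS cap)
+{
+	return (cap.susAccess & ATOM_BIOS_INFO_MEMORY_CLOCK_SS_SUPPORT) != 0;
+}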
+
+typedef struct _ATOM_FIRMWARE_INFO {
+       ATOM_COMMON_TABLE_HEADER sHeader;
+       ULONG ulFirmwareRevision;
+       ULONG ulDefaultEngineClock;     /* In 10Khz unit */
+       ULONG ulDefaultMemoryClock;     /* In 10Khz unit */
+       ULONG ulDriverTargetEngineClock;        /* In 10Khz unit */
+       ULONG ulDriverTargetMemoryClock;        /* In 10Khz unit */
+       ULONG ulMaxEngineClockPLL_Output;       /* In 10Khz unit */
+       ULONG ulMaxMemoryClockPLL_Output;       /* In 10Khz unit */
+       ULONG ulMaxPixelClockPLL_Output;        /* In 10Khz unit */
+       ULONG ulASICMaxEngineClock;     /* In 10Khz unit */
+       ULONG ulASICMaxMemoryClock;     /* In 10Khz unit */
+       UCHAR ucASICMaxTemperature;
+       UCHAR ucPadding[3];     /* Don't use them */
+       ULONG aulReservedForBIOS[3];    /* Don't use them */
+       USHORT usMinEngineClockPLL_Input;       /* In 10Khz unit */
+       USHORT usMaxEngineClockPLL_Input;       /* In 10Khz unit */
+       USHORT usMinEngineClockPLL_Output;      /* In 10Khz unit */
+       USHORT usMinMemoryClockPLL_Input;       /* In 10Khz unit */
+       USHORT usMaxMemoryClockPLL_Input;       /* In 10Khz unit */
+       USHORT usMinMemoryClockPLL_Output;      /* In 10Khz unit */
+       USHORT usMaxPixelClock; /* In 10Khz unit, Max.  Pclk */
+       USHORT usMinPixelClockPLL_Input;        /* In 10Khz unit */
+       USHORT usMaxPixelClockPLL_Input;        /* In 10Khz unit */
+       USHORT usMinPixelClockPLL_Output;       /* In 10Khz unit, the definitions above can't change!!! */
+       ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
+       USHORT usReferenceClock;        /* In 10Khz unit */
+       USHORT usPM_RTS_Location;       /* RTS PM4 starting location in ROM in 1Kb unit */
+       UCHAR ucPM_RTS_StreamSize;      /* RTS PM4 packets in Kb unit */
+       UCHAR ucDesign_ID;      /* Indicate what is the board design */
+       UCHAR ucMemoryModule_ID;        /* Indicate what is the board design */
+} ATOM_FIRMWARE_INFO;
+
+typedef struct _ATOM_FIRMWARE_INFO_V1_2 {
+       ATOM_COMMON_TABLE_HEADER sHeader;
+       ULONG ulFirmwareRevision;
+       ULONG ulDefaultEngineClock;     /* In 10Khz unit */
+       ULONG ulDefaultMemoryClock;     /* In 10Khz unit */
+       ULONG ulDriverTargetEngineClock;        /* In 10Khz unit */
+       ULONG ulDriverTargetMemoryClock;        /* In 10Khz unit */
+       ULONG ulMaxEngineClockPLL_Output;       /* In 10Khz unit */
+       ULONG ulMaxMemoryClockPLL_Output;       /* In 10Khz unit */
+       ULONG ulMaxPixelClockPLL_Output;        /* In 10Khz unit */
+       ULONG ulASICMaxEngineClock;     /* In 10Khz unit */
+       ULONG ulASICMaxMemoryClock;     /* In 10Khz unit */
+       UCHAR ucASICMaxTemperature;
+       UCHAR ucMinAllowedBL_Level;
+       UCHAR ucPadding[2];     /* Don't use them */
+       ULONG aulReservedForBIOS[2];    /* Don't use them */
+       ULONG ulMinPixelClockPLL_Output;        /* In 10Khz unit */
+       USHORT usMinEngineClockPLL_Input;       /* In 10Khz unit */
+       USHORT usMaxEngineClockPLL_Input;       /* In 10Khz unit */
+       USHORT usMinEngineClockPLL_Output;      /* In 10Khz unit */
+       USHORT usMinMemoryClockPLL_Input;       /* In 10Khz unit */
+       USHORT usMaxMemoryClockPLL_Input;       /* In 10Khz unit */
+       USHORT usMinMemoryClockPLL_Output;      /* In 10Khz unit */
+       USHORT usMaxPixelClock; /* In 10Khz unit, Max.  Pclk */
+       USHORT usMinPixelClockPLL_Input;        /* In 10Khz unit */
+       USHORT usMaxPixelClockPLL_Input;        /* In 10Khz unit */
+       USHORT usMinPixelClockPLL_Output;       /* In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output */
+       ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
+       USHORT usReferenceClock;        /* In 10Khz unit */
+       USHORT usPM_RTS_Location;       /* RTS PM4 starting location in ROM in 1Kb unit */
+       UCHAR ucPM_RTS_StreamSize;      /* RTS PM4 packets in Kb unit */
+       UCHAR ucDesign_ID;      /* Indicate what is the board design */
+       UCHAR ucMemoryModule_ID;        /* Indicate what is the board design */
+} ATOM_FIRMWARE_INFO_V1_2;
+
+typedef struct _ATOM_FIRMWARE_INFO_V1_3 {
+       ATOM_COMMON_TABLE_HEADER sHeader;
+       ULONG ulFirmwareRevision;
+       ULONG ulDefaultEngineClock;     /* In 10Khz unit */
+       ULONG ulDefaultMemoryClock;     /* In 10Khz unit */
+       ULONG ulDriverTargetEngineClock;        /* In 10Khz unit */
+       ULONG ulDriverTargetMemoryClock;        /* In 10Khz unit */
+       ULONG ulMaxEngineClockPLL_Output;       /* In 10Khz unit */
+       ULONG ulMaxMemoryClockPLL_Output;       /* In 10Khz unit */
+       ULONG ulMaxPixelClockPLL_Output;        /* In 10Khz unit */
+       ULONG ulASICMaxEngineClock;     /* In 10Khz unit */
+       ULONG ulASICMaxMemoryClock;     /* In 10Khz unit */
+       UCHAR ucASICMaxTemperature;
+       UCHAR ucMinAllowedBL_Level;
+       UCHAR ucPadding[2];     /* Don't use them */
+       ULONG aulReservedForBIOS;       /* Don't use them */
+       ULONG ul3DAccelerationEngineClock;      /* In 10Khz unit */
+       ULONG ulMinPixelClockPLL_Output;        /* In 10Khz unit */
+       USHORT usMinEngineClockPLL_Input;       /* In 10Khz unit */
+       USHORT usMaxEngineClockPLL_Input;       /* In 10Khz unit */
+       USHORT usMinEngineClockPLL_Output;      /* In 10Khz unit */
+       USHORT usMinMemoryClockPLL_Input;       /* In 10Khz unit */
+       USHORT usMaxMemoryClockPLL_Input;       /* In 10Khz unit */
+       USHORT usMinMemoryClockPLL_Output;      /* In 10Khz unit */
+       USHORT usMaxPixelClock; /* In 10Khz unit, Max.  Pclk */
+       USHORT usMinPixelClockPLL_Input;        /* In 10Khz unit */
+       USHORT usMaxPixelClockPLL_Input;        /* In 10Khz unit */
+       USHORT usMinPixelClockPLL_Output;       /* In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output */
+       ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
+       USHORT usReferenceClock;        /* In 10Khz unit */
+       USHORT usPM_RTS_Location;       /* RTS PM4 starting location in ROM in 1Kb unit */
+       UCHAR ucPM_RTS_StreamSize;      /* RTS PM4 packets in Kb unit */
+       UCHAR ucDesign_ID;      /* Indicate what is the board design */
+       UCHAR ucMemoryModule_ID;        /* Indicate what is the board design */
+} ATOM_FIRMWARE_INFO_V1_3;
+
+typedef struct _ATOM_FIRMWARE_INFO_V1_4 {
+       ATOM_COMMON_TABLE_HEADER sHeader;
+       ULONG ulFirmwareRevision;
+       ULONG ulDefaultEngineClock;     /* In 10Khz unit */
+       ULONG ulDefaultMemoryClock;     /* In 10Khz unit */
+       ULONG ulDriverTargetEngineClock;        /* In 10Khz unit */
+       ULONG ulDriverTargetMemoryClock;        /* In 10Khz unit */
+       ULONG ulMaxEngineClockPLL_Output;       /* In 10Khz unit */
+       ULONG ulMaxMemoryClockPLL_Output;       /* In 10Khz unit */
+       ULONG ulMaxPixelClockPLL_Output;        /* In 10Khz unit */
+       ULONG ulASICMaxEngineClock;     /* In 10Khz unit */
+       ULONG ulASICMaxMemoryClock;     /* In 10Khz unit */
+       UCHAR ucASICMaxTemperature;
+       UCHAR ucMinAllowedBL_Level;
+       USHORT usBootUpVDDCVoltage;     /* In MV unit */
+       USHORT usLcdMinPixelClockPLL_Output;    /*  In MHz unit */
+       USHORT usLcdMaxPixelClockPLL_Output;    /*  In MHz unit */
+       ULONG ul3DAccelerationEngineClock;      /* In 10Khz unit */
+       ULONG ulMinPixelClockPLL_Output;        /* In 10Khz unit */
+       USHORT usMinEngineClockPLL_Input;       /* In 10Khz unit */
+       USHORT usMaxEngineClockPLL_Input;       /* In 10Khz unit */
+       USHORT usMinEngineClockPLL_Output;      /* In 10Khz unit */
+       USHORT usMinMemoryClockPLL_Input;       /* In 10Khz unit */
+       USHORT usMaxMemoryClockPLL_Input;       /* In 10Khz unit */
+       USHORT usMinMemoryClockPLL_Output;      /* In 10Khz unit */
+       USHORT usMaxPixelClock; /* In 10Khz unit, Max.  Pclk */
+       USHORT usMinPixelClockPLL_Input;        /* In 10Khz unit */
+       USHORT usMaxPixelClockPLL_Input;        /* In 10Khz unit */
+       USHORT usMinPixelClockPLL_Output;       /* In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output */
+       ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
+       USHORT usReferenceClock;        /* In 10Khz unit */
+       USHORT usPM_RTS_Location;       /* RTS PM4 starting location in ROM in 1Kb unit */
+       UCHAR ucPM_RTS_StreamSize;      /* RTS PM4 packets in Kb unit */
+       UCHAR ucDesign_ID;      /* Indicate what is the board design */
+       UCHAR ucMemoryModule_ID;        /* Indicate what is the board design */
+} ATOM_FIRMWARE_INFO_V1_4;
+
+#define ATOM_FIRMWARE_INFO_LAST  ATOM_FIRMWARE_INFO_V1_4
+
+/****************************************************************************/
+/*  Structures used in IntegratedSystemInfoTable */
+/****************************************************************************/
+#define IGP_CAP_FLAG_DYNAMIC_CLOCK_EN      0x2
+#define IGP_CAP_FLAG_AC_CARD               0x4
+#define IGP_CAP_FLAG_SDVO_CARD             0x8
+#define IGP_CAP_FLAG_POSTDIV_BY_2_MODE     0x10
+
+typedef struct _ATOM_INTEGRATED_SYSTEM_INFO {
+       ATOM_COMMON_TABLE_HEADER sHeader;
+       ULONG ulBootUpEngineClock;      /* in 10kHz unit */
+       ULONG ulBootUpMemoryClock;      /* in 10kHz unit */
+       ULONG ulMaxSystemMemoryClock;   /* in 10kHz unit */
+       ULONG ulMinSystemMemoryClock;   /* in 10kHz unit */
+       UCHAR ucNumberOfCyclesInPeriodHi;
+       UCHAR ucLCDTimingSel;   /* =0: not valid; !=0: select this timing descriptor from the LCD EDID. */
+       USHORT usReserved1;
+       USHORT usInterNBVoltageLow;     /* An intermediate PWM value to set the voltage */
+       USHORT usInterNBVoltageHigh;    /* Another intermediate PWM value to set the voltage */
+       ULONG ulReserved[2];
+
+       USHORT usFSBClock;      /* In MHz unit */
+       USHORT usCapabilityFlag;        /* Bit0=1 indicates fake HDMI support, Bit1=0/1 for dynamic clocking disable/enable */
+       /* Bit[3:2]== 0:No PCIE card, 1:AC card, 2:SDVO card */
+       /* Bit[4]==1: P/2 mode, ==0: P/1 mode */
+       USHORT usPCIENBCfgReg7; /* bit[7:0]=MUX_Sel, bit[9:8]=MUX_SEL_LEVEL2, bit[10]=Lane_Reversal */
+       USHORT usK8MemoryClock; /* in MHz unit */
+       USHORT usK8SyncStartDelay;      /* in 0.01 us unit */
+       USHORT usK8DataReturnTime;      /* in 0.01 us unit */
+       UCHAR ucMaxNBVoltage;
+       UCHAR ucMinNBVoltage;
+       UCHAR ucMemoryType;     /* [7:4]=1:DDR1;=2:DDR2;=3:DDR3.[3:0] is reserved */
+       UCHAR ucNumberOfCyclesInPeriod; /* CG.FVTHROT_PWM_CTRL_REG0.NumberOfCyclesInPeriod */
+       UCHAR ucStartingPWM_HighTime;   /* CG.FVTHROT_PWM_CTRL_REG0.StartingPWM_HighTime */
+       UCHAR ucHTLinkWidth;    /* 16 bit vs. 8 bit */
+       UCHAR ucMaxNBVoltageHigh;
+       UCHAR ucMinNBVoltageHigh;
+} ATOM_INTEGRATED_SYSTEM_INFO;
+
+/* Explanation on entries in ATOM_INTEGRATED_SYSTEM_INFO
+ulBootUpMemoryClock:    For Intel IGP,it's the UMA system memory clock
+                        For AMD IGP,it's 0 if no SidePort memory installed or it's the boot-up SidePort memory clock
+ulMaxSystemMemoryClock: For Intel IGP,it's the Max freq from memory SPD if memory runs in ASYNC mode or otherwise (SYNC mode) it's 0
+                        For AMD IGP,for now this can be 0
+ulMinSystemMemoryClock: For Intel IGP,it's 133MHz if memory runs in ASYNC mode or otherwise (SYNC mode) it's 0
+                        For AMD IGP,for now this can be 0
+
+usFSBClock:             For Intel IGP,it's FSB Freq
+                        For AMD IGP,it's HT Link Speed
+
+usK8MemoryClock:        For AMD IGP only. For RevF CPU, set it to 200
+usK8SyncStartDelay:     For AMD IGP only. Memory access latency in K8, required for watermark calculation
+usK8DataReturnTime:     For AMD IGP only. Memory access latency in K8, required for watermark calculation
+
+VC:Voltage Control
+ucMaxNBVoltage:         Voltage regulator dependent PWM value. Low 8 bits of the value for the max voltage.Set this one to 0xFF if VC without PWM. Set this to 0x0 if no VC at all.
+ucMinNBVoltage:         Voltage regulator dependent PWM value. Low 8 bits of the value for the min voltage.Set this one to 0x00 if VC without PWM or no VC at all.
+
+ucNumberOfCyclesInPeriod:   Indicate how many cycles when PWM duty is 100%. low 8 bits of the value.
+ucNumberOfCyclesInPeriodHi: Indicate how many cycles when PWM duty is 100%. high 8 bits of the value.If the PWM has an inverter,set bit [7]==1,otherwise set it 0
+
+ucMaxNBVoltageHigh:     Voltage regulator dependent PWM value. High 8 bits of  the value for the max voltage.Set this one to 0xFF if VC without PWM. Set this to 0x0 if no VC at all.
+ucMinNBVoltageHigh:     Voltage regulator dependent PWM value. High 8 bits of the value for the min voltage.Set this one to 0x00 if VC without PWM or no VC at all.
+
+usInterNBVoltageLow:    Voltage regulator dependent PWM value. The value makes the voltage >=Min NB voltage but <=InterNBVoltageHigh. Set this to 0x0000 if VC without PWM or no VC at all.
+usInterNBVoltageHigh:   Voltage regulator dependent PWM value. The value makes the voltage >=InterNBVoltageLow but <=Max NB voltage. Set this to 0x0000 if VC without PWM or no VC at all.
+*/
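+
+/*
+ * Illustrative sketch: combining the split low/high NB voltage bytes
+ * described above into one 16-bit PWM value (helper name is editorial).
+ */
+static inline USHORT atom_igp_max_nb_voltage_pwm(const ATOM_INTEGRATED_SYSTEM_INFO *info)
+{
+	return (USHORT)(info->ucMaxNBVoltage |
+			((USHORT)info->ucMaxNBVoltageHigh << 8));
+}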
+
+/*
+The following IGP table is introduced with RS780. SBIOS is supposed to place it in the frame buffer before the IGP VBIOS starts VPOST;
+the VBIOS will then copy the whole structure into its image so that all GPU SW components can access this data structure to get whatever they need.
+Enough space is reserved that the table revision should never need to change. Whenever needed, a GPU SW component can use the reserved portion for new data entries.
+
+SW components can access the IGP system info structure in the same way as before.
+*/
+
+typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 {
+       ATOM_COMMON_TABLE_HEADER sHeader;
+       ULONG ulBootUpEngineClock;      /* in 10kHz unit */
+       ULONG ulReserved1[2];   /* must be 0x0 for the reserved */
+       ULONG ulBootUpUMAClock; /* in 10kHz unit */
+       ULONG ulBootUpSidePortClock;    /* in 10kHz unit */
+       ULONG ulMinSidePortClock;       /* in 10kHz unit */
+       ULONG ulReserved2[6];   /* must be 0x0 for the reserved */
+       ULONG ulSystemConfig;   /* see explanation below */
+       ULONG ulBootUpReqDisplayVector;
+       ULONG ulOtherDisplayMisc;
+       ULONG ulDDISlot1Config;
+       ULONG ulDDISlot2Config;
+       UCHAR ucMemoryType;     /* [3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved */
+       UCHAR ucUMAChannelNumber;
+       UCHAR ucDockingPinBit;
+       UCHAR ucDockingPinPolarity;
+       ULONG ulDockingPinCFGInfo;
+       ULONG ulCPUCapInfo;
+       USHORT usNumberOfCyclesInPeriod;
+       USHORT usMaxNBVoltage;
+       USHORT usMinNBVoltage;
+       USHORT usBootUpNBVoltage;
+       ULONG ulHTLinkFreq;     /* in 10Khz */
+       USHORT usMinHTLinkWidth;
+       USHORT usMaxHTLinkWidth;
+       USHORT usUMASyncStartDelay;
+       USHORT usUMADataReturnTime;
+       USHORT usLinkStatusZeroTime;
+       USHORT usReserved;
+       ULONG ulHighVoltageHTLinkFreq;  /*  in 10Khz */
+       ULONG ulLowVoltageHTLinkFreq;   /*  in 10Khz */
+       USHORT usMaxUpStreamHTLinkWidth;
+       USHORT usMaxDownStreamHTLinkWidth;
+       USHORT usMinUpStreamHTLinkWidth;
+       USHORT usMinDownStreamHTLinkWidth;
+       ULONG ulReserved3[97];  /* must be 0x0 */
+} ATOM_INTEGRATED_SYSTEM_INFO_V2;
+
+/*
+ulBootUpEngineClock:   Boot-up Engine Clock in 10Khz;
+ulBootUpUMAClock:      Boot-up UMA Clock in 10Khz; it must be 0x0 when UMA is not present
+ulBootUpSidePortClock: Boot-up SidePort Clock in 10Khz; it must be 0x0 when SidePort Memory is not present,this could be equal to or less than maximum supported Sideport memory clock
+
+ulSystemConfig:
+Bit[0]=1: PowerExpress mode =0 Non-PowerExpress mode;
+Bit[1]=1: system boots up in AMD overdrive state or a user-customized mode. In this case, the driver will just stick to this boot-up mode. No other PowerPlay state is used.
+      =0: system boots up at driver control state. Power state depends on PowerPlay table.
+Bit[2]=1: PWM method is used on NB voltage control. =0: GPIO method is used.
+Bit[3]=1: Only one power state(Performance) will be supported.
+      =0: Multiple power states supported from PowerPlay table.
+Bit[4]=1: CLMC is supported and enabled on current system.
+      =0: CLMC is not supported or enabled on the current system. SBIOS needs to support HT link/freq change through the ATIF interface.
+Bit[5]=1: Enable CDLW for all driver control power states. Max HT width is from SBIOS, while Min HT width is determined by display requirement.
+      =0: CDLW is disabled. If CLMC is enabled, Min HT width will be set equal to Max HT width. If CLMC is disabled, Max HT width will be applied.
+Bit[6]=1: High voltage requested for all power states. In this case, voltage will be forced at 1.1v and powerplay table voltage drop/throttling requests will be ignored.
+      =0: Voltage setting is determined by the powerplay table.
+Bit[7]=1: Enable CLMC as hybrid mode. CDLD and CILR will be disabled in this case and legacy C1E is used. This is a workaround for a CPU (Griffin) performance issue.
+      =0: Enable CLMC as regular mode, CDLD and CILR will be enabled.
+
+ulBootUpReqDisplayVector: This dword is a bit vector indicating which display devices are requested during boot-up. Refer to ATOM_DEVICE_xxx_SUPPORT for the bit vector definitions.
+
+ulOtherDisplayMisc: [15:8] - Bootup LCD expansion selection; 0 - center, 1 - full panel size expansion;
+                    [7:0]  - Bootup TV standard selection; this is a bit vector to indicate what TV standards are supported by the system. Refer to the ucTVSuppportedStd definition;
+
+ulDDISlot1Config: Describes the PCIE lane configuration on this DDI PCIE slot (ADD2 card) or connector (Mobile design).
+      [3:0]   - Bit vector to indicate PCIE lane config of the DDI slot/connector on chassis (bit 0=1 lane 3:0; bit 1=1 lane 7:4; bit 2=1 lane 11:8; bit 3=1 lane 15:12)
+      [7:4]   - Bit vector to indicate PCIE lane config of the same DDI slot/connector on docking station (bit 0=1 lane 3:0; bit 1=1 lane 7:4; bit 2=1 lane 11:8; bit 3=1 lane 15:12)
+      [15:8]  - Lane configuration attribute;
+      [23:16] - Connector type, possible value:
+                CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D
+                CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D
+                CONNECTOR_OBJECT_ID_HDMI_TYPE_A
+                CONNECTOR_OBJECT_ID_DISPLAYPORT
+      [31:24] - Reserved
+
+ulDDISlot2Config: Same as Slot1.
+ucMemoryType: SidePort memory type, set it to 0x0 when Sideport memory is not installed. Driver needs this info to change sideport memory clock. Not for display in CCC.
+For IGP, Hypermemory is the only memory type shown in CCC.
+
+ucUMAChannelNumber:  how many channels for the UMA;
+
+ulDockingPinCFGInfo: [15:0]-Bus/Device/Function # to CFG to read this Docking Pin; [31:16]-reg offset in CFG to read this pin
+ucDockingPinBit:     which bit in this register to read the pin status;
+ucDockingPinPolarity:Polarity of the pin when docked;
+
+ulCPUCapInfo:        [7:0]=1:Griffin;[7:0]=2:Greyhound;[7:0]=3:K8, other bits reserved for now and must be 0x0
+
+usNumberOfCyclesInPeriod:Indicates how many cycles there are when the PWM duty is 100%.
+usMaxNBVoltage:Max. voltage control value in either PWM or GPIO mode.
+usMinNBVoltage:Min. voltage control value in either PWM or GPIO mode.
+                    GPIO mode: both usMaxNBVoltage & usMinNBVoltage have a valid value and ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE=0
+                    PWM mode: both usMaxNBVoltage & usMinNBVoltage have a valid value and ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE=1
+                    Mode not controlled by GPU SW: usMaxNBVoltage & usMinNBVoltage=0 and ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE is ignored
+usBootUpNBVoltage:Boot-up voltage regulator dependent PWM value.
+
+ulHTLinkFreq:       Bootup HT link Frequency in 10Khz.
+usMinHTLinkWidth:   Bootup minimum HT link width. If CDLW disabled, this is equal to usMaxHTLinkWidth.
+                    If CDLW enabled, both upstream and downstream width should be the same during bootup.
+usMaxHTLinkWidth:   Bootup maximum HT link width. If CDLW disabled, this is equal to usMinHTLinkWidth.
+                    If CDLW enabled, both upstream and downstream width should be the same during bootup.
+
+usUMASyncStartDelay: Memory access latency, required for watermark calculation
+usUMADataReturnTime: Memory access latency, required for watermark calculation
+usLinkStatusZeroTime:Memory access latency required for watermark calculation; set this to 0x0 for a K8 CPU, and set a proper value in units of 0.01 us
+for Griffin or Greyhound. SBIOS needs to convert it to actual time as follows:
+                     if T0Ttime [5:4]=00b, then usLinkStatusZeroTime=T0Ttime [3:0]*0.1us (0.0 to 1.5us)
+                     if T0Ttime [5:4]=01b, then usLinkStatusZeroTime=T0Ttime [3:0]*0.5us (0.0 to 7.5us)
+                     if T0Ttime [5:4]=10b, then usLinkStatusZeroTime=T0Ttime [3:0]*2.0us (0.0 to 30us)
+                     if T0Ttime [5:4]=11b, and T0Ttime [3:0]=0x0 to 0xa, then usLinkStatusZeroTime=T0Ttime [3:0]*20us (0.0 to 200us)
+
+ulHighVoltageHTLinkFreq:     HT link frequency for the power state with high voltage. If boot up runs in HT1, this must be 0.
+                             This must be less than or equal to ulHTLinkFreq(bootup frequency).
+ulLowVoltageHTLinkFreq:      HT link frequency for power state with low voltage or voltage scaling 1.0v~1.1v. If boot up runs in HT1, this must be 0.
+                             This must be less than or equal to ulHighVoltageHTLinkFreq.
+
+usMaxUpStreamHTLinkWidth:    Asymmetric link width support in the future, to replace usMaxHTLinkWidth. Not used for now.
+usMaxDownStreamHTLinkWidth:  same as above.
+usMinUpStreamHTLinkWidth:    Asymmetric link width support in the future, to replace usMinHTLinkWidth. Not used for now.
+usMinDownStreamHTLinkWidth:  same as above.
+*/
+
+#define SYSTEM_CONFIG_POWEREXPRESS_ENABLE                 0x00000001
+#define SYSTEM_CONFIG_RUN_AT_OVERDRIVE_ENGINE             0x00000002
+#define SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE                  0x00000004
+#define SYSTEM_CONFIG_PERFORMANCE_POWERSTATE_ONLY         0x00000008
+#define SYSTEM_CONFIG_CLMC_ENABLED                        0x00000010
+#define SYSTEM_CONFIG_CDLW_ENABLED                        0x00000020
+#define SYSTEM_CONFIG_HIGH_VOLTAGE_REQUESTED              0x00000040
+#define SYSTEM_CONFIG_CLMC_HYBRID_MODE_ENABLED            0x00000080
+
+#define IGP_DDI_SLOT_LANE_CONFIG_MASK                     0x000000FF
+
+#define b0IGP_DDI_SLOT_LANE_MAP_MASK                      0x0F
+#define b0IGP_DDI_SLOT_DOCKING_LANE_MAP_MASK              0xF0
+#define b0IGP_DDI_SLOT_CONFIG_LANE_0_3                    0x01
+#define b0IGP_DDI_SLOT_CONFIG_LANE_4_7                    0x02
+#define b0IGP_DDI_SLOT_CONFIG_LANE_8_11                   0x04
+#define b0IGP_DDI_SLOT_CONFIG_LANE_12_15                  0x08
+
+#define IGP_DDI_SLOT_ATTRIBUTE_MASK                       0x0000FF00
+#define IGP_DDI_SLOT_CONFIG_REVERSED                      0x00000100
+#define b1IGP_DDI_SLOT_CONFIG_REVERSED                    0x01
+
+#define IGP_DDI_SLOT_CONNECTOR_TYPE_MASK                  0x00FF0000
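+
+/*
+ * Illustrative sketch: extracting the connector type byte ([23:16]) from a
+ * ulDDISlot1Config/ulDDISlot2Config value with the mask above. The explicit
+ * shift of 16 and the helper name are editorial.
+ */
+static inline UCHAR atom_igp_ddi_slot_connector_type(ULONG ulDDISlotConfig)
+{
+	return (UCHAR)((ulDDISlotConfig & IGP_DDI_SLOT_CONNECTOR_TYPE_MASK) >> 16);
+}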
+
+#define ATOM_CRT_INT_ENCODER1_INDEX                       0x00000000
+#define ATOM_LCD_INT_ENCODER1_INDEX                       0x00000001
+#define ATOM_TV_INT_ENCODER1_INDEX                        0x00000002
+#define ATOM_DFP_INT_ENCODER1_INDEX                       0x00000003
+#define ATOM_CRT_INT_ENCODER2_INDEX                       0x00000004
+#define ATOM_LCD_EXT_ENCODER1_INDEX                       0x00000005
+#define ATOM_TV_EXT_ENCODER1_INDEX                        0x00000006
+#define ATOM_DFP_EXT_ENCODER1_INDEX                       0x00000007
+#define ATOM_CV_INT_ENCODER1_INDEX                        0x00000008
+#define ATOM_DFP_INT_ENCODER2_INDEX                       0x00000009
+#define ATOM_CRT_EXT_ENCODER1_INDEX                       0x0000000A
+#define ATOM_CV_EXT_ENCODER1_INDEX                        0x0000000B
+#define ATOM_DFP_INT_ENCODER3_INDEX                       0x0000000C
+#define ATOM_DFP_INT_ENCODER4_INDEX                       0x0000000D
+
+/*  define ASIC internal encoder id ( bit vector ) */
+#define ASIC_INT_DAC1_ENCODER_ID                                                                                       0x00
+#define ASIC_INT_TV_ENCODER_ID                                                                                                         0x02
+#define ASIC_INT_DIG1_ENCODER_ID                                                                                                       0x03
+#define ASIC_INT_DAC2_ENCODER_ID                                                                                                       0x04
+#define ASIC_EXT_TV_ENCODER_ID                                                                                                         0x06
+#define ASIC_INT_DVO_ENCODER_ID                                                                                                                0x07
+#define ASIC_INT_DIG2_ENCODER_ID                                                                                                       0x09
+#define ASIC_EXT_DIG_ENCODER_ID                                                                                                                0x05
+
+/* define Encoder attribute */
+#define ATOM_ANALOG_ENCODER                                                                                                                            0
+#define ATOM_DIGITAL_ENCODER                                                                                                                   1
+
+#define ATOM_DEVICE_CRT1_INDEX                            0x00000000
+#define ATOM_DEVICE_LCD1_INDEX                            0x00000001
+#define ATOM_DEVICE_TV1_INDEX                             0x00000002
+#define ATOM_DEVICE_DFP1_INDEX                            0x00000003
+#define ATOM_DEVICE_CRT2_INDEX                            0x00000004
+#define ATOM_DEVICE_LCD2_INDEX                            0x00000005
+#define ATOM_DEVICE_TV2_INDEX                             0x00000006
+#define ATOM_DEVICE_DFP2_INDEX                            0x00000007
+#define ATOM_DEVICE_CV_INDEX                              0x00000008
+#define ATOM_DEVICE_DFP3_INDEX                                                                                                         0x00000009
+#define ATOM_DEVICE_DFP4_INDEX                                                                                                         0x0000000A
+#define ATOM_DEVICE_DFP5_INDEX                                                                                                         0x0000000B
+#define ATOM_DEVICE_RESERVEDC_INDEX                       0x0000000C
+#define ATOM_DEVICE_RESERVEDD_INDEX                       0x0000000D
+#define ATOM_DEVICE_RESERVEDE_INDEX                       0x0000000E
+#define ATOM_DEVICE_RESERVEDF_INDEX                       0x0000000F
+#define ATOM_MAX_SUPPORTED_DEVICE_INFO                    (ATOM_DEVICE_DFP3_INDEX+1)
+#define ATOM_MAX_SUPPORTED_DEVICE_INFO_2                  ATOM_MAX_SUPPORTED_DEVICE_INFO
+#define ATOM_MAX_SUPPORTED_DEVICE_INFO_3                  (ATOM_DEVICE_DFP5_INDEX + 1)
+
+#define ATOM_MAX_SUPPORTED_DEVICE                         (ATOM_DEVICE_RESERVEDF_INDEX+1)
+
+#define ATOM_DEVICE_CRT1_SUPPORT                          (0x1L << ATOM_DEVICE_CRT1_INDEX)
+#define ATOM_DEVICE_LCD1_SUPPORT                          (0x1L << ATOM_DEVICE_LCD1_INDEX)
+#define ATOM_DEVICE_TV1_SUPPORT                           (0x1L << ATOM_DEVICE_TV1_INDEX)
+#define ATOM_DEVICE_DFP1_SUPPORT                          (0x1L << ATOM_DEVICE_DFP1_INDEX)
+#define ATOM_DEVICE_CRT2_SUPPORT                          (0x1L << ATOM_DEVICE_CRT2_INDEX)
+#define ATOM_DEVICE_LCD2_SUPPORT                          (0x1L << ATOM_DEVICE_LCD2_INDEX)
+#define ATOM_DEVICE_TV2_SUPPORT                           (0x1L << ATOM_DEVICE_TV2_INDEX)
+#define ATOM_DEVICE_DFP2_SUPPORT                          (0x1L << ATOM_DEVICE_DFP2_INDEX)
+#define ATOM_DEVICE_CV_SUPPORT                            (0x1L << ATOM_DEVICE_CV_INDEX)
+#define ATOM_DEVICE_DFP3_SUPPORT                                                                                                       (0x1L << ATOM_DEVICE_DFP3_INDEX)
+#define ATOM_DEVICE_DFP4_SUPPORT                                                                                                       (0x1L << ATOM_DEVICE_DFP4_INDEX )
+#define ATOM_DEVICE_DFP5_SUPPORT                                                                                                       (0x1L << ATOM_DEVICE_DFP5_INDEX)
+
+#define ATOM_DEVICE_CRT_SUPPORT \
+       (ATOM_DEVICE_CRT1_SUPPORT | ATOM_DEVICE_CRT2_SUPPORT)
+#define ATOM_DEVICE_DFP_SUPPORT \
+       (ATOM_DEVICE_DFP1_SUPPORT | ATOM_DEVICE_DFP2_SUPPORT | \
+        ATOM_DEVICE_DFP3_SUPPORT | ATOM_DEVICE_DFP4_SUPPORT | \
+        ATOM_DEVICE_DFP5_SUPPORT)
+#define ATOM_DEVICE_TV_SUPPORT \
+       (ATOM_DEVICE_TV1_SUPPORT  | ATOM_DEVICE_TV2_SUPPORT)
+#define ATOM_DEVICE_LCD_SUPPORT \
+       (ATOM_DEVICE_LCD1_SUPPORT | ATOM_DEVICE_LCD2_SUPPORT)
+
+#define ATOM_DEVICE_CONNECTOR_TYPE_MASK                   0x000000F0
+#define ATOM_DEVICE_CONNECTOR_TYPE_SHIFT                  0x00000004
+#define ATOM_DEVICE_CONNECTOR_VGA                         0x00000001
+#define ATOM_DEVICE_CONNECTOR_DVI_I                       0x00000002
+#define ATOM_DEVICE_CONNECTOR_DVI_D                       0x00000003
+#define ATOM_DEVICE_CONNECTOR_DVI_A                       0x00000004
+#define ATOM_DEVICE_CONNECTOR_SVIDEO                      0x00000005
+#define ATOM_DEVICE_CONNECTOR_COMPOSITE                   0x00000006
+#define ATOM_DEVICE_CONNECTOR_LVDS                        0x00000007
+#define ATOM_DEVICE_CONNECTOR_DIGI_LINK                   0x00000008
+#define ATOM_DEVICE_CONNECTOR_SCART                       0x00000009
+#define ATOM_DEVICE_CONNECTOR_HDMI_TYPE_A                 0x0000000A
+#define ATOM_DEVICE_CONNECTOR_HDMI_TYPE_B                 0x0000000B
+#define ATOM_DEVICE_CONNECTOR_CASE_1                      0x0000000E
+#define ATOM_DEVICE_CONNECTOR_DISPLAYPORT                 0x0000000F
+
+#define ATOM_DEVICE_DAC_INFO_MASK                         0x0000000F
+#define ATOM_DEVICE_DAC_INFO_SHIFT                        0x00000000
+#define ATOM_DEVICE_DAC_INFO_NODAC                        0x00000000
+#define ATOM_DEVICE_DAC_INFO_DACA                         0x00000001
+#define ATOM_DEVICE_DAC_INFO_DACB                         0x00000002
+#define ATOM_DEVICE_DAC_INFO_EXDAC                        0x00000003
+
+#define ATOM_DEVICE_I2C_ID_NOI2C                          0x00000000
+
+#define ATOM_DEVICE_I2C_LINEMUX_MASK                      0x0000000F
+#define ATOM_DEVICE_I2C_LINEMUX_SHIFT                     0x00000000
+
+#define ATOM_DEVICE_I2C_ID_MASK                           0x00000070
+#define ATOM_DEVICE_I2C_ID_SHIFT                          0x00000004
+#define ATOM_DEVICE_I2C_ID_IS_FOR_NON_MM_USE              0x00000001
+#define ATOM_DEVICE_I2C_ID_IS_FOR_MM_USE                  0x00000002
+#define ATOM_DEVICE_I2C_ID_IS_FOR_SDVO_USE                0x00000003   /* For IGP RS600 */
+#define ATOM_DEVICE_I2C_ID_IS_FOR_DAC_SCL                 0x00000004   /* For IGP RS690 */
+
+#define ATOM_DEVICE_I2C_HARDWARE_CAP_MASK                 0x00000080
+#define ATOM_DEVICE_I2C_HARDWARE_CAP_SHIFT                0x00000007
+#define        ATOM_DEVICE_USES_SOFTWARE_ASSISTED_I2C            0x00000000
+#define        ATOM_DEVICE_USES_HARDWARE_ASSISTED_I2C            0x00000001
+
+/*   usDeviceSupport: */
+/*   Bit 0       = 0 - no CRT1 support; = 1 - CRT1 is supported */
+/*   Bit 1       = 0 - no LCD1 support; = 1 - LCD1 is supported */
+/*   Bit 2       = 0 - no TV1  support; = 1 - TV1  is supported */
+/*   Bit 3       = 0 - no DFP1 support; = 1 - DFP1 is supported */
+/*   Bit 4       = 0 - no CRT2 support; = 1 - CRT2 is supported */
+/*   Bit 5       = 0 - no LCD2 support; = 1 - LCD2 is supported */
+/*   Bit 6       = 0 - no TV2  support; = 1 - TV2  is supported */
+/*   Bit 7       = 0 - no DFP2 support; = 1 - DFP2 is supported */
+/*   Bit 8       = 0 - no CV   support; = 1 - CV   is supported */
+/*   Bit 9       = 0 - no DFP3 support; = 1 - DFP3 is supported */
+/*   Byte1 (Supported Device Info) */
+/*   Bit 0       = 0 - no CV support; = 1 - CV is supported */
+/*  */
+/*  */
+
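+/*
+ * Illustrative sketch: a usDeviceSupport word is tested against the
+ * ATOM_DEVICE_xxx_SUPPORT masks defined above, e.g. for DFP1 (helper name
+ * is editorial).
+ */
+static inline int atom_device_support_has_dfp1(USHORT usDeviceSupport)
+{
+	return (usDeviceSupport & ATOM_DEVICE_DFP1_SUPPORT) != 0;
+}
+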
+/*  ucI2C_ConfigID */
+/*    [7:0] - I2C LINE Associate ID */
+/*            = 0 - no I2C */
+/*    [7]   - HW_Cap       = 1, [6:0] = HW assisted I2C ID (HW line selection) */
+/*                         = 0, [6:0] = SW assisted I2C ID */
+/*    [6:4] - HW_ENGINE_ID = 1, HW engine for non-multimedia use */
+/*                         = 2, HW engine for multimedia use */
+/*                         = 3-7, reserved for future I2C engines */
+/*    [3:0] - I2C_LINE_MUX = a mux number when it's HW assisted I2C, or a GPIO ID when it's SW I2C */
+
+typedef struct _ATOM_I2C_ID_CONFIG {
+#if ATOM_BIG_ENDIAN
+       UCHAR bfHW_Capable:1;
+       UCHAR bfHW_EngineID:3;
+       UCHAR bfI2C_LineMux:4;
+#else
+       UCHAR bfI2C_LineMux:4;
+       UCHAR bfHW_EngineID:3;
+       UCHAR bfHW_Capable:1;
+#endif
+} ATOM_I2C_ID_CONFIG;
+
+typedef union _ATOM_I2C_ID_CONFIG_ACCESS {
+       ATOM_I2C_ID_CONFIG sbfAccess;
+       UCHAR ucAccess;
+} ATOM_I2C_ID_CONFIG_ACCESS;
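+
+/*
+ * Illustrative sketch: unpacking a ucI2C_ConfigID byte through the access
+ * union above; the output parameter names are editorial.
+ */
+static inline void atom_i2c_id_decode(UCHAR ucI2cId, UCHAR *pHwCapable,
+				      UCHAR *pEngineId, UCHAR *pLineMux)
+{
+	ATOM_I2C_ID_CONFIG_ACCESS id;
+
+	id.ucAccess = ucI2cId;
+	*pHwCapable = id.sbfAccess.bfHW_Capable;	/* bit [7] */
+	*pEngineId  = id.sbfAccess.bfHW_EngineID;	/* bits [6:4] */
+	*pLineMux   = id.sbfAccess.bfI2C_LineMux;	/* bits [3:0] */
+}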
+
+/****************************************************************************/
+/*  Structure used in GPIO_I2C_InfoTable */
+/****************************************************************************/
+typedef struct _ATOM_GPIO_I2C_ASSIGMENT {
+       USHORT usClkMaskRegisterIndex;
+       USHORT usClkEnRegisterIndex;
+       USHORT usClkY_RegisterIndex;
+       USHORT usClkA_RegisterIndex;
+       USHORT usDataMaskRegisterIndex;
+       USHORT usDataEnRegisterIndex;
+       USHORT usDataY_RegisterIndex;
+       USHORT usDataA_RegisterIndex;
+       ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;
+       UCHAR ucClkMaskShift;
+       UCHAR ucClkEnShift;
+       UCHAR ucClkY_Shift;
+       UCHAR ucClkA_Shift;
+       UCHAR ucDataMaskShift;
+       UCHAR ucDataEnShift;
+       UCHAR ucDataY_Shift;
+       UCHAR ucDataA_Shift;
+       UCHAR ucReserved1;
+       UCHAR ucReserved2;
+} ATOM_GPIO_I2C_ASSIGMENT;
+
+typedef struct _ATOM_GPIO_I2C_INFO {
+       ATOM_COMMON_TABLE_HEADER sHeader;
+       ATOM_GPIO_I2C_ASSIGMENT asGPIO_Info[ATOM_MAX_SUPPORTED_DEVICE];
+} ATOM_GPIO_I2C_INFO;
+
+/****************************************************************************/
+/*  Common Structure used in other structures */
+/****************************************************************************/
+
+#ifndef _H2INC
+
+/* Please don't add to or expand this bitfield structure below; it will be retired soon! */
+typedef struct _ATOM_MODE_MISC_INFO {
+#if ATOM_BIG_ENDIAN
+       USHORT Reserved:6;
+       USHORT RGB888:1;
+       USHORT DoubleClock:1;
+       USHORT Interlace:1;
+       USHORT CompositeSync:1;
+       USHORT V_ReplicationBy2:1;
+       USHORT H_ReplicationBy2:1;
+       USHORT VerticalCutOff:1;
+       USHORT VSyncPolarity:1; /* 0=Active High, 1=Active Low */
+       USHORT HSyncPolarity:1; /* 0=Active High, 1=Active Low */
+       USHORT HorizontalCutOff:1;
+#else
+       USHORT HorizontalCutOff:1;
+       USHORT HSyncPolarity:1; /* 0=Active High, 1=Active Low */
+       USHORT VSyncPolarity:1; /* 0=Active High, 1=Active Low */
+       USHORT VerticalCutOff:1;
+       USHORT H_ReplicationBy2:1;
+       USHORT V_ReplicationBy2:1;
+       USHORT CompositeSync:1;
+       USHORT Interlace:1;
+       USHORT DoubleClock:1;
+       USHORT RGB888:1;
+       USHORT Reserved:6;
+#endif
+} ATOM_MODE_MISC_INFO;
+
+typedef union _ATOM_MODE_MISC_INFO_ACCESS {
+       ATOM_MODE_MISC_INFO sbfAccess;
+       USHORT usAccess;
+} ATOM_MODE_MISC_INFO_ACCESS;
+
+#else
+
+typedef union _ATOM_MODE_MISC_INFO_ACCESS {
+       USHORT usAccess;
+} ATOM_MODE_MISC_INFO_ACCESS;
+
+#endif
+
+/*  usModeMiscInfo- */
+#define ATOM_H_CUTOFF           0x01
+#define ATOM_HSYNC_POLARITY     0x02   /* 0=Active High, 1=Active Low */
+#define ATOM_VSYNC_POLARITY     0x04   /* 0=Active High, 1=Active Low */
+#define ATOM_V_CUTOFF           0x08
+#define ATOM_H_REPLICATIONBY2   0x10
+#define ATOM_V_REPLICATIONBY2   0x20
+#define ATOM_COMPOSITESYNC      0x40
+#define ATOM_INTERLACE          0x80
+#define ATOM_DOUBLE_CLOCK_MODE  0x100
+#define ATOM_RGB888_MODE        0x200
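+
+/*
+ * Illustrative sketch: the flag bits above are OR-ed into the raw USHORT
+ * view of ATOM_MODE_MISC_INFO_ACCESS, e.g. to mark a mode as interlaced
+ * (helper name is editorial).
+ */
+static inline void atom_mode_misc_set_interlace(ATOM_MODE_MISC_INFO_ACCESS *pMisc)
+{
+	pMisc->usAccess |= ATOM_INTERLACE;
+}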
+
+/* usRefreshRate- */
+#define ATOM_REFRESH_43         43
+#define ATOM_REFRESH_47         47
+#define ATOM_REFRESH_56         56
+#define ATOM_REFRESH_60         60
+#define ATOM_REFRESH_65         65
+#define ATOM_REFRESH_70         70
+#define ATOM_REFRESH_72         72
+#define ATOM_REFRESH_75         75
+#define ATOM_REFRESH_85         85
+
+/*  ATOM_MODE_TIMING data are exactly the same as VESA timing data. */
+/*  To translate from EDID to ATOM_MODE_TIMING, use the following formulas: */
+/*  */
+/*    VESA_HTOTAL      = VESA_ACTIVE + 2 * VESA_BORDER + VESA_BLANK */
+/*                     = EDID_HA + EDID_HBL */
+/*    VESA_HDISP       = VESA_ACTIVE = EDID_HA */
+/*    VESA_HSYNC_START = VESA_ACTIVE + VESA_BORDER + VESA_FRONT_PORCH */
+/*                     = EDID_HA + EDID_HSO */
+/*    VESA_HSYNC_WIDTH = VESA_HSYNC_TIME = EDID_HSPW */
+/*    VESA_BORDER      = EDID_BORDER */
+
+/****************************************************************************/
+/*  Structure used in SetCRTC_UsingDTDTimingTable */
+/****************************************************************************/
+typedef struct _SET_CRTC_USING_DTD_TIMING_PARAMETERS {
+       USHORT usH_Size;
+       USHORT usH_Blanking_Time;
+       USHORT usV_Size;
+       USHORT usV_Blanking_Time;
+       USHORT usH_SyncOffset;
+       USHORT usH_SyncWidth;
+       USHORT usV_SyncOffset;
+       USHORT usV_SyncWidth;
+       ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
+       UCHAR ucH_Border;       /*  From DFP EDID */
+       UCHAR ucV_Border;
+       UCHAR ucCRTC;           /*  ATOM_CRTC1 or ATOM_CRTC2 */
+       UCHAR ucPadding[3];
+} SET_CRTC_USING_DTD_TIMING_PARAMETERS;
+
+/****************************************************************************/
+/*  Structure used in SetCRTC_TimingTable */
+/****************************************************************************/
+typedef struct _SET_CRTC_TIMING_PARAMETERS {
+       USHORT usH_Total;       /*  horizontal total */
+       USHORT usH_Disp;        /*  horizontal display */
+       USHORT usH_SyncStart;   /*  horizontal Sync start */
+       USHORT usH_SyncWidth;   /*  horizontal Sync width */
+       USHORT usV_Total;       /*  vertical total */
+       USHORT usV_Disp;        /*  vertical display */
+       USHORT usV_SyncStart;   /*  vertical Sync start */
+       USHORT usV_SyncWidth;   /*  vertical Sync width */
+       ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
+       UCHAR ucCRTC;           /*  ATOM_CRTC1 or ATOM_CRTC2 */
+       UCHAR ucOverscanRight;  /*  right */
+       UCHAR ucOverscanLeft;   /*  left */
+       UCHAR ucOverscanBottom; /*  bottom */
+       UCHAR ucOverscanTop;    /*  top */
+       UCHAR ucReserved;
+} SET_CRTC_TIMING_PARAMETERS;
+#define SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION SET_CRTC_TIMING_PARAMETERS
+
+/****************************************************************************/
+/*  Structure used in StandardVESA_TimingTable */
+/*                    AnalogTV_InfoTable */
+/*                    ComponentVideoInfoTable */
+/****************************************************************************/
+typedef struct _ATOM_MODE_TIMING {
+       USHORT usCRTC_H_Total;
+       USHORT usCRTC_H_Disp;
+       USHORT usCRTC_H_SyncStart;
+       USHORT usCRTC_H_SyncWidth;
+       USHORT usCRTC_V_Total;
+       USHORT usCRTC_V_Disp;
+       USHORT usCRTC_V_SyncStart;
+       USHORT usCRTC_V_SyncWidth;
+       USHORT usPixelClock;    /* in 10Khz unit */
+       ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
+       USHORT usCRTC_OverscanRight;
+       USHORT usCRTC_OverscanLeft;
+       USHORT usCRTC_OverscanBottom;
+       USHORT usCRTC_OverscanTop;
+       USHORT usReserve;
+       UCHAR ucInternalModeNumber;
+       UCHAR ucRefreshRate;
+} ATOM_MODE_TIMING;
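+
+/*
+ * Illustrative sketch: filling the horizontal CRTC fields of ATOM_MODE_TIMING
+ * from EDID detailed-timing values using the translation rules noted above
+ * the StandardVESA_Timing structures. The EDID parameter names are editorial
+ * shorthand (ha = HA, hbl = HBL, hso = HSO, hspw = HSPW).
+ */
+static inline void atom_mode_timing_fill_h_from_edid(ATOM_MODE_TIMING *t,
+						      USHORT ha, USHORT hbl,
+						      USHORT hso, USHORT hspw)
+{
+	t->usCRTC_H_Total     = ha + hbl;	/* VESA_HTOTAL */
+	t->usCRTC_H_Disp      = ha;		/* VESA_HDISP */
+	t->usCRTC_H_SyncStart = ha + hso;	/* VESA_HSYNC_START */
+	t->usCRTC_H_SyncWidth = hspw;		/* VESA_HSYNC_WIDTH */
+}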
+
+typedef struct _ATOM_DTD_FORMAT {
+       USHORT usPixClk;
+       USHORT usHActive;
+       USHORT usHBlanking_Time;
+       USHORT usVActive;
+       USHORT usVBlanking_Time;
+       USHORT usHSyncOffset;
+       USHORT usHSyncWidth;
+       USHORT usVSyncOffset;
+       USHORT usVSyncWidth;
+       USHORT usImageHSize;
+       USHORT usImageVSize;
+       UCHAR ucHBorder;
+       UCHAR ucVBorder;
+       ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
+       UCHAR ucInternalModeNumber;
+       UCHAR ucRefreshRate;
+} ATOM_DTD_FORMAT;
+
+/****************************************************************************/
+/*  Structure used in LVDS_InfoTable */
+/*   * Need a document to describe this table */
+/****************************************************************************/
+#define SUPPORTED_LCD_REFRESHRATE_30Hz          0x0004
+#define SUPPORTED_LCD_REFRESHRATE_40Hz          0x0008
+#define SUPPORTED_LCD_REFRESHRATE_50Hz          0x0010
+#define SUPPORTED_LCD_REFRESHRATE_60Hz          0x0020
+
+/* Once DAL sees this CAP is set, it will read EDID from LCD on its own instead of using sLCDTiming in ATOM_LVDS_INFO_V12. */
+/* Other entries in ATOM_LVDS_INFO_V12 are still valid/useful to DAL */
+#define        LCDPANEL_CAP_READ_EDID                                                                  0x1
+
+/* ucTableFormatRevision=1 */
+/* ucTableContentRevision=1 */
+typedef struct _ATOM_LVDS_INFO {
+       ATOM_COMMON_TABLE_HEADER sHeader;
+       ATOM_DTD_FORMAT sLCDTiming;
+       USHORT usModePatchTableOffset;
+       USHORT usSupportedRefreshRate;  /* Refer to panel info table in ATOMBIOS extension Spec. */
+       USHORT usOffDelayInMs;
+       UCHAR ucPowerSequenceDigOntoDEin10Ms;
+       UCHAR ucPowerSequenceDEtoBLOnin10Ms;
+       UCHAR ucLVDS_Misc;      /*  Bit0:{=0:single, =1:dual},Bit1 {=0:666RGB, =1:888RGB},Bit2:3:{Grey level} */
+       /*  Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888} */
+       /*  Bit5:{=0:Spatial Dithering disabled;1 Spatial Dithering enabled} */
+       /*  Bit6:{=0:Temporal Dithering disabled;1 Temporal Dithering enabled} */
+       UCHAR ucPanelDefaultRefreshRate;
+       UCHAR ucPanelIdentification;
+       UCHAR ucSS_Id;
+} ATOM_LVDS_INFO;
+
+/* ucTableFormatRevision=1 */
+/* ucTableContentRevision=2 */
+typedef struct _ATOM_LVDS_INFO_V12 {
+       ATOM_COMMON_TABLE_HEADER sHeader;
+       ATOM_DTD_FORMAT sLCDTiming;
+       USHORT usExtInfoTableOffset;
+       USHORT usSupportedRefreshRate;  /* Refer to panel info table in ATOMBIOS extension Spec. */
+       USHORT usOffDelayInMs;
+       UCHAR ucPowerSequenceDigOntoDEin10Ms;
+       UCHAR ucPowerSequenceDEtoBLOnin10Ms;
+       UCHAR ucLVDS_Misc;      /*  Bit0:{=0:single, =1:dual},Bit1 {=0:666RGB, =1:888RGB},Bit2:3:{Grey level} */
+       /*  Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888} */
+       /*  Bit5:{=0:Spatial Dithering disabled;1 Spatial Dithering enabled} */
+       /*  Bit6:{=0:Temporal Dithering disabled;1 Temporal Dithering enabled} */
+       UCHAR ucPanelDefaultRefreshRate;
+       UCHAR ucPanelIdentification;
+       UCHAR ucSS_Id;
+       USHORT usLCDVenderID;
+       USHORT usLCDProductID;
+       UCHAR ucLCDPanel_SpecialHandlingCap;
+       UCHAR ucPanelInfoSize;  /*   Starts from ATOM_DTD_FORMAT to the end of the panel info, including the ExtInfoTable */
+       UCHAR ucReserved[2];
+} ATOM_LVDS_INFO_V12;
+
+#define ATOM_LVDS_INFO_LAST  ATOM_LVDS_INFO_V12
+
+typedef struct _ATOM_PATCH_RECORD_MODE {
+       UCHAR ucRecordType;
+       USHORT usHDisp;
+       USHORT usVDisp;
+} ATOM_PATCH_RECORD_MODE;
+
+typedef struct _ATOM_LCD_RTS_RECORD {
+       UCHAR ucRecordType;
+       UCHAR ucRTSValue;
+} ATOM_LCD_RTS_RECORD;
+
+/* !! If the record below exists, it should always be the first record, for easy use in the command table!!! */
+typedef struct _ATOM_LCD_MODE_CONTROL_CAP {
+       UCHAR ucRecordType;
+       USHORT usLCDCap;
+} ATOM_LCD_MODE_CONTROL_CAP;
+
+#define LCD_MODE_CAP_BL_OFF                   1
+#define LCD_MODE_CAP_CRTC_OFF                 2
+#define LCD_MODE_CAP_PANEL_OFF                4
+
+typedef struct _ATOM_FAKE_EDID_PATCH_RECORD {
+       UCHAR ucRecordType;
+       UCHAR ucFakeEDIDLength;
+       UCHAR ucFakeEDIDString[1];      /*  This actually has ucFakeEDIDLength elements. */
+} ATOM_FAKE_EDID_PATCH_RECORD;
+
+typedef struct _ATOM_PANEL_RESOLUTION_PATCH_RECORD {
+       UCHAR ucRecordType;
+       USHORT usHSize;
+       USHORT usVSize;
+} ATOM_PANEL_RESOLUTION_PATCH_RECORD;
+
+#define LCD_MODE_PATCH_RECORD_MODE_TYPE       1
+#define LCD_RTS_RECORD_TYPE                   2
+#define LCD_CAP_RECORD_TYPE                   3
+#define LCD_FAKE_EDID_PATCH_RECORD_TYPE       4
+#define LCD_PANEL_RESOLUTION_RECORD_TYPE      5
+#define ATOM_RECORD_END_TYPE                  0xFF
+
+/****************************Spread Spectrum Info Table Definitions **********************/
+
+/* ucTableFormatRevision=1 */
+/* ucTableContentRevision=2 */
+typedef struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT {
+       USHORT usSpreadSpectrumPercentage;
+       UCHAR ucSpreadSpectrumType;     /* Bit0=0 Down Spread, =1 Center Spread. Bit1=1 Ext., =0 Int. Others: TBD */
+       UCHAR ucSS_Step;
+       UCHAR ucSS_Delay;
+       UCHAR ucSS_Id;
+       UCHAR ucRecommandedRef_Div;
+       UCHAR ucSS_Range;       /* it was reserved for V11 */
+} ATOM_SPREAD_SPECTRUM_ASSIGNMENT;
+
+#define ATOM_MAX_SS_ENTRY                      16
+#define ATOM_DP_SS_ID1                                                                                          0x0f1  /*  SS modulation freq=30k */
+#define ATOM_DP_SS_ID2                                                                                          0x0f2  /*  SS modulation freq=33k */
+
+#define ATOM_SS_DOWN_SPREAD_MODE_MASK          0x00000000
+#define ATOM_SS_DOWN_SPREAD_MODE               0x00000000
+#define ATOM_SS_CENTRE_SPREAD_MODE_MASK        0x00000001
+#define ATOM_SS_CENTRE_SPREAD_MODE             0x00000001
+#define ATOM_INTERNAL_SS_MASK                  0x00000000
+#define ATOM_EXTERNAL_SS_MASK                  0x00000002
+#define EXEC_SS_STEP_SIZE_SHIFT                2
+#define EXEC_SS_DELAY_SHIFT                    4
+#define ACTIVEDATA_TO_BLON_DELAY_SHIFT         4
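+
+/*
+ * Illustrative sketch: classifying ucSpreadSpectrumType with the masks above
+ * (helper names are editorial).
+ */
+static inline int atom_ss_is_centre_spread(UCHAR ucSpreadSpectrumType)
+{
+	return (ucSpreadSpectrumType & ATOM_SS_CENTRE_SPREAD_MODE_MASK) ==
+		ATOM_SS_CENTRE_SPREAD_MODE;
+}
+
+static inline int atom_ss_is_external(UCHAR ucSpreadSpectrumType)
+{
+	return (ucSpreadSpectrumType & ATOM_EXTERNAL_SS_MASK) != 0;
+}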
+
+typedef struct _ATOM_SPREAD_SPECTRUM_INFO {
+       ATOM_COMMON_TABLE_HEADER sHeader;
+       ATOM_SPREAD_SPECTRUM_ASSIGNMENT asSS_Info[ATOM_MAX_SS_ENTRY];
+} ATOM_SPREAD_SPECTRUM_INFO;
+
+/****************************************************************************/
+/*  Structure used in AnalogTV_InfoTable (Top level) */
+/****************************************************************************/
+/* ucTVBootUpDefaultStd definition: */
+
+/* ATOM_TV_NTSC                1 */
+/* ATOM_TV_NTSCJ               2 */
+/* ATOM_TV_PAL                 3 */
+/* ATOM_TV_PALM                4 */
+/* ATOM_TV_PALCN               5 */
+/* ATOM_TV_PALN                6 */
+/* ATOM_TV_PAL60               7 */
+/* ATOM_TV_SECAM               8 */
+
+/* ucTVSuppportedStd definition: */
+#define NTSC_SUPPORT          0x1
+#define NTSCJ_SUPPORT         0x2
+
+#define PAL_SUPPORT           0x4
+#define PALM_SUPPORT          0x8
+#define PALCN_SUPPORT         0x10
+#define PALN_SUPPORT          0x20
+#define PAL60_SUPPORT         0x40
+#define SECAM_SUPPORT         0x80
+
+#define MAX_SUPPORTED_TV_TIMING    2
+
+typedef struct _ATOM_ANALOG_TV_INFO {
+       ATOM_COMMON_TABLE_HEADER sHeader;
+       UCHAR ucTV_SupportedStandard;
+       UCHAR ucTV_BootUpDefaultStandard;
+       UCHAR ucExt_TV_ASIC_ID;
+       UCHAR ucExt_TV_ASIC_SlaveAddr;
+       /*ATOM_DTD_FORMAT          aModeTimings[MAX_SUPPORTED_TV_TIMING]; */
+       ATOM_MODE_TIMING aModeTimings[MAX_SUPPORTED_TV_TIMING];
+} ATOM_ANALOG_TV_INFO;
+
+/**************************************************************************/
+/*  VRAM usage and related definitions */
+
+/*  One chunk of VRAM used by the BIOS is for HWICON surfaces and EDID data, */
+/*  plus current mode timing and detailed timing and/or STD timing data for EACH device. They can be broken down as below. */
+/*  All the addresses below are offsets from the frame buffer start. They all MUST be Dword aligned! */
+/*  To driver: The physical address of this memory portion=mmFB_START(4K aligned)+ATOMBIOS_VRAM_USAGE_START_ADDR+ATOM_x_ADDR */
+/*  To Bios:  ATOMBIOS_VRAM_USAGE_START_ADDR+ATOM_x_ADDR->MM_INDEX */
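+/*  Worked example (illustrative): with the offsets defined below, */
+/*  ATOM_CRT1_EDID_ADDR = 2*4096 + 32 = 8224 bytes from the start of the */
+/*  ATOMBIOS VRAM usage area, so its physical address as seen by the driver */
+/*  is mmFB_START + ATOMBIOS_VRAM_USAGE_START_ADDR + 8224. */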
+
+#ifndef VESA_MEMORY_IN_64K_BLOCK
+#define VESA_MEMORY_IN_64K_BLOCK        0x100  /* 256*64K=16MB (Max. VESA memory is 16MB!) */
+#endif
+
+#define ATOM_EDID_RAW_DATASIZE          256    /* In Bytes */
+#define ATOM_HWICON_SURFACE_SIZE        4096   /* In Bytes */
+#define ATOM_HWICON_INFOTABLE_SIZE      32
+#define MAX_DTD_MODE_IN_VRAM            6
+#define ATOM_DTD_MODE_SUPPORT_TBL_SIZE  (MAX_DTD_MODE_IN_VRAM*28)      /* 28= (SIZEOF ATOM_DTD_FORMAT) */
+#define ATOM_STD_MODE_SUPPORT_TBL_SIZE  (32*8) /* 32 is a predefined number,8= (SIZEOF ATOM_STD_FORMAT) */
+#define DFP_ENCODER_TYPE_OFFSET                                        0x80
+#define DP_ENCODER_LANE_NUM_OFFSET                     0x84
+#define DP_ENCODER_LINK_RATE_OFFSET                    0x88
+
+#define ATOM_HWICON1_SURFACE_ADDR       0
+#define ATOM_HWICON2_SURFACE_ADDR       (ATOM_HWICON1_SURFACE_ADDR + ATOM_HWICON_SURFACE_SIZE)
+#define ATOM_HWICON_INFOTABLE_ADDR      (ATOM_HWICON2_SURFACE_ADDR + ATOM_HWICON_SURFACE_SIZE)
+#define ATOM_CRT1_EDID_ADDR             (ATOM_HWICON_INFOTABLE_ADDR + ATOM_HWICON_INFOTABLE_SIZE)
+#define ATOM_CRT1_DTD_MODE_TBL_ADDR     (ATOM_CRT1_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_CRT1_STD_MODE_TBL_ADDR        (ATOM_CRT1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_LCD1_EDID_ADDR             (ATOM_CRT1_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_LCD1_DTD_MODE_TBL_ADDR     (ATOM_LCD1_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_LCD1_STD_MODE_TBL_ADDR    (ATOM_LCD1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_TV1_DTD_MODE_TBL_ADDR      (ATOM_LCD1_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_DFP1_EDID_ADDR             (ATOM_TV1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_DFP1_DTD_MODE_TBL_ADDR     (ATOM_DFP1_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_DFP1_STD_MODE_TBL_ADDR        (ATOM_DFP1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_CRT2_EDID_ADDR             (ATOM_DFP1_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_CRT2_DTD_MODE_TBL_ADDR     (ATOM_CRT2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_CRT2_STD_MODE_TBL_ADDR        (ATOM_CRT2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_LCD2_EDID_ADDR             (ATOM_CRT2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_LCD2_DTD_MODE_TBL_ADDR     (ATOM_LCD2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_LCD2_STD_MODE_TBL_ADDR    (ATOM_LCD2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_TV2_EDID_ADDR              (ATOM_LCD2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_TV2_DTD_MODE_TBL_ADDR      (ATOM_TV2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_TV2_STD_MODE_TBL_ADDR       (ATOM_TV2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_DFP2_EDID_ADDR             (ATOM_TV2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_DFP2_DTD_MODE_TBL_ADDR     (ATOM_DFP2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_DFP2_STD_MODE_TBL_ADDR     (ATOM_DFP2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_CV_EDID_ADDR               (ATOM_DFP2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_CV_DTD_MODE_TBL_ADDR       (ATOM_CV_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_CV_STD_MODE_TBL_ADDR       (ATOM_CV_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_DFP3_EDID_ADDR             (ATOM_CV_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_DFP3_DTD_MODE_TBL_ADDR     (ATOM_DFP3_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_DFP3_STD_MODE_TBL_ADDR     (ATOM_DFP3_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_DFP4_EDID_ADDR             (ATOM_DFP3_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_DFP4_DTD_MODE_TBL_ADDR     (ATOM_DFP4_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_DFP4_STD_MODE_TBL_ADDR     (ATOM_DFP4_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_DFP5_EDID_ADDR             (ATOM_DFP4_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_DFP5_DTD_MODE_TBL_ADDR     (ATOM_DFP5_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_DFP5_STD_MODE_TBL_ADDR     (ATOM_DFP5_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_DP_TRAINING_TBL_ADDR      (ATOM_DFP5_STD_MODE_TBL_ADDR+ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_STACK_STORAGE_START        (ATOM_DP_TRAINING_TBL_ADDR + 256)
+#define ATOM_STACK_STORAGE_END          (ATOM_STACK_STORAGE_START + 512)
+
+/* The size below is in KB! */
+#define ATOM_VRAM_RESERVE_SIZE         ((((ATOM_STACK_STORAGE_END - ATOM_HWICON1_SURFACE_ADDR)>>10)+4)&0xFFFC)
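+
+/*
+ * Illustrative sketch (editor's example, not part of the AtomBIOS interface):
+ * per the "To driver" note above, each BIOS-owned block is located by adding
+ * its ATOM_x_ADDR offset to the frame buffer start plus the ATOMBIOS VRAM
+ * usage start address.  'fb_start' and 'vram_usage_start' are hypothetical
+ * inputs supplied by the caller.
+ */
+static inline ULONG atom_example_block_phys_addr(ULONG fb_start,
+                                                 ULONG vram_usage_start,
+                                                 ULONG block_offset)
+{
+       /* e.g. block_offset == ATOM_CRT1_EDID_ADDR; all offsets are Dword aligned */
+       return fb_start + vram_usage_start + block_offset;
+}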
+
+#define        ATOM_VRAM_OPERATION_FLAGS_MASK         0xC0000000L
+#define ATOM_VRAM_OPERATION_FLAGS_SHIFT        30
+#define        ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION   0x1
+#define        ATOM_VRAM_BLOCK_NEEDS_RESERVATION      0x0
+
+/***********************************************************************************/
+/*  Structure used in VRAM_UsageByFirmwareTable */
+/*  Note1: This table is filled by SetBiosReservationStartInFB in CoreCommSubs.asm */
+/*         at running time. */
+/*  Note2: From RV770, the memory is more than 32bit addressable, so we will change */
+/*         ucTableFormatRevision=1,ucTableContentRevision=4, the structure remains */
+/*         exactly the same as 1.1 and 1.2 (1.3 is never in use), but ulStartAddrUsedByFirmware */
+/*         (as an offset from the start of memory) is KB aligned instead of byte aligned. */
+/***********************************************************************************/
+#define ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO                      1
+
+typedef struct _ATOM_FIRMWARE_VRAM_RESERVE_INFO {
+       ULONG ulStartAddrUsedByFirmware;
+       USHORT usFirmwareUseInKb;
+       USHORT usReserved;
+} ATOM_FIRMWARE_VRAM_RESERVE_INFO;
+
+typedef struct _ATOM_VRAM_USAGE_BY_FIRMWARE {
+       ATOM_COMMON_TABLE_HEADER sHeader;
+       ATOM_FIRMWARE_VRAM_RESERVE_INFO
+           asFirmwareVramReserveInfo[ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO];
+} ATOM_VRAM_USAGE_BY_FIRMWARE;
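+
+/*
+ * Illustrative sketch (editor's example, not part of the AtomBIOS interface):
+ * per Note2 above, content revision 4 of this table reports
+ * ulStartAddrUsedByFirmware in KB rather than in bytes, so a driver reading
+ * it might normalize the value roughly as follows (endian conversion omitted
+ * for brevity; the revision check is an assumption for the example).
+ */
+static inline ULONG atom_example_fw_reserve_start_in_bytes(
+       const ATOM_VRAM_USAGE_BY_FIRMWARE *table)
+{
+       ULONG start = table->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware;
+
+       if (table->sHeader.ucTableContentRevision >= 4)
+               start <<= 10;   /* KB -> bytes */
+       return start;
+}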
+
+/****************************************************************************/
+/*  Structure used in GPIO_Pin_LUTTable */
+/****************************************************************************/
+typedef struct _ATOM_GPIO_PIN_ASSIGNMENT {
+       USHORT usGpioPin_AIndex;
+       UCHAR ucGpioPinBitShift;
+       UCHAR ucGPIO_ID;
+} ATOM_GPIO_PIN_ASSIGNMENT;
+
+typedef struct _ATOM_GPIO_PIN_LUT {
+       ATOM_COMMON_TABLE_HEADER sHeader;
+       ATOM_GPIO_PIN_ASSIGNMENT asGPIO_Pin[1];
+} ATOM_GPIO_PIN_LUT;
+
+/****************************************************************************/
+/*  Structure used in ComponentVideoInfoTable */
+/****************************************************************************/
+#define GPIO_PIN_ACTIVE_HIGH          0x1
+
+#define MAX_SUPPORTED_CV_STANDARDS    5
+
+/*  definitions for ATOM_GPIO_INFO.ucSettings */
+#define ATOM_GPIO_SETTINGS_BITSHIFT_MASK  0x1F /*  [4:0] */
+#define ATOM_GPIO_SETTINGS_RESERVED_MASK  0x60 /*  [6:5] = must be zeroed out */
+#define ATOM_GPIO_SETTINGS_ACTIVE_MASK    0x80 /*  [7] */
+
+typedef struct _ATOM_GPIO_INFO {
+       USHORT usAOffset;
+       UCHAR ucSettings;
+       UCHAR ucReserved;
+} ATOM_GPIO_INFO;
+
+/*  definitions for ATOM_COMPONENT_VIDEO_INFO.ucMiscInfo (bit vector) */
+#define ATOM_CV_RESTRICT_FORMAT_SELECTION           0x2
+
+/*  definitions for ATOM_COMPONENT_VIDEO_INFO.uc480i/uc480p/uc720p/uc1080i */
+#define ATOM_GPIO_DEFAULT_MODE_EN                   0x80       /* [7]; */
+#define ATOM_GPIO_SETTING_PERMODE_MASK              0x7F       /* [6:0] */
+
+/*  definitions for ATOM_COMPONENT_VIDEO_INFO.ucLetterBoxMode */
+/* Line 3 outputs 5V. */
+#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_A       0x01       /* represent gpio 3 state for 16:9 */
+#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_B       0x02       /* represent gpio 4 state for 16:9 */
+#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_SHIFT   0x0
+
+/* Line 3 outputs 2.2V */
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_A 0x04       /* represent gpio 3 state for 4:3 Letter box */
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_B 0x08       /* represent gpio 4 state for 4:3 Letter box */
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_SHIFT 0x2
+
+/* Line 3 outputs 0V */
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_A        0x10       /* represent gpio 3 state for 4:3 */
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_B        0x20       /* represent gpio 4 state for 4:3 */
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_SHIFT    0x4
+
+#define ATOM_CV_LINE3_ASPECTRATIO_MASK              0x3F       /*  bit [5:0] */
+
+#define ATOM_CV_LINE3_ASPECTRATIO_EXIST             0x80       /* bit 7 */
+
+/* GPIO bit index in the per-mode GPIO setting value; also represents the block number in the GPIO blocks. */
+#define ATOM_GPIO_INDEX_LINE3_ASPECRATIO_GPIO_A   3    /* bit 3 in uc480i/uc480p/uc720p/uc1080i, which represents the default GPIO bit setting for the mode. */
+#define ATOM_GPIO_INDEX_LINE3_ASPECRATIO_GPIO_B   4    /* bit 4 in uc480i/uc480p/uc720p/uc1080i, which represents the default GPIO bit setting for the mode. */
+
+typedef struct _ATOM_COMPONENT_VIDEO_INFO {
+       ATOM_COMMON_TABLE_HEADER sHeader;
+       USHORT usMask_PinRegisterIndex;
+       USHORT usEN_PinRegisterIndex;
+       USHORT usY_PinRegisterIndex;
+       USHORT usA_PinRegisterIndex;
+       UCHAR ucBitShift;
+       UCHAR ucPinActiveState; /* ucPinActiveState: Bit0=1 active high, =0 active low */
+       ATOM_DTD_FORMAT sReserved;      /*  must be zeroed out */
+       UCHAR ucMiscInfo;
+       UCHAR uc480i;
+       UCHAR uc480p;
+       UCHAR uc720p;
+       UCHAR uc1080i;
+       UCHAR ucLetterBoxMode;
+       UCHAR ucReserved[3];
+       UCHAR ucNumOfWbGpioBlocks;      /* For Component video D-Connector support. If zero, NTSC type connector */
+       ATOM_GPIO_INFO aWbGpioStateBlock[MAX_SUPPORTED_CV_STANDARDS];
+       ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_CV_STANDARDS];
+} ATOM_COMPONENT_VIDEO_INFO;
+
+/* ucTableFormatRevision=2 */
+/* ucTableContentRevision=1 */
+typedef struct _ATOM_COMPONENT_VIDEO_INFO_V21 {
+       ATOM_COMMON_TABLE_HEADER sHeader;
+       UCHAR ucMiscInfo;
+       UCHAR uc480i;
+       UCHAR uc480p;
+       UCHAR uc720p;
+       UCHAR uc1080i;
+       UCHAR ucReserved;
+       UCHAR ucLetterBoxMode;
+       UCHAR ucNumOfWbGpioBlocks;      /* For Component video D-Connector support. If zero, NTSC type connector */
+       ATOM_GPIO_INFO aWbGpioStateBlock[MAX_SUPPORTED_CV_STANDARDS];
+       ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_CV_STANDARDS];
+} ATOM_COMPONENT_VIDEO_INFO_V21;
+
+#define ATOM_COMPONENT_VIDEO_INFO_LAST  ATOM_COMPONENT_VIDEO_INFO_V21
+
+/****************************************************************************/
+/*  Structure used in object_InfoTable */
+/****************************************************************************/
+typedef struct _ATOM_OBJECT_HEADER {
+       ATOM_COMMON_TABLE_HEADER sHeader;
+       USHORT usDeviceSupport;
+       USHORT usConnectorObjectTableOffset;
+       USHORT usRouterObjectTableOffset;
+       USHORT usEncoderObjectTableOffset;
+       USHORT usProtectionObjectTableOffset;   /* only available when Protection block is independent. */
+       USHORT usDisplayPathTableOffset;
+} ATOM_OBJECT_HEADER;
+
+typedef struct _ATOM_DISPLAY_OBJECT_PATH {
+       USHORT usDeviceTag;     /* supported device */
+       USHORT usSize;          /* the size of ATOM_DISPLAY_OBJECT_PATH */
+       USHORT usConnObjectId;  /* Connector Object ID */
+       USHORT usGPUObjectId;   /* GPU ID */
+       USHORT usGraphicObjIds[1];      /* From the 1st encoder object sourced from the GPU to the last graphic object terminating at the connector. */
+} ATOM_DISPLAY_OBJECT_PATH;
+
+typedef struct _ATOM_DISPLAY_OBJECT_PATH_TABLE {
+       UCHAR ucNumOfDispPath;
+       UCHAR ucVersion;
+       UCHAR ucPadding[2];
+       ATOM_DISPLAY_OBJECT_PATH asDispPath[1];
+} ATOM_DISPLAY_OBJECT_PATH_TABLE;
+
+typedef struct _ATOM_OBJECT    /* each object has this structure */
+{
+       USHORT usObjectID;
+       USHORT usSrcDstTableOffset;
+       USHORT usRecordOffset;  /* this points to a set of records defined below */
+       USHORT usReserved;
+} ATOM_OBJECT;
+
+typedef struct _ATOM_OBJECT_TABLE      /* Each of the four object table offsets above points to a set of objects, all of which have this structure */
+{
+       UCHAR ucNumberOfObjects;
+       UCHAR ucPadding[3];
+       ATOM_OBJECT asObjects[1];
+} ATOM_OBJECT_TABLE;
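+
+/*
+ * Illustrative sketch (editor's example, not part of the AtomBIOS interface):
+ * walking the encoder object list.  It assumes the usXxxObjectTableOffset
+ * fields in ATOM_OBJECT_HEADER are relative to the start of the
+ * object_InfoTable itself and ignores endian conversion, so treat it as a
+ * sketch rather than as the driver's actual parser.
+ */
+static inline USHORT atom_example_first_encoder_id(const UCHAR *object_info_base)
+{
+       const ATOM_OBJECT_HEADER *hdr =
+               (const ATOM_OBJECT_HEADER *)object_info_base;
+       const ATOM_OBJECT_TABLE *encoders =
+               (const ATOM_OBJECT_TABLE *)(object_info_base +
+                                           hdr->usEncoderObjectTableOffset);
+
+       if (encoders->ucNumberOfObjects == 0)
+               return 0;
+       /* asObjects[0].usRecordOffset then points at that object's records */
+       return encoders->asObjects[0].usObjectID;
+}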
+
+typedef struct _ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT      /* usSrcDstTableOffset points to this structure */
+{
+       UCHAR ucNumberOfSrc;
+       USHORT usSrcObjectID[1];
+       UCHAR ucNumberOfDst;
+       USHORT usDstObjectID[1];
+} ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT;
+
+/* Related definitions; all records are different but they share a common header */
+typedef struct _ATOM_COMMON_RECORD_HEADER {
+       UCHAR ucRecordType;     /* An enum to indicate the record type */
+       UCHAR ucRecordSize;     /* The size of the whole record in bytes */
+} ATOM_COMMON_RECORD_HEADER;
+
+#define ATOM_I2C_RECORD_TYPE                           1
+#define ATOM_HPD_INT_RECORD_TYPE                       2
+#define ATOM_OUTPUT_PROTECTION_RECORD_TYPE             3
+#define ATOM_CONNECTOR_DEVICE_TAG_RECORD_TYPE          4
+#define        ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD_TYPE             5  /* Obsolete, switch to use GPIO_CNTL_RECORD_TYPE */
+#define ATOM_ENCODER_FPGA_CONTROL_RECORD_TYPE          6       /* Obsolete, switch to use GPIO_CNTL_RECORD_TYPE */
+#define ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD_TYPE      7
+#define ATOM_JTAG_RECORD_TYPE                          8       /* Obsolete, switch to use GPIO_CNTL_RECORD_TYPE */
+#define ATOM_OBJECT_GPIO_CNTL_RECORD_TYPE              9
+#define ATOM_ENCODER_DVO_CF_RECORD_TYPE               10
+#define ATOM_CONNECTOR_CF_RECORD_TYPE                 11
+#define        ATOM_CONNECTOR_HARDCODE_DTD_RECORD_TYPE       12
+#define ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD_TYPE  13
+#define ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE                                14
+#define ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE                                 15
+
+/* Must be updated when a new record type is added; set it equal to that record's type value! */
+#define ATOM_MAX_OBJECT_RECORD_NUMBER             ATOM_CONNECTOR_CF_RECORD_TYPE
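+
+/*
+ * Illustrative sketch (editor's example, not part of the AtomBIOS interface):
+ * records attached to an object are packed back to back, so a parser steps
+ * from one to the next using ucRecordSize.  The termination condition below
+ * (a record type or size of 0) is an assumption made for the example.
+ */
+static inline const ATOM_COMMON_RECORD_HEADER *
+atom_example_find_record(const UCHAR *first_record, UCHAR wanted_type)
+{
+       const ATOM_COMMON_RECORD_HEADER *rec =
+               (const ATOM_COMMON_RECORD_HEADER *)first_record;
+
+       while (rec->ucRecordType != 0 && rec->ucRecordSize != 0) {
+               if (rec->ucRecordType == wanted_type)
+                       return rec;     /* e.g. wanted_type == ATOM_I2C_RECORD_TYPE */
+               rec = (const ATOM_COMMON_RECORD_HEADER *)
+                       ((const UCHAR *)rec + rec->ucRecordSize);
+       }
+       return 0;       /* not found */
+}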
+
+typedef struct _ATOM_I2C_RECORD {
+       ATOM_COMMON_RECORD_HEADER sheader;
+       ATOM_I2C_ID_CONFIG sucI2cId;
+       UCHAR ucI2CAddr;        /* The slave address, it's 0 when the record is attached to connector for DDC */
+} ATOM_I2C_RECORD;
+
+typedef struct _ATOM_HPD_INT_RECORD {
+       ATOM_COMMON_RECORD_HEADER sheader;
+       UCHAR ucHPDIntGPIOID;   /* Corresponding block in GPIO_PIN_INFO table gives the pin info */
+       UCHAR ucPluggged_PinState;
+} ATOM_HPD_INT_RECORD;
+
+typedef struct _ATOM_OUTPUT_PROTECTION_RECORD {
+       ATOM_COMMON_RECORD_HEADER sheader;
+       UCHAR ucProtectionFlag;
+       UCHAR ucReserved;
+} ATOM_OUTPUT_PROTECTION_RECORD;
+
+typedef struct _ATOM_CONNECTOR_DEVICE_TAG {
+       ULONG ulACPIDeviceEnum; /* Reserved for now */
+       USHORT usDeviceID;      /* This ID is the same as "ATOM_DEVICE_XXX_SUPPORT" */
+       USHORT usPadding;
+} ATOM_CONNECTOR_DEVICE_TAG;
+
+typedef struct _ATOM_CONNECTOR_DEVICE_TAG_RECORD {
+       ATOM_COMMON_RECORD_HEADER sheader;
+       UCHAR ucNumberOfDevice;
+       UCHAR ucReserved;
+       ATOM_CONNECTOR_DEVICE_TAG asDeviceTag[1];       /* This ID is the same as "ATOM_DEVICE_XXX_SUPPORT"; 1 is only for allocation */
+} ATOM_CONNECTOR_DEVICE_TAG_RECORD;
+
+typedef struct _ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD {
+       ATOM_COMMON_RECORD_HEADER sheader;
+       UCHAR ucConfigGPIOID;
+       UCHAR ucConfigGPIOState;        /* Set to 1 when it's active high to enable external flow in */
+       UCHAR ucFlowinGPIPID;
+       UCHAR ucExtInGPIPID;
+} ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD;
+
+typedef struct _ATOM_ENCODER_FPGA_CONTROL_RECORD {
+       ATOM_COMMON_RECORD_HEADER sheader;
+       UCHAR ucCTL1GPIO_ID;
+       UCHAR ucCTL1GPIOState;  /* Set to 1 when it's active high */
+       UCHAR ucCTL2GPIO_ID;
+       UCHAR ucCTL2GPIOState;  /* Set to 1 when it's active high */
+       UCHAR ucCTL3GPIO_ID;
+       UCHAR ucCTL3GPIOState;  /* Set to 1 when it's active high */
+       UCHAR ucCTLFPGA_IN_ID;
+       UCHAR ucPadding[3];
+} ATOM_ENCODER_FPGA_CONTROL_RECORD;
+
+typedef struct _ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD {
+       ATOM_COMMON_RECORD_HEADER sheader;
+       UCHAR ucGPIOID;         /* Corresponding block in GPIO_PIN_INFO table gives the pin info */
+       UCHAR ucTVActiveState;  /* Indicates whether the pin is 0 or 1 when a TV is connected */
+} ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD;
+
+typedef struct _ATOM_JTAG_RECORD {
+       ATOM_COMMON_RECORD_HEADER sheader;
+       UCHAR ucTMSGPIO_ID;
+       UCHAR ucTMSGPIOState;   /* Set to 1 when it's active high */
+       UCHAR ucTCKGPIO_ID;
+       UCHAR ucTCKGPIOState;   /* Set to 1 when it's active high */
+       UCHAR ucTDOGPIO_ID;
+       UCHAR ucTDOGPIOState;   /* Set to 1 when it's active high */
+       UCHAR ucTDIGPIO_ID;
+       UCHAR ucTDIGPIOState;   /* Set to 1 when it's active high */
+       UCHAR ucPadding[2];
+} ATOM_JTAG_RECORD;
+
+/* The following generic object GPIO pin control record type will gradually replace JTAG_RECORD/FPGA_CONTROL_RECORD/DVI_EXT_INPUT_RECORD above */
+typedef struct _ATOM_GPIO_PIN_CONTROL_PAIR {
+       UCHAR ucGPIOID;         /*  GPIO_ID, find the corresponding ID in GPIO_LUT table */
+       UCHAR ucGPIO_PinState;  /*  Pin state showing how to set-up the pin */
+} ATOM_GPIO_PIN_CONTROL_PAIR;
+
+typedef struct _ATOM_OBJECT_GPIO_CNTL_RECORD {
+       ATOM_COMMON_RECORD_HEADER sheader;
+       UCHAR ucFlags;          /*  Future expandability */
+       UCHAR ucNumberOfPins;   /*  Number of GPIO pins used to control the object */
+       ATOM_GPIO_PIN_CONTROL_PAIR asGpio[1];   /*  the real GPIO pin pairs; the actual count is given by ucNumberOfPins */
+} ATOM_OBJECT_GPIO_CNTL_RECORD;
+
+/* Definitions for GPIO pin state */
+#define GPIO_PIN_TYPE_INPUT             0x00
+#define GPIO_PIN_TYPE_OUTPUT            0x10
+#define GPIO_PIN_TYPE_HW_CONTROL        0x20
+
+/* For GPIO_PIN_TYPE_OUTPUT the following is defined */
+#define GPIO_PIN_OUTPUT_STATE_MASK      0x01
+#define GPIO_PIN_OUTPUT_STATE_SHIFT     0
+#define GPIO_PIN_STATE_ACTIVE_LOW       0x0
+#define GPIO_PIN_STATE_ACTIVE_HIGH      0x1
+
+typedef struct _ATOM_ENCODER_DVO_CF_RECORD {
+       ATOM_COMMON_RECORD_HEADER sheader;
+       ULONG ulStrengthControl;        /*  DVOA strength control for CF */
+       UCHAR ucPadding[2];
+} ATOM_ENCODER_DVO_CF_RECORD;
+
+/*  value for ATOM_CONNECTOR_CF_RECORD.ucConnectedDvoBundle */
+#define ATOM_CONNECTOR_CF_RECORD_CONNECTED_UPPER12BITBUNDLEA   1
+#define ATOM_CONNECTOR_CF_RECORD_CONNECTED_LOWER12BITBUNDLEB   2
+
+typedef struct _ATOM_CONNECTOR_CF_RECORD {
+       ATOM_COMMON_RECORD_HEADER sheader;
+       USHORT usMaxPixClk;
+       UCHAR ucFlowCntlGpioId;
+       UCHAR ucSwapCntlGpioId;
+       UCHAR ucConnectedDvoBundle;
+       UCHAR ucPadding;
+} ATOM_CONNECTOR_CF_RECORD;
+
+typedef struct _ATOM_CONNECTOR_HARDCODE_DTD_RECORD {
+       ATOM_COMMON_RECORD_HEADER sheader;
+       ATOM_DTD_FORMAT asTiming;
+} ATOM_CONNECTOR_HARDCODE_DTD_RECORD;
+
+typedef struct _ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD {
+       ATOM_COMMON_RECORD_HEADER sheader;      /* ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD_TYPE */
+       UCHAR ucSubConnectorType;       /* CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D|X_ID_DUAL_LINK_DVI_D|HDMI_TYPE_A */
+       UCHAR ucReserved;
+} ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD;
+
+typedef struct _ATOM_ROUTER_DDC_PATH_SELECT_RECORD {
+       ATOM_COMMON_RECORD_HEADER sheader;
+       UCHAR ucMuxType;        /* determines the number of ucMuxState entries: =0 no pin state, =1 single state with complement, >1 multiple states */
+       UCHAR ucMuxControlPin;
+       UCHAR ucMuxState[2];    /* for alignment purposes */
+} ATOM_ROUTER_DDC_PATH_SELECT_RECORD;
+
+typedef struct _ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD {
+       ATOM_COMMON_RECORD_HEADER sheader;
+       UCHAR ucMuxType;
+       UCHAR ucMuxControlPin;
+       UCHAR ucMuxState[2];    /* for alignment purposes */
+} ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD;
+
+/*  define ucMuxType */
+#define ATOM_ROUTER_MUX_PIN_STATE_MASK                                                         0x0f
+#define ATOM_ROUTER_MUX_PIN_SINGLE_STATE_COMPLEMENT            0x01
+
+/****************************************************************************/
+/*  ASIC voltage data table */
+/****************************************************************************/
+typedef struct _ATOM_VOLTAGE_INFO_HEADER {
+       USHORT usVDDCBaseLevel; /* In units of 50mV */
+       USHORT usReserved;      /* For possible extension table offset */
+       UCHAR ucNumOfVoltageEntries;
+       UCHAR ucBytesPerVoltageEntry;
+       UCHAR ucVoltageStep;    /* Indicates how many mV one step increments by, in 0.5mV units */
+       UCHAR ucDefaultVoltageEntry;
+       UCHAR ucVoltageControlI2cLine;
+       UCHAR ucVoltageControlAddress;
+       UCHAR ucVoltageControlOffset;
+} ATOM_VOLTAGE_INFO_HEADER;
+
+typedef struct _ATOM_VOLTAGE_INFO {
+       ATOM_COMMON_TABLE_HEADER sHeader;
+       ATOM_VOLTAGE_INFO_HEADER viHeader;
+       UCHAR ucVoltageEntries[64];     /* 64 is for allocation; the actual number of bytes is ucNumOfVoltageEntries*ucBytesPerVoltageEntry */
+} ATOM_VOLTAGE_INFO;
+
+typedef struct _ATOM_VOLTAGE_FORMULA {
+       USHORT usVoltageBaseLevel;      /*  In units of 1mV */
+       USHORT usVoltageStep;   /*  Indicates how many mV one step increments by, in 1mV units */
+       UCHAR ucNumOfVoltageEntries;    /*  Number of voltage entries, which indicates the max voltage */
+       UCHAR ucFlag;           /*  bit0=0: step is 1mV, =1: 0.5mV */
+       UCHAR ucBaseVID;        /*  if there is no lookup table, VID = BaseVID + (Vol - BaseLevel)/VoltageStep */
+       UCHAR ucReserved;
+       UCHAR ucVIDAdjustEntries[32];   /*  32 is for allocation; the actual number of entries is given by ucNumOfVoltageEntries */
+} ATOM_VOLTAGE_FORMULA;
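+
+/*
+ * Illustrative sketch (editor's example, not part of the AtomBIOS interface):
+ * the formula quoted above for ucBaseVID, coded out.  It assumes the
+ * requested voltage uses the same 1mV units as usVoltageBaseLevel and
+ * usVoltageStep and ignores the 0.5mV case flagged by bit0 of ucFlag.
+ */
+static inline UCHAR atom_example_voltage_to_vid(const ATOM_VOLTAGE_FORMULA *f,
+                                                USHORT voltage_in_mv)
+{
+       if (f->usVoltageStep == 0 || voltage_in_mv < f->usVoltageBaseLevel)
+               return f->ucBaseVID;
+       return (UCHAR)(f->ucBaseVID +
+                      (voltage_in_mv - f->usVoltageBaseLevel) / f->usVoltageStep);
+}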
+
+typedef struct _ATOM_VOLTAGE_CONTROL {
+       UCHAR ucVoltageControlId;       /* Indicate it is controlled by I2C or GPIO or HW state machine */
+       UCHAR ucVoltageControlI2cLine;
+       UCHAR ucVoltageControlAddress;
+       UCHAR ucVoltageControlOffset;
+       USHORT usGpioPin_AIndex;        /* GPIO_PAD register index */
+       UCHAR ucGpioPinBitShift[9];     /* at most 8 pins supporting 255 VIDs; terminated with 0xff */
+       UCHAR ucReserved;
+} ATOM_VOLTAGE_CONTROL;
+
+/*  Define ucVoltageControlId */
+#define        VOLTAGE_CONTROLLED_BY_HW                                                        0x00
+#define        VOLTAGE_CONTROLLED_BY_I2C_MASK                          0x7F
+#define        VOLTAGE_CONTROLLED_BY_GPIO                                              0x80
+#define        VOLTAGE_CONTROL_ID_LM64                                                         0x01    /* I2C control, used for R5xx Core Voltage */
+#define        VOLTAGE_CONTROL_ID_DAC                                                          0x02    /* I2C control, used for R5xx/R6xx MVDDC,MVDDQ or VDDCI */
+#define        VOLTAGE_CONTROL_ID_VT116xM                                              0x03    /* I2C control, used for R6xx Core Voltage */
+#define VOLTAGE_CONTROL_ID_DS4402                                                      0x04
+
+typedef struct _ATOM_VOLTAGE_OBJECT {
+       UCHAR ucVoltageType;    /* Indicates the voltage source: VDDC, MVDDC, MVDDQ or MVDDCI */
+       UCHAR ucSize;           /* Size of the object */
+       ATOM_VOLTAGE_CONTROL asControl; /* describes how to control the voltage */
+       ATOM_VOLTAGE_FORMULA asFormula; /* indicates how to convert a real voltage to a VID */
+} ATOM_VOLTAGE_OBJECT;
+
+typedef struct _ATOM_VOLTAGE_OBJECT_INFO {
+       ATOM_COMMON_TABLE_HEADER sHeader;
+       ATOM_VOLTAGE_OBJECT asVoltageObj[3];    /* Info for Voltage control */
+} ATOM_VOLTAGE_OBJECT_INFO;
+
+typedef struct _ATOM_LEAKID_VOLTAGE {
+       UCHAR ucLeakageId;
+       UCHAR ucReserved;
+       USHORT usVoltage;
+} ATOM_LEAKID_VOLTAGE;
+
+typedef struct _ATOM_ASIC_PROFILE_VOLTAGE {
+       UCHAR ucProfileId;
+       UCHAR ucReserved;
+       USHORT usSize;
+       USHORT usEfuseSpareStartAddr;
+       USHORT usFuseIndex[8];  /* from LSB to MSB, max 8 bits; terminated with 0xffff if there are fewer than 8 efuse IDs */
+       ATOM_LEAKID_VOLTAGE asLeakVol[2];       /* Leakage ID and related voltage */
+} ATOM_ASIC_PROFILE_VOLTAGE;
+
+/* ucProfileId */
+#define        ATOM_ASIC_PROFILE_ID_EFUSE_VOLTAGE                      1
+#define        ATOM_ASIC_PROFILE_ID_EFUSE_PERFORMANCE_VOLTAGE                  1
+#define        ATOM_ASIC_PROFILE_ID_EFUSE_THERMAL_VOLTAGE                                      2
+
+typedef struct _ATOM_ASIC_PROFILING_INFO {
+       ATOM_COMMON_TABLE_HEADER asHeader;
+       ATOM_ASIC_PROFILE_VOLTAGE asVoltage;
+} ATOM_ASIC_PROFILING_INFO;
+
+typedef struct _ATOM_POWER_SOURCE_OBJECT {
+       UCHAR ucPwrSrcId;       /*  Power source */
+       UCHAR ucPwrSensorType;  /*  GPIO, I2C or none */
+       UCHAR ucPwrSensId;      /*  if GPIO detect, it is GPIO id,  if I2C detect, it is I2C id */
+       UCHAR ucPwrSensSlaveAddr;       /*  Slave address if I2C detect */
+       UCHAR ucPwrSensRegIndex;        /*  I2C register Index if I2C detect */
+       UCHAR ucPwrSensRegBitMask;      /*  detect which bit is used if I2C detect */
+       UCHAR ucPwrSensActiveState;     /*  high active or low active */
+       UCHAR ucReserve[3];     /*  reserve */
+       USHORT usSensPwr;       /*  in units of watts */
+} ATOM_POWER_SOURCE_OBJECT;
+
+typedef struct _ATOM_POWER_SOURCE_INFO {
+       ATOM_COMMON_TABLE_HEADER asHeader;
+       UCHAR asPwrbehave[16];
+       ATOM_POWER_SOURCE_OBJECT asPwrObj[1];
+} ATOM_POWER_SOURCE_INFO;
+
+/* Define ucPwrSrcId */
+#define POWERSOURCE_PCIE_ID1                                           0x00
+#define POWERSOURCE_6PIN_CONNECTOR_ID1 0x01
+#define POWERSOURCE_8PIN_CONNECTOR_ID1 0x02
+#define POWERSOURCE_6PIN_CONNECTOR_ID2 0x04
+#define POWERSOURCE_8PIN_CONNECTOR_ID2 0x08
+
+/* define ucPwrSensorId */
+#define POWER_SENSOR_ALWAYS                                                    0x00
+#define POWER_SENSOR_GPIO                                                              0x01
+#define POWER_SENSOR_I2C                                                               0x02
+
+/**************************************************************************/
+/*  This portion is only used when an external thermal chip or an engine/memory clock SS chip is populated on a design */
+/* Memory SS Info Table */
+/* Define Memory Clock SS chip ID */
+#define ICS91719  1
+#define ICS91720  2
+
+/* Define one structure to inform SW of a "block of data" to write to the external SS chip via the I2C protocol */
+typedef struct _ATOM_I2C_DATA_RECORD {
+       UCHAR ucNunberOfBytes;  /* Indicates how many bytes SW needs to write to the external ASIC for one block, besides the "Start" and "Stop" conditions */
+       UCHAR ucI2CData[1];     /* I2C data in bytes, usually less than 16 bytes */
+} ATOM_I2C_DATA_RECORD;
+
+/* Define one structure to inform SW how many blocks of data to write to the external SS chip via the I2C protocol, in addition to other information */
+typedef struct _ATOM_I2C_DEVICE_SETUP_INFO {
+       ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;     /* I2C line and HW/SW assisted cap. */
+       UCHAR ucSSChipID;       /* SS chip being used */
+       UCHAR ucSSChipSlaveAddr;        /* Slave Address to set up this SS chip */
+       UCHAR ucNumOfI2CDataRecords;    /* number of data block */
+       ATOM_I2C_DATA_RECORD asI2CData[1];
+} ATOM_I2C_DEVICE_SETUP_INFO;
+
+/* ========================================================================================== */
+typedef struct _ATOM_ASIC_MVDD_INFO {
+       ATOM_COMMON_TABLE_HEADER sHeader;
+       ATOM_I2C_DEVICE_SETUP_INFO asI2CSetup[1];
+} ATOM_ASIC_MVDD_INFO;
+
+/* ========================================================================================== */
+#define ATOM_MCLK_SS_INFO         ATOM_ASIC_MVDD_INFO
+
+/* ========================================================================================== */
+/**************************************************************************/
+
+typedef struct _ATOM_ASIC_SS_ASSIGNMENT {
+       ULONG ulTargetClockRange;       /* Clock out frequency (VCO), in units of 10kHz */
+       USHORT usSpreadSpectrumPercentage;      /* in units of 0.01% */
+       USHORT usSpreadRateInKhz;       /* in units of kHz, modulation frequency */
+       UCHAR ucClockIndication;        /* Indicate which clock source needs SS */
+       UCHAR ucSpreadSpectrumMode;     /* Bit1=0 Down Spread,=1 Center Spread. */
+       UCHAR ucReserved[2];
+} ATOM_ASIC_SS_ASSIGNMENT;
+
+/* Define ucClockIndication (which clock source needs spread spectrum) */
+#define ASIC_INTERNAL_MEMORY_SS                        1
+#define ASIC_INTERNAL_ENGINE_SS                        2
+#define ASIC_INTERNAL_UVD_SS                           3
+
+typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
+       ATOM_COMMON_TABLE_HEADER sHeader;
+       ATOM_ASIC_SS_ASSIGNMENT asSpreadSpectrum[4];
+} ATOM_ASIC_INTERNAL_SS_INFO;
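+
+/*
+ * Illustrative sketch (editor's example, not part of the AtomBIOS interface):
+ * looking up the spread spectrum percentage for one of the clock sources
+ * listed above (ASIC_INTERNAL_ENGINE_SS etc.).  Per the field comments the
+ * returned value is in 0.01% steps, i.e. 250 means 2.5%.
+ */
+static inline USHORT atom_example_ss_percentage(
+       const ATOM_ASIC_INTERNAL_SS_INFO *info, UCHAR clock_indication)
+{
+       int i;
+
+       for (i = 0; i < 4; i++) {
+               if (info->asSpreadSpectrum[i].ucClockIndication == clock_indication)
+                       return info->asSpreadSpectrum[i].usSpreadSpectrumPercentage;
+       }
+       return 0;       /* no spread spectrum entry for this clock source */
+}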
+
+/* ==============================Scratch Pad Definition Portion=============================== */
+#define ATOM_DEVICE_CONNECT_INFO_DEF  0
+#define ATOM_ROM_LOCATION_DEF         1
+#define ATOM_TV_STANDARD_DEF          2
+#define ATOM_ACTIVE_INFO_DEF          3
+#define ATOM_LCD_INFO_DEF             4
+#define ATOM_DOS_REQ_INFO_DEF         5
+#define ATOM_ACC_CHANGE_INFO_DEF      6
+#define ATOM_DOS_MODE_INFO_DEF        7
+#define ATOM_I2C_CHANNEL_STATUS_DEF   8
+#define ATOM_I2C_CHANNEL_STATUS1_DEF  9
+
+/*  BIOS_0_SCRATCH Definition */
+#define ATOM_S0_CRT1_MONO               0x00000001L
+#define ATOM_S0_CRT1_COLOR              0x00000002L
+#define ATOM_S0_CRT1_MASK               (ATOM_S0_CRT1_MONO+ATOM_S0_CRT1_COLOR)
+
+#define ATOM_S0_TV1_COMPOSITE_A         0x00000004L
+#define ATOM_S0_TV1_SVIDEO_A            0x00000008L
+#define ATOM_S0_TV1_MASK_A              (ATOM_S0_TV1_COMPOSITE_A+ATOM_S0_TV1_SVIDEO_A)
+
+#define ATOM_S0_CV_A                    0x00000010L
+#define ATOM_S0_CV_DIN_A                0x00000020L
+#define ATOM_S0_CV_MASK_A               (ATOM_S0_CV_A+ATOM_S0_CV_DIN_A)
+
+#define ATOM_S0_CRT2_MONO               0x00000100L
+#define ATOM_S0_CRT2_COLOR              0x00000200L
+#define ATOM_S0_CRT2_MASK               (ATOM_S0_CRT2_MONO+ATOM_S0_CRT2_COLOR)
+
+#define ATOM_S0_TV1_COMPOSITE           0x00000400L
+#define ATOM_S0_TV1_SVIDEO              0x00000800L
+#define ATOM_S0_TV1_SCART               0x00004000L
+#define ATOM_S0_TV1_MASK                (ATOM_S0_TV1_COMPOSITE+ATOM_S0_TV1_SVIDEO+ATOM_S0_TV1_SCART)
+
+#define ATOM_S0_CV                      0x00001000L
+#define ATOM_S0_CV_DIN                  0x00002000L
+#define ATOM_S0_CV_MASK                 (ATOM_S0_CV+ATOM_S0_CV_DIN)
+
+#define ATOM_S0_DFP1                    0x00010000L
+#define ATOM_S0_DFP2                    0x00020000L
+#define ATOM_S0_LCD1                    0x00040000L
+#define ATOM_S0_LCD2                    0x00080000L
+#define ATOM_S0_TV2                     0x00100000L
+#define ATOM_S0_DFP3                   0x00200000L
+#define ATOM_S0_DFP4                   0x00400000L
+#define ATOM_S0_DFP5                   0x00800000L
+
+#define ATOM_S0_DFP_MASK \
+       (ATOM_S0_DFP1 | ATOM_S0_DFP2 | ATOM_S0_DFP3 | ATOM_S0_DFP4 | ATOM_S0_DFP5)
+
+#define ATOM_S0_FAD_REGISTER_BUG        0x02000000L    /*  If set, indicates we are running a PCIE asic with */
+                                                   /*  the FAD/HDP reg access bug.  Bit is read by DAL */
+
+#define ATOM_S0_THERMAL_STATE_MASK      0x1C000000L
+#define ATOM_S0_THERMAL_STATE_SHIFT     26
+
+#define ATOM_S0_SYSTEM_POWER_STATE_MASK 0xE0000000L
+#define ATOM_S0_SYSTEM_POWER_STATE_SHIFT 29
+
+#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_AC     1
+#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_DC     2
+#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LITEAC 3
+
+/* Byte aligned definition for BIOS usage */
+#define ATOM_S0_CRT1_MONOb0             0x01
+#define ATOM_S0_CRT1_COLORb0            0x02
+#define ATOM_S0_CRT1_MASKb0             (ATOM_S0_CRT1_MONOb0+ATOM_S0_CRT1_COLORb0)
+
+#define ATOM_S0_TV1_COMPOSITEb0         0x04
+#define ATOM_S0_TV1_SVIDEOb0            0x08
+#define ATOM_S0_TV1_MASKb0              (ATOM_S0_TV1_COMPOSITEb0+ATOM_S0_TV1_SVIDEOb0)
+
+#define ATOM_S0_CVb0                    0x10
+#define ATOM_S0_CV_DINb0                0x20
+#define ATOM_S0_CV_MASKb0               (ATOM_S0_CVb0+ATOM_S0_CV_DINb0)
+
+#define ATOM_S0_CRT2_MONOb1             0x01
+#define ATOM_S0_CRT2_COLORb1            0x02
+#define ATOM_S0_CRT2_MASKb1             (ATOM_S0_CRT2_MONOb1+ATOM_S0_CRT2_COLORb1)
+
+#define ATOM_S0_TV1_COMPOSITEb1         0x04
+#define ATOM_S0_TV1_SVIDEOb1            0x08
+#define ATOM_S0_TV1_SCARTb1             0x40
+#define ATOM_S0_TV1_MASKb1              (ATOM_S0_TV1_COMPOSITEb1+ATOM_S0_TV1_SVIDEOb1+ATOM_S0_TV1_SCARTb1)
+
+#define ATOM_S0_CVb1                    0x10
+#define ATOM_S0_CV_DINb1                0x20
+#define ATOM_S0_CV_MASKb1               (ATOM_S0_CVb1+ATOM_S0_CV_DINb1)
+
+#define ATOM_S0_DFP1b2                  0x01
+#define ATOM_S0_DFP2b2                  0x02
+#define ATOM_S0_LCD1b2                  0x04
+#define ATOM_S0_LCD2b2                  0x08
+#define ATOM_S0_TV2b2                   0x10
+#define ATOM_S0_DFP3b2                                                                 0x20
+
+#define ATOM_S0_THERMAL_STATE_MASKb3    0x1C
+#define ATOM_S0_THERMAL_STATE_SHIFTb3   2
+
+#define ATOM_S0_SYSTEM_POWER_STATE_MASKb3 0xE0
+#define ATOM_S0_LCD1_SHIFT              18
+
+/*  BIOS_1_SCRATCH Definition */
+#define ATOM_S1_ROM_LOCATION_MASK       0x0000FFFFL
+#define ATOM_S1_PCI_BUS_DEV_MASK        0xFFFF0000L
+
+/*  BIOS_2_SCRATCH Definition */
+#define ATOM_S2_TV1_STANDARD_MASK       0x0000000FL
+#define ATOM_S2_CURRENT_BL_LEVEL_MASK   0x0000FF00L
+#define ATOM_S2_CURRENT_BL_LEVEL_SHIFT  8
+
+#define ATOM_S2_CRT1_DPMS_STATE         0x00010000L
+#define ATOM_S2_LCD1_DPMS_STATE                0x00020000L
+#define ATOM_S2_TV1_DPMS_STATE          0x00040000L
+#define ATOM_S2_DFP1_DPMS_STATE         0x00080000L
+#define ATOM_S2_CRT2_DPMS_STATE         0x00100000L
+#define ATOM_S2_LCD2_DPMS_STATE         0x00200000L
+#define ATOM_S2_TV2_DPMS_STATE          0x00400000L
+#define ATOM_S2_DFP2_DPMS_STATE         0x00800000L
+#define ATOM_S2_CV_DPMS_STATE           0x01000000L
+#define ATOM_S2_DFP3_DPMS_STATE                                        0x02000000L
+#define ATOM_S2_DFP4_DPMS_STATE                                        0x04000000L
+#define ATOM_S2_DFP5_DPMS_STATE                                        0x08000000L
+
+#define ATOM_S2_DFP_DPM_STATE \
+       (ATOM_S2_DFP1_DPMS_STATE | ATOM_S2_DFP2_DPMS_STATE | \
+        ATOM_S2_DFP3_DPMS_STATE | ATOM_S2_DFP4_DPMS_STATE | \
+        ATOM_S2_DFP5_DPMS_STATE)
+
+#define ATOM_S2_DEVICE_DPMS_STATE \
+       (ATOM_S2_CRT1_DPMS_STATE + ATOM_S2_LCD1_DPMS_STATE + \
+        ATOM_S2_TV1_DPMS_STATE + ATOM_S2_DFP_DPM_STATE + \
+        ATOM_S2_CRT2_DPMS_STATE + ATOM_S2_LCD2_DPMS_STATE + \
+        ATOM_S2_TV2_DPMS_STATE + ATOM_S2_CV_DPMS_STATE)
+
+#define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASK       0x0C000000L
+#define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASK_SHIFT 26
+#define ATOM_S2_FORCEDLOWPWRMODE_STATE_CHANGE     0x10000000L
+
+#define ATOM_S2_VRI_BRIGHT_ENABLE       0x20000000L
+
+#define ATOM_S2_DISPLAY_ROTATION_0_DEGREE     0x0
+#define ATOM_S2_DISPLAY_ROTATION_90_DEGREE    0x1
+#define ATOM_S2_DISPLAY_ROTATION_180_DEGREE   0x2
+#define ATOM_S2_DISPLAY_ROTATION_270_DEGREE   0x3
+#define ATOM_S2_DISPLAY_ROTATION_DEGREE_SHIFT 30
+#define ATOM_S2_DISPLAY_ROTATION_ANGLE_MASK   0xC0000000L
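+
+/*
+ * Illustrative sketch (editor's example, not part of the AtomBIOS interface):
+ * the BIOS_x_SCRATCH bits in this section are decoded with the usual
+ * mask-and-shift pattern; e.g. recovering the rotation angle from a
+ * BIOS_2_SCRATCH value looks like this.
+ */
+static inline ULONG atom_example_rotation_from_scratch2(ULONG bios_2_scratch)
+{
+       /* yields ATOM_S2_DISPLAY_ROTATION_0/90/180/270_DEGREE */
+       return (bios_2_scratch & ATOM_S2_DISPLAY_ROTATION_ANGLE_MASK) >>
+               ATOM_S2_DISPLAY_ROTATION_DEGREE_SHIFT;
+}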
+
+/* Byte aligned definition for BIOS usage */
+#define ATOM_S2_TV1_STANDARD_MASKb0     0x0F
+#define ATOM_S2_CURRENT_BL_LEVEL_MASKb1 0xFF
+#define ATOM_S2_CRT1_DPMS_STATEb2       0x01
+#define ATOM_S2_LCD1_DPMS_STATEb2       0x02
+#define ATOM_S2_TV1_DPMS_STATEb2        0x04
+#define ATOM_S2_DFP1_DPMS_STATEb2       0x08
+#define ATOM_S2_CRT2_DPMS_STATEb2       0x10
+#define ATOM_S2_LCD2_DPMS_STATEb2       0x20
+#define ATOM_S2_TV2_DPMS_STATEb2        0x40
+#define ATOM_S2_DFP2_DPMS_STATEb2       0x80
+#define ATOM_S2_CV_DPMS_STATEb3         0x01
+#define ATOM_S2_DFP3_DPMS_STATEb3                              0x02
+#define ATOM_S2_DFP4_DPMS_STATEb3                              0x04
+#define ATOM_S2_DFP5_DPMS_STATEb3                              0x08
+
+#define ATOM_S2_DEVICE_DPMS_MASKw1      0x3FF
+#define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASKb3     0x0C
+#define ATOM_S2_FORCEDLOWPWRMODE_STATE_CHANGEb3   0x10
+#define ATOM_S2_VRI_BRIGHT_ENABLEb3     0x20
+#define ATOM_S2_ROTATION_STATE_MASKb3   0xC0
+
+/*  BIOS_3_SCRATCH Definition */
+#define ATOM_S3_CRT1_ACTIVE             0x00000001L
+#define ATOM_S3_LCD1_ACTIVE             0x00000002L
+#define ATOM_S3_TV1_ACTIVE              0x00000004L
+#define ATOM_S3_DFP1_ACTIVE             0x00000008L
+#define ATOM_S3_CRT2_ACTIVE             0x00000010L
+#define ATOM_S3_LCD2_ACTIVE             0x00000020L
+#define ATOM_S3_TV2_ACTIVE              0x00000040L
+#define ATOM_S3_DFP2_ACTIVE             0x00000080L
+#define ATOM_S3_CV_ACTIVE               0x00000100L
+#define ATOM_S3_DFP3_ACTIVE                                                    0x00000200L
+#define ATOM_S3_DFP4_ACTIVE                                                    0x00000400L
+#define ATOM_S3_DFP5_ACTIVE                                                    0x00000800L
+
+#define ATOM_S3_DEVICE_ACTIVE_MASK      0x000003FFL
+
+#define ATOM_S3_LCD_FULLEXPANSION_ACTIVE         0x00001000L
+#define ATOM_S3_LCD_EXPANSION_ASPEC_RATIO_ACTIVE 0x00002000L
+
+#define ATOM_S3_CRT1_CRTC_ACTIVE        0x00010000L
+#define ATOM_S3_LCD1_CRTC_ACTIVE        0x00020000L
+#define ATOM_S3_TV1_CRTC_ACTIVE         0x00040000L
+#define ATOM_S3_DFP1_CRTC_ACTIVE        0x00080000L
+#define ATOM_S3_CRT2_CRTC_ACTIVE        0x00100000L
+#define ATOM_S3_LCD2_CRTC_ACTIVE        0x00200000L
+#define ATOM_S3_TV2_CRTC_ACTIVE         0x00400000L
+#define ATOM_S3_DFP2_CRTC_ACTIVE        0x00800000L
+#define ATOM_S3_CV_CRTC_ACTIVE          0x01000000L
+#define ATOM_S3_DFP3_CRTC_ACTIVE                               0x02000000L
+#define ATOM_S3_DFP4_CRTC_ACTIVE                               0x04000000L
+#define ATOM_S3_DFP5_CRTC_ACTIVE                               0x08000000L
+
+#define ATOM_S3_DEVICE_CRTC_ACTIVE_MASK 0x0FFF0000L
+#define ATOM_S3_ASIC_GUI_ENGINE_HUNG    0x20000000L
+#define ATOM_S3_ALLOW_FAST_PWR_SWITCH   0x40000000L
+#define ATOM_S3_RQST_GPU_USE_MIN_PWR    0x80000000L
+
+/* Byte aligned definition for BIOS usage */
+#define ATOM_S3_CRT1_ACTIVEb0           0x01
+#define ATOM_S3_LCD1_ACTIVEb0           0x02
+#define ATOM_S3_TV1_ACTIVEb0            0x04
+#define ATOM_S3_DFP1_ACTIVEb0           0x08
+#define ATOM_S3_CRT2_ACTIVEb0           0x10
+#define ATOM_S3_LCD2_ACTIVEb0           0x20
+#define ATOM_S3_TV2_ACTIVEb0            0x40
+#define ATOM_S3_DFP2_ACTIVEb0           0x80
+#define ATOM_S3_CV_ACTIVEb1             0x01
+#define ATOM_S3_DFP3_ACTIVEb1                                          0x02
+#define ATOM_S3_DFP4_ACTIVEb1                                          0x04
+#define ATOM_S3_DFP5_ACTIVEb1                                          0x08
+
+#define ATOM_S3_ACTIVE_CRTC1w0          0xFFF
+
+#define ATOM_S3_CRT1_CRTC_ACTIVEb2      0x01
+#define ATOM_S3_LCD1_CRTC_ACTIVEb2      0x02
+#define ATOM_S3_TV1_CRTC_ACTIVEb2       0x04
+#define ATOM_S3_DFP1_CRTC_ACTIVEb2      0x08
+#define ATOM_S3_CRT2_CRTC_ACTIVEb2      0x10
+#define ATOM_S3_LCD2_CRTC_ACTIVEb2      0x20
+#define ATOM_S3_TV2_CRTC_ACTIVEb2       0x40
+#define ATOM_S3_DFP2_CRTC_ACTIVEb2      0x80
+#define ATOM_S3_CV_CRTC_ACTIVEb3        0x01
+#define ATOM_S3_DFP3_CRTC_ACTIVEb3                     0x02
+#define ATOM_S3_DFP4_CRTC_ACTIVEb3                     0x04
+#define ATOM_S3_DFP5_CRTC_ACTIVEb3                     0x08
+
+#define ATOM_S3_ACTIVE_CRTC2w1          0xFFF
+
+#define ATOM_S3_ASIC_GUI_ENGINE_HUNGb3 0x20
+#define ATOM_S3_ALLOW_FAST_PWR_SWITCHb3 0x40
+#define ATOM_S3_RQST_GPU_USE_MIN_PWRb3  0x80
+
+/*  BIOS_4_SCRATCH Definition */
+#define ATOM_S4_LCD1_PANEL_ID_MASK      0x000000FFL
+#define ATOM_S4_LCD1_REFRESH_MASK       0x0000FF00L
+#define ATOM_S4_LCD1_REFRESH_SHIFT      8
+
+/* Byte aligned definition for BIOS usage */
+#define ATOM_S4_LCD1_PANEL_ID_MASKb0     0x0FF
+#define ATOM_S4_LCD1_REFRESH_MASKb1              ATOM_S4_LCD1_PANEL_ID_MASKb0
+#define ATOM_S4_VRAM_INFO_MASKb2        ATOM_S4_LCD1_PANEL_ID_MASKb0
+
+/*  BIOS_5_SCRATCH Definition, BIOS_5_SCRATCH is used by Firmware only !!!! */
+#define ATOM_S5_DOS_REQ_CRT1b0          0x01
+#define ATOM_S5_DOS_REQ_LCD1b0          0x02
+#define ATOM_S5_DOS_REQ_TV1b0           0x04
+#define ATOM_S5_DOS_REQ_DFP1b0          0x08
+#define ATOM_S5_DOS_REQ_CRT2b0          0x10
+#define ATOM_S5_DOS_REQ_LCD2b0          0x20
+#define ATOM_S5_DOS_REQ_TV2b0           0x40
+#define ATOM_S5_DOS_REQ_DFP2b0          0x80
+#define ATOM_S5_DOS_REQ_CVb1            0x01
+#define ATOM_S5_DOS_REQ_DFP3b1                                 0x02
+#define ATOM_S5_DOS_REQ_DFP4b1                                 0x04
+#define ATOM_S5_DOS_REQ_DFP5b1                                 0x08
+
+#define ATOM_S5_DOS_REQ_DEVICEw0        0x03FF
+
+#define ATOM_S5_DOS_REQ_CRT1            0x0001
+#define ATOM_S5_DOS_REQ_LCD1            0x0002
+#define ATOM_S5_DOS_REQ_TV1             0x0004
+#define ATOM_S5_DOS_REQ_DFP1            0x0008
+#define ATOM_S5_DOS_REQ_CRT2            0x0010
+#define ATOM_S5_DOS_REQ_LCD2            0x0020
+#define ATOM_S5_DOS_REQ_TV2             0x0040
+#define ATOM_S5_DOS_REQ_DFP2            0x0080
+#define ATOM_S5_DOS_REQ_CV              0x0100
+#define ATOM_S5_DOS_REQ_DFP3                                           0x0200
+#define ATOM_S5_DOS_REQ_DFP4                                           0x0400
+#define ATOM_S5_DOS_REQ_DFP5                                           0x0800
+
+#define ATOM_S5_DOS_FORCE_CRT1b2        ATOM_S5_DOS_REQ_CRT1b0
+#define ATOM_S5_DOS_FORCE_TV1b2         ATOM_S5_DOS_REQ_TV1b0
+#define ATOM_S5_DOS_FORCE_CRT2b2        ATOM_S5_DOS_REQ_CRT2b0
+#define ATOM_S5_DOS_FORCE_CVb3          ATOM_S5_DOS_REQ_CVb1
+#define ATOM_S5_DOS_FORCE_DEVICEw1 \
+       (ATOM_S5_DOS_FORCE_CRT1b2 + ATOM_S5_DOS_FORCE_TV1b2 + \
+        ATOM_S5_DOS_FORCE_CRT2b2 + (ATOM_S5_DOS_FORCE_CVb3 << 8))
+
+/*  BIOS_6_SCRATCH Definition */
+#define ATOM_S6_DEVICE_CHANGE           0x00000001L
+#define ATOM_S6_SCALER_CHANGE           0x00000002L
+#define ATOM_S6_LID_CHANGE              0x00000004L
+#define ATOM_S6_DOCKING_CHANGE          0x00000008L
+#define ATOM_S6_ACC_MODE                0x00000010L
+#define ATOM_S6_EXT_DESKTOP_MODE        0x00000020L
+#define ATOM_S6_LID_STATE               0x00000040L
+#define ATOM_S6_DOCK_STATE              0x00000080L
+#define ATOM_S6_CRITICAL_STATE          0x00000100L
+#define ATOM_S6_HW_I2C_BUSY_STATE       0x00000200L
+#define ATOM_S6_THERMAL_STATE_CHANGE    0x00000400L
+#define ATOM_S6_INTERRUPT_SET_BY_BIOS   0x00000800L
+#define ATOM_S6_REQ_LCD_EXPANSION_FULL         0x00001000L     /* Normal expansion Request bit for LCD */
+#define ATOM_S6_REQ_LCD_EXPANSION_ASPEC_RATIO  0x00002000L     /* Aspect ratio expansion Request bit for LCD */
+
+#define ATOM_S6_DISPLAY_STATE_CHANGE    0x00004000L    /* This bit is recycled when ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE is set; previously it was SCL2_H_expansion */
+#define ATOM_S6_I2C_STATE_CHANGE        0x00008000L    /* This bit is recycled when ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE is set; previously it was SCL2_V_expansion */
+
+#define ATOM_S6_ACC_REQ_CRT1            0x00010000L
+#define ATOM_S6_ACC_REQ_LCD1            0x00020000L
+#define ATOM_S6_ACC_REQ_TV1             0x00040000L
+#define ATOM_S6_ACC_REQ_DFP1            0x00080000L
+#define ATOM_S6_ACC_REQ_CRT2            0x00100000L
+#define ATOM_S6_ACC_REQ_LCD2            0x00200000L
+#define ATOM_S6_ACC_REQ_TV2             0x00400000L
+#define ATOM_S6_ACC_REQ_DFP2            0x00800000L
+#define ATOM_S6_ACC_REQ_CV              0x01000000L
+#define ATOM_S6_ACC_REQ_DFP3                                           0x02000000L
+#define ATOM_S6_ACC_REQ_DFP4                                           0x04000000L
+#define ATOM_S6_ACC_REQ_DFP5                                           0x08000000L
+
+#define ATOM_S6_ACC_REQ_MASK                0x0FFF0000L
+#define ATOM_S6_SYSTEM_POWER_MODE_CHANGE    0x10000000L
+#define ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH    0x20000000L
+#define ATOM_S6_VRI_BRIGHTNESS_CHANGE       0x40000000L
+#define ATOM_S6_CONFIG_DISPLAY_CHANGE_MASK  0x80000000L
+
+/* Byte aligned definition for BIOS usage */
+#define ATOM_S6_DEVICE_CHANGEb0         0x01
+#define ATOM_S6_SCALER_CHANGEb0         0x02
+#define ATOM_S6_LID_CHANGEb0            0x04
+#define ATOM_S6_DOCKING_CHANGEb0        0x08
+#define ATOM_S6_ACC_MODEb0              0x10
+#define ATOM_S6_EXT_DESKTOP_MODEb0      0x20
+#define ATOM_S6_LID_STATEb0             0x40
+#define ATOM_S6_DOCK_STATEb0            0x80
+#define ATOM_S6_CRITICAL_STATEb1        0x01
+#define ATOM_S6_HW_I2C_BUSY_STATEb1     0x02
+#define ATOM_S6_THERMAL_STATE_CHANGEb1  0x04
+#define ATOM_S6_INTERRUPT_SET_BY_BIOSb1 0x08
+#define ATOM_S6_REQ_LCD_EXPANSION_FULLb1        0x10
+#define ATOM_S6_REQ_LCD_EXPANSION_ASPEC_RATIOb1 0x20
+
+#define ATOM_S6_ACC_REQ_CRT1b2          0x01
+#define ATOM_S6_ACC_REQ_LCD1b2          0x02
+#define ATOM_S6_ACC_REQ_TV1b2           0x04
+#define ATOM_S6_ACC_REQ_DFP1b2          0x08
+#define ATOM_S6_ACC_REQ_CRT2b2          0x10
+#define ATOM_S6_ACC_REQ_LCD2b2          0x20
+#define ATOM_S6_ACC_REQ_TV2b2           0x40
+#define ATOM_S6_ACC_REQ_DFP2b2          0x80
+#define ATOM_S6_ACC_REQ_CVb3            0x01
+#define ATOM_S6_ACC_REQ_DFP3b3                                 0x02
+#define ATOM_S6_ACC_REQ_DFP4b3                                 0x04
+#define ATOM_S6_ACC_REQ_DFP5b3                                 0x08
+
+#define ATOM_S6_ACC_REQ_DEVICEw1        ATOM_S5_DOS_REQ_DEVICEw0
+#define ATOM_S6_SYSTEM_POWER_MODE_CHANGEb3 0x10
+#define ATOM_S6_ACC_BLOCK_DISPLAY_SWITCHb3 0x20
+#define ATOM_S6_VRI_BRIGHTNESS_CHANGEb3    0x40
+#define ATOM_S6_CONFIG_DISPLAY_CHANGEb3    0x80
+
+#define ATOM_S6_DEVICE_CHANGE_SHIFT             0
+#define ATOM_S6_SCALER_CHANGE_SHIFT             1
+#define ATOM_S6_LID_CHANGE_SHIFT                2
+#define ATOM_S6_DOCKING_CHANGE_SHIFT            3
+#define ATOM_S6_ACC_MODE_SHIFT                  4
+#define ATOM_S6_EXT_DESKTOP_MODE_SHIFT          5
+#define ATOM_S6_LID_STATE_SHIFT                 6
+#define ATOM_S6_DOCK_STATE_SHIFT                7
+#define ATOM_S6_CRITICAL_STATE_SHIFT            8
+#define ATOM_S6_HW_I2C_BUSY_STATE_SHIFT         9
+#define ATOM_S6_THERMAL_STATE_CHANGE_SHIFT      10
+#define ATOM_S6_INTERRUPT_SET_BY_BIOS_SHIFT     11
+#define ATOM_S6_REQ_SCALER_SHIFT                12
+#define ATOM_S6_REQ_SCALER_ARATIO_SHIFT         13
+#define ATOM_S6_DISPLAY_STATE_CHANGE_SHIFT      14
+#define ATOM_S6_I2C_STATE_CHANGE_SHIFT          15
+#define ATOM_S6_SYSTEM_POWER_MODE_CHANGE_SHIFT  28
+#define ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH_SHIFT  29
+#define ATOM_S6_VRI_BRIGHTNESS_CHANGE_SHIFT     30
+#define ATOM_S6_CONFIG_DISPLAY_CHANGE_SHIFT     31
+
+/*  BIOS_7_SCRATCH Definition, BIOS_7_SCRATCH is used by Firmware only !!!! */
+#define ATOM_S7_DOS_MODE_TYPEb0             0x03
+#define ATOM_S7_DOS_MODE_VGAb0              0x00
+#define ATOM_S7_DOS_MODE_VESAb0             0x01
+#define ATOM_S7_DOS_MODE_EXTb0              0x02
+#define ATOM_S7_DOS_MODE_PIXEL_DEPTHb0      0x0C
+#define ATOM_S7_DOS_MODE_PIXEL_FORMATb0     0xF0
+#define ATOM_S7_DOS_8BIT_DAC_ENb1           0x01
+#define ATOM_S7_DOS_MODE_NUMBERw1           0x0FFFF
+
+#define ATOM_S7_DOS_8BIT_DAC_EN_SHIFT       8
+
+/*  BIOS_8_SCRATCH Definition */
+#define ATOM_S8_I2C_CHANNEL_BUSY_MASK       0x00000FFFF
+#define ATOM_S8_I2C_HW_ENGINE_BUSY_MASK     0x0FFFF0000
+
+#define ATOM_S8_I2C_CHANNEL_BUSY_SHIFT      0
+#define ATOM_S8_I2C_ENGINE_BUSY_SHIFT       16
+
+/*  BIOS_9_SCRATCH Definition */
+#ifndef ATOM_S9_I2C_CHANNEL_COMPLETED_MASK
+#define ATOM_S9_I2C_CHANNEL_COMPLETED_MASK  0x0000FFFF
+#endif
+#ifndef ATOM_S9_I2C_CHANNEL_ABORTED_MASK
+#define ATOM_S9_I2C_CHANNEL_ABORTED_MASK    0xFFFF0000
+#endif
+#ifndef ATOM_S9_I2C_CHANNEL_COMPLETED_SHIFT
+#define ATOM_S9_I2C_CHANNEL_COMPLETED_SHIFT 0
+#endif
+#ifndef ATOM_S9_I2C_CHANNEL_ABORTED_SHIFT
+#define ATOM_S9_I2C_CHANNEL_ABORTED_SHIFT   16
+#endif
+
+#define ATOM_FLAG_SET                         0x20
+#define ATOM_FLAG_CLEAR                       0
+#define CLEAR_ATOM_S6_ACC_MODE \
+       ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
+        ATOM_S6_ACC_MODE_SHIFT | ATOM_FLAG_CLEAR)
+#define SET_ATOM_S6_DEVICE_CHANGE \
+       ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
+        ATOM_S6_DEVICE_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_VRI_BRIGHTNESS_CHANGE \
+       ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
+        ATOM_S6_VRI_BRIGHTNESS_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_SCALER_CHANGE \
+       ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
+        ATOM_S6_SCALER_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_LID_CHANGE \
+       ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
+        ATOM_S6_LID_CHANGE_SHIFT | ATOM_FLAG_SET)
+
+#define SET_ATOM_S6_LID_STATE \
+       ((ATOM_ACC_CHANGE_INFO_DEF << 8) |\
+        ATOM_S6_LID_STATE_SHIFT | ATOM_FLAG_SET)
+#define CLEAR_ATOM_S6_LID_STATE \
+       ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
+        ATOM_S6_LID_STATE_SHIFT | ATOM_FLAG_CLEAR)
+
+#define SET_ATOM_S6_DOCK_CHANGE \
+       ((ATOM_ACC_CHANGE_INFO_DEF << 8)| \
+        ATOM_S6_DOCKING_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_DOCK_STATE \
+       ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
+        ATOM_S6_DOCK_STATE_SHIFT | ATOM_FLAG_SET)
+#define CLEAR_ATOM_S6_DOCK_STATE \
+       ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
+        ATOM_S6_DOCK_STATE_SHIFT | ATOM_FLAG_CLEAR)
+
+#define SET_ATOM_S6_THERMAL_STATE_CHANGE \
+       ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
+        ATOM_S6_THERMAL_STATE_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_SYSTEM_POWER_MODE_CHANGE \
+       ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
+        ATOM_S6_SYSTEM_POWER_MODE_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_INTERRUPT_SET_BY_BIOS \
+       ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
+        ATOM_S6_INTERRUPT_SET_BY_BIOS_SHIFT | ATOM_FLAG_SET)
+
+#define SET_ATOM_S6_CRITICAL_STATE \
+       ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
+        ATOM_S6_CRITICAL_STATE_SHIFT | ATOM_FLAG_SET)
+#define CLEAR_ATOM_S6_CRITICAL_STATE \
+       ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
+        ATOM_S6_CRITICAL_STATE_SHIFT | ATOM_FLAG_CLEAR)
+
+#define SET_ATOM_S6_REQ_SCALER \
+       ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
+        ATOM_S6_REQ_SCALER_SHIFT | ATOM_FLAG_SET)
+#define CLEAR_ATOM_S6_REQ_SCALER \
+       ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
+        ATOM_S6_REQ_SCALER_SHIFT | ATOM_FLAG_CLEAR )
+
+#define SET_ATOM_S6_REQ_SCALER_ARATIO \
+       ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
+        ATOM_S6_REQ_SCALER_ARATIO_SHIFT | ATOM_FLAG_SET )
+#define CLEAR_ATOM_S6_REQ_SCALER_ARATIO \
+       ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
+        ATOM_S6_REQ_SCALER_ARATIO_SHIFT | ATOM_FLAG_CLEAR )
+
+#define SET_ATOM_S6_I2C_STATE_CHANGE \
+       ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
+        ATOM_S6_I2C_STATE_CHANGE_SHIFT | ATOM_FLAG_SET )
+
+#define SET_ATOM_S6_DISPLAY_STATE_CHANGE \
+       ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
+        ATOM_S6_DISPLAY_STATE_CHANGE_SHIFT | ATOM_FLAG_SET )
+
+#define SET_ATOM_S6_DEVICE_RECONFIG \
+       ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
+        ATOM_S6_CONFIG_DISPLAY_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define CLEAR_ATOM_S0_LCD1 \
+       ((ATOM_DEVICE_CONNECT_INFO_DEF << 8 ) | \
+        ATOM_S0_LCD1_SHIFT | ATOM_FLAG_CLEAR )
+#define SET_ATOM_S7_DOS_8BIT_DAC_EN \
+       ((ATOM_DOS_MODE_INFO_DEF << 8) | \
+        ATOM_S7_DOS_8BIT_DAC_EN_SHIFT | ATOM_FLAG_SET )
+#define CLEAR_ATOM_S7_DOS_8BIT_DAC_EN \
+       ((ATOM_DOS_MODE_INFO_DEF << 8) | \
+        ATOM_S7_DOS_8BIT_DAC_EN_SHIFT | ATOM_FLAG_CLEAR )
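+
+/*
+ * Illustrative sketch (editor's example, not part of the AtomBIOS interface):
+ * the SET_/CLEAR_ constants above pack three fields into one value: the
+ * scratch register selector (ATOM_x_INFO_DEF) in the upper byte, the bit
+ * shift in bits [4:0], and ATOM_FLAG_SET/ATOM_FLAG_CLEAR in bit 5.  The
+ * decode below follows that packing; the output names are the editor's own.
+ */
+static inline void atom_example_decode_scratch_flag(USHORT encoded,
+                                                    UCHAR *scratch_def,
+                                                    UCHAR *bit_shift,
+                                                    UCHAR *set_flag)
+{
+       *scratch_def = (UCHAR)(encoded >> 8);           /* e.g. ATOM_ACC_CHANGE_INFO_DEF */
+       *bit_shift = (UCHAR)(encoded & 0x1F);           /* e.g. ATOM_S6_LID_STATE_SHIFT */
+       *set_flag = (UCHAR)(encoded & ATOM_FLAG_SET);   /* non-zero means "set" */
+}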
+
+/****************************************************************************/
+/* Portion II: Definitions only used in the driver */
+/****************************************************************************/
+
+/*  Macros used by driver */
+
+#define        GetIndexIntoMasterTable(MasterOrData, FieldName) (((char *)(&((ATOM_MASTER_LIST_OF_##MasterOrData##_TABLES *)0)->FieldName)-(char *)0)/sizeof(USHORT))
+
+#define GET_COMMAND_TABLE_COMMANDSET_REVISION(TABLE_HEADER_OFFSET) ((((ATOM_COMMON_TABLE_HEADER*)TABLE_HEADER_OFFSET)->ucTableFormatRevision)&0x3F)
+#define GET_COMMAND_TABLE_PARAMETER_REVISION(TABLE_HEADER_OFFSET)  ((((ATOM_COMMON_TABLE_HEADER*)TABLE_HEADER_OFFSET)->ucTableContentRevision)&0x3F)
+
+#define GET_DATA_TABLE_MAJOR_REVISION GET_COMMAND_TABLE_COMMANDSET_REVISION
+#define GET_DATA_TABLE_MINOR_REVISION GET_COMMAND_TABLE_PARAMETER_REVISION
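+
+/*
+ * Illustrative sketch (editor's example, not part of the AtomBIOS interface):
+ * GetIndexIntoMasterTable() maps a field name of the master command/data
+ * table lists (defined earlier in this header) to its index in the array of
+ * USHORT table offsets, and the GET_*_REVISION macros pull the 6-bit
+ * revision fields out of a table header.  ASIC_Init is assumed here to be
+ * one of the command table fields.
+ */
+static inline int atom_example_asic_init_index(ATOM_COMMON_TABLE_HEADER *hdr,
+                                               int *major, int *minor)
+{
+       *major = GET_COMMAND_TABLE_COMMANDSET_REVISION(hdr);
+       *minor = GET_COMMAND_TABLE_PARAMETER_REVISION(hdr);
+       return GetIndexIntoMasterTable(COMMAND, ASIC_Init);
+}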
+
+/****************************************************************************/
+/* Portion III: Definitions only used in the VBIOS */
+/****************************************************************************/
+#define ATOM_DAC_SRC                                   0x80
+#define ATOM_SRC_DAC1                                  0
+#define ATOM_SRC_DAC2                                  0x80
+
+#ifdef UEFI_BUILD
+#define        USHORT  UTEMP
+#endif
+
+typedef struct _MEMORY_PLLINIT_PARAMETERS {
+       ULONG ulTargetMemoryClock;      /* In units of 10kHz */
+       UCHAR ucAction;         /* not defined yet */
+       UCHAR ucFbDiv_Hi;       /* Fbdiv Hi byte */
+       UCHAR ucFbDiv;          /* FB value */
+       UCHAR ucPostDiv;        /* Post div */
+} MEMORY_PLLINIT_PARAMETERS;
+
+#define MEMORY_PLLINIT_PS_ALLOCATION  MEMORY_PLLINIT_PARAMETERS
+
+#define        GPIO_PIN_WRITE                                                                                                  0x01
+#define        GPIO_PIN_READ                                                                                                           0x00
+
+typedef struct _GPIO_PIN_CONTROL_PARAMETERS {
+       UCHAR ucGPIO_ID;        /* return value, read from GPIO pins */
+       UCHAR ucGPIOBitShift;   /* defines which bit in ucGPIOBitVal needs to be updated */
+       UCHAR ucGPIOBitVal;     /* Set/Reset the corresponding bit selected by ucGPIOBitShift */
+       UCHAR ucAction;         /* =GPIO_PIN_WRITE: Write; =GPIO_PIN_READ: Read */
+} GPIO_PIN_CONTROL_PARAMETERS;
+
+typedef struct _ENABLE_SCALER_PARAMETERS {
+       UCHAR ucScaler;         /*  ATOM_SCALER1, ATOM_SCALER2 */
+       UCHAR ucEnable;         /*  ATOM_SCALER_DISABLE or ATOM_SCALER_CENTER or ATOM_SCALER_EXPANSION */
+       UCHAR ucTVStandard;     /*  */
+       UCHAR ucPadding[1];
+} ENABLE_SCALER_PARAMETERS;
+#define ENABLE_SCALER_PS_ALLOCATION ENABLE_SCALER_PARAMETERS
+
+/* ucEnable: */
+#define SCALER_BYPASS_AUTO_CENTER_NO_REPLICATION    0
+#define SCALER_BYPASS_AUTO_CENTER_AUTO_REPLICATION  1
+#define SCALER_ENABLE_2TAP_ALPHA_MODE               2
+#define SCALER_ENABLE_MULTITAP_MODE                 3
+
+typedef struct _ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS {
+       ULONG usHWIconHorzVertPosn;     /*  Hardware icon horizontal/vertical position */
+       UCHAR ucHWIconVertOffset;       /*  Hardware Icon Vertical offset */
+       UCHAR ucHWIconHorzOffset;       /*  Hardware Icon Horizontal offset */
+       UCHAR ucSelection;      /*  ATOM_CURSOR1 or ATOM_ICON1 or ATOM_CURSOR2 or ATOM_ICON2 */
+       UCHAR ucEnable;         /*  ATOM_ENABLE or ATOM_DISABLE */
+} ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS;
+
+typedef struct _ENABLE_HARDWARE_ICON_CURSOR_PS_ALLOCATION {
+       ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS sEnableIcon;
+       ENABLE_CRTC_PARAMETERS sReserved;
+} ENABLE_HARDWARE_ICON_CURSOR_PS_ALLOCATION;
+
+typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS {
+       USHORT usHight;         /*  Image height */
+       USHORT usWidth;         /*  Image Width */
+       UCHAR ucSurface;        /*  Surface 1 or 2 */
+       UCHAR ucPadding[3];
+} ENABLE_GRAPH_SURFACE_PARAMETERS;
+
+typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_2 {
+       USHORT usHight;         /*  Image height */
+       USHORT usWidth;         /*  Image Width */
+       UCHAR ucSurface;        /*  Surface 1 or 2 */
+       UCHAR ucEnable;         /*  ATOM_ENABLE or ATOM_DISABLE */
+       UCHAR ucPadding[2];
+} ENABLE_GRAPH_SURFACE_PARAMETERS_V1_2;
+
+typedef struct _ENABLE_GRAPH_SURFACE_PS_ALLOCATION {
+       ENABLE_GRAPH_SURFACE_PARAMETERS sSetSurface;
+       ENABLE_YUV_PS_ALLOCATION sReserved;     /*  Don't set this one */
+} ENABLE_GRAPH_SURFACE_PS_ALLOCATION;
+
+typedef struct _MEMORY_CLEAN_UP_PARAMETERS {
+       USHORT usMemoryStart;   /* on an 8KB boundary, offset from the memory base address */
+       USHORT usMemorySize;    /* aligned to 8KB blocks */
+} MEMORY_CLEAN_UP_PARAMETERS;
+#define MEMORY_CLEAN_UP_PS_ALLOCATION MEMORY_CLEAN_UP_PARAMETERS
+
+typedef struct _GET_DISPLAY_SURFACE_SIZE_PARAMETERS {
+       USHORT usX_Size;        /* When use as input parameter, usX_Size indicates which CRTC */
+       USHORT usY_Size;
+} GET_DISPLAY_SURFACE_SIZE_PARAMETERS;
+
+typedef struct _INDIRECT_IO_ACCESS {
+       ATOM_COMMON_TABLE_HEADER sHeader;
+       UCHAR IOAccessSequence[256];
+} INDIRECT_IO_ACCESS;
+
+#define INDIRECT_READ              0x00
+#define INDIRECT_WRITE             0x80
+
+#define INDIRECT_IO_MM             0
+#define INDIRECT_IO_PLL            1
+#define INDIRECT_IO_MC             2
+#define INDIRECT_IO_PCIE           3
+#define INDIRECT_IO_PCIEP          4
+#define INDIRECT_IO_NBMISC         5
+
+#define INDIRECT_IO_PLL_READ       INDIRECT_IO_PLL   | INDIRECT_READ
+#define INDIRECT_IO_PLL_WRITE      INDIRECT_IO_PLL   | INDIRECT_WRITE
+#define INDIRECT_IO_MC_READ        INDIRECT_IO_MC    | INDIRECT_READ
+#define INDIRECT_IO_MC_WRITE       INDIRECT_IO_MC    | INDIRECT_WRITE
+#define INDIRECT_IO_PCIE_READ      INDIRECT_IO_PCIE  | INDIRECT_READ
+#define INDIRECT_IO_PCIE_WRITE     INDIRECT_IO_PCIE  | INDIRECT_WRITE
+#define INDIRECT_IO_PCIEP_READ     INDIRECT_IO_PCIEP | INDIRECT_READ
+#define INDIRECT_IO_PCIEP_WRITE    INDIRECT_IO_PCIEP | INDIRECT_WRITE
+#define INDIRECT_IO_NBMISC_READ    INDIRECT_IO_NBMISC | INDIRECT_READ
+#define INDIRECT_IO_NBMISC_WRITE   INDIRECT_IO_NBMISC | INDIRECT_WRITE
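+
+/*
+ * Editorial sketch, not part of the original header: the opcodes above
+ * simply OR an indirect-IO port id with the read/write flag in bit 7,
+ * e.g. INDIRECT_IO_MC_WRITE == INDIRECT_IO_MC | INDIRECT_WRITE.
+ */
+static inline UCHAR atom_example_indirect_io_op(UCHAR port, int write)
+{
+       return (UCHAR)(port | (write ? INDIRECT_WRITE : INDIRECT_READ));
+}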
+
+typedef struct _ATOM_OEM_INFO {
+       ATOM_COMMON_TABLE_HEADER sHeader;
+       ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;
+} ATOM_OEM_INFO;
+
+typedef struct _ATOM_TV_MODE {
+       UCHAR ucVMode_Num;      /* Video mode number */
+       UCHAR ucTV_Mode_Num;    /* Internal TV mode number */
+} ATOM_TV_MODE;
+
+typedef struct _ATOM_BIOS_INT_TVSTD_MODE {
+       ATOM_COMMON_TABLE_HEADER sHeader;
+       USHORT usTV_Mode_LUT_Offset;    /*  Pointer to standard to internal number conversion table */
+       USHORT usTV_FIFO_Offset;        /*  Pointer to FIFO entry table */
+       USHORT usNTSC_Tbl_Offset;       /*  Pointer to SDTV_Mode_NTSC table */
+       USHORT usPAL_Tbl_Offset;        /*  Pointer to SDTV_Mode_PAL table */
+       USHORT usCV_Tbl_Offset; /*  Pointer to SDTV_Mode_CV table */
+} ATOM_BIOS_INT_TVSTD_MODE;
+
+typedef struct _ATOM_TV_MODE_SCALER_PTR {
+       USHORT ucFilter0_Offset;        /* Pointer to filter format 0 coefficients */
+       USHORT usFilter1_Offset;        /* Pointer to filter format 1 coefficients */
+       UCHAR ucTV_Mode_Num;
+} ATOM_TV_MODE_SCALER_PTR;
+
+typedef struct _ATOM_STANDARD_VESA_TIMING {
+       ATOM_COMMON_TABLE_HEADER sHeader;
+       ATOM_DTD_FORMAT aModeTimings[16];       /*  16 is not the real array number, just for initial allocation */
+} ATOM_STANDARD_VESA_TIMING;
+
+typedef struct _ATOM_STD_FORMAT {
+       USHORT usSTD_HDisp;
+       USHORT usSTD_VDisp;
+       USHORT usSTD_RefreshRate;
+       USHORT usReserved;
+} ATOM_STD_FORMAT;
+
+typedef struct _ATOM_VESA_TO_EXTENDED_MODE {
+       USHORT usVESA_ModeNumber;
+       USHORT usExtendedModeNumber;
+} ATOM_VESA_TO_EXTENDED_MODE;
+
+typedef struct _ATOM_VESA_TO_INTENAL_MODE_LUT {
+       ATOM_COMMON_TABLE_HEADER sHeader;
+       ATOM_VESA_TO_EXTENDED_MODE asVESA_ToExtendedModeInfo[76];
+} ATOM_VESA_TO_INTENAL_MODE_LUT;
+
+/*************** ATOM Memory Related Data Structure ***********************/
+typedef struct _ATOM_MEMORY_VENDOR_BLOCK {
+       UCHAR ucMemoryType;
+       UCHAR ucMemoryVendor;
+       UCHAR ucAdjMCId;
+       UCHAR ucDynClkId;
+       ULONG ulDllResetClkRange;
+} ATOM_MEMORY_VENDOR_BLOCK;
+
+typedef struct _ATOM_MEMORY_SETTING_ID_CONFIG {
+#if ATOM_BIG_ENDIAN
+       ULONG ucMemBlkId:8;
+       ULONG ulMemClockRange:24;
+#else
+       ULONG ulMemClockRange:24;
+       ULONG ucMemBlkId:8;
+#endif
+} ATOM_MEMORY_SETTING_ID_CONFIG;
+
+typedef union _ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS {
+       ATOM_MEMORY_SETTING_ID_CONFIG slAccess;
+       ULONG ulAccess;
+} ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS;
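+
+/*
+ * Editorial sketch, not part of the original header: the union lets the
+ * same 32-bit word be read either as a raw ULONG or as the bit-field
+ * pair (24-bit clock range, 8-bit memory block id) declared above.
+ */
+static inline UCHAR
+atom_example_mem_setting_blk_id(ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS id)
+{
+       return (UCHAR)id.slAccess.ucMemBlkId;
+}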
+
+typedef struct _ATOM_MEMORY_SETTING_DATA_BLOCK {
+       ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS ulMemoryID;
+       ULONG aulMemData[1];
+} ATOM_MEMORY_SETTING_DATA_BLOCK;
+
+typedef struct _ATOM_INIT_REG_INDEX_FORMAT {
+       USHORT usRegIndex;      /*  MC register index */
+       UCHAR ucPreRegDataLength;       /*  offset in ATOM_INIT_REG_DATA_BLOCK.saRegDataBuf */
+} ATOM_INIT_REG_INDEX_FORMAT;
+
+typedef struct _ATOM_INIT_REG_BLOCK {
+       USHORT usRegIndexTblSize;       /* size of asRegIndexBuf */
+       USHORT usRegDataBlkSize;        /* size of ATOM_MEMORY_SETTING_DATA_BLOCK */
+       ATOM_INIT_REG_INDEX_FORMAT asRegIndexBuf[1];
+       ATOM_MEMORY_SETTING_DATA_BLOCK asRegDataBuf[1];
+} ATOM_INIT_REG_BLOCK;
+
+#define END_OF_REG_INDEX_BLOCK  0x0ffff
+#define END_OF_REG_DATA_BLOCK   0x00000000
+#define ATOM_INIT_REG_MASK_FLAG 0x80
+#define        CLOCK_RANGE_HIGHEST                     0x00ffffff
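+
+/*
+ * Editorial sketch, not part of the original header: asRegIndexBuf is a
+ * variable-length list terminated by END_OF_REG_INDEX_BLOCK (the [1]
+ * declaration above is only an allocation placeholder), so the real
+ * entry count is found by scanning for the terminator.
+ */
+static inline int
+atom_example_count_reg_indices(const ATOM_INIT_REG_BLOCK *blk)
+{
+       int n = 0;
+
+       while (blk->asRegIndexBuf[n].usRegIndex != END_OF_REG_INDEX_BLOCK)
+               n++;
+       return n;
+}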
+
+#define VALUE_DWORD             SIZEOF ULONG
+#define VALUE_SAME_AS_ABOVE     0
+#define VALUE_MASK_DWORD        0x84
+
+#define INDEX_ACCESS_RANGE_BEGIN           (VALUE_DWORD + 1)
+#define INDEX_ACCESS_RANGE_END             (INDEX_ACCESS_RANGE_BEGIN + 1)
+#define VALUE_INDEX_ACCESS_SINGLE          (INDEX_ACCESS_RANGE_END + 1)
+
+typedef struct _ATOM_MC_INIT_PARAM_TABLE {
+       ATOM_COMMON_TABLE_HEADER sHeader;
+       USHORT usAdjustARB_SEQDataOffset;
+       USHORT usMCInitMemTypeTblOffset;
+       USHORT usMCInitCommonTblOffset;
+       USHORT usMCInitPowerDownTblOffset;
+       ULONG ulARB_SEQDataBuf[32];
+       ATOM_INIT_REG_BLOCK asMCInitMemType;
+       ATOM_INIT_REG_BLOCK asMCInitCommon;
+} ATOM_MC_INIT_PARAM_TABLE;
+
+#define _4Mx16              0x2
+#define _4Mx32              0x3
+#define _8Mx16              0x12
+#define _8Mx32              0x13
+#define _16Mx16             0x22
+#define _16Mx32             0x23
+#define _32Mx16             0x32
+#define _32Mx32             0x33
+#define _64Mx8              0x41
+#define _64Mx16             0x42
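+
+/*
+ * Editorial sketch, not part of the original header: from the codes
+ * above, the high nibble appears to encode the device depth (4M shifted
+ * left by the nibble) and the low nibble the device width (4 bits
+ * shifted likewise), e.g. _16Mx16 == 0x22, _64Mx8 == 0x41.
+ */
+static inline unsigned int atom_example_density_mbits(UCHAR density)
+{
+       unsigned int depth_m = 4u << (density >> 4);    /* device depth in M */
+       unsigned int width   = 4u << (density & 0x0f);  /* device width in bits */
+
+       return depth_m * width;                         /* Mbit per device */
+}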
+
+#define SAMSUNG             0x1
+#define INFINEON            0x2
+#define ELPIDA              0x3
+#define ETRON               0x4
+#define NANYA               0x5
+#define HYNIX               0x6
+#define MOSEL               0x7
+#define WINBOND             0x8
+#define ESMT                0x9
+#define MICRON              0xF
+
+#define QIMONDA             INFINEON
+#define PROMOS              MOSEL
+
+/* ///////////Support for GDDR5 MC uCode to reside in upper 64K of ROM///////////// */
+
+#define UCODE_ROM_START_ADDRESS                0x1c000
+#define        UCODE_SIGNATURE                 0x4375434d      /*  'MCuC' - MC uCode */
+
+/* uCode block header for reference */
+
+typedef struct _MCuCodeHeader {
+       ULONG ulSignature;
+       UCHAR ucRevision;
+       UCHAR ucChecksum;
+       UCHAR ucReserved1;
+       UCHAR ucReserved2;
+       USHORT usParametersLength;
+       USHORT usUCodeLength;
+       USHORT usReserved1;
+       USHORT usReserved2;
+} MCuCodeHeader;
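+
+/*
+ * Editorial sketch, not part of the original header: a loader would
+ * locate this block at UCODE_ROM_START_ADDRESS in the ROM image and
+ * check the signature before trusting the rest of the header fields.
+ */
+static inline int atom_example_mc_ucode_valid(const MCuCodeHeader *hdr)
+{
+       return hdr->ulSignature == UCODE_SIGNATURE;
+}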
+
+/* //////////////////////////////////////////////////////////////////////////////// */
+
+#define ATOM_MAX_NUMBER_OF_VRAM_MODULE 16
+
+#define ATOM_VRAM_MODULE_MEMORY_VENDOR_ID_MASK 0xF
+typedef struct _ATOM_VRAM_MODULE_V1 {
+       ULONG ulReserved;
+       USHORT usEMRSValue;
+       USHORT usMRSValue;
+       USHORT usReserved;
+       UCHAR ucExtMemoryID;    /*  An external indicator (by hardcode, callback or pin) to tell what is the current memory module */
+       UCHAR ucMemoryType;     /*  [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] reserved; */
+       UCHAR ucMemoryVenderID; /*  Predefined, never changes across designs or memory type/vendor */
+       UCHAR ucMemoryDeviceCfg;        /*  [7:4]=0x0:4M;=0x1:8M;=0x2:16M;0x3:32M....[3:0]=0x0:x4;=0x1:x8;=0x2:x16;=0x3:x32... */
+       UCHAR ucRow;            /*  Number of Row,in power of 2; */
+       UCHAR ucColumn;         /*  Number of Column,in power of 2; */
+       UCHAR ucBank;           /*  Number of Banks; */
+       UCHAR ucRank;           /*  Number of Rank, in power of 2 */
+       UCHAR ucChannelNum;     /*  Number of channel; */
+       UCHAR ucChannelConfig;  /*  [3:0]=Indication of channel combination; [7:4]=Channel bit width, in powers of 2 */
+       UCHAR ucDefaultMVDDQ_ID;        /*  Default MVDDQ setting for this memory block, ID linking to MVDDQ info table to find real set-up data; */
+       UCHAR ucDefaultMVDDC_ID;        /*  Default MVDDC setting for this memory block, ID linking to MVDDC info table to find real set-up data; */
+       UCHAR ucReserved[2];
+} ATOM_VRAM_MODULE_V1;
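+
+/*
+ * Editorial sketch, not part of the original header: per the
+ * ucMemoryType comment above, the memory family lives in the high
+ * nibble (0x1 = DDR1, 0x2 = DDR2, ...), so it can be extracted as below.
+ */
+static inline UCHAR atom_example_memory_family(UCHAR mem_type)
+{
+       return (UCHAR)(mem_type >> 4);
+}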
+
+typedef struct _ATOM_VRAM_MODULE_V2 {
+       ULONG ulReserved;
+       ULONG ulFlags;          /*  To enable/disable functionalities based on memory type */
+       ULONG ulEngineClock;    /*  Override of default engine clock for particular memory type */
+       ULONG ulMemoryClock;    /*  Override of default memory clock for particular memory type */
+       USHORT usEMRS2Value;    /*  EMRS2 Value is used for GDDR2 and GDDR4 memory type */
+       USHORT usEMRS3Value;    /*  EMRS3 Value is used for GDDR2 and GDDR4 memory type */
+       USHORT usEMRSValue;
+       USHORT usMRSValue;
+       USHORT usReserved;
+       UCHAR ucExtMemoryID;    /*  An external indicator (by hardcode, callback or pin) to tell what is the current memory module */
+       UCHAR ucMemoryType;     /*  [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] - must not be used for now; */
+       UCHAR ucMemoryVenderID; /*  Predefined, never changes across designs or memory type/vendor. If not predefined, vendor detection table gets executed */
+       UCHAR ucMemoryDeviceCfg;        /*  [7:4]=0x0:4M;=0x1:8M;=0x2:16M;0x3:32M....[3:0]=0x0:x4;=0x1:x8;=0x2:x16;=0x3:x32... */
+       UCHAR ucRow;            /*  Number of Row,in power of 2; */
+       UCHAR ucColumn;         /*  Number of Column,in power of 2; */
+       UCHAR ucBank;           /*  Number of Banks; */
+       UCHAR ucRank;           /*  Number of Rank, in power of 2 */
+       UCHAR ucChannelNum;     /*  Number of channel; */
+       UCHAR ucChannelConfig;  /*  [3:0]=Indication of channel combination; [7:4]=Channel bit width, in powers of 2 */
+       UCHAR ucDefaultMVDDQ_ID;        /*  Default MVDDQ setting for this memory block, ID linking to MVDDQ info table to find real set-up data; */
+       UCHAR ucDefaultMVDDC_ID;        /*  Default MVDDC setting for this memory block, ID linking to MVDDC info table to find real set-up data; */
+       UCHAR ucRefreshRateFactor;
+       UCHAR ucReserved[3];
+} ATOM_VRAM_MODULE_V2;
+
+typedef struct _ATOM_MEMORY_TIMING_FORMAT {
+       ULONG ulClkRange;       /*  memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing */
+       union {
+               USHORT usMRS;   /*  mode register */
+               USHORT usDDR3_MR0;
+       };
+       union {
+               USHORT usEMRS;  /*  extended mode register */
+               USHORT usDDR3_MR1;
+       };
+       UCHAR ucCL;             /*  CAS latency */
+       UCHAR ucWL;             /*  WRITE Latency */
+       UCHAR uctRAS;           /*  tRAS */
+       UCHAR uctRC;            /*  tRC */
+       UCHAR uctRFC;           /*  tRFC */
+       UCHAR uctRCDR;          /*  tRCDR */
+       UCHAR uctRCDW;          /*  tRCDW */
+       UCHAR uctRP;            /*  tRP */
+       UCHAR uctRRD;           /*  tRRD */
+       UCHAR uctWR;            /*  tWR */
+       UCHAR uctWTR;           /*  tWTR */
+       UCHAR uctPDIX;          /*  tPDIX */
+       UCHAR uctFAW;           /*  tFAW */
+       UCHAR uctAOND;          /*  tAOND */
+       union {
+               struct {
+                       UCHAR ucflag;   /*  flag to control memory timing calculation. bit0= control EMRS2 Infineon */
+                       UCHAR ucReserved;
+               };
+               USHORT usDDR3_MR2;
+       };
+} ATOM_MEMORY_TIMING_FORMAT;
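+
+/*
+ * Editorial sketch, not part of the original header: per the ulClkRange
+ * comment, timing blocks are sorted by ascending clock and an entry
+ * applies to target clocks up to its range, so the matching entry is
+ * the first one whose range is not below the target.
+ */
+static inline int
+atom_example_pick_mem_timing(const ATOM_MEMORY_TIMING_FORMAT *t, int count,
+                            ULONG target_clk_10khz)
+{
+       int i;
+
+       for (i = 0; i < count; i++)
+               if (target_clk_10khz <= t[i].ulClkRange)
+                       return i;
+       return -1;      /* target clock above every listed range */
+}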
+
+typedef struct _ATOM_MEMORY_TIMING_FORMAT_V1 {
+       ULONG ulClkRange;       /*  memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing */
+       USHORT usMRS;           /*  mode register */
+       USHORT usEMRS;          /*  extended mode register */
+       UCHAR ucCL;             /*  CAS latency */
+       UCHAR ucWL;             /*  WRITE Latency */
+       UCHAR uctRAS;           /*  tRAS */
+       UCHAR uctRC;            /*  tRC */
+       UCHAR uctRFC;           /*  tRFC */
+       UCHAR uctRCDR;          /*  tRCDR */
+       UCHAR uctRCDW;          /*  tRCDW */
+       UCHAR uctRP;            /*  tRP */
+       UCHAR uctRRD;           /*  tRRD */
+       UCHAR uctWR;            /*  tWR */
+       UCHAR uctWTR;           /*  tWTR */
+       UCHAR uctPDIX;          /*  tPDIX */
+       UCHAR uctFAW;           /*  tFAW */
+       UCHAR uctAOND;          /*  tAOND */
+       UCHAR ucflag;           /*  flag to control memory timing calculation. bit0= control EMRS2 Infineon */
+/* ///////////////////////GDDR parameters/////////////////////////////////// */
+       UCHAR uctCCDL;          /*  */
+       UCHAR uctCRCRL;         /*  */
+       UCHAR uctCRCWL;         /*  */
+       UCHAR uctCKE;           /*  */
+       UCHAR uctCKRSE;         /*  */
+       UCHAR uctCKRSX;         /*  */
+       UCHAR uctFAW32;         /*  */
+       UCHAR ucReserved1;      /*  */
+       UCHAR ucReserved2;      /*  */
+       UCHAR ucTerminator;
+} ATOM_MEMORY_TIMING_FORMAT_V1;
+
+typedef struct _ATOM_MEMORY_FORMAT {
+       ULONG ulDllDisClock;    /*  memory DLL will be disabled when target memory clock is below this clock */
+       union {
+               USHORT usEMRS2Value;    /*  EMRS2 Value is used for GDDR2 and GDDR4 memory type */
+               USHORT usDDR3_Reserved; /*  Not used for DDR3 memory */
+       };
+       union {
+               USHORT usEMRS3Value;    /*  EMRS3 Value is used for GDDR2 and GDDR4 memory type */
+               USHORT usDDR3_MR3;      /*  Used for DDR3 memory */
+       };
+       UCHAR ucMemoryType;     /*  [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] - must not be used for now; */
+       UCHAR ucMemoryVenderID; /*  Predefined, never changes across designs or memory type/vendor. If not predefined, vendor detection table gets executed */
+       UCHAR ucRow;            /*  Number of Row,in power of 2; */
+       UCHAR ucColumn;         /*  Number of Column,in power of 2; */
+       UCHAR ucBank;           /*  Number of Banks; */
+       UCHAR ucRank;           /*  Number of Rank, in power of 2 */
+       UCHAR ucBurstSize;      /*  burst size, 0= burst size=4  1= burst size=8 */
+       UCHAR ucDllDisBit;      /*  position of DLL Enable/Disable bit in EMRS ( Extended Mode Register ) */
+       UCHAR ucRefreshRateFactor;      /*  memory refresh rate in unit of ms */
+       UCHAR ucDensity;        /*  _8Mx32, _16Mx32, _16Mx16, _32Mx16 */
+       UCHAR ucPreamble;       /* [7:4] Write Preamble, [3:0] Read Preamble */
+       UCHAR ucMemAttrib;      /*  Memory Device Attribute, like RDBI/WDBI etc */
+       ATOM_MEMORY_TIMING_FORMAT asMemTiming[5];       /* Memory Timing block sort from lower clock to higher clock */
+} ATOM_MEMORY_FORMAT;
+
+typedef struct _ATOM_VRAM_MODULE_V3 {
+       ULONG ulChannelMapCfg;  /*  board dependent parameter: Channel combination */
+       USHORT usSize;          /*  size of ATOM_VRAM_MODULE_V3 */
+       USHORT usDefaultMVDDQ;  /*  board dependent parameter:Default Memory Core Voltage */
+       USHORT usDefaultMVDDC;  /*  board dependent parameter:Default Memory IO Voltage */
+       UCHAR ucExtMemoryID;    /*  An external indicator (by hardcode, callback or pin) to tell what is the current memory module */
+       UCHAR ucChannelNum;     /*  board dependent parameter:Number of channel; */
+       UCHAR ucChannelSize;    /*  board dependent parameter:32bit or 64bit */
+       UCHAR ucVREFI;          /*  board dependent parameter: EXT or INT +160mv to -140mv */
+       UCHAR ucNPL_RT;         /*  board dependent parameter: NPL round trip delay, used to calculate memory timing parameters */
+       UCHAR ucFlag;           /*  To enable/disable functionalities based on memory type */
+       ATOM_MEMORY_FORMAT asMemory;    /*  describes all video memory parameters from the memory spec */
+} ATOM_VRAM_MODULE_V3;
+
+/* ATOM_VRAM_MODULE_V3.ucNPL_RT */
+#define NPL_RT_MASK                                                                                                                    0x0f
+#define BATTERY_ODT_MASK                                                                                               0xc0
+
+#define ATOM_VRAM_MODULE                ATOM_VRAM_MODULE_V3
+
+typedef struct _ATOM_VRAM_MODULE_V4 {
+       ULONG ulChannelMapCfg;  /*  board dependent parameter: Channel combination */
+       USHORT usModuleSize;    /*  size of ATOM_VRAM_MODULE_V4, make it easy for VBIOS to look for next entry of VRAM_MODULE */
+       USHORT usPrivateReserved;       /*  BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! */
+       /*  MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS) */
+       USHORT usReserved;
+       UCHAR ucExtMemoryID;    /*  An external indicator (by hardcode, callback or pin) to tell what is the current memory module */
+       UCHAR ucMemoryType;     /*  [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now; */
+       UCHAR ucChannelNum;     /*  Number of channels present in this module config */
+       UCHAR ucChannelWidth;   /*  0 - 32 bits; 1 - 64 bits */
+       UCHAR ucDensity;        /*  _8Mx32, _16Mx32, _16Mx16, _32Mx16 */
+       UCHAR ucFlag;           /*  To enable/disable functionalities based on memory type */
+       UCHAR ucMisc;           /*  bit0: 0 - single rank; 1 - dual rank;   bit2: 0 - burstlength 4, 1 - burstlength 8 */
+       UCHAR ucVREFI;          /*  board dependent parameter */
+       UCHAR ucNPL_RT;         /*  board dependent parameter: NPL round trip delay, used to calculate memory timing parameters */
+       UCHAR ucPreamble;       /*  [7:4] Write Preamble, [3:0] Read Preamble */
+       UCHAR ucMemorySize;     /*  BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! */
+       /*  Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros */
+       UCHAR ucReserved[3];
+
+/* compared with V3, the struct is flattened by merging ATOM_MEMORY_FORMAT (as is) into V4 at the same level */
+       union {
+               USHORT usEMRS2Value;    /*  EMRS2 Value is used for GDDR2 and GDDR4 memory type */
+               USHORT usDDR3_Reserved;
+       };
+       union {
+               USHORT usEMRS3Value;    /*  EMRS3 Value is used for GDDR2 and GDDR4 memory type */
+               USHORT usDDR3_MR3;      /*  Used for DDR3 memory */
+       };
+       UCHAR ucMemoryVenderID; /*  Predefined, If not predefined, vendor detection table gets executed */
+       UCHAR ucRefreshRateFactor;      /*  [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms) */
+       UCHAR ucReserved2[2];
+       ATOM_MEMORY_TIMING_FORMAT asMemTiming[5];       /* Memory Timing block sort from lower clock to higher clock */
+} ATOM_VRAM_MODULE_V4;
+
+#define VRAM_MODULE_V4_MISC_RANK_MASK       0x3
+#define VRAM_MODULE_V4_MISC_DUAL_RANK       0x1
+#define VRAM_MODULE_V4_MISC_BL_MASK         0x4
+#define VRAM_MODULE_V4_MISC_BL8             0x4
+#define VRAM_MODULE_V4_MISC_DUAL_CS         0x10
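+
+/*
+ * Editorial sketch, not part of the original header: decoding the
+ * ucMisc field of ATOM_VRAM_MODULE_V4 with the masks above (bit0 =
+ * dual rank, bit2 = burst length 8 instead of 4).
+ */
+static inline int atom_example_v4_is_dual_rank(const ATOM_VRAM_MODULE_V4 *m)
+{
+       return (m->ucMisc & VRAM_MODULE_V4_MISC_RANK_MASK) ==
+               VRAM_MODULE_V4_MISC_DUAL_RANK;
+}
+
+static inline int atom_example_v4_burst_length(const ATOM_VRAM_MODULE_V4 *m)
+{
+       return (m->ucMisc & VRAM_MODULE_V4_MISC_BL_MASK) ? 8 : 4;
+}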
+
+typedef struct _ATOM_VRAM_MODULE_V5 {
+       ULONG ulChannelMapCfg;  /*  board dependent parameter: Channel combination */
+       USHORT usModuleSize;    /*  size of ATOM_VRAM_MODULE_V5, make it easy for VBIOS to look for next entry of VRAM_MODULE */
+       USHORT usPrivateReserved;       /*  BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! */
+       /*  MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS) */
+       USHORT usReserved;
+       UCHAR ucExtMemoryID;    /*  An external indicator (by hardcode, callback or pin) to tell what is the current memory module */
+       UCHAR ucMemoryType;     /*  [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now; */
+       UCHAR ucChannelNum;     /*  Number of channels present in this module config */
+       UCHAR ucChannelWidth;   /*  0 - 32 bits; 1 - 64 bits */
+       UCHAR ucDensity;        /*  _8Mx32, _16Mx32, _16Mx16, _32Mx16 */
+       UCHAR ucFlag;           /*  To enable/disable functionalities based on memory type */
+       UCHAR ucMisc;           /*  bit0: 0 - single rank; 1 - dual rank;   bit2: 0 - burstlength 4, 1 - burstlength 8 */
+       UCHAR ucVREFI;          /*  board dependent parameter */
+       UCHAR ucNPL_RT;         /*  board dependent parameter: NPL round trip delay, used to calculate memory timing parameters */
+       UCHAR ucPreamble;       /*  [7:4] Write Preamble, [3:0] Read Preamble */
+       UCHAR ucMemorySize;     /*  BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! */
+       /*  Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros */
+       UCHAR ucReserved[3];
+
+/* compared with V3, the struct is flattened by merging ATOM_MEMORY_FORMAT (as is) into V5 at the same level */
+       USHORT usEMRS2Value;    /*  EMRS2 Value is used for GDDR2 and GDDR4 memory type */
+       USHORT usEMRS3Value;    /*  EMRS3 Value is used for GDDR2 and GDDR4 memory type */
+       UCHAR ucMemoryVenderID; /*  Predefined, If not predefined, vendor detection table gets executed */
+       UCHAR ucRefreshRateFactor;      /*  [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms) */
+       UCHAR ucFIFODepth;      /*  FIFO depth is supposed to be detected during vendor detection, but if we don't do vendor detection we have to hardcode the FIFO depth */
+       UCHAR ucCDR_Bandwidth;  /*  [0:3]=Read CDR bandwidth, [4:7] - Write CDR Bandwidth */
+       ATOM_MEMORY_TIMING_FORMAT_V1 asMemTiming[5];    /* Memory Timing block sort from lower clock to higher clock */
+} ATOM_VRAM_MODULE_V5;
+
+typedef struct _ATOM_VRAM_INFO_V2 {
+       ATOM_COMMON_TABLE_HEADER sHeader;
+       UCHAR ucNumOfVRAMModule;
+       ATOM_VRAM_MODULE aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE];     /*  just for allocation, real number of blocks is in ucNumOfVRAMModule; */
+} ATOM_VRAM_INFO_V2;
+
+typedef struct _ATOM_VRAM_INFO_V3 {
+       ATOM_COMMON_TABLE_HEADER sHeader;
+       USHORT usMemAdjustTblOffset;    /*  offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting */
+       USHORT usMemClkPatchTblOffset;  /*      offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting */
+       USHORT usRerseved;
+       UCHAR aVID_PinsShift[9];        /*  8 bit strap maximum+terminator */
+       UCHAR ucNumOfVRAMModule;
+       ATOM_VRAM_MODULE aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE];     /*  just for allocation, real number of blocks is in ucNumOfVRAMModule; */
+       ATOM_INIT_REG_BLOCK asMemPatch; /*  for allocation */
+       /*      ATOM_INIT_REG_BLOCK                              aMemAdjust; */
+} ATOM_VRAM_INFO_V3;
+
+#define        ATOM_VRAM_INFO_LAST          ATOM_VRAM_INFO_V3
+
+typedef struct _ATOM_VRAM_INFO_V4 {
+       ATOM_COMMON_TABLE_HEADER sHeader;
+       USHORT usMemAdjustTblOffset;    /*  offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting */
+       USHORT usMemClkPatchTblOffset;  /*      offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting */
+       USHORT usRerseved;
+       UCHAR ucMemDQ7_0ByteRemap;      /*  DQ line byte remap, =0: Memory Data line BYTE0, =1: BYTE1, =2: BYTE2, =3: BYTE3 */
+       ULONG ulMemDQ7_0BitRemap;       /*  each DQ line ( 7~0) use 3bits, like: DQ0=Bit[2:0], DQ1:[5:3], ... DQ7:[23:21] */
+       UCHAR ucReservde[4];
+       UCHAR ucNumOfVRAMModule;
+       ATOM_VRAM_MODULE_V4 aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE];  /*  just for allocation, real number of blocks is in ucNumOfVRAMModule; */
+       ATOM_INIT_REG_BLOCK asMemPatch; /*  for allocation */
+       /*      ATOM_INIT_REG_BLOCK                              aMemAdjust; */
+} ATOM_VRAM_INFO_V4;
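+
+/*
+ * Editorial sketch, not part of the original header: VRAM module
+ * entries are variable sized, so the next module is reached by stepping
+ * forward usModuleSize bytes rather than by plain array indexing.
+ */
+static inline const ATOM_VRAM_MODULE_V4 *
+atom_example_next_vram_module(const ATOM_VRAM_MODULE_V4 *mod)
+{
+       return (const ATOM_VRAM_MODULE_V4 *)
+               ((const UCHAR *)mod + mod->usModuleSize);
+}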
+
+typedef struct _ATOM_VRAM_GPIO_DETECTION_INFO {
+       ATOM_COMMON_TABLE_HEADER sHeader;
+       UCHAR aVID_PinsShift[9];        /* 8 bit strap maximum+terminator */
+} ATOM_VRAM_GPIO_DETECTION_INFO;
+
+typedef struct _ATOM_MEMORY_TRAINING_INFO {
+       ATOM_COMMON_TABLE_HEADER sHeader;
+       UCHAR ucTrainingLoop;
+       UCHAR ucReserved[3];
+       ATOM_INIT_REG_BLOCK asMemTrainingSetting;
+} ATOM_MEMORY_TRAINING_INFO;
+
+typedef struct SW_I2C_CNTL_DATA_PARAMETERS {
+       UCHAR ucControl;
+       UCHAR ucData;
+       UCHAR ucSatus;
+       UCHAR ucTemp;
+} SW_I2C_CNTL_DATA_PARAMETERS;
+
+#define SW_I2C_CNTL_DATA_PS_ALLOCATION  SW_I2C_CNTL_DATA_PARAMETERS
+
+typedef struct _SW_I2C_IO_DATA_PARAMETERS {
+       USHORT GPIO_Info;
+       UCHAR ucAct;
+       UCHAR ucData;
+} SW_I2C_IO_DATA_PARAMETERS;
+
+#define SW_I2C_IO_DATA_PS_ALLOCATION  SW_I2C_IO_DATA_PARAMETERS
+
+/****************************SW I2C CNTL DEFINITIONS**********************/
+#define SW_I2C_IO_RESET       0
+#define SW_I2C_IO_GET         1
+#define SW_I2C_IO_DRIVE       2
+#define SW_I2C_IO_SET         3
+#define SW_I2C_IO_START       4
+
+#define SW_I2C_IO_CLOCK       0
+#define SW_I2C_IO_DATA        0x80
+
+#define SW_I2C_IO_ZERO        0
+#define SW_I2C_IO_ONE         0x100
+
+#define SW_I2C_CNTL_READ      0
+#define SW_I2C_CNTL_WRITE     1
+#define SW_I2C_CNTL_START     2
+#define SW_I2C_CNTL_STOP      3
+#define SW_I2C_CNTL_OPEN      4
+#define SW_I2C_CNTL_CLOSE     5
+#define SW_I2C_CNTL_WRITE1BIT 6
+
+/* ==============================VESA definition Portion=============================== */
+#define VESA_OEM_PRODUCT_REV                               '01.00'
+#define VESA_MODE_ATTRIBUTE_MODE_SUPPORT            0xBB       /* refer to VBE spec p.32, no TTY support */
+#define VESA_MODE_WIN_ATTRIBUTE                                                     7
+#define VESA_WIN_SIZE                                                                                       64
+
+typedef struct _PTR_32_BIT_STRUCTURE {
+       USHORT Offset16;
+       USHORT Segment16;
+} PTR_32_BIT_STRUCTURE;
+
+typedef union _PTR_32_BIT_UNION {
+       PTR_32_BIT_STRUCTURE SegmentOffset;
+       ULONG Ptr32_Bit;
+} PTR_32_BIT_UNION;
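+
+/*
+ * Editorial sketch, not part of the original header: the VBE pointers
+ * above are real-mode segment:offset pairs, so the linear address is
+ * segment * 16 plus offset.
+ */
+static inline ULONG atom_example_vbe_ptr_to_linear(PTR_32_BIT_UNION p)
+{
+       return ((ULONG)p.SegmentOffset.Segment16 << 4) +
+               p.SegmentOffset.Offset16;
+}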
+
+typedef struct _VBE_1_2_INFO_BLOCK_UPDATABLE {
+       UCHAR VbeSignature[4];
+       USHORT VbeVersion;
+       PTR_32_BIT_UNION OemStringPtr;
+       UCHAR Capabilities[4];
+       PTR_32_BIT_UNION VideoModePtr;
+       USHORT TotalMemory;
+} VBE_1_2_INFO_BLOCK_UPDATABLE;
+
+typedef struct _VBE_2_0_INFO_BLOCK_UPDATABLE {
+       VBE_1_2_INFO_BLOCK_UPDATABLE CommonBlock;
+       USHORT OemSoftRev;
+       PTR_32_BIT_UNION OemVendorNamePtr;
+       PTR_32_BIT_UNION OemProductNamePtr;
+       PTR_32_BIT_UNION OemProductRevPtr;
+} VBE_2_0_INFO_BLOCK_UPDATABLE;
+
+typedef union _VBE_VERSION_UNION {
+       VBE_2_0_INFO_BLOCK_UPDATABLE VBE_2_0_InfoBlock;
+       VBE_1_2_INFO_BLOCK_UPDATABLE VBE_1_2_InfoBlock;
+} VBE_VERSION_UNION;
+
+typedef struct _VBE_INFO_BLOCK {
+       VBE_VERSION_UNION UpdatableVBE_Info;
+       UCHAR Reserved[222];
+       UCHAR OemData[256];
+} VBE_INFO_BLOCK;
+
+typedef struct _VBE_FP_INFO {
+       USHORT HSize;
+       USHORT VSize;
+       USHORT FPType;
+       UCHAR RedBPP;
+       UCHAR GreenBPP;
+       UCHAR BlueBPP;
+       UCHAR ReservedBPP;
+       ULONG RsvdOffScrnMemSize;
+       ULONG RsvdOffScrnMEmPtr;
+       UCHAR Reserved[14];
+} VBE_FP_INFO;
+
+typedef struct _VESA_MODE_INFO_BLOCK {
+/*  Mandatory information for all VBE revisions */
+       USHORT ModeAttributes;  /*                  dw      ?       ; mode attributes */
+       UCHAR WinAAttributes;   /*                    db      ?       ; window A attributes */
+       UCHAR WinBAttributes;   /*                    db      ?       ; window B attributes */
+       USHORT WinGranularity;  /*                    dw      ?       ; window granularity */
+       USHORT WinSize;         /*                    dw      ?       ; window size */
+       USHORT WinASegment;     /*                    dw      ?       ; window A start segment */
+       USHORT WinBSegment;     /*                    dw      ?       ; window B start segment */
+       ULONG WinFuncPtr;       /*                    dd      ?       ; real mode pointer to window function */
+       USHORT BytesPerScanLine;        /*                    dw      ?       ; bytes per scan line */
+
+/* ; Mandatory information for VBE 1.2 and above */
+       USHORT XResolution;     /*                         dw      ?       ; horizontal resolution in pixels or characters */
+       USHORT YResolution;     /*                   dw      ?       ; vertical resolution in pixels or characters */
+       UCHAR XCharSize;        /*                   db      ?       ; character cell width in pixels */
+       UCHAR YCharSize;        /*                   db      ?       ; character cell height in pixels */
+       UCHAR NumberOfPlanes;   /*                   db      ?       ; number of memory planes */
+       UCHAR BitsPerPixel;     /*                   db      ?       ; bits per pixel */
+       UCHAR NumberOfBanks;    /*                   db      ?       ; number of banks */
+       UCHAR MemoryModel;      /*                   db      ?       ; memory model type */
+       UCHAR BankSize;         /*                   db      ?       ; bank size in KB */
+       UCHAR NumberOfImagePages;       /*            db    ?       ; number of images */
+       UCHAR ReservedForPageFunction;  /* db  1       ; reserved for page function */
+
+/* ; Direct Color fields(required for direct/6 and YUV/7 memory models) */
+       UCHAR RedMaskSize;      /*           db      ?       ; size of direct color red mask in bits */
+       UCHAR RedFieldPosition; /*           db      ?       ; bit position of lsb of red mask */
+       UCHAR GreenMaskSize;    /*           db      ?       ; size of direct color green mask in bits */
+       UCHAR GreenFieldPosition;       /*           db      ?       ; bit position of lsb of green mask */
+       UCHAR BlueMaskSize;     /*           db      ?       ; size of direct color blue mask in bits */
+       UCHAR BlueFieldPosition;        /*           db      ?       ; bit position of lsb of blue mask */
+       UCHAR RsvdMaskSize;     /*           db      ?       ; size of direct color reserved mask in bits */
+       UCHAR RsvdFieldPosition;        /*           db      ?       ; bit position of lsb of reserved mask */
+       UCHAR DirectColorModeInfo;      /*           db      ?       ; direct color mode attributes */
+
+/* ; Mandatory information for VBE 2.0 and above */
+       ULONG PhysBasePtr;      /*           dd      ?       ; physical address for flat memory frame buffer */
+       ULONG Reserved_1;       /*           dd      0       ; reserved - always set to 0 */
+       USHORT Reserved_2;      /*     dw    0       ; reserved - always set to 0 */
+
+/* ; Mandatory information for VBE 3.0 and above */
+       USHORT LinBytesPerScanLine;     /*         dw      ?       ; bytes per scan line for linear modes */
+       UCHAR BnkNumberOfImagePages;    /*         db      ?       ; number of images for banked modes */
+       UCHAR LinNumberOfImagPages;     /*         db      ?       ; number of images for linear modes */
+       UCHAR LinRedMaskSize;   /*         db      ?       ; size of direct color red mask(linear modes) */
+       UCHAR LinRedFieldPosition;      /*         db      ?       ; bit position of lsb of red mask(linear modes) */
+       UCHAR LinGreenMaskSize; /*         db      ?       ; size of direct color green mask(linear modes) */
+       UCHAR LinGreenFieldPosition;    /*         db      ?       ; bit position of lsb of green mask(linear modes) */
+       UCHAR LinBlueMaskSize;  /*         db      ?       ; size of direct color blue mask(linear modes) */
+       UCHAR LinBlueFieldPosition;     /*         db      ?       ; bit position of lsb of blue mask(linear modes) */
+       UCHAR LinRsvdMaskSize;  /*         db      ?       ; size of direct color reserved mask(linear modes) */
+       UCHAR LinRsvdFieldPosition;     /*         db      ?       ; bit position of lsb of reserved mask(linear modes) */
+       ULONG MaxPixelClock;    /*         dd      ?       ; maximum pixel clock(in Hz) for graphics mode */
+       UCHAR Reserved;         /*         db      190 dup (0) */
+} VESA_MODE_INFO_BLOCK;
+
+/*  BIOS function CALLS */
+#define ATOM_BIOS_EXTENDED_FUNCTION_CODE        0xA0   /*  ATI Extended Function code */
+#define ATOM_BIOS_FUNCTION_COP_MODE             0x00
+#define ATOM_BIOS_FUNCTION_SHORT_QUERY1         0x04
+#define ATOM_BIOS_FUNCTION_SHORT_QUERY2         0x05
+#define ATOM_BIOS_FUNCTION_SHORT_QUERY3         0x06
+#define ATOM_BIOS_FUNCTION_GET_DDC              0x0B
+#define ATOM_BIOS_FUNCTION_ASIC_DSTATE          0x0E
+#define ATOM_BIOS_FUNCTION_DEBUG_PLAY           0x0F
+#define ATOM_BIOS_FUNCTION_STV_STD              0x16
+#define ATOM_BIOS_FUNCTION_DEVICE_DET           0x17
+#define ATOM_BIOS_FUNCTION_DEVICE_SWITCH        0x18
+
+#define ATOM_BIOS_FUNCTION_PANEL_CONTROL        0x82
+#define ATOM_BIOS_FUNCTION_OLD_DEVICE_DET       0x83
+#define ATOM_BIOS_FUNCTION_OLD_DEVICE_SWITCH    0x84
+#define ATOM_BIOS_FUNCTION_HW_ICON              0x8A
+#define ATOM_BIOS_FUNCTION_SET_CMOS             0x8B
+#define SUB_FUNCTION_UPDATE_DISPLAY_INFO        0x8000 /*  Sub function 80 */
+#define SUB_FUNCTION_UPDATE_EXPANSION_INFO      0x8100 /*  Sub function 81 */
+
+#define ATOM_BIOS_FUNCTION_DISPLAY_INFO         0x8D
+#define ATOM_BIOS_FUNCTION_DEVICE_ON_OFF        0x8E
+#define ATOM_BIOS_FUNCTION_VIDEO_STATE          0x8F
+#define ATOM_SUB_FUNCTION_GET_CRITICAL_STATE    0x0300 /*  Sub function 03 */
+#define ATOM_SUB_FUNCTION_GET_LIDSTATE          0x0700 /*  Sub function 7 */
+#define ATOM_SUB_FUNCTION_THERMAL_STATE_NOTICE  0x1400 /*  Notify caller the current thermal state */
+#define ATOM_SUB_FUNCTION_CRITICAL_STATE_NOTICE 0x8300 /*  Notify caller the current critical state */
+#define ATOM_SUB_FUNCTION_SET_LIDSTATE          0x8500 /*  Sub function 85 */
+#define ATOM_SUB_FUNCTION_GET_REQ_DISPLAY_FROM_SBIOS_MODE 0x8900       /*  Sub function 89 */
+#define ATOM_SUB_FUNCTION_INFORM_ADC_SUPPORT    0x9400 /*  Notify caller that ADC is supported */
+
+#define ATOM_BIOS_FUNCTION_VESA_DPMS            0x4F10 /*  Set DPMS */
+#define ATOM_SUB_FUNCTION_SET_DPMS              0x0001 /*  BL: Sub function 01 */
+#define ATOM_SUB_FUNCTION_GET_DPMS              0x0002 /*  BL: Sub function 02 */
+#define ATOM_PARAMETER_VESA_DPMS_ON             0x0000 /*  BH Parameter for DPMS ON. */
+#define ATOM_PARAMETER_VESA_DPMS_STANDBY        0x0100 /*  BH Parameter for DPMS STANDBY */
+#define ATOM_PARAMETER_VESA_DPMS_SUSPEND        0x0200 /*  BH Parameter for DPMS SUSPEND */
+#define ATOM_PARAMETER_VESA_DPMS_OFF            0x0400 /*  BH Parameter for DPMS OFF */
+#define ATOM_PARAMETER_VESA_DPMS_REDUCE_ON      0x0800 /*  BH Parameter for DPMS REDUCE ON (NOT SUPPORTED) */
+
+#define ATOM_BIOS_RETURN_CODE_MASK              0x0000FF00L
+#define ATOM_BIOS_REG_HIGH_MASK                 0x0000FF00L
+#define ATOM_BIOS_REG_LOW_MASK                  0x000000FFL
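+
+/*
+ * Editorial sketch, not part of the original header: per the masks
+ * above, the BIOS return code sits in bits 15:8 of the returned
+ * register value.
+ */
+static inline UCHAR atom_example_bios_return_code(ULONG reg)
+{
+       return (UCHAR)((reg & ATOM_BIOS_RETURN_CODE_MASK) >> 8);
+}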
+
+/*  structure used for VBIOS only */
+
+/* DispOutInfoTable */
+typedef struct _ASIC_TRANSMITTER_INFO {
+       USHORT usTransmitterObjId;
+       USHORT usSupportDevice;
+       UCHAR ucTransmitterCmdTblId;
+       UCHAR ucConfig;
+       UCHAR ucEncoderID;      /* available 1st encoder ( default ) */
+       UCHAR ucOptionEncoderID;        /* available 2nd encoder ( optional ) */
+       UCHAR uc2ndEncoderID;
+       UCHAR ucReserved;
+} ASIC_TRANSMITTER_INFO;
+
+typedef struct _ASIC_ENCODER_INFO {
+       UCHAR ucEncoderID;
+       UCHAR ucEncoderConfig;
+       USHORT usEncoderCmdTblId;
+} ASIC_ENCODER_INFO;
+
+typedef struct _ATOM_DISP_OUT_INFO {
+       ATOM_COMMON_TABLE_HEADER sHeader;
+       USHORT ptrTransmitterInfo;
+       USHORT ptrEncoderInfo;
+       ASIC_TRANSMITTER_INFO asTransmitterInfo[1];
+       ASIC_ENCODER_INFO asEncoderInfo[1];
+} ATOM_DISP_OUT_INFO;
+
+/*  DispDevicePriorityInfo */
+typedef struct _ATOM_DISPLAY_DEVICE_PRIORITY_INFO {
+       ATOM_COMMON_TABLE_HEADER sHeader;
+       USHORT asDevicePriority[16];
+} ATOM_DISPLAY_DEVICE_PRIORITY_INFO;
+
+/* ProcessAuxChannelTransactionTable */
+typedef struct _PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS {
+       USHORT lpAuxRequest;
+       USHORT lpDataOut;
+       UCHAR ucChannelID;
+       union {
+               UCHAR ucReplyStatus;
+               UCHAR ucDelay;
+       };
+       UCHAR ucDataOutLen;
+       UCHAR ucReserved;
+} PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS;
+
+#define PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION                  PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS
+
+/* GetSinkType */
+
+typedef struct _DP_ENCODER_SERVICE_PARAMETERS {
+       USHORT ucLinkClock;
+       union {
+               UCHAR ucConfig; /*  for DP training command */
+               UCHAR ucI2cId;  /*  use for GET_SINK_TYPE command */
+       };
+       UCHAR ucAction;
+       UCHAR ucStatus;
+       UCHAR ucLaneNum;
+       UCHAR ucReserved[2];
+} DP_ENCODER_SERVICE_PARAMETERS;
+
+/*  ucAction */
+#define ATOM_DP_ACTION_GET_SINK_TYPE                                                   0x01
+#define ATOM_DP_ACTION_TRAINING_START                                                  0x02
+#define ATOM_DP_ACTION_TRAINING_COMPLETE                                       0x03
+#define ATOM_DP_ACTION_TRAINING_PATTERN_SEL                            0x04
+#define ATOM_DP_ACTION_SET_VSWING_PREEMP                                       0x05
+#define ATOM_DP_ACTION_GET_VSWING_PREEMP                                       0x06
+#define ATOM_DP_ACTION_BLANKING                   0x07
+
+/*  ucConfig */
+#define ATOM_DP_CONFIG_ENCODER_SEL_MASK                                                0x03
+#define ATOM_DP_CONFIG_DIG1_ENCODER                                                            0x00
+#define ATOM_DP_CONFIG_DIG2_ENCODER                                                            0x01
+#define ATOM_DP_CONFIG_EXTERNAL_ENCODER                                                0x02
+#define ATOM_DP_CONFIG_LINK_SEL_MASK                                                   0x04
+#define ATOM_DP_CONFIG_LINK_A                                                                                  0x00
+#define ATOM_DP_CONFIG_LINK_B                                                                                  0x04
+
+#define DP_ENCODER_SERVICE_PS_ALLOCATION                               WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
+
+/*  DP_TRAINING_TABLE */
+#define DPCD_SET_LINKRATE_LANENUM_PATTERN1_TBL_ADDR                            ATOM_DP_TRAINING_TBL_ADDR
+#define DPCD_SET_SS_CNTL_TBL_ADDR                                                                                                      (ATOM_DP_TRAINING_TBL_ADDR + 8 )
+#define DPCD_SET_LANE_VSWING_PREEMP_TBL_ADDR                                                   (ATOM_DP_TRAINING_TBL_ADDR + 16)
+#define DPCD_SET_TRAINING_PATTERN0_TBL_ADDR                                                            (ATOM_DP_TRAINING_TBL_ADDR + 24)
+#define DPCD_SET_TRAINING_PATTERN2_TBL_ADDR                                                            (ATOM_DP_TRAINING_TBL_ADDR + 32)
+#define DPCD_GET_LINKRATE_LANENUM_SS_TBL_ADDR                                                  (ATOM_DP_TRAINING_TBL_ADDR + 40)
+#define        DPCD_GET_LANE_STATUS_ADJUST_TBL_ADDR                                                    (ATOM_DP_TRAINING_TBL_ADDR + 48)
+#define DP_I2C_AUX_DDC_WRITE_START_TBL_ADDR                                                            (ATOM_DP_TRAINING_TBL_ADDR + 60)
+#define DP_I2C_AUX_DDC_WRITE_TBL_ADDR                                                                                  (ATOM_DP_TRAINING_TBL_ADDR + 64)
+#define DP_I2C_AUX_DDC_READ_START_TBL_ADDR                                                             (ATOM_DP_TRAINING_TBL_ADDR + 72)
+#define DP_I2C_AUX_DDC_READ_TBL_ADDR                                                                                   (ATOM_DP_TRAINING_TBL_ADDR + 76)
+#define DP_I2C_AUX_DDC_READ_END_TBL_ADDR                                                                       (ATOM_DP_TRAINING_TBL_ADDR + 80)
+
+typedef struct _PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS {
+       UCHAR ucI2CSpeed;
+       union {
+               UCHAR ucRegIndex;
+               UCHAR ucStatus;
+       };
+       USHORT lpI2CDataOut;
+       UCHAR ucFlag;
+       UCHAR ucTransBytes;
+       UCHAR ucSlaveAddr;
+       UCHAR ucLineNumber;
+} PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS;
+
+#define PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION       PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS
+
+/* ucFlag */
+#define HW_I2C_WRITE        1
+#define HW_I2C_READ         0
+
+/****************************************************************************/
+/* Portion VI: Obsolete definitions */
+/****************************************************************************/
+
+/* ========================================================================================== */
+/* Remove the definitions below when driver is ready! */
+typedef struct _ATOM_DAC_INFO {
+       ATOM_COMMON_TABLE_HEADER sHeader;
+       USHORT usMaxFrequency;  /*  in 10kHz unit */
+       USHORT usReserved;
+} ATOM_DAC_INFO;
+
+typedef struct _COMPASSIONATE_DATA {
+       ATOM_COMMON_TABLE_HEADER sHeader;
+
+       /* ==============================  DAC1 portion */
+       UCHAR ucDAC1_BG_Adjustment;
+       UCHAR ucDAC1_DAC_Adjustment;
+       USHORT usDAC1_FORCE_Data;
+       /* ==============================  DAC2 portion */
+       UCHAR ucDAC2_CRT2_BG_Adjustment;
+       UCHAR ucDAC2_CRT2_DAC_Adjustment;
+       USHORT usDAC2_CRT2_FORCE_Data;
+       USHORT usDAC2_CRT2_MUX_RegisterIndex;
+       UCHAR ucDAC2_CRT2_MUX_RegisterInfo;     /* Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low */
+       UCHAR ucDAC2_NTSC_BG_Adjustment;
+       UCHAR ucDAC2_NTSC_DAC_Adjustment;
+       USHORT usDAC2_TV1_FORCE_Data;
+       USHORT usDAC2_TV1_MUX_RegisterIndex;
+       UCHAR ucDAC2_TV1_MUX_RegisterInfo;      /* Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low */
+       UCHAR ucDAC2_CV_BG_Adjustment;
+       UCHAR ucDAC2_CV_DAC_Adjustment;
+       USHORT usDAC2_CV_FORCE_Data;
+       USHORT usDAC2_CV_MUX_RegisterIndex;
+       UCHAR ucDAC2_CV_MUX_RegisterInfo;       /* Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low */
+       UCHAR ucDAC2_PAL_BG_Adjustment;
+       UCHAR ucDAC2_PAL_DAC_Adjustment;
+       USHORT usDAC2_TV2_FORCE_Data;
+} COMPASSIONATE_DATA;
+
+/****************************Supported Device Info Table Definitions**********************/
+/*   ucConnectInfo: */
+/*     [7:4] - connector type */
+/*       = 1   - VGA connector */
+/*       = 2   - DVI-I */
+/*       = 3   - DVI-D */
+/*       = 4   - DVI-A */
+/*       = 5   - SVIDEO */
+/*       = 6   - COMPOSITE */
+/*       = 7   - LVDS */
+/*       = 8   - DIGITAL LINK */
+/*       = 9   - SCART */
+/*       = 0xA - HDMI_type A */
+/*       = 0xB - HDMI_type B */
+/*       = 0xE - Special case1 (DVI+DIN) */
+/*       Others=TBD */
+/*     [3:0] - DAC Associated */
+/*       = 0   - no DAC */
+/*       = 1   - DACA */
+/*       = 2   - DACB */
+/*       = 3   - External DAC */
+/*       Others=TBD */
+/*  */
+
+typedef struct _ATOM_CONNECTOR_INFO {
+#if ATOM_BIG_ENDIAN
+       UCHAR bfConnectorType:4;
+       UCHAR bfAssociatedDAC:4;
+#else
+       UCHAR bfAssociatedDAC:4;
+       UCHAR bfConnectorType:4;
+#endif
+} ATOM_CONNECTOR_INFO;
+
+typedef union _ATOM_CONNECTOR_INFO_ACCESS {
+       ATOM_CONNECTOR_INFO sbfAccess;
+       UCHAR ucAccess;
+} ATOM_CONNECTOR_INFO_ACCESS;
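+
+/*
+ * Editorial sketch, not part of the original header: reading the fields
+ * through the bit-field view keeps the [7:4] connector type / [3:0]
+ * associated DAC split correct on either endianness.
+ */
+static inline UCHAR
+atom_example_connector_type(ATOM_CONNECTOR_INFO_ACCESS info)
+{
+       return (UCHAR)info.sbfAccess.bfConnectorType;
+}
+
+static inline UCHAR
+atom_example_connector_dac(ATOM_CONNECTOR_INFO_ACCESS info)
+{
+       return (UCHAR)info.sbfAccess.bfAssociatedDAC;
+}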
+
+typedef struct _ATOM_CONNECTOR_INFO_I2C {
+       ATOM_CONNECTOR_INFO_ACCESS sucConnectorInfo;
+       ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;
+} ATOM_CONNECTOR_INFO_I2C;
+
+typedef struct _ATOM_SUPPORTED_DEVICES_INFO {
+       ATOM_COMMON_TABLE_HEADER sHeader;
+       USHORT usDeviceSupport;
+       ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO];
+} ATOM_SUPPORTED_DEVICES_INFO;
+
+#define NO_INT_SRC_MAPPED       0xFF
+
+typedef struct _ATOM_CONNECTOR_INC_SRC_BITMAP {
+       UCHAR ucIntSrcBitmap;
+} ATOM_CONNECTOR_INC_SRC_BITMAP;
+
+typedef struct _ATOM_SUPPORTED_DEVICES_INFO_2 {
+       ATOM_COMMON_TABLE_HEADER sHeader;
+       USHORT usDeviceSupport;
+       ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO_2];
+       ATOM_CONNECTOR_INC_SRC_BITMAP
+           asIntSrcInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO_2];
+} ATOM_SUPPORTED_DEVICES_INFO_2;
+
+typedef struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 {
+       ATOM_COMMON_TABLE_HEADER sHeader;
+       USHORT usDeviceSupport;
+       ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE];
+       ATOM_CONNECTOR_INC_SRC_BITMAP asIntSrcInfo[ATOM_MAX_SUPPORTED_DEVICE];
+} ATOM_SUPPORTED_DEVICES_INFO_2d1;
+
+#define ATOM_SUPPORTED_DEVICES_INFO_LAST ATOM_SUPPORTED_DEVICES_INFO_2d1
+
+typedef struct _ATOM_MISC_CONTROL_INFO {
+       USHORT usFrequency;
+       UCHAR ucPLL_ChargePump; /*  PLL charge-pump gain control */
+       UCHAR ucPLL_DutyCycle;  /*  PLL duty cycle control */
+       UCHAR ucPLL_VCO_Gain;   /*  PLL VCO gain control */
+       UCHAR ucPLL_VoltageSwing;       /*  PLL driver voltage swing control */
+} ATOM_MISC_CONTROL_INFO;
+
+#define ATOM_MAX_MISC_INFO       4
+
+typedef struct _ATOM_TMDS_INFO {
+       ATOM_COMMON_TABLE_HEADER sHeader;
+       USHORT usMaxFrequency;  /*  in 10Khz */
+       ATOM_MISC_CONTROL_INFO asMiscInfo[ATOM_MAX_MISC_INFO];
+} ATOM_TMDS_INFO;
+
+typedef struct _ATOM_ENCODER_ANALOG_ATTRIBUTE {
+       UCHAR ucTVStandard;     /* Same as TV standards defined above, */
+       UCHAR ucPadding[1];
+} ATOM_ENCODER_ANALOG_ATTRIBUTE;
+
+typedef struct _ATOM_ENCODER_DIGITAL_ATTRIBUTE {
+       UCHAR ucAttribute;      /* Same as other digital encoder attributes defined above */
+       UCHAR ucPadding[1];
+} ATOM_ENCODER_DIGITAL_ATTRIBUTE;
+
+typedef union _ATOM_ENCODER_ATTRIBUTE {
+       ATOM_ENCODER_ANALOG_ATTRIBUTE sAlgAttrib;
+       ATOM_ENCODER_DIGITAL_ATTRIBUTE sDigAttrib;
+} ATOM_ENCODER_ATTRIBUTE;
+
+typedef struct _DVO_ENCODER_CONTROL_PARAMETERS {
+       USHORT usPixelClock;
+       USHORT usEncoderID;
+       UCHAR ucDeviceType;     /* Use ATOM_DEVICE_xxx1_Index to indicate device type only. */
+       UCHAR ucAction;         /* ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT */
+       ATOM_ENCODER_ATTRIBUTE usDevAttr;
+} DVO_ENCODER_CONTROL_PARAMETERS;
+
+typedef struct _DVO_ENCODER_CONTROL_PS_ALLOCATION {
+       DVO_ENCODER_CONTROL_PARAMETERS sDVOEncoder;
+       WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;     /* Caller doesn't need to init this portion */
+} DVO_ENCODER_CONTROL_PS_ALLOCATION;
+
+#define ATOM_XTMDS_ASIC_SI164_ID        1
+#define ATOM_XTMDS_ASIC_SI178_ID        2
+#define ATOM_XTMDS_ASIC_TFP513_ID       3
+#define ATOM_XTMDS_SUPPORTED_SINGLELINK 0x00000001
+#define ATOM_XTMDS_SUPPORTED_DUALLINK   0x00000002
+#define ATOM_XTMDS_MVPU_FPGA            0x00000004
+
+typedef struct _ATOM_XTMDS_INFO {
+       ATOM_COMMON_TABLE_HEADER sHeader;
+       USHORT usSingleLinkMaxFrequency;
+       ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;     /* I2C ID used to control the external chip */
+       UCHAR ucXtransimitterID;
+       UCHAR ucSupportedLink;  /*  Bit field, bit0=1, single link supported;bit1=1,dual link supported */
+       UCHAR ucSequnceAlterID; /*  Even with the same external TMDS asic, it's possible that the programming sequence alters */
+       /*  due to design. This ID is used to alert driver that the sequence is not "standard"! */
+       UCHAR ucMasterAddress;  /*  Address to control Master xTMDS Chip */
+       UCHAR ucSlaveAddress;   /*  Address to control Slave xTMDS Chip */
+} ATOM_XTMDS_INFO;
+
+typedef struct _DFP_DPMS_STATUS_CHANGE_PARAMETERS {
+       UCHAR ucEnable;         /*  ATOM_ENABLE=On or ATOM_DISABLE=Off */
+       UCHAR ucDevice;         /*  ATOM_DEVICE_DFP1_INDEX.... */
+       UCHAR ucPadding[2];
+} DFP_DPMS_STATUS_CHANGE_PARAMETERS;
+
+/****************************Legacy Power Play Table Definitions **********************/
+
+/* Definitions for ulPowerPlayMiscInfo */
+#define ATOM_PM_MISCINFO_SPLIT_CLOCK                     0x00000000L
+#define ATOM_PM_MISCINFO_USING_MCLK_SRC                  0x00000001L
+#define ATOM_PM_MISCINFO_USING_SCLK_SRC                  0x00000002L
+
+#define ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT            0x00000004L
+#define ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH        0x00000008L
+
+#define ATOM_PM_MISCINFO_LOAD_PERFORMANCE_EN             0x00000010L
+
+#define ATOM_PM_MISCINFO_ENGINE_CLOCK_CONTRL_EN          0x00000020L
+#define ATOM_PM_MISCINFO_MEMORY_CLOCK_CONTRL_EN          0x00000040L
+#define ATOM_PM_MISCINFO_PROGRAM_VOLTAGE                 0x00000080L   /* When this bit is set, ucVoltageDropIndex is not an index for a GPIO pin, but a voltage ID that SW needs to program */
+
+#define ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN      0x00000100L
+#define ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN         0x00000200L
+#define ATOM_PM_MISCINFO_ASIC_SLEEP_MODE_EN              0x00000400L
+#define ATOM_PM_MISCINFO_LOAD_BALANCE_EN                 0x00000800L
+#define ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE     0x00001000L
+#define ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE 0x00002000L
+#define ATOM_PM_MISCINFO_LOW_LCD_REFRESH_RATE            0x00004000L
+
+#define ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE             0x00008000L
+#define ATOM_PM_MISCINFO_OVER_CLOCK_MODE                 0x00010000L
+#define ATOM_PM_MISCINFO_OVER_DRIVE_MODE                 0x00020000L
+#define ATOM_PM_MISCINFO_POWER_SAVING_MODE               0x00040000L
+#define ATOM_PM_MISCINFO_THERMAL_DIODE_MODE              0x00080000L
+
+#define ATOM_PM_MISCINFO_FRAME_MODULATION_MASK           0x00300000L   /* 0-FM Disable, 1-2 level FM, 2-4 level FM, 3-Reserved */
+#define ATOM_PM_MISCINFO_FRAME_MODULATION_SHIFT          20
+
+#define ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE                 0x00400000L
+#define ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2      0x00800000L
+#define ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4      0x01000000L
+#define ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN            0x02000000L   /* When set, Dynamic */
+#define ATOM_PM_MISCINFO_DYNAMIC_MC_HOST_BLOCK_EN        0x04000000L   /* When set, Dynamic */
+#define ATOM_PM_MISCINFO_3D_ACCELERATION_EN              0x08000000L   /* When set, This mode is for accelerated 3D mode */
+
+#define ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_MASK   0x70000000L   /* 1-Optimal Battery Life Group, 2-High Battery, 3-Balanced, 4-High Performance, 5- Optimal Performance (Default state with Default clocks) */
+#define ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_SHIFT  28
+#define ATOM_PM_MISCINFO_ENABLE_BACK_BIAS                0x80000000L
+
+#define ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE            0x00000001L
+#define ATOM_PM_MISCINFO2_MULTI_DISPLAY_SUPPORT          0x00000002L
+#define ATOM_PM_MISCINFO2_DYNAMIC_BACK_BIAS_EN           0x00000004L
+#define ATOM_PM_MISCINFO2_FS3D_OVERDRIVE_INFO            0x00000008L
+#define ATOM_PM_MISCINFO2_FORCEDLOWPWR_MODE              0x00000010L
+#define ATOM_PM_MISCINFO2_VDDCI_DYNAMIC_VOLTAGE_EN       0x00000020L
+#define ATOM_PM_MISCINFO2_VIDEO_PLAYBACK_CAPABLE         0x00000040L   /* If this bit is set in multi-pp mode, then driver will pick the one with the minimum power consumption. */
+                                                                     /* If it's not set in any pp mode, driver will use its default logic to pick a pp mode for video playback */
+#define ATOM_PM_MISCINFO2_NOT_VALID_ON_DC                0x00000080L
+#define ATOM_PM_MISCINFO2_STUTTER_MODE_EN                0x00000100L
+#define ATOM_PM_MISCINFO2_UVD_SUPPORT_MODE               0x00000200L
+
+/* ucTableFormatRevision=1 */
+/* ucTableContentRevision=1 */
+typedef struct _ATOM_POWERMODE_INFO {
+       ULONG ulMiscInfo;       /* The power level should be arranged in ascending order */
+       ULONG ulReserved1;      /*  must set to 0 */
+       ULONG ulReserved2;      /*  must set to 0 */
+       USHORT usEngineClock;
+       USHORT usMemoryClock;
+       UCHAR ucVoltageDropIndex;       /*  index to GPIO table */
+       UCHAR ucSelectedPanel_RefreshRate;      /*  panel refresh rate */
+       UCHAR ucMinTemperature;
+       UCHAR ucMaxTemperature;
+       UCHAR ucNumPciELanes;   /*  number of PCIE lanes */
+} ATOM_POWERMODE_INFO;
+
+/* ucTableFormatRevision=2 */
+/* ucTableContentRevision=1 */
+typedef struct _ATOM_POWERMODE_INFO_V2 {
+       ULONG ulMiscInfo;       /* The power level should be arranged in ascending order */
+       ULONG ulMiscInfo2;
+       ULONG ulEngineClock;
+       ULONG ulMemoryClock;
+       UCHAR ucVoltageDropIndex;       /*  index to GPIO table */
+       UCHAR ucSelectedPanel_RefreshRate;      /*  panel refresh rate */
+       UCHAR ucMinTemperature;
+       UCHAR ucMaxTemperature;
+       UCHAR ucNumPciELanes;   /*  number of PCIE lanes */
+} ATOM_POWERMODE_INFO_V2;
+
+/* ucTableFormatRevision=2 */
+/* ucTableContentRevision=2 */
+typedef struct _ATOM_POWERMODE_INFO_V3 {
+       ULONG ulMiscInfo;       /* The power level should be arranged in ascending order */
+       ULONG ulMiscInfo2;
+       ULONG ulEngineClock;
+       ULONG ulMemoryClock;
+       UCHAR ucVoltageDropIndex;       /*  index to Core (VDDC) voltage table */
+       UCHAR ucSelectedPanel_RefreshRate;      /*  panel refresh rate */
+       UCHAR ucMinTemperature;
+       UCHAR ucMaxTemperature;
+       UCHAR ucNumPciELanes;   /*  number of PCIE lanes */
+       UCHAR ucVDDCI_VoltageDropIndex; /*  index to VDDCI voltage table */
+} ATOM_POWERMODE_INFO_V3;
+
+#define ATOM_MAX_NUMBEROF_POWER_BLOCK  8
+
+#define ATOM_PP_OVERDRIVE_INTBITMAP_AUXWIN            0x01
+#define ATOM_PP_OVERDRIVE_INTBITMAP_OVERDRIVE         0x02
+
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_LM63      0x01
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_ADM1032   0x02
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_ADM1030   0x03
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_MUA6649   0x04
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_LM64      0x05
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_F75375    0x06
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_ASC7512   0x07     /*  Andigilog */
+
+typedef struct _ATOM_POWERPLAY_INFO {
+       ATOM_COMMON_TABLE_HEADER sHeader;
+       UCHAR ucOverdriveThermalController;
+       UCHAR ucOverdriveI2cLine;
+       UCHAR ucOverdriveIntBitmap;
+       UCHAR ucOverdriveControllerAddress;
+       UCHAR ucSizeOfPowerModeEntry;
+       UCHAR ucNumOfPowerModeEntries;
+       ATOM_POWERMODE_INFO asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
+} ATOM_POWERPLAY_INFO;
+
+typedef struct _ATOM_POWERPLAY_INFO_V2 {
+       ATOM_COMMON_TABLE_HEADER sHeader;
+       UCHAR ucOverdriveThermalController;
+       UCHAR ucOverdriveI2cLine;
+       UCHAR ucOverdriveIntBitmap;
+       UCHAR ucOverdriveControllerAddress;
+       UCHAR ucSizeOfPowerModeEntry;
+       UCHAR ucNumOfPowerModeEntries;
+       ATOM_POWERMODE_INFO_V2 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
+} ATOM_POWERPLAY_INFO_V2;
+
+typedef struct _ATOM_POWERPLAY_INFO_V3 {
+       ATOM_COMMON_TABLE_HEADER sHeader;
+       UCHAR ucOverdriveThermalController;
+       UCHAR ucOverdriveI2cLine;
+       UCHAR ucOverdriveIntBitmap;
+       UCHAR ucOverdriveControllerAddress;
+       UCHAR ucSizeOfPowerModeEntry;
+       UCHAR ucNumOfPowerModeEntries;
+       ATOM_POWERMODE_INFO_V3 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
+} ATOM_POWERPLAY_INFO_V3;
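+
+/*
+ * Editorial sketch, not part of the original header: assuming
+ * ucSizeOfPowerModeEntry is the on-ROM entry stride, power mode i is
+ * reached by byte arithmetic rather than by indexing the fixed-size
+ * allocation array.
+ */
+static inline const ATOM_POWERMODE_INFO_V3 *
+atom_example_power_mode_v3(const ATOM_POWERPLAY_INFO_V3 *pp, UCHAR i)
+{
+       if (i >= pp->ucNumOfPowerModeEntries)
+               return 0;       /* no such entry */
+       return (const ATOM_POWERMODE_INFO_V3 *)
+               ((const UCHAR *)pp->asPowerPlayInfo +
+                (ULONG)i * pp->ucSizeOfPowerModeEntry);
+}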
+
+/**************************************************************************/
+
+/*  Following definitions are for compatibility issues between different SW components. */
+#define ATOM_MASTER_DATA_TABLE_REVISION   0x01
+#define Object_Info                                                                                            Object_Header
+#define        AdjustARB_SEQ                                                                                   MC_InitParameter
+#define        VRAM_GPIO_DetectionInfo                                         VoltageObjectInfo
+#define        ASIC_VDDCI_Info                   ASIC_ProfilingInfo
+#define ASIC_MVDDQ_Info                                                                                MemoryTrainingInfo
+#define SS_Info                           PPLL_SS_Info
+#define ASIC_MVDDC_Info                   ASIC_InternalSS_Info
+#define DispDevicePriorityInfo                                         SaveRestoreInfo
+#define DispOutInfo                                                                                            TV_VideoMode
+
+#define ATOM_ENCODER_OBJECT_TABLE         ATOM_OBJECT_TABLE
+#define ATOM_CONNECTOR_OBJECT_TABLE       ATOM_OBJECT_TABLE
+
+/* New device naming; remove these when both DAL and VBIOS are ready */
+#define DFP2I_OUTPUT_CONTROL_PARAMETERS    CRT1_OUTPUT_CONTROL_PARAMETERS
+#define DFP2I_OUTPUT_CONTROL_PS_ALLOCATION DFP2I_OUTPUT_CONTROL_PARAMETERS
+
+#define DFP1X_OUTPUT_CONTROL_PARAMETERS    CRT1_OUTPUT_CONTROL_PARAMETERS
+#define DFP1X_OUTPUT_CONTROL_PS_ALLOCATION DFP1X_OUTPUT_CONTROL_PARAMETERS
+
+#define DFP1I_OUTPUT_CONTROL_PARAMETERS    DFP1_OUTPUT_CONTROL_PARAMETERS
+#define DFP1I_OUTPUT_CONTROL_PS_ALLOCATION DFP1_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define ATOM_DEVICE_DFP1I_SUPPORT          ATOM_DEVICE_DFP1_SUPPORT
+#define ATOM_DEVICE_DFP1X_SUPPORT          ATOM_DEVICE_DFP2_SUPPORT
+
+#define ATOM_DEVICE_DFP1I_INDEX            ATOM_DEVICE_DFP1_INDEX
+#define ATOM_DEVICE_DFP1X_INDEX            ATOM_DEVICE_DFP2_INDEX
+
+#define ATOM_DEVICE_DFP2I_INDEX            0x00000009
+#define ATOM_DEVICE_DFP2I_SUPPORT          (0x1L << ATOM_DEVICE_DFP2I_INDEX)
+
+#define ATOM_S0_DFP1I                      ATOM_S0_DFP1
+#define ATOM_S0_DFP1X                      ATOM_S0_DFP2
+
+#define ATOM_S0_DFP2I                      0x00200000L
+#define ATOM_S0_DFP2Ib2                    0x20
+
+#define ATOM_S2_DFP1I_DPMS_STATE           ATOM_S2_DFP1_DPMS_STATE
+#define ATOM_S2_DFP1X_DPMS_STATE           ATOM_S2_DFP2_DPMS_STATE
+
+#define ATOM_S2_DFP2I_DPMS_STATE           0x02000000L
+#define ATOM_S2_DFP2I_DPMS_STATEb3         0x02
+
+#define ATOM_S3_DFP2I_ACTIVEb1             0x02
+
+#define ATOM_S3_DFP1I_ACTIVE               ATOM_S3_DFP1_ACTIVE
+#define ATOM_S3_DFP1X_ACTIVE               ATOM_S3_DFP2_ACTIVE
+
+#define ATOM_S3_DFP2I_ACTIVE               0x00000200L
+
+#define ATOM_S3_DFP1I_CRTC_ACTIVE          ATOM_S3_DFP1_CRTC_ACTIVE
+#define ATOM_S3_DFP1X_CRTC_ACTIVE          ATOM_S3_DFP2_CRTC_ACTIVE
+#define ATOM_S3_DFP2I_CRTC_ACTIVE          0x02000000L
+
+#define ATOM_S3_DFP2I_CRTC_ACTIVEb3        0x02
+#define ATOM_S5_DOS_REQ_DFP2Ib1            0x02
+
+#define ATOM_S5_DOS_REQ_DFP2I              0x0200
+#define ATOM_S6_ACC_REQ_DFP1I              ATOM_S6_ACC_REQ_DFP1
+#define ATOM_S6_ACC_REQ_DFP1X              ATOM_S6_ACC_REQ_DFP2
+
+#define ATOM_S6_ACC_REQ_DFP2Ib3            0x02
+#define ATOM_S6_ACC_REQ_DFP2I              0x02000000L
+
+#define TMDS1XEncoderControl               DVOEncoderControl
+#define DFP1XOutputControl                 DVOOutputControl
+
+#define ExternalDFPOutputControl           DFP1XOutputControl
+#define EnableExternalTMDS_Encoder         TMDS1XEncoderControl
+
+#define DFP1IOutputControl                 TMDSAOutputControl
+#define DFP2IOutputControl                 LVTMAOutputControl
+
+#define DAC1_ENCODER_CONTROL_PARAMETERS    DAC_ENCODER_CONTROL_PARAMETERS
+#define DAC1_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PS_ALLOCATION
+
+#define DAC2_ENCODER_CONTROL_PARAMETERS    DAC_ENCODER_CONTROL_PARAMETERS
+#define DAC2_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PS_ALLOCATION
+
+#define ucDac1Standard  ucDacStandard
+#define ucDac2Standard  ucDacStandard
+
+#define TMDS1EncoderControl TMDSAEncoderControl
+#define TMDS2EncoderControl LVTMAEncoderControl
+
+#define DFP1OutputControl   TMDSAOutputControl
+#define DFP2OutputControl   LVTMAOutputControl
+#define CRT1OutputControl   DAC1OutputControl
+#define CRT2OutputControl   DAC2OutputControl
+
+/* These two lines will be removed for sure in a few days, will follow up with Michael V. */
+#define EnableLVDS_SS   EnableSpreadSpectrumOnPPLL
+#define ENABLE_LVDS_SS_PARAMETERS_V3  ENABLE_SPREAD_SPECTRUM_ON_PPLL
+
+/*********************************************************************************/
+
+#pragma pack()                 /*  BIOS data must use byte alignment */
+
+#endif /* _ATOMBIOS_H */
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
new file mode 100644 (file)
index 0000000..c0080cc
--- /dev/null
@@ -0,0 +1,695 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ */
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/radeon_drm.h>
+#include "radeon_fixed.h"
+#include "radeon.h"
+#include "atom.h"
+#include "atom-bits.h"
+
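+/* Lock (lock = 1) or unlock (lock = 0) the CRTC's double-buffered registers
+ * via the UpdateCRTC_DoubleBufferRegisters table; the prepare/commit hooks
+ * below lock around mode setting and unlock once the mode is committed. */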
+static void atombios_lock_crtc(struct drm_crtc *crtc, int lock)
+{
+       struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       int index =
+           GetIndexIntoMasterTable(COMMAND, UpdateCRTC_DoubleBufferRegisters);
+       ENABLE_CRTC_PS_ALLOCATION args;
+
+       memset(&args, 0, sizeof(args));
+
+       args.ucCRTC = radeon_crtc->crtc_id;
+       args.ucEnable = lock;
+
+       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static void atombios_enable_crtc(struct drm_crtc *crtc, int state)
+{
+       struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       int index = GetIndexIntoMasterTable(COMMAND, EnableCRTC);
+       ENABLE_CRTC_PS_ALLOCATION args;
+
+       memset(&args, 0, sizeof(args));
+
+       args.ucCRTC = radeon_crtc->crtc_id;
+       args.ucEnable = state;
+
+       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static void atombios_enable_crtc_memreq(struct drm_crtc *crtc, int state)
+{
+       struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       int index = GetIndexIntoMasterTable(COMMAND, EnableCRTCMemReq);
+       ENABLE_CRTC_PS_ALLOCATION args;
+
+       memset(&args, 0, sizeof(args));
+
+       args.ucCRTC = radeon_crtc->crtc_id;
+       args.ucEnable = state;
+
+       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static void atombios_blank_crtc(struct drm_crtc *crtc, int state)
+{
+       struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       int index = GetIndexIntoMasterTable(COMMAND, BlankCRTC);
+       BLANK_CRTC_PS_ALLOCATION args;
+
+       memset(&args, 0, sizeof(args));
+
+       args.ucCRTC = radeon_crtc->crtc_id;
+       args.ucBlanking = state;
+
+       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+       struct drm_device *dev = crtc->dev;
+       struct radeon_device *rdev = dev->dev_private;
+
+       switch (mode) {
+       case DRM_MODE_DPMS_ON:
+               if (ASIC_IS_DCE3(rdev))
+                       atombios_enable_crtc_memreq(crtc, 1);
+               atombios_enable_crtc(crtc, 1);
+               atombios_blank_crtc(crtc, 0);
+               break;
+       case DRM_MODE_DPMS_STANDBY:
+       case DRM_MODE_DPMS_SUSPEND:
+       case DRM_MODE_DPMS_OFF:
+               atombios_blank_crtc(crtc, 1);
+               atombios_enable_crtc(crtc, 0);
+               if (ASIC_IS_DCE3(rdev))
+                       atombios_enable_crtc_memreq(crtc, 0);
+               break;
+       }
+
+       if (mode != DRM_MODE_DPMS_OFF) {
+               radeon_crtc_load_lut(crtc);
+       }
+}
+
+static void
+atombios_set_crtc_dtd_timing(struct drm_crtc *crtc,
+                            SET_CRTC_USING_DTD_TIMING_PARAMETERS * crtc_param)
+{
+       struct drm_device *dev = crtc->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       SET_CRTC_USING_DTD_TIMING_PARAMETERS conv_param;
+       int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_UsingDTDTiming);
+
+       conv_param.usH_Size = cpu_to_le16(crtc_param->usH_Size);
+       conv_param.usH_Blanking_Time =
+           cpu_to_le16(crtc_param->usH_Blanking_Time);
+       conv_param.usV_Size = cpu_to_le16(crtc_param->usV_Size);
+       conv_param.usV_Blanking_Time =
+           cpu_to_le16(crtc_param->usV_Blanking_Time);
+       conv_param.usH_SyncOffset = cpu_to_le16(crtc_param->usH_SyncOffset);
+       conv_param.usH_SyncWidth = cpu_to_le16(crtc_param->usH_SyncWidth);
+       conv_param.usV_SyncOffset = cpu_to_le16(crtc_param->usV_SyncOffset);
+       conv_param.usV_SyncWidth = cpu_to_le16(crtc_param->usV_SyncWidth);
+       conv_param.susModeMiscInfo.usAccess =
+           cpu_to_le16(crtc_param->susModeMiscInfo.usAccess);
+       conv_param.ucCRTC = crtc_param->ucCRTC;
+
+       printk("executing set crtc dtd timing\n");
+       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&conv_param);
+}
+
+void atombios_crtc_set_timing(struct drm_crtc *crtc,
+                             SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION *
+                             crtc_param)
+{
+       struct drm_device *dev = crtc->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION conv_param;
+       int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_Timing);
+
+       conv_param.usH_Total = cpu_to_le16(crtc_param->usH_Total);
+       conv_param.usH_Disp = cpu_to_le16(crtc_param->usH_Disp);
+       conv_param.usH_SyncStart = cpu_to_le16(crtc_param->usH_SyncStart);
+       conv_param.usH_SyncWidth = cpu_to_le16(crtc_param->usH_SyncWidth);
+       conv_param.usV_Total = cpu_to_le16(crtc_param->usV_Total);
+       conv_param.usV_Disp = cpu_to_le16(crtc_param->usV_Disp);
+       conv_param.usV_SyncStart = cpu_to_le16(crtc_param->usV_SyncStart);
+       conv_param.usV_SyncWidth = cpu_to_le16(crtc_param->usV_SyncWidth);
+       conv_param.susModeMiscInfo.usAccess =
+           cpu_to_le16(crtc_param->susModeMiscInfo.usAccess);
+       conv_param.ucCRTC = crtc_param->ucCRTC;
+       conv_param.ucOverscanRight = crtc_param->ucOverscanRight;
+       conv_param.ucOverscanLeft = crtc_param->ucOverscanLeft;
+       conv_param.ucOverscanBottom = crtc_param->ucOverscanBottom;
+       conv_param.ucOverscanTop = crtc_param->ucOverscanTop;
+       conv_param.ucReserved = crtc_param->ucReserved;
+
+       printk("executing set crtc timing\n");
+       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&conv_param);
+}
+
+void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
+{
+       struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       struct drm_encoder *encoder = NULL;
+       struct radeon_encoder *radeon_encoder = NULL;
+       uint8_t frev, crev;
+       int index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
+       SET_PIXEL_CLOCK_PS_ALLOCATION args;
+       PIXEL_CLOCK_PARAMETERS *spc1_ptr;
+       PIXEL_CLOCK_PARAMETERS_V2 *spc2_ptr;
+       PIXEL_CLOCK_PARAMETERS_V3 *spc3_ptr;
+       uint32_t sclock = mode->clock;
+       uint32_t ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
+       struct radeon_pll *pll;
+       int pll_flags = 0;
+
+       memset(&args, 0, sizeof(args));
+
+       if (ASIC_IS_AVIVO(rdev)) {
+               uint32_t ss_cntl;
+
+               if (ASIC_IS_DCE32(rdev) && mode->clock > 200000)        /* range limits??? */
+                       pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
+               else
+                       pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
+
+               /* disable spread spectrum clocking for now -- thanks Hedy Lamarr */
+               if (radeon_crtc->crtc_id == 0) {
+                       ss_cntl = RREG32(AVIVO_P1PLL_INT_SS_CNTL);
+                       WREG32(AVIVO_P1PLL_INT_SS_CNTL, ss_cntl & ~1);
+               } else {
+                       ss_cntl = RREG32(AVIVO_P2PLL_INT_SS_CNTL);
+                       WREG32(AVIVO_P2PLL_INT_SS_CNTL, ss_cntl & ~1);
+               }
+       } else {
+               pll_flags |= RADEON_PLL_LEGACY;
+
+               if (mode->clock > 200000)       /* range limits??? */
+                       pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
+               else
+                       pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
+
+       }
+
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+               if (encoder->crtc == crtc) {
+                       if (!ASIC_IS_AVIVO(rdev)) {
+                               if (encoder->encoder_type !=
+                                   DRM_MODE_ENCODER_DAC)
+                                       pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
+                               if (!ASIC_IS_AVIVO(rdev)
+                                   && (encoder->encoder_type ==
+                                       DRM_MODE_ENCODER_LVDS))
+                                       pll_flags |= RADEON_PLL_USE_REF_DIV;
+                       }
+                       radeon_encoder = to_radeon_encoder(encoder);
+               }
+       }
+
+       if (radeon_crtc->crtc_id == 0)
+               pll = &rdev->clock.p1pll;
+       else
+               pll = &rdev->clock.p2pll;
+
+       radeon_compute_pll(pll, mode->clock, &sclock, &fb_div, &frac_fb_div,
+                          &ref_div, &post_div, pll_flags);
+
+       atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
+                             &crev);
+
+       switch (frev) {
+       case 1:
+               switch (crev) {
+               case 1:
+                       spc1_ptr = (PIXEL_CLOCK_PARAMETERS *) & args.sPCLKInput;
+                       spc1_ptr->usPixelClock = cpu_to_le16(sclock);
+                       spc1_ptr->usRefDiv = cpu_to_le16(ref_div);
+                       spc1_ptr->usFbDiv = cpu_to_le16(fb_div);
+                       spc1_ptr->ucFracFbDiv = frac_fb_div;
+                       spc1_ptr->ucPostDiv = post_div;
+                       spc1_ptr->ucPpll =
+                           radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
+                       spc1_ptr->ucCRTC = radeon_crtc->crtc_id;
+                       spc1_ptr->ucRefDivSrc = 1;
+                       break;
+               case 2:
+                       spc2_ptr =
+                           (PIXEL_CLOCK_PARAMETERS_V2 *) & args.sPCLKInput;
+                       spc2_ptr->usPixelClock = cpu_to_le16(sclock);
+                       spc2_ptr->usRefDiv = cpu_to_le16(ref_div);
+                       spc2_ptr->usFbDiv = cpu_to_le16(fb_div);
+                       spc2_ptr->ucFracFbDiv = frac_fb_div;
+                       spc2_ptr->ucPostDiv = post_div;
+                       spc2_ptr->ucPpll =
+                           radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
+                       spc2_ptr->ucCRTC = radeon_crtc->crtc_id;
+                       spc2_ptr->ucRefDivSrc = 1;
+                       break;
+               case 3:
+                       if (!encoder)
+                               return;
+                       spc3_ptr =
+                           (PIXEL_CLOCK_PARAMETERS_V3 *) & args.sPCLKInput;
+                       spc3_ptr->usPixelClock = cpu_to_le16(sclock);
+                       spc3_ptr->usRefDiv = cpu_to_le16(ref_div);
+                       spc3_ptr->usFbDiv = cpu_to_le16(fb_div);
+                       spc3_ptr->ucFracFbDiv = frac_fb_div;
+                       spc3_ptr->ucPostDiv = post_div;
+                       spc3_ptr->ucPpll =
+                           radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
+                       spc3_ptr->ucMiscInfo = (radeon_crtc->crtc_id << 2);
+                       spc3_ptr->ucTransmitterId = radeon_encoder->encoder_id;
+                       spc3_ptr->ucEncoderMode =
+                           atombios_get_encoder_mode(encoder);
+                       break;
+               default:
+                       DRM_ERROR("Unknown table version %d %d\n", frev, crev);
+                       return;
+               }
+               break;
+       default:
+               DRM_ERROR("Unknown table version %d %d\n", frev, crev);
+               return;
+       }
+
+       printk("executing set pll\n");
+       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+                          struct drm_framebuffer *old_fb)
+{
+       struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_framebuffer *radeon_fb;
+       struct drm_gem_object *obj;
+       struct drm_radeon_gem_object *obj_priv;
+       uint64_t fb_location;
+       uint32_t fb_format, fb_pitch_pixels;
+
+       if (!crtc->fb)
+               return -EINVAL;
+
+       radeon_fb = to_radeon_framebuffer(crtc->fb);
+
+       obj = radeon_fb->obj;
+       obj_priv = obj->driver_private;
+
+       if (radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &fb_location)) {
+               return -EINVAL;
+       }
+
+       switch (crtc->fb->bits_per_pixel) {
+       case 15:
+               fb_format =
+                   AVIVO_D1GRPH_CONTROL_DEPTH_16BPP |
+                   AVIVO_D1GRPH_CONTROL_16BPP_ARGB1555;
+               break;
+       case 16:
+               fb_format =
+                   AVIVO_D1GRPH_CONTROL_DEPTH_16BPP |
+                   AVIVO_D1GRPH_CONTROL_16BPP_RGB565;
+               break;
+       case 24:
+       case 32:
+               fb_format =
+                   AVIVO_D1GRPH_CONTROL_DEPTH_32BPP |
+                   AVIVO_D1GRPH_CONTROL_32BPP_ARGB8888;
+               break;
+       default:
+               DRM_ERROR("Unsupported screen depth %d\n",
+                         crtc->fb->bits_per_pixel);
+               return -EINVAL;
+       }
+
+       /* TODO tiling */
+       if (radeon_crtc->crtc_id == 0)
+               WREG32(AVIVO_D1VGA_CONTROL, 0);
+       else
+               WREG32(AVIVO_D2VGA_CONTROL, 0);
+       WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+              (u32) fb_location);
+       WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS +
+              radeon_crtc->crtc_offset, (u32) fb_location);
+       WREG32(AVIVO_D1GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format);
+
+       WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0);
+       WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
+       WREG32(AVIVO_D1GRPH_X_START + radeon_crtc->crtc_offset, 0);
+       WREG32(AVIVO_D1GRPH_Y_START + radeon_crtc->crtc_offset, 0);
+       WREG32(AVIVO_D1GRPH_X_END + radeon_crtc->crtc_offset, crtc->fb->width);
+       WREG32(AVIVO_D1GRPH_Y_END + radeon_crtc->crtc_offset, crtc->fb->height);
+
+       fb_pitch_pixels = crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8);
+       WREG32(AVIVO_D1GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
+       WREG32(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
+
+       WREG32(AVIVO_D1MODE_DESKTOP_HEIGHT + radeon_crtc->crtc_offset,
+              crtc->mode.vdisplay);
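+       /* align the viewport start: x to a multiple of 4, y to an even line */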
+       x &= ~3;
+       y &= ~1;
+       WREG32(AVIVO_D1MODE_VIEWPORT_START + radeon_crtc->crtc_offset,
+              (x << 16) | y);
+       WREG32(AVIVO_D1MODE_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
+              (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay);
+
+       if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
+               WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
+                      AVIVO_D1MODE_INTERLEAVE_EN);
+       else
+               WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
+
+       if (old_fb && old_fb != crtc->fb) {
+               radeon_fb = to_radeon_framebuffer(old_fb);
+               radeon_gem_object_unpin(radeon_fb->obj);
+       }
+       return 0;
+}
+
+int atombios_crtc_mode_set(struct drm_crtc *crtc,
+                          struct drm_display_mode *mode,
+                          struct drm_display_mode *adjusted_mode,
+                          int x, int y, struct drm_framebuffer *old_fb)
+{
+       struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       struct drm_encoder *encoder;
+       SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION crtc_timing;
+
+       /* TODO color tiling */
+       memset(&crtc_timing, 0, sizeof(crtc_timing));
+
+       /* TODO tv */
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+
+       }
+
+       crtc_timing.ucCRTC = radeon_crtc->crtc_id;
+       crtc_timing.usH_Total = adjusted_mode->crtc_htotal;
+       crtc_timing.usH_Disp = adjusted_mode->crtc_hdisplay;
+       crtc_timing.usH_SyncStart = adjusted_mode->crtc_hsync_start;
+       crtc_timing.usH_SyncWidth =
+           adjusted_mode->crtc_hsync_end - adjusted_mode->crtc_hsync_start;
+
+       crtc_timing.usV_Total = adjusted_mode->crtc_vtotal;
+       crtc_timing.usV_Disp = adjusted_mode->crtc_vdisplay;
+       crtc_timing.usV_SyncStart = adjusted_mode->crtc_vsync_start;
+       crtc_timing.usV_SyncWidth =
+           adjusted_mode->crtc_vsync_end - adjusted_mode->crtc_vsync_start;
+
+       if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
+               crtc_timing.susModeMiscInfo.usAccess |= ATOM_VSYNC_POLARITY;
+
+       if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
+               crtc_timing.susModeMiscInfo.usAccess |= ATOM_HSYNC_POLARITY;
+
+       if (adjusted_mode->flags & DRM_MODE_FLAG_CSYNC)
+               crtc_timing.susModeMiscInfo.usAccess |= ATOM_COMPOSITESYNC;
+
+       if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+               crtc_timing.susModeMiscInfo.usAccess |= ATOM_INTERLACE;
+
+       if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               crtc_timing.susModeMiscInfo.usAccess |= ATOM_DOUBLE_CLOCK_MODE;
+
+       atombios_crtc_set_pll(crtc, adjusted_mode);
+       atombios_crtc_set_timing(crtc, &crtc_timing);
+
+       if (ASIC_IS_AVIVO(rdev))
+               atombios_crtc_set_base(crtc, x, y, old_fb);
+       else {
+               if (radeon_crtc->crtc_id == 0) {
+                       SET_CRTC_USING_DTD_TIMING_PARAMETERS crtc_dtd_timing;
+                       memset(&crtc_dtd_timing, 0, sizeof(crtc_dtd_timing));
+
+                       /* setup FP shadow regs on R4xx */
+                       crtc_dtd_timing.ucCRTC = radeon_crtc->crtc_id;
+                       crtc_dtd_timing.usH_Size = adjusted_mode->crtc_hdisplay;
+                       crtc_dtd_timing.usV_Size = adjusted_mode->crtc_vdisplay;
+                       crtc_dtd_timing.usH_Blanking_Time =
+                           adjusted_mode->crtc_hblank_end -
+                           adjusted_mode->crtc_hdisplay;
+                       crtc_dtd_timing.usV_Blanking_Time =
+                           adjusted_mode->crtc_vblank_end -
+                           adjusted_mode->crtc_vdisplay;
+                       crtc_dtd_timing.usH_SyncOffset =
+                           adjusted_mode->crtc_hsync_start -
+                           adjusted_mode->crtc_hdisplay;
+                       crtc_dtd_timing.usV_SyncOffset =
+                           adjusted_mode->crtc_vsync_start -
+                           adjusted_mode->crtc_vdisplay;
+                       crtc_dtd_timing.usH_SyncWidth =
+                           adjusted_mode->crtc_hsync_end -
+                           adjusted_mode->crtc_hsync_start;
+                       crtc_dtd_timing.usV_SyncWidth =
+                           adjusted_mode->crtc_vsync_end -
+                           adjusted_mode->crtc_vsync_start;
+                       /* crtc_dtd_timing.ucH_Border = adjusted_mode->crtc_hborder; */
+                       /* crtc_dtd_timing.ucV_Border = adjusted_mode->crtc_vborder; */
+
+                       if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
+                               crtc_dtd_timing.susModeMiscInfo.usAccess |=
+                                   ATOM_VSYNC_POLARITY;
+
+                       if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
+                               crtc_dtd_timing.susModeMiscInfo.usAccess |=
+                                   ATOM_HSYNC_POLARITY;
+
+                       if (adjusted_mode->flags & DRM_MODE_FLAG_CSYNC)
+                               crtc_dtd_timing.susModeMiscInfo.usAccess |=
+                                   ATOM_COMPOSITESYNC;
+
+                       if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+                               crtc_dtd_timing.susModeMiscInfo.usAccess |=
+                                   ATOM_INTERLACE;
+
+                       if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+                               crtc_dtd_timing.susModeMiscInfo.usAccess |=
+                                   ATOM_DOUBLE_CLOCK_MODE;
+
+                       atombios_set_crtc_dtd_timing(crtc, &crtc_dtd_timing);
+               }
+               radeon_crtc_set_base(crtc, x, y, old_fb);
+               radeon_legacy_atom_set_surface(crtc);
+       }
+       return 0;
+}
+
+static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
+                                    struct drm_display_mode *mode,
+                                    struct drm_display_mode *adjusted_mode)
+{
+       return true;
+}
+
+static void atombios_crtc_prepare(struct drm_crtc *crtc)
+{
+       atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+       atombios_lock_crtc(crtc, 1);
+}
+
+static void atombios_crtc_commit(struct drm_crtc *crtc)
+{
+       atombios_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+       atombios_lock_crtc(crtc, 0);
+}
+
+static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
+       .dpms = atombios_crtc_dpms,
+       .mode_fixup = atombios_crtc_mode_fixup,
+       .mode_set = atombios_crtc_mode_set,
+       .mode_set_base = atombios_crtc_set_base,
+       .prepare = atombios_crtc_prepare,
+       .commit = atombios_crtc_commit,
+};
+
+void radeon_atombios_init_crtc(struct drm_device *dev,
+                              struct radeon_crtc *radeon_crtc)
+{
+       if (radeon_crtc->crtc_id == 1)
+               radeon_crtc->crtc_offset =
+                   AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL;
+       drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs);
+}
+
+void radeon_init_disp_bw_avivo(struct drm_device *dev,
+                              struct drm_display_mode *mode1,
+                              uint32_t pixel_bytes1,
+                              struct drm_display_mode *mode2,
+                              uint32_t pixel_bytes2)
+{
+       struct radeon_device *rdev = dev->dev_private;
+       fixed20_12 min_mem_eff;
+       fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff;
+       fixed20_12 sclk_ff, mclk_ff;
+       uint32_t dc_lb_memory_split, temp;
+
+       min_mem_eff.full = rfixed_const_8(0);
+       if (rdev->disp_priority == 2) {
+               uint32_t mc_init_misc_lat_timer = 0;
+               if (rdev->family == CHIP_RV515)
+                       mc_init_misc_lat_timer =
+                           RREG32_MC(RV515_MC_INIT_MISC_LAT_TIMER);
+               else if (rdev->family == CHIP_RS690)
+                       mc_init_misc_lat_timer =
+                           RREG32_MC(RS690_MC_INIT_MISC_LAT_TIMER);
+
+               mc_init_misc_lat_timer &=
+                   ~(R300_MC_DISP1R_INIT_LAT_MASK <<
+                     R300_MC_DISP1R_INIT_LAT_SHIFT);
+               mc_init_misc_lat_timer &=
+                   ~(R300_MC_DISP0R_INIT_LAT_MASK <<
+                     R300_MC_DISP0R_INIT_LAT_SHIFT);
+
+               if (mode2)
+                       mc_init_misc_lat_timer |=
+                           (1 << R300_MC_DISP1R_INIT_LAT_SHIFT);
+               if (mode1)
+                       mc_init_misc_lat_timer |=
+                           (1 << R300_MC_DISP0R_INIT_LAT_SHIFT);
+
+               if (rdev->family == CHIP_RV515)
+                       WREG32_MC(RV515_MC_INIT_MISC_LAT_TIMER,
+                                 mc_init_misc_lat_timer);
+               else if (rdev->family == CHIP_RS690)
+                       WREG32_MC(RS690_MC_INIT_MISC_LAT_TIMER,
+                                 mc_init_misc_lat_timer);
+       }
+
+       /*
+        * determine if there is enough bw for the current mode
+        */
+       temp_ff.full = rfixed_const(100);
+       mclk_ff.full = rfixed_const(rdev->clock.default_mclk);
+       mclk_ff.full = rfixed_div(mclk_ff, temp_ff);
+       sclk_ff.full = rfixed_const(rdev->clock.default_sclk);
+       sclk_ff.full = rfixed_div(sclk_ff, temp_ff);
+
+       temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
+       temp_ff.full = rfixed_const(temp);
+       mem_bw.full = rfixed_mul(mclk_ff, temp_ff);
+       mem_bw.full = rfixed_mul(mem_bw, min_mem_eff);
+
+       pix_clk.full = 0;
+       pix_clk2.full = 0;
+       peak_disp_bw.full = 0;
+       if (mode1) {
+               temp_ff.full = rfixed_const(1000);
+               pix_clk.full = rfixed_const(mode1->clock);      /* convert to fixed point */
+               pix_clk.full = rfixed_div(pix_clk, temp_ff);
+               temp_ff.full = rfixed_const(pixel_bytes1);
+               peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff);
+       }
+       if (mode2) {
+               temp_ff.full = rfixed_const(1000);
+               pix_clk2.full = rfixed_const(mode2->clock);     /* convert to fixed point */
+               pix_clk2.full = rfixed_div(pix_clk2, temp_ff);
+               temp_ff.full = rfixed_const(pixel_bytes2);
+               peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff);
+       }
+
+       if (peak_disp_bw.full >= mem_bw.full) {
+               DRM_ERROR
+                   ("You may not have enough display bandwidth for the current mode\n"
+                    "If you have a flickering problem, try lowering the resolution, refresh rate, or color depth\n");
+               printk("peak disp bw %d, mem_bw %d\n",
+                      rfixed_trunc(peak_disp_bw), rfixed_trunc(mem_bw));
+       }
+
+       /*
+        * Line Buffer Setup
+        * There is a single line buffer shared by both display controllers.
+        * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between the display
+        * controllers.  The partitioning can either be done manually or via one of four
+        * preset allocations specified in bits 1:0:
+        * 0 - line buffer is divided in half and shared between each display controller
+        * 1 - D1 gets 3/4 of the line buffer, D2 gets 1/4
+        * 2 - D1 gets the whole buffer
+        * 3 - D1 gets 1/4 of the line buffer, D2 gets 3/4
+        * Setting bit 2 of DC_LB_MEMORY_SPLIT switches to manual allocation mode.
+        * In manual allocation mode, D1 always starts at 0, D1 end/2 is specified in bits
+        * 14:4; D2 allocation follows D1.
+        */
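+       /*
+        * Rough worked example (hypothetical numbers): for a single 1920-wide
+        * mode1 handled in manual mode, the D1 end address field would be
+        * programmed with (1920 / 2) + 64 = 1024 in bits 14:4, as the #else
+        * branch below computes it; the preset auto modes in bits 1:0 avoid
+        * that arithmetic entirely.
+        */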
+
+       /* is auto or manual better ? */
+       dc_lb_memory_split =
+           RREG32(AVIVO_DC_LB_MEMORY_SPLIT) & ~AVIVO_DC_LB_MEMORY_SPLIT_MASK;
+       dc_lb_memory_split &= ~AVIVO_DC_LB_MEMORY_SPLIT_SHIFT_MODE;
+#if 1
+       /* auto */
+       if (mode1 && mode2) {
+               if (mode1->hdisplay > mode2->hdisplay) {
+                       if (mode1->hdisplay > 2560)
+                               dc_lb_memory_split |=
+                                   AVIVO_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q;
+                       else
+                               dc_lb_memory_split |=
+                                   AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
+               } else if (mode2->hdisplay > mode1->hdisplay) {
+                       if (mode2->hdisplay > 2560)
+                               dc_lb_memory_split |=
+                                   AVIVO_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
+                       else
+                               dc_lb_memory_split |=
+                                   AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
+               } else
+                       dc_lb_memory_split |=
+                           AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
+       } else if (mode1) {
+               dc_lb_memory_split |= AVIVO_DC_LB_MEMORY_SPLIT_D1_ONLY;
+       } else if (mode2) {
+               dc_lb_memory_split |= AVIVO_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
+       }
+#else
+       /* manual */
+       dc_lb_memory_split |= AVIVO_DC_LB_MEMORY_SPLIT_SHIFT_MODE;
+       dc_lb_memory_split &=
+           ~(AVIVO_DC_LB_DISP1_END_ADR_MASK <<
+             AVIVO_DC_LB_DISP1_END_ADR_SHIFT);
+       if (mode1) {
+               dc_lb_memory_split |=
+                   ((((mode1->hdisplay / 2) + 64) & AVIVO_DC_LB_DISP1_END_ADR_MASK)
+                    << AVIVO_DC_LB_DISP1_END_ADR_SHIFT);
+       } else if (mode2) {
+               dc_lb_memory_split |= (0 << AVIVO_DC_LB_DISP1_END_ADR_SHIFT);
+       }
+#endif
+       WREG32(AVIVO_DC_LB_MEMORY_SPLIT, dc_lb_memory_split);
+}
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
new file mode 100644 (file)
index 0000000..5225f5b
--- /dev/null
@@ -0,0 +1,1524 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <linux/seq_file.h>
+#include "drmP.h"
+#include "drm.h"
+#include "radeon_drm.h"
+#include "radeon_microcode.h"
+#include "radeon_reg.h"
+#include "radeon.h"
+
+/* This file gathers functions specific to:
+ * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
+ *
+ * Some of these functions might be used by newer ASICs.
+ */
+void r100_hdp_reset(struct radeon_device *rdev);
+void r100_gpu_init(struct radeon_device *rdev);
+int r100_gui_wait_for_idle(struct radeon_device *rdev);
+int r100_mc_wait_for_idle(struct radeon_device *rdev);
+void r100_gpu_wait_for_vsync(struct radeon_device *rdev);
+void r100_gpu_wait_for_vsync2(struct radeon_device *rdev);
+int r100_debugfs_mc_info_init(struct radeon_device *rdev);
+
+
+/*
+ * PCI GART
+ */
+void r100_pci_gart_tlb_flush(struct radeon_device *rdev)
+{
+       /* TODO: can we do something here ? */
+       /* It seems the hw only caches one entry, so we should discard this
+        * entry; otherwise, if the first GPU GART read hits this entry it
+        * could end up at the wrong address. */
+}
+
+int r100_pci_gart_enable(struct radeon_device *rdev)
+{
+       uint32_t tmp;
+       int r;
+
+       /* Initialize common gart structure */
+       r = radeon_gart_init(rdev);
+       if (r) {
+               return r;
+       }
+       if (rdev->gart.table.ram.ptr == NULL) {
+               rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
+               r = radeon_gart_table_ram_alloc(rdev);
+               if (r) {
+                       return r;
+               }
+       }
+       /* discard memory request outside of configured range */
+       tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
+       WREG32(RADEON_AIC_CNTL, tmp);
+       /* set address range for PCI address translate */
+       WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_location);
+       tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
+       WREG32(RADEON_AIC_HI_ADDR, tmp);
+       /* Enable bus mastering */
+       tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
+       WREG32(RADEON_BUS_CNTL, tmp);
+       /* set PCI GART page-table base address */
+       WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr);
+       tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
+       WREG32(RADEON_AIC_CNTL, tmp);
+       r100_pci_gart_tlb_flush(rdev);
+       rdev->gart.ready = true;
+       return 0;
+}
+
+void r100_pci_gart_disable(struct radeon_device *rdev)
+{
+       uint32_t tmp;
+
+       /* discard memory request outside of configured range */
+       tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
+       WREG32(RADEON_AIC_CNTL, tmp & ~RADEON_PCIGART_TRANSLATE_EN);
+       WREG32(RADEON_AIC_LO_ADDR, 0);
+       WREG32(RADEON_AIC_HI_ADDR, 0);
+}
+
+int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
+{
+       if (i < 0 || i > rdev->gart.num_gpu_pages) {
+               return -EINVAL;
+       }
+       rdev->gart.table.ram.ptr[i] = cpu_to_le32((uint32_t)addr);
+       return 0;
+}
+
+int r100_gart_enable(struct radeon_device *rdev)
+{
+       if (rdev->flags & RADEON_IS_AGP) {
+               r100_pci_gart_disable(rdev);
+               return 0;
+       }
+       return r100_pci_gart_enable(rdev);
+}
+
+
+/*
+ * MC
+ */
+void r100_mc_disable_clients(struct radeon_device *rdev)
+{
+       uint32_t ov0_scale_cntl, crtc_ext_cntl, crtc_gen_cntl, crtc2_gen_cntl;
+
+       /* FIXME: is this function correct for rs100,rs200,rs300 ? */
+       if (r100_gui_wait_for_idle(rdev)) {
+               printk(KERN_WARNING "Failed to wait GUI idle while "
+                      "programming pipes. Bad things might happen.\n");
+       }
+
+       /* stop display and memory access */
+       ov0_scale_cntl = RREG32(RADEON_OV0_SCALE_CNTL);
+       WREG32(RADEON_OV0_SCALE_CNTL, ov0_scale_cntl & ~RADEON_SCALER_ENABLE);
+       crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
+       WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl | RADEON_CRTC_DISPLAY_DIS);
+       crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);
+
+       r100_gpu_wait_for_vsync(rdev);
+
+       WREG32(RADEON_CRTC_GEN_CNTL,
+              (crtc_gen_cntl & ~(RADEON_CRTC_CUR_EN | RADEON_CRTC_ICON_EN)) |
+              RADEON_CRTC_DISP_REQ_EN_B | RADEON_CRTC_EXT_DISP_EN);
+
+       if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
+               crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
+
+               r100_gpu_wait_for_vsync2(rdev);
+               WREG32(RADEON_CRTC2_GEN_CNTL,
+                      (crtc2_gen_cntl &
+                       ~(RADEON_CRTC2_CUR_EN | RADEON_CRTC2_ICON_EN)) |
+                      RADEON_CRTC2_DISP_REQ_EN_B);
+       }
+
+       udelay(500);
+}
+
+void r100_mc_setup(struct radeon_device *rdev)
+{
+       uint32_t tmp;
+       int r;
+
+       r = r100_debugfs_mc_info_init(rdev);
+       if (r) {
+               DRM_ERROR("Failed to register debugfs file for R100 MC !\n");
+       }
+       /* Write VRAM size in case we are limiting it */
+       WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
+       tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1;
+       tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16);
+       tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16);
+       WREG32(RADEON_MC_FB_LOCATION, tmp);
+
+       /* Enable bus mastering */
+       tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
+       WREG32(RADEON_BUS_CNTL, tmp);
+
+       if (rdev->flags & RADEON_IS_AGP) {
+               tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
+               tmp = REG_SET(RADEON_MC_AGP_TOP, tmp >> 16);
+               tmp |= REG_SET(RADEON_MC_AGP_START, rdev->mc.gtt_location >> 16);
+               WREG32(RADEON_MC_AGP_LOCATION, tmp);
+               WREG32(RADEON_AGP_BASE, rdev->mc.agp_base);
+       } else {
+               WREG32(RADEON_MC_AGP_LOCATION, 0x0FFFFFFF);
+               WREG32(RADEON_AGP_BASE, 0);
+       }
+
+       tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL;
+       tmp |= (7 << 28);
+       WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE);
+       (void)RREG32(RADEON_HOST_PATH_CNTL);
+       WREG32(RADEON_HOST_PATH_CNTL, tmp);
+       (void)RREG32(RADEON_HOST_PATH_CNTL);
+}
+
+int r100_mc_init(struct radeon_device *rdev)
+{
+       int r;
+
+       if (r100_debugfs_rbbm_init(rdev)) {
+               DRM_ERROR("Failed to register debugfs file for RBBM !\n");
+       }
+
+       r100_gpu_init(rdev);
+       /* Disable gart, which also disables out-of-gart access */
+       r100_pci_gart_disable(rdev);
+
+       /* Setup GPU memory space */
+       rdev->mc.vram_location = 0xFFFFFFFFUL;
+       rdev->mc.gtt_location = 0xFFFFFFFFUL;
+       if (rdev->flags & RADEON_IS_AGP) {
+               r = radeon_agp_init(rdev);
+               if (r) {
+                       printk(KERN_WARNING "[drm] Disabling AGP\n");
+                       rdev->flags &= ~RADEON_IS_AGP;
+                       rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
+               } else {
+                       rdev->mc.gtt_location = rdev->mc.agp_base;
+               }
+       }
+       r = radeon_mc_setup(rdev);
+       if (r) {
+               return r;
+       }
+
+       r100_mc_disable_clients(rdev);
+       if (r100_mc_wait_for_idle(rdev)) {
+               printk(KERN_WARNING "Failed to wait MC idle while "
+                      "programming pipes. Bad things might happen.\n");
+       }
+
+       r100_mc_setup(rdev);
+       return 0;
+}
+
+void r100_mc_fini(struct radeon_device *rdev)
+{
+       r100_pci_gart_disable(rdev);
+       radeon_gart_table_ram_free(rdev);
+       radeon_gart_fini(rdev);
+}
+
+
+/*
+ * Fence emission
+ */
+void r100_fence_ring_emit(struct radeon_device *rdev,
+                         struct radeon_fence *fence)
+{
+       /* Whoever calls radeon_fence_emit should call ring_lock and ask
+        * for enough space (today the callers are ib schedule and buffer move) */
+       /* Wait until IDLE & CLEAN */
+       radeon_ring_write(rdev, PACKET0(0x1720, 0));
+       radeon_ring_write(rdev, (1 << 16) | (1 << 17));
+       /* Emit fence sequence & fire IRQ */
+       radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
+       radeon_ring_write(rdev, fence->seq);
+       radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
+       radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
+}
+
+
+/*
+ * Writeback
+ */
+int r100_wb_init(struct radeon_device *rdev)
+{
+       int r;
+
+       if (rdev->wb.wb_obj == NULL) {
+               r = radeon_object_create(rdev, NULL, 4096,
+                                        true,
+                                        RADEON_GEM_DOMAIN_GTT,
+                                        false, &rdev->wb.wb_obj);
+               if (r) {
+                       DRM_ERROR("radeon: failed to create WB buffer (%d).\n", r);
+                       return r;
+               }
+               r = radeon_object_pin(rdev->wb.wb_obj,
+                                     RADEON_GEM_DOMAIN_GTT,
+                                     &rdev->wb.gpu_addr);
+               if (r) {
+                       DRM_ERROR("radeon: failed to pin WB buffer (%d).\n", r);
+                       return r;
+               }
+               r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
+               if (r) {
+                       DRM_ERROR("radeon: failed to map WB buffer (%d).\n", r);
+                       return r;
+               }
+       }
+       WREG32(0x774, rdev->wb.gpu_addr);
+       WREG32(0x70C, rdev->wb.gpu_addr + 1024);
+       WREG32(0x770, 0xff);
+       return 0;
+}
+
+void r100_wb_fini(struct radeon_device *rdev)
+{
+       if (rdev->wb.wb_obj) {
+               radeon_object_kunmap(rdev->wb.wb_obj);
+               radeon_object_unpin(rdev->wb.wb_obj);
+               radeon_object_unref(&rdev->wb.wb_obj);
+               rdev->wb.wb = NULL;
+               rdev->wb.wb_obj = NULL;
+       }
+}
+
+int r100_copy_blit(struct radeon_device *rdev,
+                  uint64_t src_offset,
+                  uint64_t dst_offset,
+                  unsigned num_pages,
+                  struct radeon_fence *fence)
+{
+       uint32_t cur_pages;
+       uint32_t stride_bytes = PAGE_SIZE;
+       uint32_t pitch;
+       uint32_t stride_pixels;
+       unsigned ndw;
+       int num_loops;
+       int r = 0;
+
+       /* radeon limited to 16k stride */
+       stride_bytes &= 0x3fff;
+       /* radeon pitch is /64 */
+       pitch = stride_bytes / 64;
+       stride_pixels = stride_bytes / 4;
+       num_loops = DIV_ROUND_UP(num_pages, 8191);
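+       /* e.g. with 4 KiB pages (illustrative numbers): stride_bytes = 4096,
+        * pitch = 4096 / 64 = 64, stride_pixels = 4096 / 4 = 1024, and a
+        * hypothetical 16000-page move needs DIV_ROUND_UP(16000, 8191) = 2
+        * blit loops. */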
+
+       /* Ask for enough room for blit + flush + fence */
+       ndw = 64 + (10 * num_loops);
+       r = radeon_ring_lock(rdev, ndw);
+       if (r) {
+               DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
+               return -EINVAL;
+       }
+       while (num_pages > 0) {
+               cur_pages = num_pages;
+               if (cur_pages > 8191) {
+                       cur_pages = 8191;
+               }
+               num_pages -= cur_pages;
+
+               /* pages are in the Y direction (height);
+                  page width is in the X direction (width) */
+               radeon_ring_write(rdev, PACKET3(PACKET3_BITBLT_MULTI, 8));
+               radeon_ring_write(rdev,
+                                 RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
+                                 RADEON_GMC_DST_PITCH_OFFSET_CNTL |
+                                 RADEON_GMC_SRC_CLIPPING |
+                                 RADEON_GMC_DST_CLIPPING |
+                                 RADEON_GMC_BRUSH_NONE |
+                                 (RADEON_COLOR_FORMAT_ARGB8888 << 8) |
+                                 RADEON_GMC_SRC_DATATYPE_COLOR |
+                                 RADEON_ROP3_S |
+                                 RADEON_DP_SRC_SOURCE_MEMORY |
+                                 RADEON_GMC_CLR_CMP_CNTL_DIS |
+                                 RADEON_GMC_WR_MSK_DIS);
+               radeon_ring_write(rdev, (pitch << 22) | (src_offset >> 10));
+               radeon_ring_write(rdev, (pitch << 22) | (dst_offset >> 10));
+               radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
+               radeon_ring_write(rdev, 0);
+               radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
+               radeon_ring_write(rdev, num_pages);
+               radeon_ring_write(rdev, num_pages);
+               radeon_ring_write(rdev, cur_pages | (stride_pixels << 16));
+       }
+       radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
+       radeon_ring_write(rdev, RADEON_RB2D_DC_FLUSH_ALL);
+       radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
+       radeon_ring_write(rdev,
+                         RADEON_WAIT_2D_IDLECLEAN |
+                         RADEON_WAIT_HOST_IDLECLEAN |
+                         RADEON_WAIT_DMA_GUI_IDLE);
+       if (fence) {
+               r = radeon_fence_emit(rdev, fence);
+       }
+       radeon_ring_unlock_commit(rdev);
+       return r;
+}
+
+
+/*
+ * CP
+ */
+void r100_ring_start(struct radeon_device *rdev)
+{
+       int r;
+
+       r = radeon_ring_lock(rdev, 2);
+       if (r) {
+               return;
+       }
+       radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
+       radeon_ring_write(rdev,
+                         RADEON_ISYNC_ANY2D_IDLE3D |
+                         RADEON_ISYNC_ANY3D_IDLE2D |
+                         RADEON_ISYNC_WAIT_IDLEGUI |
+                         RADEON_ISYNC_CPSCRATCH_IDLEGUI);
+       radeon_ring_unlock_commit(rdev);
+}
+
+static void r100_cp_load_microcode(struct radeon_device *rdev)
+{
+       int i;
+
+       if (r100_gui_wait_for_idle(rdev)) {
+               printk(KERN_WARNING "Failed to wait GUI idle while "
+                      "programming pipes. Bad things might happen.\n");
+       }
+
+       WREG32(RADEON_CP_ME_RAM_ADDR, 0);
+       if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) ||
+           (rdev->family == CHIP_RV200) || (rdev->family == CHIP_RS100) ||
+           (rdev->family == CHIP_RS200)) {
+               DRM_INFO("Loading R100 Microcode\n");
+               for (i = 0; i < 256; i++) {
+                       WREG32(RADEON_CP_ME_RAM_DATAH, R100_cp_microcode[i][1]);
+                       WREG32(RADEON_CP_ME_RAM_DATAL, R100_cp_microcode[i][0]);
+               }
+       } else if ((rdev->family == CHIP_R200) ||
+                  (rdev->family == CHIP_RV250) ||
+                  (rdev->family == CHIP_RV280) ||
+                  (rdev->family == CHIP_RS300)) {
+               DRM_INFO("Loading R200 Microcode\n");
+               for (i = 0; i < 256; i++) {
+                       WREG32(RADEON_CP_ME_RAM_DATAH, R200_cp_microcode[i][1]);
+                       WREG32(RADEON_CP_ME_RAM_DATAL, R200_cp_microcode[i][0]);
+               }
+       } else if ((rdev->family == CHIP_R300) ||
+                  (rdev->family == CHIP_R350) ||
+                  (rdev->family == CHIP_RV350) ||
+                  (rdev->family == CHIP_RV380) ||
+                  (rdev->family == CHIP_RS400) ||
+                  (rdev->family == CHIP_RS480)) {
+               DRM_INFO("Loading R300 Microcode\n");
+               for (i = 0; i < 256; i++) {
+                       WREG32(RADEON_CP_ME_RAM_DATAH, R300_cp_microcode[i][1]);
+                       WREG32(RADEON_CP_ME_RAM_DATAL, R300_cp_microcode[i][0]);
+               }
+       } else if ((rdev->family == CHIP_R420) ||
+                  (rdev->family == CHIP_R423) ||
+                  (rdev->family == CHIP_RV410)) {
+               DRM_INFO("Loading R400 Microcode\n");
+               for (i = 0; i < 256; i++) {
+                       WREG32(RADEON_CP_ME_RAM_DATAH, R420_cp_microcode[i][1]);
+                       WREG32(RADEON_CP_ME_RAM_DATAL, R420_cp_microcode[i][0]);
+               }
+       } else if ((rdev->family == CHIP_RS690) ||
+                  (rdev->family == CHIP_RS740)) {
+               DRM_INFO("Loading RS690/RS740 Microcode\n");
+               for (i = 0; i < 256; i++) {
+                       WREG32(RADEON_CP_ME_RAM_DATAH, RS690_cp_microcode[i][1]);
+                       WREG32(RADEON_CP_ME_RAM_DATAL, RS690_cp_microcode[i][0]);
+               }
+       } else if (rdev->family == CHIP_RS600) {
+               DRM_INFO("Loading RS600 Microcode\n");
+               for (i = 0; i < 256; i++) {
+                       WREG32(RADEON_CP_ME_RAM_DATAH, RS600_cp_microcode[i][1]);
+                       WREG32(RADEON_CP_ME_RAM_DATAL, RS600_cp_microcode[i][0]);
+               }
+       } else if ((rdev->family == CHIP_RV515) ||
+                  (rdev->family == CHIP_R520) ||
+                  (rdev->family == CHIP_RV530) ||
+                  (rdev->family == CHIP_R580) ||
+                  (rdev->family == CHIP_RV560) ||
+                  (rdev->family == CHIP_RV570)) {
+               DRM_INFO("Loading R500 Microcode\n");
+               for (i = 0; i < 256; i++) {
+                       WREG32(RADEON_CP_ME_RAM_DATAH, R520_cp_microcode[i][1]);
+                       WREG32(RADEON_CP_ME_RAM_DATAL, R520_cp_microcode[i][0]);
+               }
+       }
+}
+
+int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
+{
+       unsigned rb_bufsz;
+       unsigned rb_blksz;
+       unsigned max_fetch;
+       unsigned pre_write_timer;
+       unsigned pre_write_limit;
+       unsigned indirect2_start;
+       unsigned indirect1_start;
+       uint32_t tmp;
+       int r;
+
+       if (r100_debugfs_cp_init(rdev)) {
+               DRM_ERROR("Failed to register debugfs file for CP !\n");
+       }
+       /* Reset CP */
+       tmp = RREG32(RADEON_CP_CSQ_STAT);
+       if ((tmp & (1 << 31))) {
+               DRM_INFO("radeon: cp busy (0x%08X) resetting\n", tmp);
+               WREG32(RADEON_CP_CSQ_MODE, 0);
+               WREG32(RADEON_CP_CSQ_CNTL, 0);
+               WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
+               tmp = RREG32(RADEON_RBBM_SOFT_RESET);
+               mdelay(2);
+               WREG32(RADEON_RBBM_SOFT_RESET, 0);
+               tmp = RREG32(RADEON_RBBM_SOFT_RESET);
+               mdelay(2);
+               tmp = RREG32(RADEON_CP_CSQ_STAT);
+               if ((tmp & (1 << 31))) {
+                       DRM_INFO("radeon: cp reset failed (0x%08X)\n", tmp);
+               }
+       } else {
+               DRM_INFO("radeon: cp idle (0x%08X)\n", tmp);
+       }
+       /* Align ring size */
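+       /* e.g. a requested 1 MB ring: assuming drm_order() returns the
+        * ceil-log2 order, rb_bufsz = drm_order(1048576 / 8) = 17 and the
+        * ring stays (1 << 18) * 4 = 1048576 bytes; a non-power-of-two
+        * request is rounded up the same way. */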
+       rb_bufsz = drm_order(ring_size / 8);
+       ring_size = (1 << (rb_bufsz + 1)) * 4;
+       r100_cp_load_microcode(rdev);
+       r = radeon_ring_init(rdev, ring_size);
+       if (r) {
+               return r;
+       }
+       /* Each time the cp reads 1024 bytes (16 dword/quadword), update
+        * the rptr copy in system ram */
+       rb_blksz = 9;
+       /* cp will read 128 bytes at a time (4 dwords) */
+       max_fetch = 1;
+       rdev->cp.align_mask = 16 - 1;
+       /* Writes to CP_RB_WPTR will be delayed for pre_write_timer clocks */
+       pre_write_timer = 64;
+       /* Force a CP_RB_WPTR write if it is written more than once before the
+        * delay expires
+        */
+       pre_write_limit = 0;
+       /* Setup the cp cache like this (cache size is 96 dwords) :
+        *      RING            0  to 15
+        *      INDIRECT1       16 to 79
+        *      INDIRECT2       80 to 95
+        * So the ring cache size is 16 dwords (> (2 * max_fetch = 2 * 4 dwords)),
+        *    the indirect1 cache size is 64 dwords (> (2 * max_fetch = 2 * 4 dwords)),
+        *    the indirect2 cache size is 16 dwords (> (2 * max_fetch = 2 * 4 dwords)).
+        * The idea is that most of the gpu commands will go through the indirect1
+        * buffer, so it gets the bigger cache.
+        */
+       indirect2_start = 80;
+       indirect1_start = 16;
+       /* cp setup */
+       WREG32(0x718, pre_write_timer | (pre_write_limit << 28));
+       WREG32(RADEON_CP_RB_CNTL,
+              REG_SET(RADEON_RB_BUFSZ, rb_bufsz) |
+              REG_SET(RADEON_RB_BLKSZ, rb_blksz) |
+              REG_SET(RADEON_MAX_FETCH, max_fetch) |
+              RADEON_RB_NO_UPDATE);
+       /* Set ring address */
+       DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)rdev->cp.gpu_addr);
+       WREG32(RADEON_CP_RB_BASE, rdev->cp.gpu_addr);
+       /* Force read & write ptr to 0 */
+       tmp = RREG32(RADEON_CP_RB_CNTL);
+       WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
+       WREG32(RADEON_CP_RB_RPTR_WR, 0);
+       WREG32(RADEON_CP_RB_WPTR, 0);
+       WREG32(RADEON_CP_RB_CNTL, tmp);
+       udelay(10);
+       rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
+       rdev->cp.wptr = RREG32(RADEON_CP_RB_WPTR);
+       /* Set cp mode to bus mastering & enable cp */
+       WREG32(RADEON_CP_CSQ_MODE,
+              REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
+              REG_SET(RADEON_INDIRECT1_START, indirect1_start));
+       WREG32(0x718, 0);
+       WREG32(0x744, 0x00004D4D);
+       WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);
+       radeon_ring_start(rdev);
+       r = radeon_ring_test(rdev);
+       if (r) {
+               DRM_ERROR("radeon: cp isn't working (%d).\n", r);
+               return r;
+       }
+       rdev->cp.ready = true;
+       return 0;
+}
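+
+/* A worked example of the ring sizing above (a sketch, assuming drm_order()
+ * rounds up to the next power of two): asking for a 1000000 byte ring gives
+ * rb_bufsz = drm_order(1000000 / 8) = 17, and the ring is realigned to
+ * (1 << (17 + 1)) * 4 = 1048576 bytes, i.e. 8 * 2^rb_bufsz.
+ */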
+
+void r100_cp_fini(struct radeon_device *rdev)
+{
+       /* Disable ring */
+       rdev->cp.ready = false;
+       WREG32(RADEON_CP_CSQ_CNTL, 0);
+       radeon_ring_fini(rdev);
+       DRM_INFO("radeon: cp finalized\n");
+}
+
+void r100_cp_disable(struct radeon_device *rdev)
+{
+       /* Disable ring */
+       rdev->cp.ready = false;
+       WREG32(RADEON_CP_CSQ_MODE, 0);
+       WREG32(RADEON_CP_CSQ_CNTL, 0);
+       if (r100_gui_wait_for_idle(rdev)) {
+               printk(KERN_WARNING "Failed to wait GUI idle while "
+                      "programming pipes. Bad things might happen.\n");
+       }
+}
+
+int r100_cp_reset(struct radeon_device *rdev)
+{
+       uint32_t tmp;
+       bool reinit_cp;
+       int i;
+
+       reinit_cp = rdev->cp.ready;
+       rdev->cp.ready = false;
+       WREG32(RADEON_CP_CSQ_MODE, 0);
+       WREG32(RADEON_CP_CSQ_CNTL, 0);
+       WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
+       (void)RREG32(RADEON_RBBM_SOFT_RESET);
+       udelay(200);
+       WREG32(RADEON_RBBM_SOFT_RESET, 0);
+       /* Wait to prevent race in RBBM_STATUS */
+       mdelay(1);
+       for (i = 0; i < rdev->usec_timeout; i++) {
+               tmp = RREG32(RADEON_RBBM_STATUS);
+               if (!(tmp & (1 << 16))) {
+                       DRM_INFO("CP reset succeeded (RBBM_STATUS=0x%08X)\n",
+                                tmp);
+                       if (reinit_cp) {
+                               return r100_cp_init(rdev, rdev->cp.ring_size);
+                       }
+                       return 0;
+               }
+               DRM_UDELAY(1);
+       }
+       tmp = RREG32(RADEON_RBBM_STATUS);
+       DRM_ERROR("Failed to reset CP (RBBM_STATUS=0x%08X)!\n", tmp);
+       return -1;
+}
+
+
+/*
+ * CS functions
+ */
+int r100_cs_parse_packet0(struct radeon_cs_parser *p,
+                         struct radeon_cs_packet *pkt,
+                         unsigned *auth, unsigned n,
+                         radeon_packet0_check_t check)
+{
+       unsigned reg;
+       unsigned i, j, m;
+       unsigned idx;
+       int r;
+
+       idx = pkt->idx + 1;
+       reg = pkt->reg;
+       if (pkt->one_reg_wr) {
+               if ((reg >> 7) > n) {
+                       return -EINVAL;
+               }
+       } else {
+               if (((reg + (pkt->count << 2)) >> 7) > n) {
+                       return -EINVAL;
+               }
+       }
+       for (i = 0; i <= pkt->count; i++, idx++) {
+               j = (reg >> 7);
+               m = 1 << ((reg >> 2) & 31);
+               if (auth[j] & m) {
+                       r = check(p, pkt, idx, reg);
+                       if (r) {
+                               return r;
+                       }
+               }
+               if (pkt->one_reg_wr) {
+                       if (!(auth[j] & m)) {
+                               break;
+                       }
+               } else {
+                       reg += 4;
+               }
+       }
+       return 0;
+}
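+
+/* The auth[] bitmap consumed by r100_cs_parse_packet0() above covers the
+ * register space 128 bytes per 32-bit word: for a register offset reg the
+ * word index is reg >> 7 and the bit is 1 << ((reg >> 2) & 31).  For
+ * example, an offset of 0x1720 lands in word 0x2E, bit 8.
+ */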
+
+int r100_cs_parse_packet3(struct radeon_cs_parser *p,
+                         struct radeon_cs_packet *pkt,
+                         unsigned *auth, unsigned n,
+                         radeon_packet3_check_t check)
+{
+       unsigned i, m;
+
+       if ((pkt->opcode >> 5) > n) {
+               return -EINVAL;
+       }
+       i = pkt->opcode >> 5;
+       m = 1 << (pkt->opcode & 31);
+       if (auth[i] & m) {
+               return check(p, pkt);
+       }
+       return 0;
+}
+
+void r100_cs_dump_packet(struct radeon_cs_parser *p,
+                        struct radeon_cs_packet *pkt)
+{
+       struct radeon_cs_chunk *ib_chunk;
+       volatile uint32_t *ib;
+       unsigned i;
+       unsigned idx;
+
+       ib = p->ib->ptr;
+       ib_chunk = &p->chunks[p->chunk_ib_idx];
+       idx = pkt->idx;
+       for (i = 0; i <= (pkt->count + 1); i++, idx++) {
+               DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
+       }
+}
+
+/**
+ * r100_cs_packet_parse() - parse a cp packet and point the ib index to the next packet
+ * @p:         parser structure holding parsing context.
+ * @pkt:       where to store packet information
+ * @idx:       index of the packet header in the ib chunk
+ *
+ * Assumes that chunk_ib_index is properly set. Returns -EINVAL if the
+ * packet is bigger than the remaining ib size or if the packet type is
+ * unknown.
+ **/
+int r100_cs_packet_parse(struct radeon_cs_parser *p,
+                        struct radeon_cs_packet *pkt,
+                        unsigned idx)
+{
+       struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
+       uint32_t header;
+
+       if (idx >= ib_chunk->length_dw) {
+               DRM_ERROR("Cannot parse packet at %d after CS end %d !\n",
+                         idx, ib_chunk->length_dw);
+               return -EINVAL;
+       }
+       /* Only read the header once the index is known to be in bounds */
+       header = ib_chunk->kdata[idx];
+       pkt->idx = idx;
+       pkt->type = CP_PACKET_GET_TYPE(header);
+       pkt->count = CP_PACKET_GET_COUNT(header);
+       switch (pkt->type) {
+       case PACKET_TYPE0:
+               pkt->reg = CP_PACKET0_GET_REG(header);
+               pkt->one_reg_wr = CP_PACKET0_GET_ONE_REG_WR(header);
+               break;
+       case PACKET_TYPE3:
+               pkt->opcode = CP_PACKET3_GET_OPCODE(header);
+               break;
+       case PACKET_TYPE2:
+               pkt->count = -1;
+               break;
+       default:
+               DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
+               return -EINVAL;
+       }
+       if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
+               DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
+                         pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
+               return -EINVAL;
+       }
+       return 0;
+}
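+
+/* Packets are laid out back to back in the IB: one header dword followed by
+ * count + 1 payload dwords, so the next packet starts at idx + count + 2.
+ * A type-2 filler has its count forced to -1 above and therefore advances
+ * the parser by a single dword.
+ */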
+
+/**
+ * r100_cs_packet_next_reloc() - parse the next packet, which should be a reloc packet3
+ * @p:         parser structure holding parsing context.
+ * @cs_reloc:  where to store the resolved relocation information
+ *
+ * Checks that the next packet is a relocation packet3 and looks up the
+ * matching entry in the relocation chunk.
+ **/
+int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
+                             struct radeon_cs_reloc **cs_reloc)
+{
+       struct radeon_cs_chunk *ib_chunk;
+       struct radeon_cs_chunk *relocs_chunk;
+       struct radeon_cs_packet p3reloc;
+       unsigned idx;
+       int r;
+
+       if (p->chunk_relocs_idx == -1) {
+               DRM_ERROR("No relocation chunk !\n");
+               return -EINVAL;
+       }
+       *cs_reloc = NULL;
+       ib_chunk = &p->chunks[p->chunk_ib_idx];
+       relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+       r = r100_cs_packet_parse(p, &p3reloc, p->idx);
+       if (r) {
+               return r;
+       }
+       p->idx += p3reloc.count + 2;
+       if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
+               DRM_ERROR("No packet3 for relocation for packet at %d.\n",
+                         p3reloc.idx);
+               r100_cs_dump_packet(p, &p3reloc);
+               return -EINVAL;
+       }
+       idx = ib_chunk->kdata[p3reloc.idx + 1];
+       if (idx >= relocs_chunk->length_dw) {
+               DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
+                         idx, relocs_chunk->length_dw);
+               r100_cs_dump_packet(p, &p3reloc);
+               return -EINVAL;
+       }
+       /* FIXME: we assume reloc size is 4 dwords */
+       *cs_reloc = p->relocs_ptr[(idx / 4)];
+       return 0;
+}
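+
+/* Relocations are encoded in the IB as a PACKET3_NOP whose first payload
+ * dword is a dword index into the relocation chunk; with the 4-dword
+ * relocation records assumed above, record n starts at kdata index n * 4,
+ * hence the idx / 4 lookup.
+ */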
+
+static int r100_packet0_check(struct radeon_cs_parser *p,
+                             struct radeon_cs_packet *pkt)
+{
+       struct radeon_cs_chunk *ib_chunk;
+       struct radeon_cs_reloc *reloc;
+       volatile uint32_t *ib;
+       uint32_t tmp;
+       unsigned reg;
+       unsigned i;
+       unsigned idx;
+       bool onereg;
+       int r;
+
+       ib = p->ib->ptr;
+       ib_chunk = &p->chunks[p->chunk_ib_idx];
+       idx = pkt->idx + 1;
+       reg = pkt->reg;
+       onereg = false;
+       if (CP_PACKET0_GET_ONE_REG_WR(ib_chunk->kdata[pkt->idx])) {
+               onereg = true;
+       }
+       for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
+               switch (reg) {
+               /* FIXME: only allow PACKET3 blit? easier to check for out of
+                * range access */
+               case RADEON_DST_PITCH_OFFSET:
+               case RADEON_SRC_PITCH_OFFSET:
+                       r = r100_cs_packet_next_reloc(p, &reloc);
+                       if (r) {
+                               DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+                                         idx, reg);
+                               r100_cs_dump_packet(p, pkt);
+                               return r;
+                       }
+                       tmp = ib_chunk->kdata[idx] & 0x003fffff;
+                       tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
+                       ib[idx] = (ib_chunk->kdata[idx] & 0xffc00000) | tmp;
+                       break;
+               case RADEON_RB3D_DEPTHOFFSET:
+               case RADEON_RB3D_COLOROFFSET:
+               case R300_RB3D_COLOROFFSET0:
+               case R300_ZB_DEPTHOFFSET:
+               case R200_PP_TXOFFSET_0:
+               case R200_PP_TXOFFSET_1:
+               case R200_PP_TXOFFSET_2:
+               case R200_PP_TXOFFSET_3:
+               case R200_PP_TXOFFSET_4:
+               case R200_PP_TXOFFSET_5:
+               case RADEON_PP_TXOFFSET_0:
+               case RADEON_PP_TXOFFSET_1:
+               case RADEON_PP_TXOFFSET_2:
+               case R300_TX_OFFSET_0:
+               case R300_TX_OFFSET_0+4:
+               case R300_TX_OFFSET_0+8:
+               case R300_TX_OFFSET_0+12:
+               case R300_TX_OFFSET_0+16:
+               case R300_TX_OFFSET_0+20:
+               case R300_TX_OFFSET_0+24:
+               case R300_TX_OFFSET_0+28:
+               case R300_TX_OFFSET_0+32:
+               case R300_TX_OFFSET_0+36:
+               case R300_TX_OFFSET_0+40:
+               case R300_TX_OFFSET_0+44:
+               case R300_TX_OFFSET_0+48:
+               case R300_TX_OFFSET_0+52:
+               case R300_TX_OFFSET_0+56:
+               case R300_TX_OFFSET_0+60:
+                       r = r100_cs_packet_next_reloc(p, &reloc);
+                       if (r) {
+                               DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+                                         idx, reg);
+                               r100_cs_dump_packet(p, pkt);
+                               return r;
+                       }
+                       ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+                       break;
+               default:
+                       /* FIXME: we don't want to allow any other packets */
+                       break;
+               }
+               if (onereg) {
+                       /* FIXME: forbid onereg write to register on relocate */
+                       break;
+               }
+       }
+       return 0;
+}
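+
+/* Example of the pitch/offset patching above: judging from the masks used,
+ * bits 21:0 of DST/SRC_PITCH_OFFSET hold the surface offset in 1 KB units
+ * and bits 31:22 the pitch, so the checker adds gpu_offset >> 10 to the low
+ * field while preserving the upper bits (mask 0xffc00000).
+ */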
+
+static int r100_packet3_check(struct radeon_cs_parser *p,
+                             struct radeon_cs_packet *pkt)
+{
+       struct radeon_cs_chunk *ib_chunk;
+       struct radeon_cs_reloc *reloc;
+       unsigned idx;
+       unsigned i, c;
+       volatile uint32_t *ib;
+       int r;
+
+       ib = p->ib->ptr;
+       ib_chunk = &p->chunks[p->chunk_ib_idx];
+       idx = pkt->idx + 1;
+       switch (pkt->opcode) {
+       case PACKET3_3D_LOAD_VBPNTR:
+               c = ib_chunk->kdata[idx++];
+               for (i = 0; i < (c - 1); i += 2, idx += 3) {
+                       r = r100_cs_packet_next_reloc(p, &reloc);
+                       if (r) {
+                               DRM_ERROR("No reloc for packet3 %d\n",
+                                         pkt->opcode);
+                               r100_cs_dump_packet(p, pkt);
+                               return r;
+                       }
+                       ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
+                       r = r100_cs_packet_next_reloc(p, &reloc);
+                       if (r) {
+                               DRM_ERROR("No reloc for packet3 %d\n",
+                                         pkt->opcode);
+                               r100_cs_dump_packet(p, pkt);
+                               return r;
+                       }
+                       ib[idx+2] = ib_chunk->kdata[idx+2] + ((u32)reloc->lobj.gpu_offset);
+               }
+               if (c & 1) {
+                       r = r100_cs_packet_next_reloc(p, &reloc);
+                       if (r) {
+                               DRM_ERROR("No reloc for packet3 %d\n",
+                                         pkt->opcode);
+                               r100_cs_dump_packet(p, pkt);
+                               return r;
+                       }
+                       ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
+               }
+               break;
+       case PACKET3_INDX_BUFFER:
+               r = r100_cs_packet_next_reloc(p, &reloc);
+               if (r) {
+                       DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
+                       r100_cs_dump_packet(p, pkt);
+                       return r;
+               }
+               ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
+               break;
+       case 0x23:
+               /* FIXME: cleanup */
+               /* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */
+               r = r100_cs_packet_next_reloc(p, &reloc);
+               if (r) {
+                       DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
+                       r100_cs_dump_packet(p, pkt);
+                       return r;
+               }
+               ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+               break;
+       case PACKET3_3D_DRAW_IMMD:
+               /* triggers drawing using in-packet vertex data */
+       case PACKET3_3D_DRAW_IMMD_2:
+               /* triggers drawing using in-packet vertex data */
+       case PACKET3_3D_DRAW_VBUF_2:
+               /* triggers drawing of vertex buffers setup elsewhere */
+       case PACKET3_3D_DRAW_INDX_2:
+               /* triggers drawing using indices to vertex buffer */
+       case PACKET3_3D_DRAW_VBUF:
+               /* triggers drawing of vertex buffers setup elsewhere */
+       case PACKET3_3D_DRAW_INDX:
+               /* triggers drawing using indices to vertex buffer */
+       case PACKET3_NOP:
+               break;
+       default:
+               DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
+               return -EINVAL;
+       }
+       return 0;
+}
+
+int r100_cs_parse(struct radeon_cs_parser *p)
+{
+       struct radeon_cs_packet pkt;
+       int r;
+
+       do {
+               r = r100_cs_packet_parse(p, &pkt, p->idx);
+               if (r) {
+                       return r;
+               }
+               p->idx += pkt.count + 2;
+               switch (pkt.type) {
+               case PACKET_TYPE0:
+                       r = r100_packet0_check(p, &pkt);
+                       break;
+               case PACKET_TYPE2:
+                       break;
+               case PACKET_TYPE3:
+                       r = r100_packet3_check(p, &pkt);
+                       break;
+               default:
+                       DRM_ERROR("Unknown packet type %d !\n",
+                                       pkt.type);
+                       return -EINVAL;
+               }
+               if (r) {
+                       return r;
+               }
+       } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+       return 0;
+}
+
+
+/*
+ * Global GPU functions
+ */
+void r100_errata(struct radeon_device *rdev)
+{
+       rdev->pll_errata = 0;
+
+       if (rdev->family == CHIP_RV200 || rdev->family == CHIP_RS200) {
+               rdev->pll_errata |= CHIP_ERRATA_PLL_DUMMYREADS;
+       }
+
+       if (rdev->family == CHIP_RV100 ||
+           rdev->family == CHIP_RS100 ||
+           rdev->family == CHIP_RS200) {
+               rdev->pll_errata |= CHIP_ERRATA_PLL_DELAY;
+       }
+}
+
+/* Wait for vertical sync on primary CRTC */
+void r100_gpu_wait_for_vsync(struct radeon_device *rdev)
+{
+       uint32_t crtc_gen_cntl, tmp;
+       int i;
+
+       crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);
+       if ((crtc_gen_cntl & RADEON_CRTC_DISP_REQ_EN_B) ||
+           !(crtc_gen_cntl & RADEON_CRTC_EN)) {
+               return;
+       }
+       /* Clear the CRTC_VBLANK_SAVE bit */
+       WREG32(RADEON_CRTC_STATUS, RADEON_CRTC_VBLANK_SAVE_CLEAR);
+       for (i = 0; i < rdev->usec_timeout; i++) {
+               tmp = RREG32(RADEON_CRTC_STATUS);
+               if (tmp & RADEON_CRTC_VBLANK_SAVE) {
+                       return;
+               }
+               DRM_UDELAY(1);
+       }
+}
+
+/* Wait for vertical sync on secondary CRTC */
+void r100_gpu_wait_for_vsync2(struct radeon_device *rdev)
+{
+       uint32_t crtc2_gen_cntl, tmp;
+       int i;
+
+       crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
+       if ((crtc2_gen_cntl & RADEON_CRTC2_DISP_REQ_EN_B) ||
+           !(crtc2_gen_cntl & RADEON_CRTC2_EN))
+               return;
+
+       /* Clear the CRTC_VBLANK_SAVE bit */
+       WREG32(RADEON_CRTC2_STATUS, RADEON_CRTC2_VBLANK_SAVE_CLEAR);
+       for (i = 0; i < rdev->usec_timeout; i++) {
+               tmp = RREG32(RADEON_CRTC2_STATUS);
+               if (tmp & RADEON_CRTC2_VBLANK_SAVE) {
+                       return;
+               }
+               DRM_UDELAY(1);
+       }
+}
+
+int r100_rbbm_fifo_wait_for_entry(struct radeon_device *rdev, unsigned n)
+{
+       unsigned i;
+       uint32_t tmp;
+
+       for (i = 0; i < rdev->usec_timeout; i++) {
+               tmp = RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_FIFOCNT_MASK;
+               if (tmp >= n) {
+                       return 0;
+               }
+               DRM_UDELAY(1);
+       }
+       return -1;
+}
+
+int r100_gui_wait_for_idle(struct radeon_device *rdev)
+{
+       unsigned i;
+       uint32_t tmp;
+
+       if (r100_rbbm_fifo_wait_for_entry(rdev, 64)) {
+               printk(KERN_WARNING "radeon: wait for empty RBBM fifo failed !"
+                      " Bad things might happen.\n");
+       }
+       for (i = 0; i < rdev->usec_timeout; i++) {
+               tmp = RREG32(RADEON_RBBM_STATUS);
+               if (!(tmp & (1 << 31))) {
+                       return 0;
+               }
+               DRM_UDELAY(1);
+       }
+       return -1;
+}
+
+int r100_mc_wait_for_idle(struct radeon_device *rdev)
+{
+       unsigned i;
+       uint32_t tmp;
+
+       for (i = 0; i < rdev->usec_timeout; i++) {
+               /* read MC_STATUS */
+               tmp = RREG32(0x0150);
+               if (tmp & (1 << 2)) {
+                       return 0;
+               }
+               DRM_UDELAY(1);
+       }
+       return -1;
+}
+
+void r100_gpu_init(struct radeon_device *rdev)
+{
+       /* TODO: anything to do here? pipes? */
+       r100_hdp_reset(rdev);
+}
+
+void r100_hdp_reset(struct radeon_device *rdev)
+{
+       uint32_t tmp;
+
+       tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL;
+       tmp |= (7 << 28);
+       WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE);
+       (void)RREG32(RADEON_HOST_PATH_CNTL);
+       udelay(200);
+       WREG32(RADEON_RBBM_SOFT_RESET, 0);
+       WREG32(RADEON_HOST_PATH_CNTL, tmp);
+       (void)RREG32(RADEON_HOST_PATH_CNTL);
+}
+
+int r100_rb2d_reset(struct radeon_device *rdev)
+{
+       uint32_t tmp;
+       int i;
+
+       WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_E2);
+       (void)RREG32(RADEON_RBBM_SOFT_RESET);
+       udelay(200);
+       WREG32(RADEON_RBBM_SOFT_RESET, 0);
+       /* Wait to prevent race in RBBM_STATUS */
+       mdelay(1);
+       for (i = 0; i < rdev->usec_timeout; i++) {
+               tmp = RREG32(RADEON_RBBM_STATUS);
+               if (!(tmp & (1 << 26))) {
+                       DRM_INFO("RB2D reset succeeded (RBBM_STATUS=0x%08X)\n",
+                                tmp);
+                       return 0;
+               }
+               DRM_UDELAY(1);
+       }
+       tmp = RREG32(RADEON_RBBM_STATUS);
+       DRM_ERROR("Failed to reset RB2D (RBBM_STATUS=0x%08X)!\n", tmp);
+       return -1;
+}
+
+int r100_gpu_reset(struct radeon_device *rdev)
+{
+       uint32_t status;
+
+       /* reset order likely matters */
+       status = RREG32(RADEON_RBBM_STATUS);
+       /* reset HDP */
+       r100_hdp_reset(rdev);
+       /* reset rb2d */
+       if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
+               r100_rb2d_reset(rdev);
+       }
+       /* TODO: reset 3D engine */
+       /* reset CP */
+       status = RREG32(RADEON_RBBM_STATUS);
+       if (status & (1 << 16)) {
+               r100_cp_reset(rdev);
+       }
+       /* Check if GPU is idle */
+       status = RREG32(RADEON_RBBM_STATUS);
+       if (status & (1 << 31)) {
+               DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
+               return -1;
+       }
+       DRM_INFO("GPU reset succeeded (RBBM_STATUS=0x%08X)\n", status);
+       return 0;
+}
+
+
+/*
+ * VRAM info
+ */
+static void r100_vram_get_type(struct radeon_device *rdev)
+{
+       uint32_t tmp;
+
+       rdev->mc.vram_is_ddr = false;
+       if (rdev->flags & RADEON_IS_IGP)
+               rdev->mc.vram_is_ddr = true;
+       else if (RREG32(RADEON_MEM_SDRAM_MODE_REG) & RADEON_MEM_CFG_TYPE_DDR)
+               rdev->mc.vram_is_ddr = true;
+       if ((rdev->family == CHIP_RV100) ||
+           (rdev->family == CHIP_RS100) ||
+           (rdev->family == CHIP_RS200)) {
+               tmp = RREG32(RADEON_MEM_CNTL);
+               if (tmp & RV100_HALF_MODE) {
+                       rdev->mc.vram_width = 32;
+               } else {
+                       rdev->mc.vram_width = 64;
+               }
+               if (rdev->flags & RADEON_SINGLE_CRTC) {
+                       rdev->mc.vram_width /= 4;
+                       rdev->mc.vram_is_ddr = true;
+               }
+       } else if (rdev->family <= CHIP_RV280) {
+               tmp = RREG32(RADEON_MEM_CNTL);
+               if (tmp & RADEON_MEM_NUM_CHANNELS_MASK) {
+                       rdev->mc.vram_width = 128;
+               } else {
+                       rdev->mc.vram_width = 64;
+               }
+       } else {
+               /* newer IGPs */
+               rdev->mc.vram_width = 128;
+       }
+}
+
+void r100_vram_info(struct radeon_device *rdev)
+{
+       r100_vram_get_type(rdev);
+
+       if (rdev->flags & RADEON_IS_IGP) {
+               uint32_t tom;
+               /* read NB_TOM to get the amount of ram stolen for the GPU */
+               tom = RREG32(RADEON_NB_TOM);
+               rdev->mc.vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16);
+               WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
+       } else {
+               rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
+               /* Some production M6 boards report 0
+                * when they actually have 8 MB
+                */
+               if (rdev->mc.vram_size == 0) {
+                       rdev->mc.vram_size = 8192 * 1024;
+                       WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
+               }
+       }
+
+       rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
+       rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+       if (rdev->mc.aper_size > rdev->mc.vram_size) {
+               /* Why do some boards not have CONFIG_MEMSIZE properly
+                * set up? */
+               rdev->mc.vram_size = rdev->mc.aper_size;
+               WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
+       }
+}
+
+
+/*
+ * Indirect registers accessor
+ */
+void r100_pll_errata_after_index(struct radeon_device *rdev)
+{
+       if (!(rdev->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS)) {
+               return;
+       }
+       (void)RREG32(RADEON_CLOCK_CNTL_DATA);
+       (void)RREG32(RADEON_CRTC_GEN_CNTL);
+}
+
+static void r100_pll_errata_after_data(struct radeon_device *rdev)
+{
+       /* This workaround is necessary on RV100, RS100 and RS200 chips,
+        * or the chip could hang on a subsequent access
+        */
+       if (rdev->pll_errata & CHIP_ERRATA_PLL_DELAY) {
+               udelay(5000);
+       }
+
+       /* This function is required to work around a hardware bug in some (all?)
+        * revisions of the R300.  This workaround should be called after every
+        * CLOCK_CNTL_INDEX register access.  If not, register reads afterward
+        * may not be correct.
+        */
+       if (rdev->pll_errata & CHIP_ERRATA_R300_CG) {
+               uint32_t save, tmp;
+
+               save = RREG32(RADEON_CLOCK_CNTL_INDEX);
+               tmp = save & ~(0x3f | RADEON_PLL_WR_EN);
+               WREG32(RADEON_CLOCK_CNTL_INDEX, tmp);
+               tmp = RREG32(RADEON_CLOCK_CNTL_DATA);
+               WREG32(RADEON_CLOCK_CNTL_INDEX, save);
+       }
+}
+
+uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+       uint32_t data;
+
+       WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f);
+       r100_pll_errata_after_index(rdev);
+       data = RREG32(RADEON_CLOCK_CNTL_DATA);
+       r100_pll_errata_after_data(rdev);
+       return data;
+}
+
+void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+       WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN));
+       r100_pll_errata_after_index(rdev);
+       WREG32(RADEON_CLOCK_CNTL_DATA, v);
+       r100_pll_errata_after_data(rdev);
+}
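+
+/* Typical read-modify-write through the PLL accessors above (a sketch; the
+ * register names are the ones used elsewhere in the radeon driver, not
+ * something required by these helpers):
+ *
+ *     tmp = r100_pll_rreg(rdev, RADEON_PPLL_CNTL);
+ *     tmp |= RADEON_PPLL_RESET;
+ *     r100_pll_wreg(rdev, RADEON_PPLL_CNTL, tmp);
+ */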
+
+uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+       if (reg < 0x10000)
+               return readl(((void __iomem *)rdev->rmmio) + reg);
+       else {
+               writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
+               return readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
+       }
+}
+
+void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+       if (reg < 0x10000)
+               writel(v, ((void __iomem *)rdev->rmmio) + reg);
+       else {
+               writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
+               writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
+       }
+}
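+
+/* Registers below 0x10000 live inside the mapped MMIO BAR and are accessed
+ * directly; anything above goes through the MM_INDEX/MM_DATA pair, so a read
+ * of register 0x15000, for instance, expands to roughly:
+ *
+ *     writel(0x15000, rdev->rmmio + RADEON_MM_INDEX);
+ *     val = readl(rdev->rmmio + RADEON_MM_DATA);
+ */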
+
+/*
+ * Debugfs info
+ */
+#if defined(CONFIG_DEBUG_FS)
+static int r100_debugfs_rbbm_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       uint32_t reg, value;
+       unsigned i;
+
+       seq_printf(m, "RBBM_STATUS 0x%08x\n", RREG32(RADEON_RBBM_STATUS));
+       seq_printf(m, "RBBM_CMDFIFO_STAT 0x%08x\n", RREG32(0xE7C));
+       seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
+       for (i = 0; i < 64; i++) {
+               WREG32(RADEON_RBBM_CMDFIFO_ADDR, i | 0x100);
+               reg = (RREG32(RADEON_RBBM_CMDFIFO_DATA) - 1) >> 2;
+               WREG32(RADEON_RBBM_CMDFIFO_ADDR, i);
+               value = RREG32(RADEON_RBBM_CMDFIFO_DATA);
+               seq_printf(m, "[0x%03X] 0x%04X=0x%08X\n", i, reg, value);
+       }
+       return 0;
+}
+
+static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       uint32_t rdp, wdp;
+       unsigned count, i, j;
+
+       radeon_ring_free_size(rdev);
+       rdp = RREG32(RADEON_CP_RB_RPTR);
+       wdp = RREG32(RADEON_CP_RB_WPTR);
+       count = (rdp + rdev->cp.ring_size - wdp) & rdev->cp.ptr_mask;
+       seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
+       seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
+       seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
+       seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
+       seq_printf(m, "%u dwords in ring\n", count);
+       for (j = 0; j <= count; j++) {
+               i = (rdp + j) & rdev->cp.ptr_mask;
+               seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
+       }
+       return 0;
+}
+
+
+static int r100_debugfs_cp_csq_fifo(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       uint32_t csq_stat, csq2_stat, tmp;
+       unsigned r_rptr, r_wptr, ib1_rptr, ib1_wptr, ib2_rptr, ib2_wptr;
+       unsigned i;
+
+       seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
+       seq_printf(m, "CP_CSQ_MODE 0x%08x\n", RREG32(RADEON_CP_CSQ_MODE));
+       csq_stat = RREG32(RADEON_CP_CSQ_STAT);
+       csq2_stat = RREG32(RADEON_CP_CSQ2_STAT);
+       r_rptr = (csq_stat >> 0) & 0x3ff;
+       r_wptr = (csq_stat >> 10) & 0x3ff;
+       ib1_rptr = (csq_stat >> 20) & 0x3ff;
+       ib1_wptr = (csq2_stat >> 0) & 0x3ff;
+       ib2_rptr = (csq2_stat >> 10) & 0x3ff;
+       ib2_wptr = (csq2_stat >> 20) & 0x3ff;
+       seq_printf(m, "CP_CSQ_STAT 0x%08x\n", csq_stat);
+       seq_printf(m, "CP_CSQ2_STAT 0x%08x\n", csq2_stat);
+       seq_printf(m, "Ring rptr %u\n", r_rptr);
+       seq_printf(m, "Ring wptr %u\n", r_wptr);
+       seq_printf(m, "Indirect1 rptr %u\n", ib1_rptr);
+       seq_printf(m, "Indirect1 wptr %u\n", ib1_wptr);
+       seq_printf(m, "Indirect2 rptr %u\n", ib2_rptr);
+       seq_printf(m, "Indirect2 wptr %u\n", ib2_wptr);
+       /* FIXME: 0, 128 and 640 depend on the fifo setup, see cp_init_kms:
+        * 128 = indirect1_start * 8 and 640 = indirect2_start * 8 */
+       seq_printf(m, "Ring fifo:\n");
+       for (i = 0; i < 256; i++) {
+               WREG32(RADEON_CP_CSQ_ADDR, i << 2);
+               tmp = RREG32(RADEON_CP_CSQ_DATA);
+               seq_printf(m, "rfifo[%04d]=0x%08X\n", i, tmp);
+       }
+       seq_printf(m, "Indirect1 fifo:\n");
+       for (i = 256; i <= 512; i++) {
+               WREG32(RADEON_CP_CSQ_ADDR, i << 2);
+               tmp = RREG32(RADEON_CP_CSQ_DATA);
+               seq_printf(m, "ib1fifo[%04d]=0x%08X\n", i, tmp);
+       }
+       seq_printf(m, "Indirect2 fifo:\n");
+       for (i = 640; i < ib1_wptr; i++) {
+               WREG32(RADEON_CP_CSQ_ADDR, i << 2);
+               tmp = RREG32(RADEON_CP_CSQ_DATA);
+               seq_printf(m, "ib2fifo[%04d]=0x%08X\n", i, tmp);
+       }
+       return 0;
+}
+
+static int r100_debugfs_mc_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       uint32_t tmp;
+
+       tmp = RREG32(RADEON_CONFIG_MEMSIZE);
+       seq_printf(m, "CONFIG_MEMSIZE 0x%08x\n", tmp);
+       tmp = RREG32(RADEON_MC_FB_LOCATION);
+       seq_printf(m, "MC_FB_LOCATION 0x%08x\n", tmp);
+       tmp = RREG32(RADEON_BUS_CNTL);
+       seq_printf(m, "BUS_CNTL 0x%08x\n", tmp);
+       tmp = RREG32(RADEON_MC_AGP_LOCATION);
+       seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp);
+       tmp = RREG32(RADEON_AGP_BASE);
+       seq_printf(m, "AGP_BASE 0x%08x\n", tmp);
+       tmp = RREG32(RADEON_HOST_PATH_CNTL);
+       seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp);
+       tmp = RREG32(0x01D0);
+       seq_printf(m, "AIC_CTRL 0x%08x\n", tmp);
+       tmp = RREG32(RADEON_AIC_LO_ADDR);
+       seq_printf(m, "AIC_LO_ADDR 0x%08x\n", tmp);
+       tmp = RREG32(RADEON_AIC_HI_ADDR);
+       seq_printf(m, "AIC_HI_ADDR 0x%08x\n", tmp);
+       tmp = RREG32(0x01E4);
+       seq_printf(m, "AIC_TLB_ADDR 0x%08x\n", tmp);
+       return 0;
+}
+
+static struct drm_info_list r100_debugfs_rbbm_list[] = {
+       {"r100_rbbm_info", r100_debugfs_rbbm_info, 0, NULL},
+};
+
+static struct drm_info_list r100_debugfs_cp_list[] = {
+       {"r100_cp_ring_info", r100_debugfs_cp_ring_info, 0, NULL},
+       {"r100_cp_csq_fifo", r100_debugfs_cp_csq_fifo, 0, NULL},
+};
+
+static struct drm_info_list r100_debugfs_mc_info_list[] = {
+       {"r100_mc_info", r100_debugfs_mc_info, 0, NULL},
+};
+#endif
+
+int r100_debugfs_rbbm_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+       return radeon_debugfs_add_files(rdev, r100_debugfs_rbbm_list, 1);
+#else
+       return 0;
+#endif
+}
+
+int r100_debugfs_cp_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+       return radeon_debugfs_add_files(rdev, r100_debugfs_cp_list, 2);
+#else
+       return 0;
+#endif
+}
+
+int r100_debugfs_mc_info_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+       return radeon_debugfs_add_files(rdev, r100_debugfs_mc_info_list, 1);
+#else
+       return 0;
+#endif
+}
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
new file mode 100644 (file)
index 0000000..f5870a0
--- /dev/null
@@ -0,0 +1,1116 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <linux/seq_file.h>
+#include "drmP.h"
+#include "drm.h"
+#include "radeon_reg.h"
+#include "radeon.h"
+
+/* r300,r350,rv350,rv370,rv380 depend on: */
+void r100_hdp_reset(struct radeon_device *rdev);
+int r100_cp_reset(struct radeon_device *rdev);
+int r100_rb2d_reset(struct radeon_device *rdev);
+int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
+int r100_pci_gart_enable(struct radeon_device *rdev);
+void r100_pci_gart_disable(struct radeon_device *rdev);
+void r100_mc_setup(struct radeon_device *rdev);
+void r100_mc_disable_clients(struct radeon_device *rdev);
+int r100_gui_wait_for_idle(struct radeon_device *rdev);
+int r100_cs_packet_parse(struct radeon_cs_parser *p,
+                        struct radeon_cs_packet *pkt,
+                        unsigned idx);
+int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
+                             struct radeon_cs_reloc **cs_reloc);
+int r100_cs_parse_packet0(struct radeon_cs_parser *p,
+                         struct radeon_cs_packet *pkt,
+                         unsigned *auth, unsigned n,
+                         radeon_packet0_check_t check);
+int r100_cs_parse_packet3(struct radeon_cs_parser *p,
+                         struct radeon_cs_packet *pkt,
+                         unsigned *auth, unsigned n,
+                         radeon_packet3_check_t check);
+void r100_cs_dump_packet(struct radeon_cs_parser *p,
+                        struct radeon_cs_packet *pkt);
+
+/* This file gathers functions specific to:
+ * r300,r350,rv350,rv370,rv380
+ *
+ * Some of these functions might be used by newer ASICs.
+ */
+void r300_gpu_init(struct radeon_device *rdev);
+int r300_mc_wait_for_idle(struct radeon_device *rdev);
+int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev);
+
+
+/*
+ * rv370,rv380 PCIE GART
+ */
+void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
+{
+       uint32_t tmp;
+       int i;
+
+       /* Work around a HW bug by flushing twice */
+       for (i = 0; i < 2; i++) {
+               tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
+               WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp | RADEON_PCIE_TX_GART_INVALIDATE_TLB);
+               (void)RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
+               WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
+               mb();
+       }
+}
+
+int rv370_pcie_gart_enable(struct radeon_device *rdev)
+{
+       uint32_t table_addr;
+       uint32_t tmp;
+       int r;
+
+       /* Initialize common gart structure */
+       r = radeon_gart_init(rdev);
+       if (r) {
+               return r;
+       }
+       r = rv370_debugfs_pcie_gart_info_init(rdev);
+       if (r) {
+               DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
+       }
+       rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
+       r = radeon_gart_table_vram_alloc(rdev);
+       if (r) {
+               return r;
+       }
+       /* discard memory requests outside of the configured range */
+       tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
+       WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
+       WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_location);
+       tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 4096;
+       WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp);
+       WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
+       WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
+       table_addr = rdev->gart.table_addr;
+       WREG32_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr);
+       /* FIXME: setup default page */
+       WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_location);
+       WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0);
+       /* Clear error */
+       WREG32_PCIE(0x18, 0);
+       tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
+       tmp |= RADEON_PCIE_TX_GART_EN;
+       tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
+       WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
+       rv370_pcie_gart_tlb_flush(rdev);
+       DRM_INFO("PCIE GART of %uM enabled (table at 0x%08X).\n",
+                rdev->mc.gtt_size >> 20, table_addr);
+       rdev->gart.ready = true;
+       return 0;
+}
+
+void rv370_pcie_gart_disable(struct radeon_device *rdev)
+{
+       uint32_t tmp;
+
+       tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
+       tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
+       WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
+       if (rdev->gart.table.vram.robj) {
+               radeon_object_kunmap(rdev->gart.table.vram.robj);
+               radeon_object_unpin(rdev->gart.table.vram.robj);
+       }
+}
+
+int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
+{
+       void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
+
+       if (i < 0 || i >= rdev->gart.num_gpu_pages) {
+               return -EINVAL;
+       }
+       addr = (((u32)addr) >> 8) | ((upper_32_bits(addr) & 0xff) << 4) | 0xC;
+       writel(cpu_to_le32(addr), ((void __iomem *)ptr) + (i * 4));
+       return 0;
+}
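+
+/* The GART entry written above packs the page address as: bits 31:8 of the
+ * DMA address shifted down by 8, the next 8 address bits folded into bits
+ * 11:4 of the entry, and 0xC in the low nibble as control/valid flags (the
+ * field meaning is inferred from this encoding, not from documentation).
+ */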
+
+int r300_gart_enable(struct radeon_device *rdev)
+{
+#if __OS_HAS_AGP
+       if (rdev->flags & RADEON_IS_AGP) {
+               if (rdev->family > CHIP_RV350) {
+                       rv370_pcie_gart_disable(rdev);
+               } else {
+                       r100_pci_gart_disable(rdev);
+               }
+               return 0;
+       }
+#endif
+       if (rdev->flags & RADEON_IS_PCIE) {
+               rdev->asic->gart_disable = &rv370_pcie_gart_disable;
+               rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
+               rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
+               return rv370_pcie_gart_enable(rdev);
+       }
+       return r100_pci_gart_enable(rdev);
+}
+
+
+/*
+ * MC
+ */
+int r300_mc_init(struct radeon_device *rdev)
+{
+       int r;
+
+       if (r100_debugfs_rbbm_init(rdev)) {
+               DRM_ERROR("Failed to register debugfs file for RBBM !\n");
+       }
+
+       r300_gpu_init(rdev);
+       r100_pci_gart_disable(rdev);
+       if (rdev->flags & RADEON_IS_PCIE) {
+               rv370_pcie_gart_disable(rdev);
+       }
+
+       /* Setup GPU memory space */
+       rdev->mc.vram_location = 0xFFFFFFFFUL;
+       rdev->mc.gtt_location = 0xFFFFFFFFUL;
+       if (rdev->flags & RADEON_IS_AGP) {
+               r = radeon_agp_init(rdev);
+               if (r) {
+                       printk(KERN_WARNING "[drm] Disabling AGP\n");
+                       rdev->flags &= ~RADEON_IS_AGP;
+                       rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
+               } else {
+                       rdev->mc.gtt_location = rdev->mc.agp_base;
+               }
+       }
+       r = radeon_mc_setup(rdev);
+       if (r) {
+               return r;
+       }
+
+       /* Program GPU memory space */
+       r100_mc_disable_clients(rdev);
+       if (r300_mc_wait_for_idle(rdev)) {
+               printk(KERN_WARNING "Failed to wait MC idle while "
+                      "programming pipes. Bad things might happen.\n");
+       }
+       r100_mc_setup(rdev);
+       return 0;
+}
+
+void r300_mc_fini(struct radeon_device *rdev)
+{
+       if (rdev->flags & RADEON_IS_PCIE) {
+               rv370_pcie_gart_disable(rdev);
+               radeon_gart_table_vram_free(rdev);
+       } else {
+               r100_pci_gart_disable(rdev);
+               radeon_gart_table_ram_free(rdev);
+       }
+       radeon_gart_fini(rdev);
+}
+
+
+/*
+ * Fence emission
+ */
+void r300_fence_ring_emit(struct radeon_device *rdev,
+                         struct radeon_fence *fence)
+{
+       /* Whoever calls radeon_fence_emit should call ring_lock and ask
+        * for enough space (today the callers are ib scheduling and buffer moves) */
+       /* Write SC register so SC & US assert idle */
+       radeon_ring_write(rdev, PACKET0(0x43E0, 0));
+       radeon_ring_write(rdev, 0);
+       radeon_ring_write(rdev, PACKET0(0x43E4, 0));
+       radeon_ring_write(rdev, 0);
+       /* Flush 3D cache */
+       radeon_ring_write(rdev, PACKET0(0x4E4C, 0));
+       radeon_ring_write(rdev, (2 << 0));
+       radeon_ring_write(rdev, PACKET0(0x4F18, 0));
+       radeon_ring_write(rdev, (1 << 0));
+       /* Wait until IDLE & CLEAN */
+       radeon_ring_write(rdev, PACKET0(0x1720, 0));
+       radeon_ring_write(rdev, (1 << 17) | (1 << 16)  | (1 << 9));
+       /* Emit fence sequence & fire IRQ */
+       radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
+       radeon_ring_write(rdev, fence->seq);
+       radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
+       radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
+}
+
+
+/*
+ * Global GPU functions
+ */
+int r300_copy_dma(struct radeon_device *rdev,
+                 uint64_t src_offset,
+                 uint64_t dst_offset,
+                 unsigned num_pages,
+                 struct radeon_fence *fence)
+{
+       uint32_t size;
+       uint32_t cur_size;
+       int i, num_loops;
+       int r = 0;
+
+       /* radeon pitch is /64 */
+       size = num_pages << PAGE_SHIFT;
+       num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
+       r = radeon_ring_lock(rdev, num_loops * 4 + 64);
+       if (r) {
+               DRM_ERROR("radeon: moving bo (%d).\n", r);
+               return r;
+       }
+       /* Must wait for 2D idle & clean before DMA or hangs might happen */
+       radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
+       radeon_ring_write(rdev, (1 << 16));
+       for (i = 0; i < num_loops; i++) {
+               cur_size = size;
+               if (cur_size > 0x1FFFFF) {
+                       cur_size = 0x1FFFFF;
+               }
+               size -= cur_size;
+               radeon_ring_write(rdev, PACKET0(0x720, 2));
+               radeon_ring_write(rdev, src_offset);
+               radeon_ring_write(rdev, dst_offset);
+               radeon_ring_write(rdev, cur_size | (1 << 31) | (1 << 30));
+               src_offset += cur_size;
+               dst_offset += cur_size;
+       }
+       radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
+       radeon_ring_write(rdev, RADEON_WAIT_DMA_GUI_IDLE);
+       if (fence) {
+               r = radeon_fence_emit(rdev, fence);
+       }
+       radeon_ring_unlock_commit(rdev);
+       return r;
+}
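+
+/* The DMA copy above is chunked at 0x1FFFFF bytes per blit packet, so moving
+ * a 16 MB buffer (4096 pages), for example, takes
+ * DIV_ROUND_UP(16777216, 0x1FFFFF) = 9 packets, which is why the ring lock
+ * reserves num_loops * 4 dwords plus some slack.
+ */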
+
+void r300_ring_start(struct radeon_device *rdev)
+{
+       unsigned gb_tile_config;
+       int r;
+
+       /* Sub-pixel accuracy of 1/12 so we can have 4K rendering, according to the docs */
+       gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
+       switch (rdev->num_gb_pipes) {
+       case 2:
+               gb_tile_config |= R300_PIPE_COUNT_R300;
+               break;
+       case 3:
+               gb_tile_config |= R300_PIPE_COUNT_R420_3P;
+               break;
+       case 4:
+               gb_tile_config |= R300_PIPE_COUNT_R420;
+               break;
+       case 1:
+       default:
+               gb_tile_config |= R300_PIPE_COUNT_RV350;
+               break;
+       }
+
+       r = radeon_ring_lock(rdev, 64);
+       if (r) {
+               return;
+       }
+       radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
+       radeon_ring_write(rdev,
+                         RADEON_ISYNC_ANY2D_IDLE3D |
+                         RADEON_ISYNC_ANY3D_IDLE2D |
+                         RADEON_ISYNC_WAIT_IDLEGUI |
+                         RADEON_ISYNC_CPSCRATCH_IDLEGUI);
+       radeon_ring_write(rdev, PACKET0(R300_GB_TILE_CONFIG, 0));
+       radeon_ring_write(rdev, gb_tile_config);
+       radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
+       radeon_ring_write(rdev,
+                         RADEON_WAIT_2D_IDLECLEAN |
+                         RADEON_WAIT_3D_IDLECLEAN);
+       radeon_ring_write(rdev, PACKET0(0x170C, 0));
+       radeon_ring_write(rdev, 1 << 31);
+       radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0));
+       radeon_ring_write(rdev, 0);
+       radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0));
+       radeon_ring_write(rdev, 0);
+       radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+       radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
+       radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
+       radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
+       radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
+       radeon_ring_write(rdev,
+                         RADEON_WAIT_2D_IDLECLEAN |
+                         RADEON_WAIT_3D_IDLECLEAN);
+       radeon_ring_write(rdev, PACKET0(R300_GB_AA_CONFIG, 0));
+       radeon_ring_write(rdev, 0);
+       radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+       radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
+       radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
+       radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
+       radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS0, 0));
+       radeon_ring_write(rdev,
+                         ((6 << R300_MS_X0_SHIFT) |
+                          (6 << R300_MS_Y0_SHIFT) |
+                          (6 << R300_MS_X1_SHIFT) |
+                          (6 << R300_MS_Y1_SHIFT) |
+                          (6 << R300_MS_X2_SHIFT) |
+                          (6 << R300_MS_Y2_SHIFT) |
+                          (6 << R300_MSBD0_Y_SHIFT) |
+                          (6 << R300_MSBD0_X_SHIFT)));
+       radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS1, 0));
+       radeon_ring_write(rdev,
+                         ((6 << R300_MS_X3_SHIFT) |
+                          (6 << R300_MS_Y3_SHIFT) |
+                          (6 << R300_MS_X4_SHIFT) |
+                          (6 << R300_MS_Y4_SHIFT) |
+                          (6 << R300_MS_X5_SHIFT) |
+                          (6 << R300_MS_Y5_SHIFT) |
+                          (6 << R300_MSBD1_SHIFT)));
+       radeon_ring_write(rdev, PACKET0(R300_GA_ENHANCE, 0));
+       radeon_ring_write(rdev, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
+       radeon_ring_write(rdev, PACKET0(R300_GA_POLY_MODE, 0));
+       radeon_ring_write(rdev,
+                         R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE);
+       radeon_ring_write(rdev, PACKET0(R300_GA_ROUND_MODE, 0));
+       radeon_ring_write(rdev,
+                         R300_GEOMETRY_ROUND_NEAREST |
+                         R300_COLOR_ROUND_NEAREST);
+       radeon_ring_unlock_commit(rdev);
+}
+
+void r300_errata(struct radeon_device *rdev)
+{
+       rdev->pll_errata = 0;
+
+       if (rdev->family == CHIP_R300 &&
+           (RREG32(RADEON_CONFIG_CNTL) & RADEON_CFG_ATI_REV_ID_MASK) == RADEON_CFG_ATI_REV_A11) {
+               rdev->pll_errata |= CHIP_ERRATA_R300_CG;
+       }
+}
+
+int r300_mc_wait_for_idle(struct radeon_device *rdev)
+{
+       unsigned i;
+       uint32_t tmp;
+
+       for (i = 0; i < rdev->usec_timeout; i++) {
+               /* read MC_STATUS */
+               tmp = RREG32(0x0150);
+               if (tmp & (1 << 4)) {
+                       return 0;
+               }
+               DRM_UDELAY(1);
+       }
+       return -1;
+}
+
+void r300_gpu_init(struct radeon_device *rdev)
+{
+       uint32_t gb_tile_config, tmp;
+
+       r100_hdp_reset(rdev);
+       /* FIXME: does rv380 have only one pipe? */
+       if ((rdev->family == CHIP_R300) || (rdev->family == CHIP_R350)) {
+               /* r300,r350 */
+               rdev->num_gb_pipes = 2;
+       } else {
+               /* rv350,rv370,rv380 */
+               rdev->num_gb_pipes = 1;
+       }
+       gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
+       switch (rdev->num_gb_pipes) {
+       case 2:
+               gb_tile_config |= R300_PIPE_COUNT_R300;
+               break;
+       case 3:
+               gb_tile_config |= R300_PIPE_COUNT_R420_3P;
+               break;
+       case 4:
+               gb_tile_config |= R300_PIPE_COUNT_R420;
+               break;
+       case 1:
+       default:
+               gb_tile_config |= R300_PIPE_COUNT_RV350;
+               break;
+       }
+       WREG32(R300_GB_TILE_CONFIG, gb_tile_config);
+
+       if (r100_gui_wait_for_idle(rdev)) {
+               printk(KERN_WARNING "Failed to wait GUI idle while "
+                      "programming pipes. Bad things might happen.\n");
+       }
+
+       tmp = RREG32(0x170C);
+       WREG32(0x170C, tmp | (1 << 31));
+
+       WREG32(R300_RB2D_DSTCACHE_MODE,
+              R300_DC_AUTOFLUSH_ENABLE |
+              R300_DC_DC_DISABLE_IGNORE_PE);
+
+       if (r100_gui_wait_for_idle(rdev)) {
+               printk(KERN_WARNING "Failed to wait GUI idle while "
+                      "programming pipes. Bad things might happen.\n");
+       }
+       if (r300_mc_wait_for_idle(rdev)) {
+               printk(KERN_WARNING "Failed to wait MC idle while "
+                      "programming pipes. Bad things might happen.\n");
+       }
+       DRM_INFO("radeon: %d pipes initialized.\n", rdev->num_gb_pipes);
+}
+
+int r300_ga_reset(struct radeon_device *rdev)
+{
+       uint32_t tmp;
+       bool reinit_cp;
+       int i;
+
+       reinit_cp = rdev->cp.ready;
+       rdev->cp.ready = false;
+       for (i = 0; i < rdev->usec_timeout; i++) {
+               WREG32(RADEON_CP_CSQ_MODE, 0);
+               WREG32(RADEON_CP_CSQ_CNTL, 0);
+               WREG32(RADEON_RBBM_SOFT_RESET, 0x32005);
+               (void)RREG32(RADEON_RBBM_SOFT_RESET);
+               udelay(200);
+               WREG32(RADEON_RBBM_SOFT_RESET, 0);
+               /* Wait to prevent race in RBBM_STATUS */
+               mdelay(1);
+               tmp = RREG32(RADEON_RBBM_STATUS);
+               if (tmp & ((1 << 20) | (1 << 26))) {
+                       DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)", tmp);
+                       /* GA still busy, soft reset it */
+                       WREG32(0x429C, 0x200);
+                       WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0);
+                       WREG32(0x43E0, 0);
+                       WREG32(0x43E4, 0);
+                       WREG32(0x24AC, 0);
+               }
+               /* Wait to prevent race in RBBM_STATUS */
+               mdelay(1);
+               tmp = RREG32(RADEON_RBBM_STATUS);
+               if (!(tmp & ((1 << 20) | (1 << 26)))) {
+                       break;
+               }
+       }
+       for (i = 0; i < rdev->usec_timeout; i++) {
+               tmp = RREG32(RADEON_RBBM_STATUS);
+               if (!(tmp & ((1 << 20) | (1 << 26)))) {
+                       DRM_INFO("GA reset succeeded (RBBM_STATUS=0x%08X)\n",
+                                tmp);
+                       if (reinit_cp) {
+                               return r100_cp_init(rdev, rdev->cp.ring_size);
+                       }
+                       return 0;
+               }
+               DRM_UDELAY(1);
+       }
+       tmp = RREG32(RADEON_RBBM_STATUS);
+       DRM_ERROR("Failed to reset GA (RBBM_STATUS=0x%08X)!\n", tmp);
+       return -1;
+}
+
+int r300_gpu_reset(struct radeon_device *rdev)
+{
+       uint32_t status;
+
+       /* reset order likely matter */
+       status = RREG32(RADEON_RBBM_STATUS);
+       /* reset HDP */
+       r100_hdp_reset(rdev);
+       /* reset rb2d */
+       if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
+               r100_rb2d_reset(rdev);
+       }
+       /* reset GA */
+       if (status & ((1 << 20) | (1 << 26))) {
+               r300_ga_reset(rdev);
+       }
+       /* reset CP */
+       status = RREG32(RADEON_RBBM_STATUS);
+       if (status & (1 << 16)) {
+               r100_cp_reset(rdev);
+       }
+       /* Check if GPU is idle */
+       status = RREG32(RADEON_RBBM_STATUS);
+       if (status & (1 << 31)) {
+               DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
+               return -1;
+       }
+       DRM_INFO("GPU reset succeeded (RBBM_STATUS=0x%08X)\n", status);
+       return 0;
+}
+
+
+/*
+ * r300,r350,rv350,rv380 VRAM info
+ */
+void r300_vram_info(struct radeon_device *rdev)
+{
+       uint32_t tmp;
+
+       /* DDR for all cards after R300 & IGP */
+       rdev->mc.vram_is_ddr = true;
+       tmp = RREG32(RADEON_MEM_CNTL);
+       if (tmp & R300_MEM_NUM_CHANNELS_MASK) {
+               rdev->mc.vram_width = 128;
+       } else {
+               rdev->mc.vram_width = 64;
+       }
+       rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
+
+       rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
+       rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+}
+
+
+/*
+ * Indirect registers accessor
+ */
+uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+       uint32_t r;
+
+       WREG8(RADEON_PCIE_INDEX, ((reg) & 0xff));
+       (void)RREG32(RADEON_PCIE_INDEX);
+       r = RREG32(RADEON_PCIE_DATA);
+       return r;
+}
+
+void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+       WREG8(RADEON_PCIE_INDEX, ((reg) & 0xff));
+       (void)RREG32(RADEON_PCIE_INDEX);
+       WREG32(RADEON_PCIE_DATA, (v));
+       (void)RREG32(RADEON_PCIE_DATA);
+}
+
+/*
+ * PCIE Lanes
+ */
+
+void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
+{
+       uint32_t link_width_cntl, mask;
+
+       if (rdev->flags & RADEON_IS_IGP)
+               return;
+
+       if (!(rdev->flags & RADEON_IS_PCIE))
+               return;
+
+       /* FIXME wait for idle */
+
+       switch (lanes) {
+       case 0:
+               mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
+               break;
+       case 1:
+               mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
+               break;
+       case 2:
+               mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
+               break;
+       case 4:
+               mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
+               break;
+       case 8:
+               mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
+               break;
+       case 12:
+               mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
+               break;
+       case 16:
+       default:
+               mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
+               break;
+       }
+
+       link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+
+       if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
+           (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
+               return;
+
+       link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
+                            RADEON_PCIE_LC_RECONFIG_NOW |
+                            RADEON_PCIE_LC_RECONFIG_LATER |
+                            RADEON_PCIE_LC_SHORT_RECONFIG_EN);
+       link_width_cntl |= mask;
+       WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+       WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
+                                                    RADEON_PCIE_LC_RECONFIG_NOW));
+
+       /* wait for lane set to complete */
+       link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+       while (link_width_cntl == 0xffffffff)
+               link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+
+}
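+
+/*
+ * Illustrative use of the helper above (hypothetical caller): something
+ * like rv370_set_pcie_lanes(rdev, 4) would request an x4 link.  IGP and
+ * non-PCIE parts return early, and a request that matches the width
+ * already read back from RADEON_PCIE_LC_LINK_WIDTH_CNTL is a no-op.
+ */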
+
+
+/*
+ * Debugfs info
+ */
+#if defined(CONFIG_DEBUG_FS)
+static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       uint32_t tmp;
+
+       tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
+       seq_printf(m, "PCIE_TX_GART_CNTL 0x%08x\n", tmp);
+       tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_BASE);
+       seq_printf(m, "PCIE_TX_GART_BASE 0x%08x\n", tmp);
+       tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_LO);
+       seq_printf(m, "PCIE_TX_GART_START_LO 0x%08x\n", tmp);
+       tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_HI);
+       seq_printf(m, "PCIE_TX_GART_START_HI 0x%08x\n", tmp);
+       tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_LO);
+       seq_printf(m, "PCIE_TX_GART_END_LO 0x%08x\n", tmp);
+       tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_HI);
+       seq_printf(m, "PCIE_TX_GART_END_HI 0x%08x\n", tmp);
+       tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_ERROR);
+       seq_printf(m, "PCIE_TX_GART_ERROR 0x%08x\n", tmp);
+       return 0;
+}
+
+static struct drm_info_list rv370_pcie_gart_info_list[] = {
+       {"rv370_pcie_gart_info", rv370_debugfs_pcie_gart_info, 0, NULL},
+};
+#endif
+
+int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+       return radeon_debugfs_add_files(rdev, rv370_pcie_gart_info_list, 1);
+#else
+       return 0;
+#endif
+}
+
+
+/*
+ * CS functions
+ */
+struct r300_cs_track_cb {
+       struct radeon_object    *robj;
+       unsigned                pitch;
+       unsigned                cpp;
+       unsigned                offset;
+};
+
+struct r300_cs_track {
+       unsigned                num_cb;
+       unsigned                maxy;
+       struct r300_cs_track_cb cb[4];
+       struct r300_cs_track_cb zb;
+       bool                    z_enabled;
+};
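+
+/*
+ * The two structures above hold the state the CS checker cares about:
+ * which buffer object backs each color buffer and the depth buffer, its
+ * pitch, bytes per pixel and offset, plus the maximum scissor Y, so that
+ * buffer sizes can be validated before a draw packet is accepted.
+ */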
+
+int r300_cs_track_check(struct radeon_device *rdev, struct r300_cs_track *track)
+{
+       unsigned i;
+       unsigned long size;
+
+       for (i = 0; i < track->num_cb; i++) {
+               if (track->cb[i].robj == NULL) {
+                       DRM_ERROR("[drm] No buffer for color buffer %d!\n", i);
+                       return -EINVAL;
+               }
+               size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
+               size += track->cb[i].offset;
+               if (size > radeon_object_size(track->cb[i].robj)) {
+                       DRM_ERROR("[drm] Buffer too small for color buffer %d "
+                                 "(need %lu, have %lu)!\n", i, size,
+                                 radeon_object_size(track->cb[i].robj));
+                       DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",
+                                 i, track->cb[i].pitch, track->cb[i].cpp,
+                                 track->cb[i].offset, track->maxy);
+                       return -EINVAL;
+               }
+       }
+       if (track->z_enabled) {
+               if (track->zb.robj == NULL) {
+                       DRM_ERROR("[drm] No buffer for z buffer!\n");
+                       return -EINVAL;
+               }
+               size = track->zb.pitch * track->zb.cpp * track->maxy;
+               size += track->zb.offset;
+               if (size > radeon_object_size(track->zb.robj)) {
+                       DRM_ERROR("[drm] Buffer too small for z buffer "
+                                 "(need %lu, have %lu)!\n", size,
+                                 radeon_object_size(track->zb.robj));
+                       return -EINVAL;
+               }
+       }
+       return 0;
+}
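+
+/*
+ * Worked example for the check above (illustrative numbers only): a color
+ * buffer tracked with pitch 1024, cpp 4 and maxy 768 needs an object of at
+ * least 1024 * 4 * 768 + offset = 3145728 + offset bytes; anything smaller
+ * makes the parser reject the command stream with -EINVAL.
+ */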
+
+static inline void r300_cs_track_clear(struct r300_cs_track *track)
+{
+       unsigned i;
+
+       track->num_cb = 4;
+       track->maxy = 4096;
+       for (i = 0; i < track->num_cb; i++) {
+               track->cb[i].robj = NULL;
+               track->cb[i].pitch = 8192;
+               track->cb[i].cpp = 16;
+               track->cb[i].offset = 0;
+       }
+       track->z_enabled = true;
+       track->zb.robj = NULL;
+       track->zb.pitch = 8192;
+       track->zb.cpp = 4;
+       track->zb.offset = 0;
+}
+
+static unsigned r300_auth_reg[] = {
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFBF, 0xFFFFFFFF, 0xFFFFFFBF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0x17FF1FFF, 0xFFFFFFFC, 0xFFFFFFFF, 0xFF30FFBF,
+       0xFFFFFFF8, 0xC3E6FFFF, 0xFFFFF6DF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF03F,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFCFCC, 0xF00E9FFF, 0x007C0000,
+       0xF0000078, 0xFF000009, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFF7FF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+       0xFFFFFC78, 0xFFFFFFFF, 0xFFFFFFFC, 0xFFFFFFFF,
+       0x38FF8F50, 0xFFF88082, 0xF000000C, 0xFAE009FF,
+       0x00000000, 0x00000000, 0xFFFF0000, 0x00000000,
+       0x00000000, 0x0000C100, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFF80FFFF,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x0003FC01, 0xFFFFFFF8, 0xFE800B19,
+};
+
+static int r300_packet0_check(struct radeon_cs_parser *p,
+               struct radeon_cs_packet *pkt,
+               unsigned idx, unsigned reg)
+{
+       struct radeon_cs_chunk *ib_chunk;
+       struct radeon_cs_reloc *reloc;
+       struct r300_cs_track *track;
+       volatile uint32_t *ib;
+       uint32_t tmp;
+       unsigned i;
+       int r;
+
+       ib = p->ib->ptr;
+       ib_chunk = &p->chunks[p->chunk_ib_idx];
+       track = (struct r300_cs_track *)p->track;
+       switch (reg) {
+       case RADEON_DST_PITCH_OFFSET:
+       case RADEON_SRC_PITCH_OFFSET:
+               r = r100_cs_packet_next_reloc(p, &reloc);
+               if (r) {
+                       DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+                                       idx, reg);
+                       r100_cs_dump_packet(p, pkt);
+                       return r;
+               }
+               tmp = ib_chunk->kdata[idx] & 0x003fffff;
+               tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
+               ib[idx] = (ib_chunk->kdata[idx] & 0xffc00000) | tmp;
+               break;
+       case R300_RB3D_COLOROFFSET0:
+       case R300_RB3D_COLOROFFSET1:
+       case R300_RB3D_COLOROFFSET2:
+       case R300_RB3D_COLOROFFSET3:
+               i = (reg - R300_RB3D_COLOROFFSET0) >> 2;
+               r = r100_cs_packet_next_reloc(p, &reloc);
+               if (r) {
+                       DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+                                       idx, reg);
+                       r100_cs_dump_packet(p, pkt);
+                       return r;
+               }
+               track->cb[i].robj = reloc->robj;
+               track->cb[i].offset = ib_chunk->kdata[idx];
+               ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+               break;
+       case R300_ZB_DEPTHOFFSET:
+               r = r100_cs_packet_next_reloc(p, &reloc);
+               if (r) {
+                       DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+                                       idx, reg);
+                       r100_cs_dump_packet(p, pkt);
+                       return r;
+               }
+               track->zb.robj = reloc->robj;
+               track->zb.offset = ib_chunk->kdata[idx];
+               ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+               break;
+       case R300_TX_OFFSET_0:
+       case R300_TX_OFFSET_0+4:
+       case R300_TX_OFFSET_0+8:
+       case R300_TX_OFFSET_0+12:
+       case R300_TX_OFFSET_0+16:
+       case R300_TX_OFFSET_0+20:
+       case R300_TX_OFFSET_0+24:
+       case R300_TX_OFFSET_0+28:
+       case R300_TX_OFFSET_0+32:
+       case R300_TX_OFFSET_0+36:
+       case R300_TX_OFFSET_0+40:
+       case R300_TX_OFFSET_0+44:
+       case R300_TX_OFFSET_0+48:
+       case R300_TX_OFFSET_0+52:
+       case R300_TX_OFFSET_0+56:
+       case R300_TX_OFFSET_0+60:
+               r = r100_cs_packet_next_reloc(p, &reloc);
+               if (r) {
+                       DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+                                       idx, reg);
+                       r100_cs_dump_packet(p, pkt);
+                       return r;
+               }
+               ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+               break;
+       /* Tracked registers */
+       case 0x43E4:
+               /* SC_SCISSOR1 */
+
+               track->maxy = ((ib_chunk->kdata[idx] >> 13) & 0x1FFF) + 1;
+               if (p->rdev->family < CHIP_RV515) {
+                       track->maxy -= 1440;
+               }
+               break;
+       case 0x4E00:
+               /* RB3D_CCTL */
+               track->num_cb = ((ib_chunk->kdata[idx] >> 5) & 0x3) + 1;
+               break;
+       case 0x4E38:
+       case 0x4E3C:
+       case 0x4E40:
+       case 0x4E44:
+               /* RB3D_COLORPITCH0 */
+               /* RB3D_COLORPITCH1 */
+               /* RB3D_COLORPITCH2 */
+               /* RB3D_COLORPITCH3 */
+               i = (reg - 0x4E38) >> 2;
+               track->cb[i].pitch = ib_chunk->kdata[idx] & 0x3FFE;
+               switch (((ib_chunk->kdata[idx] >> 21) & 0xF)) {
+               case 9:
+               case 11:
+               case 12:
+                       track->cb[i].cpp = 1;
+                       break;
+               case 3:
+               case 4:
+               case 13:
+               case 15:
+                       track->cb[i].cpp = 2;
+                       break;
+               case 6:
+                       track->cb[i].cpp = 4;
+                       break;
+               case 10:
+                       track->cb[i].cpp = 8;
+                       break;
+               case 7:
+                       track->cb[i].cpp = 16;
+                       break;
+               default:
+                       DRM_ERROR("Invalid color buffer format (%d)!\n",
+                                 ((ib_chunk->kdata[idx] >> 21) & 0xF));
+                       return -EINVAL;
+               }
+               break;
+       case 0x4F00:
+               /* ZB_CNTL */
+               if (ib_chunk->kdata[idx] & 2) {
+                       track->z_enabled = true;
+               } else {
+                       track->z_enabled = false;
+               }
+               break;
+       case 0x4F10:
+               /* ZB_FORMAT */
+               switch ((ib_chunk->kdata[idx] & 0xF)) {
+               case 0:
+               case 1:
+                       track->zb.cpp = 2;
+                       break;
+               case 2:
+                       track->zb.cpp = 4;
+                       break;
+               default:
+                       DRM_ERROR("Invalid z buffer format (%d)!\n",
+                                 (ib_chunk->kdata[idx] & 0xF));
+                       return -EINVAL;
+               }
+               break;
+       case 0x4F24:
+               /* ZB_DEPTHPITCH */
+               track->zb.pitch = ib_chunk->kdata[idx] & 0x3FFC;
+               break;
+       default:
+               printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", reg, idx);
+               return -EINVAL;
+       }
+       return 0;
+}
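+
+/*
+ * To summarize the switch above: writes that carry GPU addresses (the
+ * pitch/offset pairs, color and depth offsets, texture offsets) have the
+ * relocation's gpu_offset folded into the value emitted to the IB, while
+ * the pitch, format, scissor and CCTL writes only update the track state;
+ * anything else is a forbidden register and fails the parse.
+ */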
+
+static int r300_packet3_check(struct radeon_cs_parser *p,
+                             struct radeon_cs_packet *pkt)
+{
+       struct radeon_cs_chunk *ib_chunk;
+       struct radeon_cs_reloc *reloc;
+       struct r300_cs_track *track;
+       volatile uint32_t *ib;
+       unsigned idx;
+       unsigned i, c;
+       int r;
+
+       ib = p->ib->ptr;
+       ib_chunk = &p->chunks[p->chunk_ib_idx];
+       idx = pkt->idx + 1;
+       track = (struct r300_cs_track *)p->track;
+       switch (pkt->opcode) {
+       case PACKET3_3D_LOAD_VBPNTR:
+               c = ib_chunk->kdata[idx++];
+               for (i = 0; i < (c - 1); i += 2, idx += 3) {
+                       r = r100_cs_packet_next_reloc(p, &reloc);
+                       if (r) {
+                               DRM_ERROR("No reloc for packet3 %d\n",
+                                         pkt->opcode);
+                               r100_cs_dump_packet(p, pkt);
+                               return r;
+                       }
+                       ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
+                       r = r100_cs_packet_next_reloc(p, &reloc);
+                       if (r) {
+                               DRM_ERROR("No reloc for packet3 %d\n",
+                                         pkt->opcode);
+                               r100_cs_dump_packet(p, pkt);
+                               return r;
+                       }
+                       ib[idx+2] = ib_chunk->kdata[idx+2] + ((u32)reloc->lobj.gpu_offset);
+               }
+               if (c & 1) {
+                       r = r100_cs_packet_next_reloc(p, &reloc);
+                       if (r) {
+                               DRM_ERROR("No reloc for packet3 %d\n",
+                                         pkt->opcode);
+                               r100_cs_dump_packet(p, pkt);
+                               return r;
+                       }
+                       ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
+               }
+               break;
+       case PACKET3_INDX_BUFFER:
+               r = r100_cs_packet_next_reloc(p, &reloc);
+               if (r) {
+                       DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
+                       r100_cs_dump_packet(p, pkt);
+                       return r;
+               }
+               ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
+               break;
+       /* Draw packet */
+       case PACKET3_3D_DRAW_VBUF:
+       case PACKET3_3D_DRAW_IMMD:
+       case PACKET3_3D_DRAW_INDX:
+       case PACKET3_3D_DRAW_VBUF_2:
+       case PACKET3_3D_DRAW_IMMD_2:
+       case PACKET3_3D_DRAW_INDX_2:
+               r = r300_cs_track_check(p->rdev, track);
+               if (r) {
+                       return r;
+               }
+               break;
+       case PACKET3_NOP:
+               break;
+       default:
+               DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
+               return -EINVAL;
+       }
+       return 0;
+}
+
+int r300_cs_parse(struct radeon_cs_parser *p)
+{
+       struct radeon_cs_packet pkt;
+       struct r300_cs_track track;
+       int r;
+
+       r300_cs_track_clear(&track);
+       p->track = &track;
+       do {
+               r = r100_cs_packet_parse(p, &pkt, p->idx);
+               if (r) {
+                       return r;
+               }
+               p->idx += pkt.count + 2;
+               switch (pkt.type) {
+               case PACKET_TYPE0:
+                       r = r100_cs_parse_packet0(p, &pkt,
+                                                 r300_auth_reg,
+                                                 ARRAY_SIZE(r300_auth_reg),
+                                                 &r300_packet0_check);
+                       break;
+               case PACKET_TYPE2:
+                       break;
+               case PACKET_TYPE3:
+                       r = r300_packet3_check(p, &pkt);
+                       break;
+               default:
+                       DRM_ERROR("Unknown packet type %d!\n", pkt.type);
+                       return -EINVAL;
+               }
+               if (r) {
+                       return r;
+               }
+       } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+       return 0;
+}
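+
+/*
+ * Summary of the parse loop above: packets are consumed until length_dw of
+ * the IB chunk is reached.  Type-0 register writes go through
+ * r100_cs_parse_packet0() with the r300_auth_reg table and the
+ * r300_packet0_check() callback, type-2 packets are ignored, and type-3
+ * packets are validated by r300_packet3_check().
+ */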
diff --git a/drivers/gpu/drm/radeon/r300_reg.h b/drivers/gpu/drm/radeon/r300_reg.h
index bdbc95f..70f4860 100644 (file)
@@ -1,30 +1,34 @@
-/**************************************************************************
-
-Copyright (C) 2004-2005 Nicolai Haehnle et al.
-
-Permission is hereby granted, free of charge, to any person obtaining a
-copy of this software and associated documentation files (the "Software"),
-to deal in the Software without restriction, including without limitation
-on the rights to use, copy, modify, merge, publish, distribute, sub
-license, and/or sell copies of the Software, and to permit persons to whom
-the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice (including the next
-paragraph) shall be included in all copies or substantial portions of the
-Software.
+/*
+ * Copyright 2005 Nicolai Haehnle et al.
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Nicolai Haehnle
+ *          Jerome Glisse
+ */
+#ifndef _R300_REG_H_
+#define _R300_REG_H_
 
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
-THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
-DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
-OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
-USE OR OTHER DEALINGS IN THE SOFTWARE.
 
-**************************************************************************/
 
-#ifndef _R300_REG_H
-#define _R300_REG_H
 
 #define R300_MC_INIT_MISC_LAT_TIMER    0x180
 #      define R300_MC_MISC__MC_CPR_INIT_LAT_SHIFT      0
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
new file mode 100644 (file)
index 0000000..dea497a
--- /dev/null
@@ -0,0 +1,223 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <linux/seq_file.h>
+#include "drmP.h"
+#include "radeon_reg.h"
+#include "radeon.h"
+
+/* r420, r423, rv410 depend on: */
+void r100_pci_gart_disable(struct radeon_device *rdev);
+void r100_hdp_reset(struct radeon_device *rdev);
+void r100_mc_setup(struct radeon_device *rdev);
+int r100_gui_wait_for_idle(struct radeon_device *rdev);
+void r100_mc_disable_clients(struct radeon_device *rdev);
+void r300_vram_info(struct radeon_device *rdev);
+int r300_mc_wait_for_idle(struct radeon_device *rdev);
+int rv370_pcie_gart_enable(struct radeon_device *rdev);
+void rv370_pcie_gart_disable(struct radeon_device *rdev);
+
+/* This file gathers functions specific to:
+ * r420, r423, rv410
+ *
+ * Some of these functions might be used by newer ASICs.
+ */
+void r420_gpu_init(struct radeon_device *rdev);
+int r420_debugfs_pipes_info_init(struct radeon_device *rdev);
+
+
+/*
+ * MC
+ */
+int r420_mc_init(struct radeon_device *rdev)
+{
+       int r;
+
+       if (r100_debugfs_rbbm_init(rdev)) {
+               DRM_ERROR("Failed to register debugfs file for RBBM!\n");
+       }
+       if (r420_debugfs_pipes_info_init(rdev)) {
+               DRM_ERROR("Failed to register debugfs file for pipes!\n");
+       }
+
+       r420_gpu_init(rdev);
+       r100_pci_gart_disable(rdev);
+       if (rdev->flags & RADEON_IS_PCIE) {
+               rv370_pcie_gart_disable(rdev);
+       }
+
+       /* Setup GPU memory space */
+       rdev->mc.vram_location = 0xFFFFFFFFUL;
+       rdev->mc.gtt_location = 0xFFFFFFFFUL;
+       if (rdev->flags & RADEON_IS_AGP) {
+               r = radeon_agp_init(rdev);
+               if (r) {
+                       printk(KERN_WARNING "[drm] Disabling AGP\n");
+                       rdev->flags &= ~RADEON_IS_AGP;
+                       rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
+               } else {
+                       rdev->mc.gtt_location = rdev->mc.agp_base;
+               }
+       }
+       r = radeon_mc_setup(rdev);
+       if (r) {
+               return r;
+       }
+
+       /* Program GPU memory space */
+       r100_mc_disable_clients(rdev);
+       if (r300_mc_wait_for_idle(rdev)) {
+               printk(KERN_WARNING "Failed to wait for MC to go idle while "
+                      "programming pipes. Bad things might happen.\n");
+       }
+       r100_mc_setup(rdev);
+       return 0;
+}
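+
+/*
+ * In short, r420_mc_init() initializes the GPU, disables the PCI (and, on
+ * PCIE parts, the PCIE) GART, places VRAM and GTT (falling back to a GART
+ * aperture sized by radeon_gart_size when AGP init fails), then disables
+ * the MC clients, waits for the MC to idle and programs the memory
+ * controller.
+ */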
+
+void r420_mc_fini(struct radeon_device *rdev)
+{
+       rv370_pcie_gart_disable(rdev);
+       radeon_gart_table_vram_free(rdev);
+       radeon_gart_fini(rdev);
+}
+
+
+/*
+ * Global GPU functions
+ */
+void r420_errata(struct radeon_device *rdev)
+{
+       rdev->pll_errata = 0;
+}
+
+void r420_pipes_init(struct radeon_device *rdev)
+{
+       unsigned tmp;
+       unsigned gb_pipe_select;
+       unsigned num_pipes;
+
+       /* GA_ENHANCE workaround for the TCL deadlock issue */
+       WREG32(0x4274, (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3));
+       /* get max number of pipes */
+       gb_pipe_select = RREG32(0x402C);
+       num_pipes = ((gb_pipe_select >> 12) & 3) + 1;
+       rdev->num_gb_pipes = num_pipes;
+       tmp = 0;
+       switch (num_pipes) {
+       default:
+               /* force to 1 pipe */
+               num_pipes = 1;
+       case 1:
+               tmp = (0 << 1);
+               break;
+       case 2:
+               tmp = (3 << 1);
+               break;
+       case 3:
+               tmp = (6 << 1);
+               break;
+       case 4:
+               tmp = (7 << 1);
+               break;
+       }
+       WREG32(0x42C8, (1 << num_pipes) - 1);
+       /* Sub-pixel 1/12 so we can have 4K rendering according to the docs */
+       tmp |= (1 << 4) | (1 << 0);
+       WREG32(0x4018, tmp);
+       if (r100_gui_wait_for_idle(rdev)) {
+               printk(KERN_WARNING "Failed to wait for GUI to go idle while "
+                      "programming pipes. Bad things might happen.\n");
+       }
+
+       tmp = RREG32(0x170C);
+       WREG32(0x170C, tmp | (1 << 31));
+
+       WREG32(R300_RB2D_DSTCACHE_MODE,
+              RREG32(R300_RB2D_DSTCACHE_MODE) |
+              R300_DC_AUTOFLUSH_ENABLE |
+              R300_DC_DC_DISABLE_IGNORE_PE);
+
+       if (r100_gui_wait_for_idle(rdev)) {
+               printk(KERN_WARNING "Failed to wait for GUI to go idle while "
+                      "programming pipes. Bad things might happen.\n");
+       }
+       DRM_INFO("radeon: %d pipes initialized.\n", rdev->num_gb_pipes);
+}
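+
+/*
+ * The raw offsets used in r420_pipes_init() appear to correspond to
+ * registers named in r500_reg.h later in this series: 0x4274 is
+ * R300_GA_ENHANCE, 0x402C is R400_GB_PIPE_SELECT, 0x42C8 is
+ * R500_SU_REG_DEST, 0x4018 is R300_GB_TILE_CONFIG and 0x170C is
+ * R300_DST_PIPE_CONFIG.
+ */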
+
+void r420_gpu_init(struct radeon_device *rdev)
+{
+       r100_hdp_reset(rdev);
+       r420_pipes_init(rdev);
+       if (r300_mc_wait_for_idle(rdev)) {
+               printk(KERN_WARNING "Failed to wait for MC to go idle while "
+                      "programming pipes. Bad things might happen.\n");
+       }
+}
+
+
+/*
+ * r420,r423,rv410 VRAM info
+ */
+void r420_vram_info(struct radeon_device *rdev)
+{
+       r300_vram_info(rdev);
+}
+
+
+/*
+ * Debugfs info
+ */
+#if defined(CONFIG_DEBUG_FS)
+static int r420_debugfs_pipes_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       uint32_t tmp;
+
+       tmp = RREG32(R400_GB_PIPE_SELECT);
+       seq_printf(m, "GB_PIPE_SELECT 0x%08x\n", tmp);
+       tmp = RREG32(R300_GB_TILE_CONFIG);
+       seq_printf(m, "GB_TILE_CONFIG 0x%08x\n", tmp);
+       tmp = RREG32(R300_DST_PIPE_CONFIG);
+       seq_printf(m, "DST_PIPE_CONFIG 0x%08x\n", tmp);
+       return 0;
+}
+
+static struct drm_info_list r420_pipes_info_list[] = {
+       {"r420_pipes_info", r420_debugfs_pipes_info, 0, NULL},
+};
+#endif
+
+int r420_debugfs_pipes_info_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+       return radeon_debugfs_add_files(rdev, r420_pipes_info_list, 1);
+#else
+       return 0;
+#endif
+}
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
new file mode 100644 (file)
index 0000000..9070a1c
--- /dev/null
@@ -0,0 +1,749 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __R500_REG_H__
+#define __R500_REG_H__
+
+/* pipe config regs */
+#define R300_GA_POLY_MODE                              0x4288
+#       define R300_FRONT_PTYPE_POINT                   (0 << 4)
+#       define R300_FRONT_PTYPE_LINE                    (1 << 4)
+#       define R300_FRONT_PTYPE_TRIANGE                 (2 << 4)
+#       define R300_BACK_PTYPE_POINT                    (0 << 7)
+#       define R300_BACK_PTYPE_LINE                     (1 << 7)
+#       define R300_BACK_PTYPE_TRIANGE                  (2 << 7)
+#define R300_GA_ROUND_MODE                             0x428c
+#       define R300_GEOMETRY_ROUND_TRUNC                (0 << 0)
+#       define R300_GEOMETRY_ROUND_NEAREST              (1 << 0)
+#       define R300_COLOR_ROUND_TRUNC                   (0 << 2)
+#       define R300_COLOR_ROUND_NEAREST                 (1 << 2)
+#define R300_GB_MSPOS0                                 0x4010
+#       define R300_MS_X0_SHIFT                         0
+#       define R300_MS_Y0_SHIFT                         4
+#       define R300_MS_X1_SHIFT                         8
+#       define R300_MS_Y1_SHIFT                         12
+#       define R300_MS_X2_SHIFT                         16
+#       define R300_MS_Y2_SHIFT                         20
+#       define R300_MSBD0_Y_SHIFT                       24
+#       define R300_MSBD0_X_SHIFT                       28
+#define R300_GB_MSPOS1                                 0x4014
+#       define R300_MS_X3_SHIFT                         0
+#       define R300_MS_Y3_SHIFT                         4
+#       define R300_MS_X4_SHIFT                         8
+#       define R300_MS_Y4_SHIFT                         12
+#       define R300_MS_X5_SHIFT                         16
+#       define R300_MS_Y5_SHIFT                         20
+#       define R300_MSBD1_SHIFT                         24
+
+#define R300_GA_ENHANCE                                        0x4274
+#       define R300_GA_DEADLOCK_CNTL                    (1 << 0)
+#       define R300_GA_FASTSYNC_CNTL                    (1 << 1)
+#define R300_RB3D_DSTCACHE_CTLSTAT              0x4e4c
+#      define R300_RB3D_DC_FLUSH               (2 << 0)
+#      define R300_RB3D_DC_FREE                (2 << 2)
+#      define R300_RB3D_DC_FINISH              (1 << 4)
+#define R300_RB3D_ZCACHE_CTLSTAT                       0x4f18
+#       define R300_ZC_FLUSH                            (1 << 0)
+#       define R300_ZC_FREE                             (1 << 1)
+#       define R300_ZC_FLUSH_ALL                        0x3
+#define R400_GB_PIPE_SELECT             0x402c
+#define R500_DYN_SCLK_PWMEM_PIPE        0x000d /* PLL */
+#define R500_SU_REG_DEST                0x42c8
+#define R300_GB_TILE_CONFIG             0x4018
+#       define R300_ENABLE_TILING       (1 << 0)
+#       define R300_PIPE_COUNT_RV350    (0 << 1)
+#       define R300_PIPE_COUNT_R300     (3 << 1)
+#       define R300_PIPE_COUNT_R420_3P  (6 << 1)
+#       define R300_PIPE_COUNT_R420     (7 << 1)
+#       define R300_TILE_SIZE_8         (0 << 4)
+#       define R300_TILE_SIZE_16        (1 << 4)
+#       define R300_TILE_SIZE_32        (2 << 4)
+#       define R300_SUBPIXEL_1_12       (0 << 16)
+#       define R300_SUBPIXEL_1_16       (1 << 16)
+#define R300_DST_PIPE_CONFIG            0x170c
+#       define R300_PIPE_AUTO_CONFIG    (1 << 31)
+#define R300_RB2D_DSTCACHE_MODE         0x3428
+#       define R300_DC_AUTOFLUSH_ENABLE (1 << 8)
+#       define R300_DC_DC_DISABLE_IGNORE_PE (1 << 17)
+
+#define RADEON_CP_STAT         0x7C0
+#define RADEON_RBBM_CMDFIFO_ADDR       0xE70
+#define RADEON_RBBM_CMDFIFO_DATA       0xE74
+#define RADEON_ISYNC_CNTL              0x1724
+#      define RADEON_ISYNC_ANY2D_IDLE3D        (1 << 0)
+#      define RADEON_ISYNC_ANY3D_IDLE2D        (1 << 1)
+#      define RADEON_ISYNC_TRIG2D_IDLE3D       (1 << 2)
+#      define RADEON_ISYNC_TRIG3D_IDLE2D       (1 << 3)
+#      define RADEON_ISYNC_WAIT_IDLEGUI        (1 << 4)
+#      define RADEON_ISYNC_CPSCRATCH_IDLEGUI   (1 << 5)
+
+#define RS480_NB_MC_INDEX               0x168
+#      define RS480_NB_MC_IND_WR_EN    (1 << 8)
+#define RS480_NB_MC_DATA                0x16c
+
+/*
+ * RS690
+ */
+#define RS690_MCCFG_FB_LOCATION                0x100
+#define                RS690_MC_FB_START_MASK          0x0000FFFF
+#define                RS690_MC_FB_START_SHIFT         0
+#define                RS690_MC_FB_TOP_MASK            0xFFFF0000
+#define                RS690_MC_FB_TOP_SHIFT           16
+#define RS690_MCCFG_AGP_LOCATION       0x101
+#define                RS690_MC_AGP_START_MASK         0x0000FFFF
+#define                RS690_MC_AGP_START_SHIFT        0
+#define                RS690_MC_AGP_TOP_MASK           0xFFFF0000
+#define                RS690_MC_AGP_TOP_SHIFT          16
+#define RS690_MCCFG_AGP_BASE           0x102
+#define RS690_MCCFG_AGP_BASE_2         0x103
+#define RS690_MC_INIT_MISC_LAT_TIMER            0x104
+#define RS690_HDP_FB_LOCATION          0x0134
+#define RS690_MC_INDEX                         0x78
+#      define RS690_MC_INDEX_MASK              0x1ff
+#      define RS690_MC_INDEX_WR_EN             (1 << 9)
+#      define RS690_MC_INDEX_WR_ACK            0x7f
+#define RS690_MC_DATA                          0x7c
+#define RS690_MC_STATUS                         0x90
+#define RS690_MC_STATUS_IDLE                    (1 << 0)
+#define RS480_AGP_BASE_2               0x0164
+#define RS480_MC_MISC_CNTL              0x18
+#      define RS480_DISABLE_GTW        (1 << 1)
+#      define RS480_GART_INDEX_REG_EN  (1 << 12)
+#      define RS690_BLOCK_GFX_D3_EN    (1 << 14)
+#define RS480_GART_FEATURE_ID           0x2b
+#      define RS480_HANG_EN            (1 << 11)
+#      define RS480_TLB_ENABLE         (1 << 18)
+#      define RS480_P2P_ENABLE         (1 << 19)
+#      define RS480_GTW_LAC_EN         (1 << 25)
+#      define RS480_2LEVEL_GART        (0 << 30)
+#      define RS480_1LEVEL_GART        (1 << 30)
+#      define RS480_PDC_EN             (1 << 31)
+#define RS480_GART_BASE                 0x2c
+#define RS480_GART_CACHE_CNTRL          0x2e
+#      define RS480_GART_CACHE_INVALIDATE (1 << 0) /* wait for it to clear */
+#define RS480_AGP_ADDRESS_SPACE_SIZE    0x38
+#      define RS480_GART_EN            (1 << 0)
+#      define RS480_VA_SIZE_32MB       (0 << 1)
+#      define RS480_VA_SIZE_64MB       (1 << 1)
+#      define RS480_VA_SIZE_128MB      (2 << 1)
+#      define RS480_VA_SIZE_256MB      (3 << 1)
+#      define RS480_VA_SIZE_512MB      (4 << 1)
+#      define RS480_VA_SIZE_1GB        (5 << 1)
+#      define RS480_VA_SIZE_2GB        (6 << 1)
+#define RS480_AGP_MODE_CNTL             0x39
+#      define RS480_POST_GART_Q_SIZE   (1 << 18)
+#      define RS480_NONGART_SNOOP      (1 << 19)
+#      define RS480_AGP_RD_BUF_SIZE    (1 << 20)
+#      define RS480_REQ_TYPE_SNOOP_SHIFT 22
+#      define RS480_REQ_TYPE_SNOOP_MASK  0x3
+#      define RS480_REQ_TYPE_SNOOP_DIS (1 << 24)
+
+#define RS690_AIC_CTRL_SCRATCH         0x3A
+#      define RS690_DIS_OUT_OF_PCI_GART_ACCESS (1 << 1)
+
+/*
+ * RS600
+ */
+#define RS600_MC_STATUS                         0x0
+#define RS600_MC_STATUS_IDLE                    (1 << 0)
+#define RS600_MC_INDEX                          0x70
+#       define RS600_MC_ADDR_MASK               0xffff
+#       define RS600_MC_IND_SEQ_RBS_0           (1 << 16)
+#       define RS600_MC_IND_SEQ_RBS_1           (1 << 17)
+#       define RS600_MC_IND_SEQ_RBS_2           (1 << 18)
+#       define RS600_MC_IND_SEQ_RBS_3           (1 << 19)
+#       define RS600_MC_IND_AIC_RBS             (1 << 20)
+#       define RS600_MC_IND_CITF_ARB0           (1 << 21)
+#       define RS600_MC_IND_CITF_ARB1           (1 << 22)
+#       define RS600_MC_IND_WR_EN               (1 << 23)
+#define RS600_MC_DATA                           0x74
+#define RS600_MC_STATUS                         0x0
+#       define RS600_MC_IDLE                    (1 << 1)
+#define RS600_MC_FB_LOCATION                    0x4
+#define                RS600_MC_FB_START_MASK          0x0000FFFF
+#define                RS600_MC_FB_START_SHIFT         0
+#define                RS600_MC_FB_TOP_MASK            0xFFFF0000
+#define                RS600_MC_FB_TOP_SHIFT           16
+#define RS600_MC_AGP_LOCATION                   0x5
+#define                RS600_MC_AGP_START_MASK         0x0000FFFF
+#define                RS600_MC_AGP_START_SHIFT        0
+#define                RS600_MC_AGP_TOP_MASK           0xFFFF0000
+#define                RS600_MC_AGP_TOP_SHIFT          16
+#define RS600_MC_AGP_BASE                          0x6
+#define RS600_MC_AGP_BASE_2                        0x7
+#define RS600_MC_CNTL1                          0x9
+#       define RS600_ENABLE_PAGE_TABLES         (1 << 26)
+#define RS600_MC_PT0_CNTL                       0x100
+#       define RS600_ENABLE_PT                  (1 << 0)
+#       define RS600_EFFECTIVE_L2_CACHE_SIZE(x) ((x) << 15)
+#       define RS600_EFFECTIVE_L2_QUEUE_SIZE(x) ((x) << 21)
+#       define RS600_INVALIDATE_ALL_L1_TLBS     (1 << 28)
+#       define RS600_INVALIDATE_L2_CACHE        (1 << 29)
+#define RS600_MC_PT0_CONTEXT0_CNTL              0x102
+#       define RS600_ENABLE_PAGE_TABLE          (1 << 0)
+#       define RS600_PAGE_TABLE_TYPE_FLAT       (0 << 1)
+#define RS600_MC_PT0_SYSTEM_APERTURE_LOW_ADDR   0x112
+#define RS600_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR  0x114
+#define RS600_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR 0x11c
+#define RS600_MC_PT0_CONTEXT0_FLAT_BASE_ADDR    0x12c
+#define RS600_MC_PT0_CONTEXT0_FLAT_START_ADDR   0x13c
+#define RS600_MC_PT0_CONTEXT0_FLAT_END_ADDR     0x14c
+#define RS600_MC_PT0_CLIENT0_CNTL               0x16c
+#       define RS600_ENABLE_TRANSLATION_MODE_OVERRIDE       (1 << 0)
+#       define RS600_TRANSLATION_MODE_OVERRIDE              (1 << 1)
+#       define RS600_SYSTEM_ACCESS_MODE_MASK                (3 << 8)
+#       define RS600_SYSTEM_ACCESS_MODE_PA_ONLY             (0 << 8)
+#       define RS600_SYSTEM_ACCESS_MODE_USE_SYS_MAP         (1 << 8)
+#       define RS600_SYSTEM_ACCESS_MODE_IN_SYS              (2 << 8)
+#       define RS600_SYSTEM_ACCESS_MODE_NOT_IN_SYS          (3 << 8)
+#       define RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_PASSTHROUGH        (0 << 10)
+#       define RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_DEFAULT_PAGE       (1 << 10)
+#       define RS600_EFFECTIVE_L1_CACHE_SIZE(x) ((x) << 11)
+#       define RS600_ENABLE_FRAGMENT_PROCESSING (1 << 14)
+#       define RS600_EFFECTIVE_L1_QUEUE_SIZE(x) ((x) << 15)
+#       define RS600_INVALIDATE_L1_TLB          (1 << 20)
+/* rs600/rs690/rs740 */
+#      define RS600_BUS_MASTER_DIS             (1 << 14)
+#      define RS600_MSI_REARM                  (1 << 20)
+/* see RS400_MSI_REARM in AIC_CNTL for rs480 */
+
+
+
+#define RV515_MC_FB_LOCATION           0x01
+#define                RV515_MC_FB_START_MASK          0x0000FFFF
+#define                RV515_MC_FB_START_SHIFT         0
+#define                RV515_MC_FB_TOP_MASK            0xFFFF0000
+#define                RV515_MC_FB_TOP_SHIFT           16
+#define RV515_MC_AGP_LOCATION          0x02
+#define                RV515_MC_AGP_START_MASK         0x0000FFFF
+#define                RV515_MC_AGP_START_SHIFT        0
+#define                RV515_MC_AGP_TOP_MASK           0xFFFF0000
+#define                RV515_MC_AGP_TOP_SHIFT          16
+#define RV515_MC_AGP_BASE              0x03
+#define RV515_MC_AGP_BASE_2            0x04
+
+#define R520_MC_FB_LOCATION            0x04
+#define                R520_MC_FB_START_MASK           0x0000FFFF
+#define                R520_MC_FB_START_SHIFT          0
+#define                R520_MC_FB_TOP_MASK             0xFFFF0000
+#define                R520_MC_FB_TOP_SHIFT            16
+#define R520_MC_AGP_LOCATION           0x05
+#define                R520_MC_AGP_START_MASK          0x0000FFFF
+#define                R520_MC_AGP_START_SHIFT         0
+#define                R520_MC_AGP_TOP_MASK            0xFFFF0000
+#define                R520_MC_AGP_TOP_SHIFT           16
+#define R520_MC_AGP_BASE               0x06
+#define R520_MC_AGP_BASE_2             0x07
+
+
+#define AVIVO_MC_INDEX                                         0x0070
+#define R520_MC_STATUS 0x00
+#define R520_MC_STATUS_IDLE (1<<1)
+#define RV515_MC_STATUS 0x08
+#define RV515_MC_STATUS_IDLE (1<<4)
+#define RV515_MC_INIT_MISC_LAT_TIMER            0x09
+#define AVIVO_MC_DATA                                          0x0074
+
+#define R520_MC_IND_INDEX 0x70
+#define R520_MC_IND_WR_EN (1 << 24)
+#define R520_MC_IND_DATA  0x74
+
+#define RV515_MC_CNTL          0x5
+#      define RV515_MEM_NUM_CHANNELS_MASK  0x3
+#define R520_MC_CNTL0          0x8
+#      define R520_MEM_NUM_CHANNELS_MASK  (0x3 << 24)
+#      define R520_MEM_NUM_CHANNELS_SHIFT  24
+#      define R520_MC_CHANNEL_SIZE  (1 << 23)
+
+#define AVIVO_CP_DYN_CNTL                              0x000f /* PLL */
+#       define AVIVO_CP_FORCEON                        (1 << 0)
+#define AVIVO_E2_DYN_CNTL                              0x0011 /* PLL */
+#       define AVIVO_E2_FORCEON                        (1 << 0)
+#define AVIVO_IDCT_DYN_CNTL                            0x0013 /* PLL */
+#       define AVIVO_IDCT_FORCEON                      (1 << 0)
+
+#define AVIVO_HDP_FB_LOCATION 0x134
+
+#define AVIVO_VGA_RENDER_CONTROL                               0x0300
+#       define AVIVO_VGA_VSTATUS_CNTL_MASK                      (3 << 16)
+#define AVIVO_D1VGA_CONTROL                                    0x0330
+#       define AVIVO_DVGA_CONTROL_MODE_ENABLE (1<<0)
+#       define AVIVO_DVGA_CONTROL_TIMING_SELECT (1<<8)
+#       define AVIVO_DVGA_CONTROL_SYNC_POLARITY_SELECT (1<<9)
+#       define AVIVO_DVGA_CONTROL_OVERSCAN_TIMING_SELECT (1<<10)
+#       define AVIVO_DVGA_CONTROL_OVERSCAN_COLOR_EN (1<<16)
+#       define AVIVO_DVGA_CONTROL_ROTATE (1<<24)
+#define AVIVO_D2VGA_CONTROL                                    0x0338
+
+#define AVIVO_EXT1_PPLL_REF_DIV_SRC                             0x400
+#define AVIVO_EXT1_PPLL_REF_DIV                                 0x404
+#define AVIVO_EXT1_PPLL_UPDATE_LOCK                             0x408
+#define AVIVO_EXT1_PPLL_UPDATE_CNTL                             0x40c
+
+#define AVIVO_EXT2_PPLL_REF_DIV_SRC                             0x410
+#define AVIVO_EXT2_PPLL_REF_DIV                                 0x414
+#define AVIVO_EXT2_PPLL_UPDATE_LOCK                             0x418
+#define AVIVO_EXT2_PPLL_UPDATE_CNTL                             0x41c
+
+#define AVIVO_EXT1_PPLL_FB_DIV                                   0x430
+#define AVIVO_EXT2_PPLL_FB_DIV                                   0x434
+
+#define AVIVO_EXT1_PPLL_POST_DIV_SRC                                 0x438
+#define AVIVO_EXT1_PPLL_POST_DIV                                     0x43c
+
+#define AVIVO_EXT2_PPLL_POST_DIV_SRC                                 0x440
+#define AVIVO_EXT2_PPLL_POST_DIV                                     0x444
+
+#define AVIVO_EXT1_PPLL_CNTL                                    0x448
+#define AVIVO_EXT2_PPLL_CNTL                                    0x44c
+
+#define AVIVO_P1PLL_CNTL                                        0x450
+#define AVIVO_P2PLL_CNTL                                        0x454
+#define AVIVO_P1PLL_INT_SS_CNTL                                 0x458
+#define AVIVO_P2PLL_INT_SS_CNTL                                 0x45c
+#define AVIVO_P1PLL_TMDSA_CNTL                                  0x460
+#define AVIVO_P2PLL_LVTMA_CNTL                                  0x464
+
+#define AVIVO_PCLK_CRTC1_CNTL                                   0x480
+#define AVIVO_PCLK_CRTC2_CNTL                                   0x484
+
+#define AVIVO_D1CRTC_H_TOTAL                                   0x6000
+#define AVIVO_D1CRTC_H_BLANK_START_END                          0x6004
+#define AVIVO_D1CRTC_H_SYNC_A                                   0x6008
+#define AVIVO_D1CRTC_H_SYNC_A_CNTL                              0x600c
+#define AVIVO_D1CRTC_H_SYNC_B                                   0x6010
+#define AVIVO_D1CRTC_H_SYNC_B_CNTL                              0x6014
+
+#define AVIVO_D1CRTC_V_TOTAL                                   0x6020
+#define AVIVO_D1CRTC_V_BLANK_START_END                          0x6024
+#define AVIVO_D1CRTC_V_SYNC_A                                   0x6028
+#define AVIVO_D1CRTC_V_SYNC_A_CNTL                              0x602c
+#define AVIVO_D1CRTC_V_SYNC_B                                   0x6030
+#define AVIVO_D1CRTC_V_SYNC_B_CNTL                              0x6034
+
+#define AVIVO_D1CRTC_CONTROL                                    0x6080
+#       define AVIVO_CRTC_EN                                    (1 << 0)
+#define AVIVO_D1CRTC_BLANK_CONTROL                              0x6084
+#define AVIVO_D1CRTC_INTERLACE_CONTROL                          0x6088
+#define AVIVO_D1CRTC_INTERLACE_STATUS                           0x608c
+#define AVIVO_D1CRTC_STEREO_CONTROL                             0x60c4
+
+/* master controls */
+#define AVIVO_DC_CRTC_MASTER_EN                                 0x60f8
+#define AVIVO_DC_CRTC_TV_CONTROL                                0x60fc
+
+#define AVIVO_D1GRPH_ENABLE                                     0x6100
+#define AVIVO_D1GRPH_CONTROL                                    0x6104
+#       define AVIVO_D1GRPH_CONTROL_DEPTH_8BPP                  (0 << 0)
+#       define AVIVO_D1GRPH_CONTROL_DEPTH_16BPP                 (1 << 0)
+#       define AVIVO_D1GRPH_CONTROL_DEPTH_32BPP                 (2 << 0)
+#       define AVIVO_D1GRPH_CONTROL_DEPTH_64BPP                 (3 << 0)
+
+#       define AVIVO_D1GRPH_CONTROL_8BPP_INDEXED                (0 << 8)
+
+#       define AVIVO_D1GRPH_CONTROL_16BPP_ARGB1555              (0 << 8)
+#       define AVIVO_D1GRPH_CONTROL_16BPP_RGB565                (1 << 8)
+#       define AVIVO_D1GRPH_CONTROL_16BPP_ARGB4444              (2 << 8)
+#       define AVIVO_D1GRPH_CONTROL_16BPP_AI88                  (3 << 8)
+#       define AVIVO_D1GRPH_CONTROL_16BPP_MONO16                (4 << 8)
+
+#       define AVIVO_D1GRPH_CONTROL_32BPP_ARGB8888              (0 << 8)
+#       define AVIVO_D1GRPH_CONTROL_32BPP_ARGB2101010           (1 << 8)
+#       define AVIVO_D1GRPH_CONTROL_32BPP_DIGITAL               (2 << 8)
+#       define AVIVO_D1GRPH_CONTROL_32BPP_8B_ARGB2101010        (3 << 8)
+
+
+#       define AVIVO_D1GRPH_CONTROL_64BPP_ARGB16161616          (0 << 8)
+
+#       define AVIVO_D1GRPH_SWAP_RB                             (1 << 16)
+#       define AVIVO_D1GRPH_TILED                               (1 << 20)
+#       define AVIVO_D1GRPH_MACRO_ADDRESS_MODE                  (1 << 21)
+
+#define AVIVO_D1GRPH_LUT_SEL                                    0x6108
+#define AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS                    0x6110
+#define AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS                  0x6118
+#define AVIVO_D1GRPH_PITCH                                      0x6120
+#define AVIVO_D1GRPH_SURFACE_OFFSET_X                           0x6124
+#define AVIVO_D1GRPH_SURFACE_OFFSET_Y                           0x6128
+#define AVIVO_D1GRPH_X_START                                    0x612c
+#define AVIVO_D1GRPH_Y_START                                    0x6130
+#define AVIVO_D1GRPH_X_END                                      0x6134
+#define AVIVO_D1GRPH_Y_END                                      0x6138
+#define AVIVO_D1GRPH_UPDATE                                     0x6144
+#       define AVIVO_D1GRPH_UPDATE_LOCK                         (1 << 16)
+#define AVIVO_D1GRPH_FLIP_CONTROL                               0x6148
+
+#define AVIVO_D1CUR_CONTROL                     0x6400
+#       define AVIVO_D1CURSOR_EN                (1 << 0)
+#       define AVIVO_D1CURSOR_MODE_SHIFT        8
+#       define AVIVO_D1CURSOR_MODE_MASK         (3 << 8)
+#       define AVIVO_D1CURSOR_MODE_24BPP        2
+#define AVIVO_D1CUR_SURFACE_ADDRESS             0x6408
+#define AVIVO_D1CUR_SIZE                        0x6410
+#define AVIVO_D1CUR_POSITION                    0x6414
+#define AVIVO_D1CUR_HOT_SPOT                    0x6418
+#define AVIVO_D1CUR_UPDATE                      0x6424
+#       define AVIVO_D1CURSOR_UPDATE_LOCK       (1 << 16)
+
+#define AVIVO_DC_LUT_RW_SELECT                  0x6480
+#define AVIVO_DC_LUT_RW_MODE                    0x6484
+#define AVIVO_DC_LUT_RW_INDEX                   0x6488
+#define AVIVO_DC_LUT_SEQ_COLOR                  0x648c
+#define AVIVO_DC_LUT_PWL_DATA                   0x6490
+#define AVIVO_DC_LUT_30_COLOR                   0x6494
+#define AVIVO_DC_LUT_READ_PIPE_SELECT           0x6498
+#define AVIVO_DC_LUT_WRITE_EN_MASK              0x649c
+#define AVIVO_DC_LUT_AUTOFILL                   0x64a0
+
+#define AVIVO_DC_LUTA_CONTROL                   0x64c0
+#define AVIVO_DC_LUTA_BLACK_OFFSET_BLUE         0x64c4
+#define AVIVO_DC_LUTA_BLACK_OFFSET_GREEN        0x64c8
+#define AVIVO_DC_LUTA_BLACK_OFFSET_RED          0x64cc
+#define AVIVO_DC_LUTA_WHITE_OFFSET_BLUE         0x64d0
+#define AVIVO_DC_LUTA_WHITE_OFFSET_GREEN        0x64d4
+#define AVIVO_DC_LUTA_WHITE_OFFSET_RED          0x64d8
+
+#define AVIVO_DC_LB_MEMORY_SPLIT                0x6520
+#       define AVIVO_DC_LB_MEMORY_SPLIT_MASK    0x3
+#       define AVIVO_DC_LB_MEMORY_SPLIT_SHIFT   0
+#       define AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF  0
+#       define AVIVO_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q    1
+#       define AVIVO_DC_LB_MEMORY_SPLIT_D1_ONLY        2
+#       define AVIVO_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q    3
+#       define AVIVO_DC_LB_MEMORY_SPLIT_SHIFT_MODE (1 << 2)
+#       define AVIVO_DC_LB_DISP1_END_ADR_SHIFT  4
+#       define AVIVO_DC_LB_DISP1_END_ADR_MASK   0x7ff
+
+#define R500_DxMODE_INT_MASK 0x6540
+#define R500_D1MODE_INT_MASK (1<<0)
+#define R500_D2MODE_INT_MASK (1<<8)
+
+#define AVIVO_D1MODE_DATA_FORMAT                0x6528
+#       define AVIVO_D1MODE_INTERLEAVE_EN       (1 << 0)
+#define AVIVO_D1MODE_DESKTOP_HEIGHT             0x652C
+#define AVIVO_D1MODE_VIEWPORT_START             0x6580
+#define AVIVO_D1MODE_VIEWPORT_SIZE              0x6584
+#define AVIVO_D1MODE_EXT_OVERSCAN_LEFT_RIGHT    0x6588
+#define AVIVO_D1MODE_EXT_OVERSCAN_TOP_BOTTOM    0x658c
+
+#define AVIVO_D1SCL_SCALER_ENABLE               0x6590
+#define AVIVO_D1SCL_SCALER_TAP_CONTROL         0x6594
+#define AVIVO_D1SCL_UPDATE                      0x65cc
+#       define AVIVO_D1SCL_UPDATE_LOCK          (1 << 16)
+
+/* second crtc */
+#define AVIVO_D2CRTC_H_TOTAL                                   0x6800
+#define AVIVO_D2CRTC_H_BLANK_START_END                          0x6804
+#define AVIVO_D2CRTC_H_SYNC_A                                   0x6808
+#define AVIVO_D2CRTC_H_SYNC_A_CNTL                              0x680c
+#define AVIVO_D2CRTC_H_SYNC_B                                   0x6810
+#define AVIVO_D2CRTC_H_SYNC_B_CNTL                              0x6814
+
+#define AVIVO_D2CRTC_V_TOTAL                                   0x6820
+#define AVIVO_D2CRTC_V_BLANK_START_END                          0x6824
+#define AVIVO_D2CRTC_V_SYNC_A                                   0x6828
+#define AVIVO_D2CRTC_V_SYNC_A_CNTL                              0x682c
+#define AVIVO_D2CRTC_V_SYNC_B                                   0x6830
+#define AVIVO_D2CRTC_V_SYNC_B_CNTL                              0x6834
+
+#define AVIVO_D2CRTC_CONTROL                                    0x6880
+#define AVIVO_D2CRTC_BLANK_CONTROL                              0x6884
+#define AVIVO_D2CRTC_INTERLACE_CONTROL                          0x6888
+#define AVIVO_D2CRTC_INTERLACE_STATUS                           0x688c
+#define AVIVO_D2CRTC_STEREO_CONTROL                             0x68c4
+
+#define AVIVO_D2GRPH_ENABLE                                     0x6900
+#define AVIVO_D2GRPH_CONTROL                                    0x6904
+#define AVIVO_D2GRPH_LUT_SEL                                    0x6908
+#define AVIVO_D2GRPH_PRIMARY_SURFACE_ADDRESS                    0x6910
+#define AVIVO_D2GRPH_SECONDARY_SURFACE_ADDRESS                  0x6918
+#define AVIVO_D2GRPH_PITCH                                      0x6920
+#define AVIVO_D2GRPH_SURFACE_OFFSET_X                           0x6924
+#define AVIVO_D2GRPH_SURFACE_OFFSET_Y                           0x6928
+#define AVIVO_D2GRPH_X_START                                    0x692c
+#define AVIVO_D2GRPH_Y_START                                    0x6930
+#define AVIVO_D2GRPH_X_END                                      0x6934
+#define AVIVO_D2GRPH_Y_END                                      0x6938
+#define AVIVO_D2GRPH_UPDATE                                     0x6944
+#define AVIVO_D2GRPH_FLIP_CONTROL                               0x6948
+
+#define AVIVO_D2CUR_CONTROL                     0x6c00
+#define AVIVO_D2CUR_SURFACE_ADDRESS             0x6c08
+#define AVIVO_D2CUR_SIZE                        0x6c10
+#define AVIVO_D2CUR_POSITION                    0x6c14
+
+#define AVIVO_D2MODE_VIEWPORT_START             0x6d80
+#define AVIVO_D2MODE_VIEWPORT_SIZE              0x6d84
+#define AVIVO_D2MODE_EXT_OVERSCAN_LEFT_RIGHT    0x6d88
+#define AVIVO_D2MODE_EXT_OVERSCAN_TOP_BOTTOM    0x6d8c
+
+#define AVIVO_D2SCL_SCALER_ENABLE               0x6d90
+#define AVIVO_D2SCL_SCALER_TAP_CONTROL         0x6d94
+
+#define AVIVO_DDIA_BIT_DEPTH_CONTROL                           0x7214
+
+#define AVIVO_DACA_ENABLE                                      0x7800
+#      define AVIVO_DAC_ENABLE                         (1 << 0)
+#define AVIVO_DACA_SOURCE_SELECT                               0x7804
+#       define AVIVO_DAC_SOURCE_CRTC1                   (0 << 0)
+#       define AVIVO_DAC_SOURCE_CRTC2                   (1 << 0)
+#       define AVIVO_DAC_SOURCE_TV                      (2 << 0)
+
+#define AVIVO_DACA_FORCE_OUTPUT_CNTL                           0x783c
+# define AVIVO_DACA_FORCE_OUTPUT_CNTL_FORCE_DATA_EN             (1 << 0)
+# define AVIVO_DACA_FORCE_OUTPUT_CNTL_DATA_SEL_SHIFT            (8)
+# define AVIVO_DACA_FORCE_OUTPUT_CNTL_DATA_SEL_BLUE             (1 << 0)
+# define AVIVO_DACA_FORCE_OUTPUT_CNTL_DATA_SEL_GREEN            (1 << 1)
+# define AVIVO_DACA_FORCE_OUTPUT_CNTL_DATA_SEL_RED              (1 << 2)
+# define AVIVO_DACA_FORCE_OUTPUT_CNTL_DATA_ON_BLANKB_ONLY       (1 << 24)
+#define AVIVO_DACA_POWERDOWN                                   0x7850
+# define AVIVO_DACA_POWERDOWN_POWERDOWN                         (1 << 0)
+# define AVIVO_DACA_POWERDOWN_BLUE                              (1 << 8)
+# define AVIVO_DACA_POWERDOWN_GREEN                             (1 << 16)
+# define AVIVO_DACA_POWERDOWN_RED                               (1 << 24)
+
+#define AVIVO_DACB_ENABLE                                      0x7a00
+#define AVIVO_DACB_SOURCE_SELECT                               0x7a04
+#define AVIVO_DACB_FORCE_OUTPUT_CNTL                           0x7a3c
+# define AVIVO_DACB_FORCE_OUTPUT_CNTL_FORCE_DATA_EN             (1 << 0)
+# define AVIVO_DACB_FORCE_OUTPUT_CNTL_DATA_SEL_SHIFT            (8)
+# define AVIVO_DACB_FORCE_OUTPUT_CNTL_DATA_SEL_BLUE             (1 << 0)
+# define AVIVO_DACB_FORCE_OUTPUT_CNTL_DATA_SEL_GREEN            (1 << 1)
+# define AVIVO_DACB_FORCE_OUTPUT_CNTL_DATA_SEL_RED              (1 << 2)
+# define AVIVO_DACB_FORCE_OUTPUT_CNTL_DATA_ON_BLANKB_ONLY       (1 << 24)
+#define AVIVO_DACB_POWERDOWN                                   0x7a50
+# define AVIVO_DACB_POWERDOWN_POWERDOWN                         (1 << 0)
+# define AVIVO_DACB_POWERDOWN_BLUE                              (1 << 8)
+# define AVIVO_DACB_POWERDOWN_GREEN                             (1 << 16)
+# define AVIVO_DACB_POWERDOWN_RED                               (1 << 24)
+
+#define AVIVO_TMDSA_CNTL                    0x7880
+#   define AVIVO_TMDSA_CNTL_ENABLE               (1 << 0)
+#   define AVIVO_TMDSA_CNTL_HPD_MASK             (1 << 4)
+#   define AVIVO_TMDSA_CNTL_HPD_SELECT           (1 << 8)
+#   define AVIVO_TMDSA_CNTL_SYNC_PHASE           (1 << 12)
+#   define AVIVO_TMDSA_CNTL_PIXEL_ENCODING       (1 << 16)
+#   define AVIVO_TMDSA_CNTL_DUAL_LINK_ENABLE     (1 << 24)
+#   define AVIVO_TMDSA_CNTL_SWAP                 (1 << 28)
+#define AVIVO_TMDSA_SOURCE_SELECT                              0x7884
+/* 78a8 appears to be some kind of (reasonably tolerant) clock?
+ * 78d0 definitely hits the transmitter, definitely clock. */
+/* MYSTERY1 This appears to control dithering? */
+#define AVIVO_TMDSA_BIT_DEPTH_CONTROL          0x7894
+#   define AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_EN           (1 << 0)
+#   define AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_DEPTH        (1 << 4)
+#   define AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN     (1 << 8)
+#   define AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_DEPTH  (1 << 12)
+#   define AVIVO_TMDS_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_EN    (1 << 16)
+#   define AVIVO_TMDS_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_DEPTH (1 << 20)
+#   define AVIVO_TMDS_BIT_DEPTH_CONTROL_TEMPORAL_LEVEL        (1 << 24)
+#   define AVIVO_TMDS_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_RESET (1 << 26)
+#define AVIVO_TMDSA_DCBALANCER_CONTROL                  0x78d0
+#   define AVIVO_TMDSA_DCBALANCER_CONTROL_EN                  (1 << 0)
+#   define AVIVO_TMDSA_DCBALANCER_CONTROL_TEST_EN             (1 << 8)
+#   define AVIVO_TMDSA_DCBALANCER_CONTROL_TEST_IN_SHIFT       (16)
+#   define AVIVO_TMDSA_DCBALANCER_CONTROL_FORCE               (1 << 24)
+#define AVIVO_TMDSA_DATA_SYNCHRONIZATION                0x78d8
+#   define AVIVO_TMDSA_DATA_SYNCHRONIZATION_DSYNSEL           (1 << 0)
+#   define AVIVO_TMDSA_DATA_SYNCHRONIZATION_PFREQCHG          (1 << 8)
+#define AVIVO_TMDSA_CLOCK_ENABLE            0x7900
+#define AVIVO_TMDSA_TRANSMITTER_ENABLE              0x7904
+#   define AVIVO_TMDSA_TRANSMITTER_ENABLE_TX0_ENABLE          (1 << 0)
+#   define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKC0EN             (1 << 1)
+#   define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD00EN            (1 << 2)
+#   define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD01EN            (1 << 3)
+#   define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD02EN            (1 << 4)
+#   define AVIVO_TMDSA_TRANSMITTER_ENABLE_TX1_ENABLE          (1 << 8)
+#   define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD10EN            (1 << 10)
+#   define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD11EN            (1 << 11)
+#   define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD12EN            (1 << 12)
+#   define AVIVO_TMDSA_TRANSMITTER_ENABLE_TX_ENABLE_HPD_MASK  (1 << 16)
+#   define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKCEN_HPD_MASK     (1 << 17)
+#   define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKDEN_HPD_MASK     (1 << 18)
+
+#define AVIVO_TMDSA_TRANSMITTER_CONTROL                                0x7910
+#      define AVIVO_TMDSA_TRANSMITTER_CONTROL_PLL_ENABLE       (1 << 0)
+#      define AVIVO_TMDSA_TRANSMITTER_CONTROL_PLL_RESET        (1 << 1)
+#      define AVIVO_TMDSA_TRANSMITTER_CONTROL_PLL_HPD_MASK_SHIFT       (2)
+#      define AVIVO_TMDSA_TRANSMITTER_CONTROL_IDSCKSEL         (1 << 4)
+#       define AVIVO_TMDSA_TRANSMITTER_CONTROL_BGSLEEP          (1 << 5)
+#      define AVIVO_TMDSA_TRANSMITTER_CONTROL_PLL_PWRUP_SEQ_EN (1 << 6)
+#      define AVIVO_TMDSA_TRANSMITTER_CONTROL_TMCLK            (1 << 8)
+#      define AVIVO_TMDSA_TRANSMITTER_CONTROL_TMCLK_FROM_PADS  (1 << 13)
+#      define AVIVO_TMDSA_TRANSMITTER_CONTROL_TDCLK            (1 << 14)
+#      define AVIVO_TMDSA_TRANSMITTER_CONTROL_TDCLK_FROM_PADS  (1 << 15)
+#       define AVIVO_TMDSA_TRANSMITTER_CONTROL_CLK_PATTERN_SHIFT (16)
+#      define AVIVO_TMDSA_TRANSMITTER_CONTROL_BYPASS_PLL       (1 << 28)
+#       define AVIVO_TMDSA_TRANSMITTER_CONTROL_USE_CLK_DATA     (1 << 29)
+#      define AVIVO_TMDSA_TRANSMITTER_CONTROL_INPUT_TEST_CLK_SEL       (1 << 31)
+
+#define AVIVO_LVTMA_CNTL                                       0x7a80
+#   define AVIVO_LVTMA_CNTL_ENABLE               (1 << 0)
+#   define AVIVO_LVTMA_CNTL_HPD_MASK             (1 << 4)
+#   define AVIVO_LVTMA_CNTL_HPD_SELECT           (1 << 8)
+#   define AVIVO_LVTMA_CNTL_SYNC_PHASE           (1 << 12)
+#   define AVIVO_LVTMA_CNTL_PIXEL_ENCODING       (1 << 16)
+#   define AVIVO_LVTMA_CNTL_DUAL_LINK_ENABLE     (1 << 24)
+#   define AVIVO_LVTMA_CNTL_SWAP                 (1 << 28)
+#define AVIVO_LVTMA_SOURCE_SELECT                               0x7a84
+#define AVIVO_LVTMA_COLOR_FORMAT                                0x7a88
+#define AVIVO_LVTMA_BIT_DEPTH_CONTROL                           0x7a94
+#   define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TRUNCATE_EN           (1 << 0)
+#   define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TRUNCATE_DEPTH        (1 << 4)
+#   define AVIVO_LVTMA_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN     (1 << 8)
+#   define AVIVO_LVTMA_BIT_DEPTH_CONTROL_SPATIAL_DITHER_DEPTH  (1 << 12)
+#   define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_EN    (1 << 16)
+#   define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_DEPTH (1 << 20)
+#   define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TEMPORAL_LEVEL        (1 << 24)
+#   define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_RESET (1 << 26)
+
+
+
+#define AVIVO_LVTMA_DCBALANCER_CONTROL                  0x7ad0
+#   define AVIVO_LVTMA_DCBALANCER_CONTROL_EN                  (1 << 0)
+#   define AVIVO_LVTMA_DCBALANCER_CONTROL_TEST_EN             (1 << 8)
+#   define AVIVO_LVTMA_DCBALANCER_CONTROL_TEST_IN_SHIFT       (16)
+#   define AVIVO_LVTMA_DCBALANCER_CONTROL_FORCE               (1 << 24)
+
+#define AVIVO_LVTMA_DATA_SYNCHRONIZATION                0x78d8
+#   define AVIVO_LVTMA_DATA_SYNCHRONIZATION_DSYNSEL           (1 << 0)
+#   define AVIVO_LVTMA_DATA_SYNCHRONIZATION_PFREQCHG          (1 << 8)
+#define R500_LVTMA_CLOCK_ENABLE                        0x7b00
+#define R600_LVTMA_CLOCK_ENABLE                        0x7b04
+
+#define R500_LVTMA_TRANSMITTER_ENABLE              0x7b04
+#define R600_LVTMA_TRANSMITTER_ENABLE              0x7b08
+#   define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKC0EN             (1 << 1)
+#   define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD00EN            (1 << 2)
+#   define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD01EN            (1 << 3)
+#   define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD02EN            (1 << 4)
+#   define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD03EN            (1 << 5)
+#   define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKC1EN             (1 << 9)
+#   define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD10EN            (1 << 10)
+#   define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD11EN            (1 << 11)
+#   define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD12EN            (1 << 12)
+#   define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKCEN_HPD_MASK     (1 << 17)
+#   define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKDEN_HPD_MASK     (1 << 18)
+
+#define R500_LVTMA_TRANSMITTER_CONTROL                         0x7b10
+#define R600_LVTMA_TRANSMITTER_CONTROL                         0x7b14
+#      define AVIVO_LVTMA_TRANSMITTER_CONTROL_PLL_ENABLE         (1 << 0)
+#      define AVIVO_LVTMA_TRANSMITTER_CONTROL_PLL_RESET          (1 << 1)
+#      define AVIVO_LVTMA_TRANSMITTER_CONTROL_PLL_HPD_MASK_SHIFT (2)
+#      define AVIVO_LVTMA_TRANSMITTER_CONTROL_IDSCKSEL           (1 << 4)
+#       define AVIVO_LVTMA_TRANSMITTER_CONTROL_BGSLEEP            (1 << 5)
+#      define AVIVO_LVTMA_TRANSMITTER_CONTROL_PLL_PWRUP_SEQ_EN   (1 << 6)
+#      define AVIVO_LVTMA_TRANSMITTER_CONTROL_TMCLK              (1 << 8)
+#      define AVIVO_LVTMA_TRANSMITTER_CONTROL_TMCLK_FROM_PADS    (1 << 13)
+#      define AVIVO_LVTMA_TRANSMITTER_CONTROL_TDCLK              (1 << 14)
+#      define AVIVO_LVTMA_TRANSMITTER_CONTROL_TDCLK_FROM_PADS    (1 << 15)
+#       define AVIVO_LVTMA_TRANSMITTER_CONTROL_CLK_PATTERN_SHIFT  (16)
+#      define AVIVO_LVTMA_TRANSMITTER_CONTROL_BYPASS_PLL         (1 << 28)
+#       define AVIVO_LVTMA_TRANSMITTER_CONTROL_USE_CLK_DATA       (1 << 29)
+#      define AVIVO_LVTMA_TRANSMITTER_CONTROL_INPUT_TEST_CLK_SEL (1 << 31)
+
+#define R500_LVTMA_PWRSEQ_CNTL                                         0x7af0
+#define R600_LVTMA_PWRSEQ_CNTL                                         0x7af4
+#      define AVIVO_LVTMA_PWRSEQ_EN                                        (1 << 0)
+#      define AVIVO_LVTMA_PWRSEQ_PLL_ENABLE_MASK                           (1 << 2)
+#      define AVIVO_LVTMA_PWRSEQ_PLL_RESET_MASK                            (1 << 3)
+#      define AVIVO_LVTMA_PWRSEQ_TARGET_STATE                              (1 << 4)
+#      define AVIVO_LVTMA_SYNCEN                                           (1 << 8)
+#      define AVIVO_LVTMA_SYNCEN_OVRD                                      (1 << 9)
+#      define AVIVO_LVTMA_SYNCEN_POL                                       (1 << 10)
+#      define AVIVO_LVTMA_DIGON                                            (1 << 16)
+#      define AVIVO_LVTMA_DIGON_OVRD                                       (1 << 17)
+#      define AVIVO_LVTMA_DIGON_POL                                        (1 << 18)
+#      define AVIVO_LVTMA_BLON                                             (1 << 24)
+#      define AVIVO_LVTMA_BLON_OVRD                                        (1 << 25)
+#      define AVIVO_LVTMA_BLON_POL                                         (1 << 26)
+
+#define R500_LVTMA_PWRSEQ_STATE                        0x7af4
+#define R600_LVTMA_PWRSEQ_STATE                        0x7af8
+#       define AVIVO_LVTMA_PWRSEQ_STATE_TARGET_STATE_R          (1 << 0)
+#       define AVIVO_LVTMA_PWRSEQ_STATE_DIGON                   (1 << 1)
+#       define AVIVO_LVTMA_PWRSEQ_STATE_SYNCEN                  (1 << 2)
+#       define AVIVO_LVTMA_PWRSEQ_STATE_BLON                    (1 << 3)
+#       define AVIVO_LVTMA_PWRSEQ_STATE_DONE                    (1 << 4)
+#       define AVIVO_LVTMA_PWRSEQ_STATE_STATUS_SHIFT            (8)
+
+#define AVIVO_LVDS_BACKLIGHT_CNTL                      0x7af8
+#      define AVIVO_LVDS_BACKLIGHT_CNTL_EN                     (1 << 0)
+#      define AVIVO_LVDS_BACKLIGHT_LEVEL_MASK          0x0000ff00
+#      define AVIVO_LVDS_BACKLIGHT_LEVEL_SHIFT         8
+
+#define AVIVO_DVOA_BIT_DEPTH_CONTROL                   0x7988
+
+#define AVIVO_GPIO_0                        0x7e30
+#define AVIVO_GPIO_1                        0x7e40
+#define AVIVO_GPIO_2                        0x7e50
+#define AVIVO_GPIO_3                        0x7e60
+
+#define AVIVO_DC_GPIO_HPD_Y                 0x7e9c
+
+#define AVIVO_I2C_STATUS                                       0x7d30
+#      define AVIVO_I2C_STATUS_DONE                            (1 << 0)
+#      define AVIVO_I2C_STATUS_NACK                            (1 << 1)
+#      define AVIVO_I2C_STATUS_HALT                            (1 << 2)
+#      define AVIVO_I2C_STATUS_GO                              (1 << 3)
+#      define AVIVO_I2C_STATUS_MASK                            0x7
+/* If radeon_mm_i2c is to be believed, this is HALT, NACK, and maybe
+ * DONE? */
+#      define AVIVO_I2C_STATUS_CMD_RESET                       0x7
+#      define AVIVO_I2C_STATUS_CMD_WAIT                        (1 << 3)
+#define AVIVO_I2C_STOP                                         0x7d34
+#define AVIVO_I2C_START_CNTL                           0x7d38
+#      define AVIVO_I2C_START                                          (1 << 8)
+#      define AVIVO_I2C_CONNECTOR0                                     (0 << 16)
+#      define AVIVO_I2C_CONNECTOR1                                     (1 << 16)
+#define R520_I2C_START (1<<0)
+#define R520_I2C_STOP (1<<1)
+#define R520_I2C_RX (1<<2)
+#define R520_I2C_EN (1<<8)
+#define R520_I2C_DDC1 (0<<16)
+#define R520_I2C_DDC2 (1<<16)
+#define R520_I2C_DDC3 (2<<16)
+#define R520_I2C_DDC_MASK (3<<16)
+#define AVIVO_I2C_CONTROL2                                     0x7d3c
+#      define AVIVO_I2C_7D3C_SIZE_SHIFT                        8
+#      define AVIVO_I2C_7D3C_SIZE_MASK                         (0xf << 8)
+#define AVIVO_I2C_CONTROL3                                             0x7d40
+/* Reading is done 4 bytes at a time: read the bottom 8 bits from
+ * 7d44, four times in a row.
+ * Writing is a little more complex.  First write DATA with
+ * 0xnnnnnnzz, then 0xnnnnnnyy, where nnnnnn is some non-deterministic
+ * magic number, zz is, I think, the slave address, and yy is the byte
+ * you want to write. */
+#define AVIVO_I2C_DATA                                         0x7d44
+#define R520_I2C_ADDR_COUNT_MASK (0x7)
+#define R520_I2C_DATA_COUNT_SHIFT (8)
+#define R520_I2C_DATA_COUNT_MASK (0xF00)
+#define AVIVO_I2C_CNTL                                         0x7d50
+#      define AVIVO_I2C_EN                                                     (1 << 0)
+#      define AVIVO_I2C_RESET                                          (1 << 8)
+
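+/* Illustrative sketch only, pieced together from the reverse-engineered notes
+ * above rather than from documentation: pulling four bytes back out of the
+ * engine by reading the low 8 bits of AVIVO_I2C_DATA four times in a row.
+ * RREG32() is the mmio read helper provided by radeon.h.
+ *
+ *     for (i = 0; i < 4; i++)
+ *             buf[i] = RREG32(AVIVO_I2C_DATA) & 0xff;
+ */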
+#endif
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
new file mode 100644 (file)
index 0000000..570a244
--- /dev/null
@@ -0,0 +1,234 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include "drmP.h"
+#include "radeon_reg.h"
+#include "radeon.h"
+
+/* r520,rv530,rv560,rv570,r580 depend on: */
+void r100_hdp_reset(struct radeon_device *rdev);
+int rv370_pcie_gart_enable(struct radeon_device *rdev);
+void rv370_pcie_gart_disable(struct radeon_device *rdev);
+void r420_pipes_init(struct radeon_device *rdev);
+void rs600_mc_disable_clients(struct radeon_device *rdev);
+void rs600_disable_vga(struct radeon_device *rdev);
+int rv515_debugfs_pipes_info_init(struct radeon_device *rdev);
+int rv515_debugfs_ga_info_init(struct radeon_device *rdev);
+
+/* This file gathers functions specific to:
+ * r520,rv530,rv560,rv570,r580
+ *
+ * Some of these functions might be used by newer ASICs.
+ */
+void r520_gpu_init(struct radeon_device *rdev);
+int r520_mc_wait_for_idle(struct radeon_device *rdev);
+
+
+/*
+ * MC
+ */
+int r520_mc_init(struct radeon_device *rdev)
+{
+       uint32_t tmp;
+       int r;
+
+       if (r100_debugfs_rbbm_init(rdev)) {
+               DRM_ERROR("Failed to register debugfs file for RBBM !\n");
+       }
+       if (rv515_debugfs_pipes_info_init(rdev)) {
+               DRM_ERROR("Failed to register debugfs file for pipes !\n");
+       }
+       if (rv515_debugfs_ga_info_init(rdev)) {
+               DRM_ERROR("Failed to register debugfs file for GA !\n");
+       }
+
+       r520_gpu_init(rdev);
+       rv370_pcie_gart_disable(rdev);
+
+       /* Setup GPU memory space */
+       rdev->mc.vram_location = 0xFFFFFFFFUL;
+       rdev->mc.gtt_location = 0xFFFFFFFFUL;
+       if (rdev->flags & RADEON_IS_AGP) {
+               r = radeon_agp_init(rdev);
+               if (r) {
+                       printk(KERN_WARNING "[drm] Disabling AGP\n");
+                       rdev->flags &= ~RADEON_IS_AGP;
+                       rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
+               } else {
+                       rdev->mc.gtt_location = rdev->mc.agp_base;
+               }
+       }
+       r = radeon_mc_setup(rdev);
+       if (r) {
+               return r;
+       }
+
+       /* Program GPU memory space */
+       rs600_mc_disable_clients(rdev);
+       if (r520_mc_wait_for_idle(rdev)) {
+               printk(KERN_WARNING "Failed to wait MC idle while "
+                      "programming pipes. Bad things might happen.\n");
+       }
+       /* Write VRAM size in case we are limiting it */
+       WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
+       tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1;
+       tmp = REG_SET(R520_MC_FB_TOP, tmp >> 16);
+       tmp |= REG_SET(R520_MC_FB_START, rdev->mc.vram_location >> 16);
+       WREG32_MC(R520_MC_FB_LOCATION, tmp);
+       WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16);
+       WREG32(0x310, rdev->mc.vram_location);
+       if (rdev->flags & RADEON_IS_AGP) {
+               tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
+               tmp = REG_SET(R520_MC_AGP_TOP, tmp >> 16);
+               tmp |= REG_SET(R520_MC_AGP_START, rdev->mc.gtt_location >> 16);
+               WREG32_MC(R520_MC_AGP_LOCATION, tmp);
+               WREG32_MC(R520_MC_AGP_BASE, rdev->mc.agp_base);
+               WREG32_MC(R520_MC_AGP_BASE_2, 0);
+       } else {
+               WREG32_MC(R520_MC_AGP_LOCATION, 0x0FFFFFFF);
+               WREG32_MC(R520_MC_AGP_BASE, 0);
+               WREG32_MC(R520_MC_AGP_BASE_2, 0);
+       }
+       return 0;
+}
+
+void r520_mc_fini(struct radeon_device *rdev)
+{
+       rv370_pcie_gart_disable(rdev);
+       radeon_gart_table_vram_free(rdev);
+       radeon_gart_fini(rdev);
+}
+
+
+/*
+ * Global GPU functions
+ */
+void r520_errata(struct radeon_device *rdev)
+{
+       rdev->pll_errata = 0;
+}
+
+int r520_mc_wait_for_idle(struct radeon_device *rdev)
+{
+       unsigned i;
+       uint32_t tmp;
+
+       for (i = 0; i < rdev->usec_timeout; i++) {
+               /* read MC_STATUS */
+               tmp = RREG32_MC(R520_MC_STATUS);
+               if (tmp & R520_MC_STATUS_IDLE) {
+                       return 0;
+               }
+               DRM_UDELAY(1);
+       }
+       return -1;
+}
+
+void r520_gpu_init(struct radeon_device *rdev)
+{
+       unsigned pipe_select_current, gb_pipe_select, tmp;
+
+       r100_hdp_reset(rdev);
+       rs600_disable_vga(rdev);
+       /*
+        * DST_PIPE_CONFIG              0x170C
+        * GB_TILE_CONFIG               0x4018
+        * GB_FIFO_SIZE                 0x4024
+        * GB_PIPE_SELECT               0x402C
+        * GB_PIPE_SELECT2              0x4124
+        *      Z_PIPE_SHIFT                    0
+        *      Z_PIPE_MASK                     0x000000003
+        * GB_FIFO_SIZE2                0x4128
+        *      SC_SFIFO_SIZE_SHIFT             0
+        *      SC_SFIFO_SIZE_MASK              0x000000003
+        *      SC_MFIFO_SIZE_SHIFT             2
+        *      SC_MFIFO_SIZE_MASK              0x00000000C
+        *      FG_SFIFO_SIZE_SHIFT             4
+        *      FG_SFIFO_SIZE_MASK              0x000000030
+        *      ZB_MFIFO_SIZE_SHIFT             6
+        *      ZB_MFIFO_SIZE_MASK              0x0000000C0
+        * GA_ENHANCE                   0x4274
+        * SU_REG_DEST                  0x42C8
+        */
+       /* workaround for RV530 */
+       if (rdev->family == CHIP_RV530) {
+               WREG32(0x4124, 1);      /* GB_PIPE_SELECT2 */
+               WREG32(0x4128, 0xFF);   /* GB_FIFO_SIZE2 */
+       }
+       r420_pipes_init(rdev);
+       gb_pipe_select = RREG32(0x402C);        /* GB_PIPE_SELECT */
+       tmp = RREG32(0x170C);                   /* DST_PIPE_CONFIG */
+       pipe_select_current = (tmp >> 2) & 3;
+       tmp = (1 << pipe_select_current) |
+             (((gb_pipe_select >> 8) & 0xF) << 4);
+       WREG32_PLL(0x000D, tmp);
+       if (r520_mc_wait_for_idle(rdev)) {
+               printk(KERN_WARNING "Failed to wait MC idle while "
+                      "programming pipes. Bad things might happen.\n");
+       }
+}
+
+
+/*
+ * VRAM info
+ */
+static void r520_vram_get_type(struct radeon_device *rdev)
+{
+       uint32_t tmp;
+
+       rdev->mc.vram_width = 128;
+       rdev->mc.vram_is_ddr = true;
+       tmp = RREG32_MC(R520_MC_CNTL0);
+       switch ((tmp & R520_MEM_NUM_CHANNELS_MASK) >> R520_MEM_NUM_CHANNELS_SHIFT) {
+       case 0:
+               rdev->mc.vram_width = 32;
+               break;
+       case 1:
+               rdev->mc.vram_width = 64;
+               break;
+       case 2:
+               rdev->mc.vram_width = 128;
+               break;
+       case 3:
+               rdev->mc.vram_width = 256;
+               break;
+       default:
+               rdev->mc.vram_width = 128;
+               break;
+       }
+       if (tmp & R520_MC_CHANNEL_SIZE)
+               rdev->mc.vram_width *= 2;
+}
+
+void r520_vram_info(struct radeon_device *rdev)
+{
+       r520_vram_get_type(rdev);
+       rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
+
+       rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
+       rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+}
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
new file mode 100644 (file)
index 0000000..c45559f
--- /dev/null
@@ -0,0 +1,169 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include "drmP.h"
+#include "radeon_reg.h"
+#include "radeon.h"
+
+/* r600,rv610,rv630,rv620,rv635,rv670 depend on: */
+void rs600_mc_disable_clients(struct radeon_device *rdev);
+
+/* This file gathers functions specific to:
+ * r600,rv610,rv630,rv620,rv635,rv670
+ *
+ * Some of these functions might be used by newer ASICs.
+ */
+int r600_mc_wait_for_idle(struct radeon_device *rdev);
+void r600_gpu_init(struct radeon_device *rdev);
+
+
+/*
+ * MC
+ */
+int r600_mc_init(struct radeon_device *rdev)
+{
+       uint32_t tmp;
+
+       r600_gpu_init(rdev);
+
+       /* Set up the GART before changing the location so we can ask it to
+        * discard unmapped MC requests.
+        */
+       /* FIXME: disable out of gart access */
+       tmp = rdev->mc.gtt_location / 4096;
+       tmp = REG_SET(R600_LOGICAL_PAGE_NUMBER, tmp);
+       WREG32(R600_MC_VM_SYSTEM_APERTURE_LOW_ADDR, tmp);
+       tmp = (rdev->mc.gtt_location + rdev->mc.gtt_size) / 4096;
+       tmp = REG_SET(R600_LOGICAL_PAGE_NUMBER, tmp);
+       WREG32(R600_MC_VM_SYSTEM_APERTURE_HIGH_ADDR, tmp);
+
+       rs600_mc_disable_clients(rdev);
+       if (r600_mc_wait_for_idle(rdev)) {
+               printk(KERN_WARNING "Failed to wait MC idle while "
+                      "programming pipes. Bad things might happen.\n");
+       }
+
+       tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1;
+       tmp = REG_SET(R600_MC_FB_TOP, tmp >> 24);
+       tmp |= REG_SET(R600_MC_FB_BASE, rdev->mc.vram_location >> 24);
+       WREG32(R600_MC_VM_FB_LOCATION, tmp);
+       tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
+       tmp = REG_SET(R600_MC_AGP_TOP, tmp >> 22);
+       WREG32(R600_MC_VM_AGP_TOP, tmp);
+       tmp = REG_SET(R600_MC_AGP_BOT, rdev->mc.gtt_location >> 22);
+       WREG32(R600_MC_VM_AGP_BOT, tmp);
+       return 0;
+}
+
+void r600_mc_fini(struct radeon_device *rdev)
+{
+       /* FIXME: implement */
+}
+
+
+/*
+ * Global GPU functions
+ */
+void r600_errata(struct radeon_device *rdev)
+{
+       rdev->pll_errata = 0;
+}
+
+int r600_mc_wait_for_idle(struct radeon_device *rdev)
+{
+       /* FIXME: implement */
+       return 0;
+}
+
+void r600_gpu_init(struct radeon_device *rdev)
+{
+       /* FIXME: implement */
+}
+
+
+/*
+ * VRAM info
+ */
+void r600_vram_get_type(struct radeon_device *rdev)
+{
+       uint32_t tmp;
+       int chansize;
+
+       rdev->mc.vram_width = 128;
+       rdev->mc.vram_is_ddr = true;
+
+       tmp = RREG32(R600_RAMCFG);
+       if (tmp & R600_CHANSIZE_OVERRIDE) {
+               chansize = 16;
+       } else if (tmp & R600_CHANSIZE) {
+               chansize = 64;
+       } else {
+               chansize = 32;
+       }
+       if (rdev->family == CHIP_R600) {
+               rdev->mc.vram_width = 8 * chansize;
+       } else if (rdev->family == CHIP_RV670) {
+               rdev->mc.vram_width = 4 * chansize;
+       } else if ((rdev->family == CHIP_RV610) ||
+                       (rdev->family == CHIP_RV620)) {
+               rdev->mc.vram_width = chansize;
+       } else if ((rdev->family == CHIP_RV630) ||
+                       (rdev->family == CHIP_RV635)) {
+               rdev->mc.vram_width = 2 * chansize;
+       }
+}
+
+void r600_vram_info(struct radeon_device *rdev)
+{
+       r600_vram_get_type(rdev);
+       rdev->mc.vram_size = RREG32(R600_CONFIG_MEMSIZE);
+
+       /* Could the aperture size report 0? */
+       rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
+       rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+}
+
+/*
+ * Indirect registers accessor
+ */
+uint32_t r600_pciep_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+       uint32_t r;
+
+       WREG32(R600_PCIE_PORT_INDEX, ((reg) & 0xff));
+       (void)RREG32(R600_PCIE_PORT_INDEX);
+       r = RREG32(R600_PCIE_PORT_DATA);
+       return r;
+}
+
+void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+       WREG32(R600_PCIE_PORT_INDEX, ((reg) & 0xff));
+       (void)RREG32(R600_PCIE_PORT_INDEX);
+       WREG32(R600_PCIE_PORT_DATA, (v));
+       (void)RREG32(R600_PCIE_PORT_DATA);
+}
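+/* Usage sketch (illustrative only): the accessors above implement the usual
+ * index/data pattern, so a read-modify-write of a PCIE port register looks
+ * like the following, where SOME_REG and SOME_BIT are placeholders rather
+ * than real definitions:
+ *
+ *     uint32_t v = r600_pciep_rreg(rdev, SOME_REG);
+ *     v |= SOME_BIT;
+ *     r600_pciep_wreg(rdev, SOME_REG, v);
+ */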
diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h
new file mode 100644 (file)
index 0000000..e2d1f5f
--- /dev/null
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __R600_REG_H__
+#define __R600_REG_H__
+
+#define R600_PCIE_PORT_INDEX                0x0038
+#define R600_PCIE_PORT_DATA                 0x003c
+
+#define R600_MC_VM_FB_LOCATION                 0x2180
+#define                R600_MC_FB_BASE_MASK                    0x0000FFFF
+#define                R600_MC_FB_BASE_SHIFT                   0
+#define                R600_MC_FB_TOP_MASK                     0xFFFF0000
+#define                R600_MC_FB_TOP_SHIFT                    16
+#define R600_MC_VM_AGP_TOP                     0x2184
+#define                R600_MC_AGP_TOP_MASK                    0x0003FFFF
+#define                R600_MC_AGP_TOP_SHIFT                   0
+#define R600_MC_VM_AGP_BOT                     0x2188
+#define                R600_MC_AGP_BOT_MASK                    0x0003FFFF
+#define                R600_MC_AGP_BOT_SHIFT                   0
+#define R600_MC_VM_AGP_BASE                    0x218c
+#define R600_MC_VM_SYSTEM_APERTURE_LOW_ADDR    0x2190
+#define                R600_LOGICAL_PAGE_NUMBER_MASK           0x000FFFFF
+#define                R600_LOGICAL_PAGE_NUMBER_SHIFT          0
+#define R600_MC_VM_SYSTEM_APERTURE_HIGH_ADDR   0x2194
+#define R600_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR        0x2198
+
+#define R700_MC_VM_FB_LOCATION                 0x2024
+#define                R700_MC_FB_BASE_MASK                    0x0000FFFF
+#define                R700_MC_FB_BASE_SHIFT                   0
+#define                R700_MC_FB_TOP_MASK                     0xFFFF0000
+#define                R700_MC_FB_TOP_SHIFT                    16
+#define R700_MC_VM_AGP_TOP                     0x2028
+#define                R700_MC_AGP_TOP_MASK                    0x0003FFFF
+#define                R700_MC_AGP_TOP_SHIFT                   0
+#define R700_MC_VM_AGP_BOT                     0x202c
+#define                R700_MC_AGP_BOT_MASK                    0x0003FFFF
+#define                R700_MC_AGP_BOT_SHIFT                   0
+#define R700_MC_VM_AGP_BASE                    0x2030
+#define R700_MC_VM_SYSTEM_APERTURE_LOW_ADDR    0x2034
+#define                R700_LOGICAL_PAGE_NUMBER_MASK           0x000FFFFF
+#define                R700_LOGICAL_PAGE_NUMBER_SHIFT          0
+#define R700_MC_VM_SYSTEM_APERTURE_HIGH_ADDR   0x2038
+#define R700_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR        0x203c
+
+#define R600_RAMCFG                                   0x2408
+#       define R600_CHANSIZE                           (1 << 7)
+#       define R600_CHANSIZE_OVERRIDE                  (1 << 10)
+
+
+#define R600_GENERAL_PWRMGT                                        0x618
+#      define R600_OPEN_DRAIN_PADS                                (1 << 11)
+
+#define R600_LOWER_GPIO_ENABLE                                     0x710
+#define R600_CTXSW_VID_LOWER_GPIO_CNTL                             0x718
+#define R600_HIGH_VID_LOWER_GPIO_CNTL                              0x71c
+#define R600_MEDIUM_VID_LOWER_GPIO_CNTL                            0x720
+#define R600_LOW_VID_LOWER_GPIO_CNTL                               0x724
+
+
+
+#define R600_HDP_NONSURFACE_BASE                                0x2c04
+
+#define R600_BUS_CNTL                                           0x5420
+#define R600_CONFIG_CNTL                                        0x5424
+#define R600_CONFIG_MEMSIZE                                     0x5428
+#define R600_CONFIG_F0_BASE                                     0x542C
+#define R600_CONFIG_APER_SIZE                                   0x5430
+
+#define R600_ROM_CNTL                              0x1600
+#       define R600_SCK_OVERWRITE                  (1 << 1)
+#       define R600_SCK_PRESCALE_CRYSTAL_CLK_SHIFT 28
+#       define R600_SCK_PRESCALE_CRYSTAL_CLK_MASK  (0xf << 28)
+
+#define R600_CG_SPLL_FUNC_CNTL                     0x600
+#       define R600_SPLL_BYPASS_EN                 (1 << 3)
+#define R600_CG_SPLL_STATUS                        0x60c
+#       define R600_SPLL_CHG_STATUS                (1 << 1)
+
+#define R600_BIOS_0_SCRATCH               0x1724
+#define R600_BIOS_1_SCRATCH               0x1728
+#define R600_BIOS_2_SCRATCH               0x172c
+#define R600_BIOS_3_SCRATCH               0x1730
+#define R600_BIOS_4_SCRATCH               0x1734
+#define R600_BIOS_5_SCRATCH               0x1738
+#define R600_BIOS_6_SCRATCH               0x173c
+#define R600_BIOS_7_SCRATCH               0x1740
+
+
+#endif
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
new file mode 100644 (file)
index 0000000..c3f24cc
--- /dev/null
@@ -0,0 +1,793 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __RADEON_H__
+#define __RADEON_H__
+
+#include "radeon_object.h"
+
+/* TODO: Here are the things that still need to be done:
+ *     - surface allocator & initializer: (a bit like scratch reg) should
+ *       initialize the HDP_ stuff on RS600, R600, R700 hw, and anything
+ *       else related to surfaces
+ *     - WB: write back stuff (handle it a bit like the scratch regs)
+ *     - Vblank: look at Jesse's rework and what we should do
+ *     - r600/r700: gart & cp
+ *     - cs: clean up the cs ioctl to use a bitmap & things like that
+ *     - power management stuff
+ *     - Barrier in gart code
+ *     - Unmappable vram?
+ *     - TESTING, TESTING, TESTING
+ */
+
+#include <asm/atomic.h>
+#include <linux/wait.h>
+#include <linux/list.h>
+#include <linux/kref.h>
+
+#include "radeon_mode.h"
+#include "radeon_reg.h"
+
+
+/*
+ * Modules parameters.
+ */
+extern int radeon_no_wb;
+extern int radeon_modeset;
+extern int radeon_dynclks;
+extern int radeon_r4xx_atom;
+extern int radeon_agpmode;
+extern int radeon_vram_limit;
+extern int radeon_gart_size;
+extern int radeon_benchmarking;
+extern int radeon_connector_table;
+
+/*
+ * Copied from radeon_drv.h so we don't have to include both and end up with
+ * conflicting symbols.
+ */
+#define RADEON_MAX_USEC_TIMEOUT                100000  /* 100 ms */
+#define RADEON_IB_POOL_SIZE            16
+#define RADEON_DEBUGFS_MAX_NUM_FILES   32
+#define RADEONFB_CONN_LIMIT            4
+
+enum radeon_family {
+       CHIP_R100,
+       CHIP_RV100,
+       CHIP_RS100,
+       CHIP_RV200,
+       CHIP_RS200,
+       CHIP_R200,
+       CHIP_RV250,
+       CHIP_RS300,
+       CHIP_RV280,
+       CHIP_R300,
+       CHIP_R350,
+       CHIP_RV350,
+       CHIP_RV380,
+       CHIP_R420,
+       CHIP_R423,
+       CHIP_RV410,
+       CHIP_RS400,
+       CHIP_RS480,
+       CHIP_RS600,
+       CHIP_RS690,
+       CHIP_RS740,
+       CHIP_RV515,
+       CHIP_R520,
+       CHIP_RV530,
+       CHIP_RV560,
+       CHIP_RV570,
+       CHIP_R580,
+       CHIP_R600,
+       CHIP_RV610,
+       CHIP_RV630,
+       CHIP_RV620,
+       CHIP_RV635,
+       CHIP_RV670,
+       CHIP_RS780,
+       CHIP_RV770,
+       CHIP_RV730,
+       CHIP_RV710,
+       CHIP_LAST,
+};
+
+enum radeon_chip_flags {
+       RADEON_FAMILY_MASK = 0x0000ffffUL,
+       RADEON_FLAGS_MASK = 0xffff0000UL,
+       RADEON_IS_MOBILITY = 0x00010000UL,
+       RADEON_IS_IGP = 0x00020000UL,
+       RADEON_SINGLE_CRTC = 0x00040000UL,
+       RADEON_IS_AGP = 0x00080000UL,
+       RADEON_HAS_HIERZ = 0x00100000UL,
+       RADEON_IS_PCIE = 0x00200000UL,
+       RADEON_NEW_MEMMAP = 0x00400000UL,
+       RADEON_IS_PCI = 0x00800000UL,
+       RADEON_IS_IGPGART = 0x01000000UL,
+};
+
+
+/*
+ * Errata workarounds.
+ */
+enum radeon_pll_errata {
+       CHIP_ERRATA_R300_CG             = 0x00000001,
+       CHIP_ERRATA_PLL_DUMMYREADS      = 0x00000002,
+       CHIP_ERRATA_PLL_DELAY           = 0x00000004
+};
+
+
+struct radeon_device;
+
+
+/*
+ * BIOS.
+ */
+bool radeon_get_bios(struct radeon_device *rdev);
+
+/*
+ * Clocks
+ */
+
+struct radeon_clock {
+       struct radeon_pll p1pll;
+       struct radeon_pll p2pll;
+       struct radeon_pll spll;
+       struct radeon_pll mpll;
+       /* 10 kHz units */
+       uint32_t default_mclk;
+       uint32_t default_sclk;
+};
+
+/*
+ * Fences.
+ */
+struct radeon_fence_driver {
+       uint32_t                        scratch_reg;
+       atomic_t                        seq;
+       uint32_t                        last_seq;
+       unsigned long                   count_timeout;
+       wait_queue_head_t               queue;
+       rwlock_t                        lock;
+       struct list_head                created;
+       struct list_head                emited;
+       struct list_head                signaled;
+};
+
+struct radeon_fence {
+       struct radeon_device            *rdev;
+       struct kref                     kref;
+       struct list_head                list;
+       /* protected by radeon_fence.lock */
+       uint32_t                        seq;
+       unsigned long                   timeout;
+       bool                            emited;
+       bool                            signaled;
+};
+
+int radeon_fence_driver_init(struct radeon_device *rdev);
+void radeon_fence_driver_fini(struct radeon_device *rdev);
+int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence);
+int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence);
+void radeon_fence_process(struct radeon_device *rdev);
+bool radeon_fence_signaled(struct radeon_fence *fence);
+int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
+int radeon_fence_wait_next(struct radeon_device *rdev);
+int radeon_fence_wait_last(struct radeon_device *rdev);
+struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
+void radeon_fence_unref(struct radeon_fence **fence);
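+/* Typical lifecycle (sketch, error handling trimmed): create a fence, emit it
+ * behind the commands it should signal, wait for it, then drop the reference.
+ *
+ *     struct radeon_fence *fence;
+ *
+ *     radeon_fence_create(rdev, &fence);
+ *     radeon_fence_emit(rdev, fence);
+ *     radeon_fence_wait(fence, true);
+ *     radeon_fence_unref(&fence);
+ */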
+
+
+/*
+ * Radeon buffer.
+ */
+struct radeon_object;
+
+struct radeon_object_list {
+       struct list_head        list;
+       struct radeon_object    *robj;
+       uint64_t                gpu_offset;
+       unsigned                rdomain;
+       unsigned                wdomain;
+};
+
+int radeon_object_init(struct radeon_device *rdev);
+void radeon_object_fini(struct radeon_device *rdev);
+int radeon_object_create(struct radeon_device *rdev,
+                        struct drm_gem_object *gobj,
+                        unsigned long size,
+                        bool kernel,
+                        uint32_t domain,
+                        bool interruptible,
+                        struct radeon_object **robj_ptr);
+int radeon_object_kmap(struct radeon_object *robj, void **ptr);
+void radeon_object_kunmap(struct radeon_object *robj);
+void radeon_object_unref(struct radeon_object **robj);
+int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
+                     uint64_t *gpu_addr);
+void radeon_object_unpin(struct radeon_object *robj);
+int radeon_object_wait(struct radeon_object *robj);
+int radeon_object_evict_vram(struct radeon_device *rdev);
+int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset);
+void radeon_object_force_delete(struct radeon_device *rdev);
+void radeon_object_list_add_object(struct radeon_object_list *lobj,
+                                  struct list_head *head);
+int radeon_object_list_validate(struct list_head *head, void *fence);
+void radeon_object_list_unvalidate(struct list_head *head);
+void radeon_object_list_clean(struct list_head *head);
+int radeon_object_fbdev_mmap(struct radeon_object *robj,
+                            struct vm_area_struct *vma);
+unsigned long radeon_object_size(struct radeon_object *robj);
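+/* Usage sketch (illustrative, error handling elided): create a kernel-owned
+ * buffer, pin it to obtain a GPU address, then map it for CPU access.  The
+ * RADEON_GEM_DOMAIN_VRAM placement flag is assumed to come from radeon_drm.h.
+ *
+ *     struct radeon_object *robj;
+ *     uint64_t gpu_addr;
+ *     void *ptr;
+ *
+ *     radeon_object_create(rdev, NULL, size, true,
+ *                          RADEON_GEM_DOMAIN_VRAM, false, &robj);
+ *     radeon_object_pin(robj, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
+ *     radeon_object_kmap(robj, &ptr);
+ */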
+
+
+/*
+ * GEM objects.
+ */
+struct radeon_gem {
+       struct list_head        objects;
+};
+
+int radeon_gem_init(struct radeon_device *rdev);
+void radeon_gem_fini(struct radeon_device *rdev);
+int radeon_gem_object_create(struct radeon_device *rdev, int size,
+                            int alignment, int initial_domain,
+                            bool discardable, bool kernel,
+                            bool interruptible,
+                            struct drm_gem_object **obj);
+int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
+                         uint64_t *gpu_addr);
+void radeon_gem_object_unpin(struct drm_gem_object *obj);
+
+
+/*
+ * GART structures, functions & helpers
+ */
+struct radeon_mc;
+
+struct radeon_gart_table_ram {
+       volatile uint32_t               *ptr;
+};
+
+struct radeon_gart_table_vram {
+       struct radeon_object            *robj;
+       volatile uint32_t               *ptr;
+};
+
+union radeon_gart_table {
+       struct radeon_gart_table_ram    ram;
+       struct radeon_gart_table_vram   vram;
+};
+
+struct radeon_gart {
+       dma_addr_t                      table_addr;
+       unsigned                        num_gpu_pages;
+       unsigned                        num_cpu_pages;
+       unsigned                        table_size;
+       union radeon_gart_table         table;
+       struct page                     **pages;
+       dma_addr_t                      *pages_addr;
+       bool                            ready;
+};
+
+int radeon_gart_table_ram_alloc(struct radeon_device *rdev);
+void radeon_gart_table_ram_free(struct radeon_device *rdev);
+int radeon_gart_table_vram_alloc(struct radeon_device *rdev);
+void radeon_gart_table_vram_free(struct radeon_device *rdev);
+int radeon_gart_init(struct radeon_device *rdev);
+void radeon_gart_fini(struct radeon_device *rdev);
+void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
+                       int pages);
+int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
+                    int pages, struct page **pagelist);
+
+
+/*
+ * GPU MC structures, functions & helpers
+ */
+struct radeon_mc {
+       resource_size_t         aper_size;
+       resource_size_t         aper_base;
+       resource_size_t         agp_base;
+       unsigned                gtt_location;
+       unsigned                gtt_size;
+       unsigned                vram_location;
+       unsigned                vram_size;
+       unsigned                vram_width;
+       int                     vram_mtrr;
+       bool                    vram_is_ddr;
+};
+
+int radeon_mc_setup(struct radeon_device *rdev);
+
+
+/*
+ * GPU scratch registers structures, functions & helpers
+ */
+struct radeon_scratch {
+       unsigned                num_reg;
+       bool                    free[32];
+       uint32_t                reg[32];
+};
+
+int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg);
+void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg);
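+/* Usage sketch: reserve one of the scratch registers, use it (e.g. as a
+ * fence sequence target), then hand it back to the pool.
+ *
+ *     uint32_t reg;
+ *
+ *     if (radeon_scratch_get(rdev, &reg) == 0) {
+ *             ...
+ *             radeon_scratch_free(rdev, reg);
+ *     }
+ */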
+
+
+/*
+ * IRQS.
+ */
+struct radeon_irq {
+       bool            installed;
+       bool            sw_int;
+       /* FIXME: use a define max crtc rather than hardcode it */
+       bool            crtc_vblank_int[2];
+};
+
+int radeon_irq_kms_init(struct radeon_device *rdev);
+void radeon_irq_kms_fini(struct radeon_device *rdev);
+
+
+/*
+ * CP & ring.
+ */
+struct radeon_ib {
+       struct list_head        list;
+       unsigned long           idx;
+       uint64_t                gpu_addr;
+       struct radeon_fence     *fence;
+       volatile uint32_t       *ptr;
+       uint32_t                length_dw;
+};
+
+struct radeon_ib_pool {
+       struct mutex            mutex;
+       struct radeon_object    *robj;
+       struct list_head        scheduled_ibs;
+       struct radeon_ib        ibs[RADEON_IB_POOL_SIZE];
+       bool                    ready;
+       DECLARE_BITMAP(alloc_bm, RADEON_IB_POOL_SIZE);
+};
+
+struct radeon_cp {
+       struct radeon_object    *ring_obj;
+       volatile uint32_t       *ring;
+       unsigned                rptr;
+       unsigned                wptr;
+       unsigned                wptr_old;
+       unsigned                ring_size;
+       unsigned                ring_free_dw;
+       int                     count_dw;
+       uint64_t                gpu_addr;
+       uint32_t                align_mask;
+       uint32_t                ptr_mask;
+       struct mutex            mutex;
+       bool                    ready;
+};
+
+int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib);
+void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib);
+int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib);
+int radeon_ib_pool_init(struct radeon_device *rdev);
+void radeon_ib_pool_fini(struct radeon_device *rdev);
+int radeon_ib_test(struct radeon_device *rdev);
+/* Ring access between begin & end cannot sleep */
+void radeon_ring_free_size(struct radeon_device *rdev);
+int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw);
+void radeon_ring_unlock_commit(struct radeon_device *rdev);
+void radeon_ring_unlock_undo(struct radeon_device *rdev);
+int radeon_ring_test(struct radeon_device *rdev);
+int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size);
+void radeon_ring_fini(struct radeon_device *rdev);
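+/* Usage sketch, assuming a radeon_ring_write() helper that pushes one dword:
+ * reserve space, emit a register write packet, then commit.
+ *
+ *     if (radeon_ring_lock(rdev, 2) == 0) {
+ *             radeon_ring_write(rdev, PACKET0(reg, 0));
+ *             radeon_ring_write(rdev, value);
+ *             radeon_ring_unlock_commit(rdev);
+ *     }
+ */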
+
+
+/*
+ * CS.
+ */
+struct radeon_cs_reloc {
+       struct drm_gem_object           *gobj;
+       struct radeon_object            *robj;
+       struct radeon_object_list       lobj;
+       uint32_t                        handle;
+       uint32_t                        flags;
+};
+
+struct radeon_cs_chunk {
+       uint32_t                chunk_id;
+       uint32_t                length_dw;
+       uint32_t                *kdata;
+};
+
+struct radeon_cs_parser {
+       struct radeon_device    *rdev;
+       struct drm_file         *filp;
+       /* chunks */
+       unsigned                nchunks;
+       struct radeon_cs_chunk  *chunks;
+       uint64_t                *chunks_array;
+       /* IB */
+       unsigned                idx;
+       /* relocations */
+       unsigned                nrelocs;
+       struct radeon_cs_reloc  *relocs;
+       struct radeon_cs_reloc  **relocs_ptr;
+       struct list_head        validated;
+       /* indices of various chunks */
+       int                     chunk_ib_idx;
+       int                     chunk_relocs_idx;
+       struct radeon_ib        *ib;
+       void                    *track;
+};
+
+struct radeon_cs_packet {
+       unsigned        idx;
+       unsigned        type;
+       unsigned        reg;
+       unsigned        opcode;
+       int             count;
+       unsigned        one_reg_wr;
+};
+
+typedef int (*radeon_packet0_check_t)(struct radeon_cs_parser *p,
+                                     struct radeon_cs_packet *pkt,
+                                     unsigned idx, unsigned reg);
+typedef int (*radeon_packet3_check_t)(struct radeon_cs_parser *p,
+                                     struct radeon_cs_packet *pkt);
+
+
+/*
+ * AGP
+ */
+int radeon_agp_init(struct radeon_device *rdev);
+void radeon_agp_fini(struct radeon_device *rdev);
+
+
+/*
+ * Writeback
+ */
+struct radeon_wb {
+       struct radeon_object    *wb_obj;
+       volatile uint32_t       *wb;
+       uint64_t                gpu_addr;
+};
+
+
+/*
+ * Benchmarking
+ */
+void radeon_benchmark(struct radeon_device *rdev);
+
+
+/*
+ * Debugfs
+ */
+int radeon_debugfs_add_files(struct radeon_device *rdev,
+                            struct drm_info_list *files,
+                            unsigned nfiles);
+int radeon_debugfs_fence_init(struct radeon_device *rdev);
+int r100_debugfs_rbbm_init(struct radeon_device *rdev);
+int r100_debugfs_cp_init(struct radeon_device *rdev);
+
+
+/*
+ * ASIC specific functions: a table of function pointers filled in per chip
+ * family; the rest of the driver reaches the hardware through rdev->asic so
+ * the common code stays chip-independent.
+ */
+struct radeon_asic {
+       void (*errata)(struct radeon_device *rdev);
+       void (*vram_info)(struct radeon_device *rdev);
+       int (*gpu_reset)(struct radeon_device *rdev);
+       int (*mc_init)(struct radeon_device *rdev);
+       void (*mc_fini)(struct radeon_device *rdev);
+       int (*wb_init)(struct radeon_device *rdev);
+       void (*wb_fini)(struct radeon_device *rdev);
+       int (*gart_enable)(struct radeon_device *rdev);
+       void (*gart_disable)(struct radeon_device *rdev);
+       void (*gart_tlb_flush)(struct radeon_device *rdev);
+       int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr);
+       int (*cp_init)(struct radeon_device *rdev, unsigned ring_size);
+       void (*cp_fini)(struct radeon_device *rdev);
+       void (*cp_disable)(struct radeon_device *rdev);
+       void (*ring_start)(struct radeon_device *rdev);
+       int (*irq_set)(struct radeon_device *rdev);
+       int (*irq_process)(struct radeon_device *rdev);
+       void (*fence_ring_emit)(struct radeon_device *rdev, struct radeon_fence *fence);
+       int (*cs_parse)(struct radeon_cs_parser *p);
+       int (*copy_blit)(struct radeon_device *rdev,
+                        uint64_t src_offset,
+                        uint64_t dst_offset,
+                        unsigned num_pages,
+                        struct radeon_fence *fence);
+       int (*copy_dma)(struct radeon_device *rdev,
+                       uint64_t src_offset,
+                       uint64_t dst_offset,
+                       unsigned num_pages,
+                       struct radeon_fence *fence);
+       int (*copy)(struct radeon_device *rdev,
+                   uint64_t src_offset,
+                   uint64_t dst_offset,
+                   unsigned num_pages,
+                   struct radeon_fence *fence);
+       void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
+       void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock);
+       void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
+       void (*set_clock_gating)(struct radeon_device *rdev, int enable);
+};
+
+
+/*
+ * IOCTL.
+ */
+int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
+                         struct drm_file *filp);
+int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
+                           struct drm_file *filp);
+int radeon_gem_pin_ioctl(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv);
+int radeon_gem_unpin_ioctl(struct drm_device *dev, void *data,
+                          struct drm_file *file_priv);
+int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+                           struct drm_file *file_priv);
+int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
+                          struct drm_file *file_priv);
+int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
+                               struct drm_file *filp);
+int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
+                         struct drm_file *filp);
+int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
+                         struct drm_file *filp);
+int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
+                             struct drm_file *filp);
+int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+
+
+/*
+ * Core structure, functions and helpers.
+ */
+typedef uint32_t (*radeon_rreg_t)(struct radeon_device*, uint32_t);
+typedef void (*radeon_wreg_t)(struct radeon_device*, uint32_t, uint32_t);
+
+struct radeon_device {
+       struct drm_device               *ddev;
+       struct pci_dev                  *pdev;
+       /* ASIC */
+       enum radeon_family              family;
+       unsigned long                   flags;
+       int                             usec_timeout;
+       enum radeon_pll_errata          pll_errata;
+       int                             num_gb_pipes;
+       int                             disp_priority;
+       /* BIOS */
+       uint8_t                         *bios;
+       bool                            is_atom_bios;
+       uint16_t                        bios_header_start;
+       struct radeon_object            *stollen_vga_memory;
+       struct fb_info                  *fbdev_info;
+       struct radeon_object            *fbdev_robj;
+       struct radeon_framebuffer       *fbdev_rfb;
+       /* Register mmio */
+       unsigned long                   rmmio_base;
+       unsigned long                   rmmio_size;
+       void                            *rmmio;
+       radeon_rreg_t                   mm_rreg;
+       radeon_wreg_t                   mm_wreg;
+       radeon_rreg_t                   mc_rreg;
+       radeon_wreg_t                   mc_wreg;
+       radeon_rreg_t                   pll_rreg;
+       radeon_wreg_t                   pll_wreg;
+       radeon_rreg_t                   pcie_rreg;
+       radeon_wreg_t                   pcie_wreg;
+       radeon_rreg_t                   pciep_rreg;
+       radeon_wreg_t                   pciep_wreg;
+       struct radeon_clock             clock;
+       struct radeon_mc                mc;
+       struct radeon_gart              gart;
+       struct radeon_mode_info         mode_info;
+       struct radeon_scratch           scratch;
+       struct radeon_mman              mman;
+       struct radeon_fence_driver      fence_drv;
+       struct radeon_cp                cp;
+       struct radeon_ib_pool           ib_pool;
+       struct radeon_irq               irq;
+       struct radeon_asic              *asic;
+       struct radeon_gem               gem;
+       struct mutex                    cs_mutex;
+       struct radeon_wb                wb;
+       bool                            gpu_lockup;
+       bool                            shutdown;
+       bool                            suspend;
+};
+
+int radeon_device_init(struct radeon_device *rdev,
+                      struct drm_device *ddev,
+                      struct pci_dev *pdev,
+                      uint32_t flags);
+void radeon_device_fini(struct radeon_device *rdev);
+int radeon_gpu_wait_for_idle(struct radeon_device *rdev);
+
+
+/*
+ * Registers read & write functions.
+ */
+#define RREG8(reg) readb(((void __iomem *)rdev->rmmio) + (reg))
+#define WREG8(reg, v) writeb(v, ((void __iomem *)rdev->rmmio) + (reg))
+#define RREG32(reg) rdev->mm_rreg(rdev, (reg))
+#define WREG32(reg, v) rdev->mm_wreg(rdev, (reg), (v))
+#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
+#define REG_GET(FIELD, v) (((v) & FIELD##_MASK) >> FIELD##_SHIFT)
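+/* e.g. REG_SET(R520_MC_FB_TOP, top >> 16) packs a field value into its
+ * position in the register word, as r520_mc_init() does when programming
+ * R520_MC_FB_LOCATION. */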
+#define RREG32_PLL(reg) rdev->pll_rreg(rdev, (reg))
+#define WREG32_PLL(reg, v) rdev->pll_wreg(rdev, (reg), (v))
+#define RREG32_MC(reg) rdev->mc_rreg(rdev, (reg))
+#define WREG32_MC(reg, v) rdev->mc_wreg(rdev, (reg), (v))
+#define RREG32_PCIE(reg) rdev->pcie_rreg(rdev, (reg))
+#define WREG32_PCIE(reg, v) rdev->pcie_wreg(rdev, (reg), (v))
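+/* Masked read-modify-write helpers: bits set in 'mask' keep their current
+ * register value, bits cleared in 'mask' are taken from 'val'. */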
+#define WREG32_P(reg, val, mask)                               \
+       do {                                                    \
+               uint32_t tmp_ = RREG32(reg);                    \
+               tmp_ &= (mask);                                 \
+               tmp_ |= ((val) & ~(mask));                      \
+               WREG32(reg, tmp_);                              \
+       } while (0)
+#define WREG32_PLL_P(reg, val, mask)                           \
+       do {                                                    \
+               uint32_t tmp_ = RREG32_PLL(reg);                \
+               tmp_ &= (mask);                                 \
+               tmp_ |= ((val) & ~(mask));                      \
+               WREG32_PLL(reg, tmp_);                          \
+       } while (0)
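+/* WREG32_P()/WREG32_PLL_P() do a read-modify-write where 'mask' names
+ * the bits to PRESERVE: bits inside the mask keep their current value,
+ * bits outside it are taken from 'val'.  Sketch with a made-up register:
+ * to update only the low nibble of SOME_REG and leave the rest alone,
+ *     WREG32_P(SOME_REG, 0x5, ~0xf);
+ */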
+
+void r100_pll_errata_after_index(struct radeon_device *rdev);
+
+
+/*
+ * ASIC helpers.
+ */
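+/* These test for a hardware generation rather than an exact family;
+ * the cutoffs follow the family enum ordering: RS600 and newer count
+ * as AVIVO, RV620 and newer as DCE3, RV730 and newer as DCE3.2 class
+ * display hardware.
+ */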
+#define ASIC_IS_RV100(rdev) ((rdev->family == CHIP_RV100) || \
+               (rdev->family == CHIP_RV200) || \
+               (rdev->family == CHIP_RS100) || \
+               (rdev->family == CHIP_RS200) || \
+               (rdev->family == CHIP_RV250) || \
+               (rdev->family == CHIP_RV280) || \
+               (rdev->family == CHIP_RS300))
+#define ASIC_IS_R300(rdev) ((rdev->family == CHIP_R300)  ||    \
+               (rdev->family == CHIP_RV350) ||                 \
+               (rdev->family == CHIP_R350)  ||                 \
+               (rdev->family == CHIP_RV380) ||                 \
+               (rdev->family == CHIP_R420)  ||                 \
+               (rdev->family == CHIP_R423)  ||                 \
+               (rdev->family == CHIP_RV410) ||                 \
+               (rdev->family == CHIP_RS400) ||                 \
+               (rdev->family == CHIP_RS480))
+#define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600))
+#define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620))
+#define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730))
+
+
+/*
+ * BIOS helpers.
+ */
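+/* The RBIOS*() helpers read little-endian values straight out of the
+ * BIOS image at rdev->bios, assembling them byte by byte so unaligned
+ * offsets are fine.  For example, the 16-bit header pointer that ATI
+ * BIOS images conventionally keep at offset 0x48 could be read as
+ * RBIOS16(0x48).
+ */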
+#define RBIOS8(i) (rdev->bios[i])
+#define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8))
+#define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))
+
+int radeon_combios_init(struct radeon_device *rdev);
+void radeon_combios_fini(struct radeon_device *rdev);
+int radeon_atombios_init(struct radeon_device *rdev);
+void radeon_atombios_fini(struct radeon_device *rdev);
+
+
+/*
+ * RING helpers.
+ */
+#define CP_PACKET0                     0x00000000
+#define                PACKET0_BASE_INDEX_SHIFT        0
+#define                PACKET0_BASE_INDEX_MASK         (0x1ffff << 0)
+#define                PACKET0_COUNT_SHIFT             16
+#define                PACKET0_COUNT_MASK              (0x3fff << 16)
+#define CP_PACKET1                     0x40000000
+#define CP_PACKET2                     0x80000000
+#define                PACKET2_PAD_SHIFT               0
+#define                PACKET2_PAD_MASK                (0x3fffffff << 0)
+#define CP_PACKET3                     0xC0000000
+#define                PACKET3_IT_OPCODE_SHIFT         8
+#define                PACKET3_IT_OPCODE_MASK          (0xff << 8)
+#define                PACKET3_COUNT_SHIFT             16
+#define                PACKET3_COUNT_MASK              (0x3fff << 16)
+/* PACKET3 op code */
+#define                PACKET3_NOP                     0x10
+#define                PACKET3_3D_DRAW_VBUF            0x28
+#define                PACKET3_3D_DRAW_IMMD            0x29
+#define                PACKET3_3D_DRAW_INDX            0x2A
+#define                PACKET3_3D_LOAD_VBPNTR          0x2F
+#define                PACKET3_INDX_BUFFER             0x33
+#define                PACKET3_3D_DRAW_VBUF_2          0x34
+#define                PACKET3_3D_DRAW_IMMD_2          0x35
+#define                PACKET3_3D_DRAW_INDX_2          0x36
+#define                PACKET3_BITBLT_MULTI            0x9B
+
+#define PACKET0(reg, n)        (CP_PACKET0 |                                   \
+                        REG_SET(PACKET0_BASE_INDEX, (reg) >> 2) |      \
+                        REG_SET(PACKET0_COUNT, (n)))
+#define PACKET2(v)     (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
+#define PACKET3(op, n) (CP_PACKET3 |                                   \
+                        REG_SET(PACKET3_IT_OPCODE, (op)) |             \
+                        REG_SET(PACKET3_COUNT, (n)))
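+/* PACKET0(reg, n) writes n+1 consecutive dwords to registers starting
+ * at 'reg', PACKET2() is a padding/NOP packet and PACKET3(op, n) issues
+ * a CP command with n+1 data dwords.  Minimal sketch of a single
+ * register write through the ring (register name purely illustrative):
+ *     radeon_ring_write(rdev, PACKET0(RADEON_SOME_REG, 0));
+ *     radeon_ring_write(rdev, value);
+ */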
+
+#define        PACKET_TYPE0    0
+#define        PACKET_TYPE1    1
+#define        PACKET_TYPE2    2
+#define        PACKET_TYPE3    3
+
+#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
+#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
+#define CP_PACKET0_GET_REG(h) (((h) & 0x1FFF) << 2)
+#define CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1)
+#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
+
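+/* Push a single dword into the CP ring buffer.  The ring size is a
+ * power of two, so the write pointer wraps by masking with cp.ptr_mask;
+ * count_dw and ring_free_dw track how many dwords of the space reserved
+ * for the current submission remain.
+ */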
+static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
+{
+#if DRM_DEBUG_CODE
+       if (rdev->cp.count_dw <= 0) {
+               DRM_ERROR("radeon: writing more dwords to ring than expected!\n");
+       }
+#endif
+       rdev->cp.ring[rdev->cp.wptr++] = v;
+       rdev->cp.wptr &= rdev->cp.ptr_mask;
+       rdev->cp.count_dw--;
+       rdev->cp.ring_free_dw--;
+}
+
+
+/*
+ * ASIC dispatch macros.
+ */
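+/* Every wrapper below simply dispatches through the per-family function
+ * table pointed to by rdev->asic (r100_asic, r300_asic, ... in
+ * radeon_asic.h), so callers stay family-agnostic; e.g.
+ * radeon_gart_tlb_flush(rdev) ends up in r100_pci_gart_tlb_flush() or
+ * rv370_pcie_gart_tlb_flush() depending on the chip.
+ */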
+#define radeon_cs_parse(p) rdev->asic->cs_parse((p))
+#define radeon_errata(rdev) (rdev)->asic->errata((rdev))
+#define radeon_vram_info(rdev) (rdev)->asic->vram_info((rdev))
+#define radeon_gpu_reset(rdev) (rdev)->asic->gpu_reset((rdev))
+#define radeon_mc_init(rdev) (rdev)->asic->mc_init((rdev))
+#define radeon_mc_fini(rdev) (rdev)->asic->mc_fini((rdev))
+#define radeon_wb_init(rdev) (rdev)->asic->wb_init((rdev))
+#define radeon_wb_fini(rdev) (rdev)->asic->wb_fini((rdev))
+#define radeon_gart_enable(rdev) (rdev)->asic->gart_enable((rdev))
+#define radeon_gart_disable(rdev) (rdev)->asic->gart_disable((rdev))
+#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev))
+#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p))
+#define radeon_cp_init(rdev,rsize) (rdev)->asic->cp_init((rdev), (rsize))
+#define radeon_cp_fini(rdev) (rdev)->asic->cp_fini((rdev))
+#define radeon_cp_disable(rdev) (rdev)->asic->cp_disable((rdev))
+#define radeon_ring_start(rdev) (rdev)->asic->ring_start((rdev))
+#define radeon_irq_set(rdev) (rdev)->asic->irq_set((rdev))
+#define radeon_irq_process(rdev) (rdev)->asic->irq_process((rdev))
+#define radeon_fence_ring_emit(rdev, fence) (rdev)->asic->fence_ring_emit((rdev), (fence))
+#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy_blit((rdev), (s), (d), (np), (f))
+#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy_dma((rdev), (s), (d), (np), (f))
+#define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy((rdev), (s), (d), (np), (f))
+#define radeon_set_engine_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e))
+#define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_memory_clock((rdev), (e))
+#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l))
+#define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e))
+
+#endif
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
new file mode 100644 (file)
index 0000000..23ea995
--- /dev/null
@@ -0,0 +1,249 @@
+/*
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Dave Airlie
+ *    Jerome Glisse <glisse@freedesktop.org>
+ */
+#include "drmP.h"
+#include "drm.h"
+#include "radeon.h"
+#include "radeon_drm.h"
+
+#if __OS_HAS_AGP
+
+struct radeon_agpmode_quirk {
+       u32 hostbridge_vendor;
+       u32 hostbridge_device;
+       u32 chip_vendor;
+       u32 chip_device;
+       u32 subsys_vendor;
+       u32 subsys_device;
+       u32 default_mode;
+};
+
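+/* Each quirk entry matches a full host bridge vendor/device, GPU
+ * vendor/device and subsystem vendor/device tuple and supplies the AGP
+ * mode known to work for that combination; the list is terminated by an
+ * all-zero entry.
+ */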
+static struct radeon_agpmode_quirk radeon_agpmode_quirk_list[] = {
+       /* Intel E7505 Memory Controller Hub / RV350 AR [Radeon 9600XT] Needs AGPMode 4 (deb #515326) */
+       { PCI_VENDOR_ID_INTEL, 0x2550, PCI_VENDOR_ID_ATI, 0x4152, 0x1458, 0x4038, 4},
+       /* Intel 82865G/PE/P DRAM Controller/Host-Hub / Mobility 9800 Needs AGPMode 4 (deb #462590) */
+       { PCI_VENDOR_ID_INTEL, 0x2570, PCI_VENDOR_ID_ATI, 0x4a4e, PCI_VENDOR_ID_DELL, 0x5106, 4},
+       /* Intel 82865G/PE/P DRAM Controller/Host-Hub / RV280 [Radeon 9200 SE] Needs AGPMode 4 (lp #300304) */
+       { PCI_VENDOR_ID_INTEL, 0x2570, PCI_VENDOR_ID_ATI, 0x5964,
+               0x148c, 0x2073, 4},
+       /* Intel 82855PM Processor to I/O Controller / Mobility M6 LY Needs AGPMode 1 (deb #467235) */
+       { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c59,
+               PCI_VENDOR_ID_IBM, 0x052f, 1},
+       /* Intel 82855PM host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (lp #195051) */
+       { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4e50,
+               PCI_VENDOR_ID_IBM, 0x0550, 1},
+       /* Intel 82855PM host bridge / Mobility M7 needs AGPMode 1 */
+       { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c57,
+               PCI_VENDOR_ID_IBM, 0x0530, 1},
+       /* Intel 82855PM host bridge / FireGL Mobility T2 RV350 Needs AGPMode 2 (fdo #20647) */
+       { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4e54,
+               PCI_VENDOR_ID_IBM, 0x054f, 2},
+       /* Intel 82855PM host bridge / Mobility M9+ / VaioPCG-V505DX Needs AGPMode 2 (fdo #17928) */
+       { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x5c61,
+               PCI_VENDOR_ID_SONY, 0x816b, 2},
+       /* Intel 82855PM Processor to I/O Controller / Mobility M9+ Needs AGPMode 8 (phoronix forum) */
+       { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x5c61,
+               PCI_VENDOR_ID_SONY, 0x8195, 8},
+       /* Intel 82830 830 Chipset Host Bridge / Mobility M6 LY Needs AGPMode 2 (fdo #17360)*/
+       { PCI_VENDOR_ID_INTEL, 0x3575, PCI_VENDOR_ID_ATI, 0x4c59,
+               PCI_VENDOR_ID_DELL, 0x00e3, 2},
+       /* Intel 82852/82855 host bridge / Mobility FireGL 9000 R250 Needs AGPMode 1 (lp #296617) */
+       { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4c66,
+               PCI_VENDOR_ID_DELL, 0x0149, 1},
+       /* Intel 82852/82855 host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (deb #467460) */
+       { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50,
+               0x1025, 0x0061, 1},
+       /* Intel 82852/82855 host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (lp #203007) */
+       { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50,
+               0x1025, 0x0064, 1},
+       /* Intel 82852/82855 host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (lp #141551) */
+       { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50,
+               PCI_VENDOR_ID_ASUSTEK, 0x1942, 1},
+       /* Intel 82852/82855 host bridge / Mobility 9600/9700 Needs AGPMode 1 (deb #510208) */
+       { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50,
+               0x10cf, 0x127f, 1},
+       /* ASRock K7VT4A+ AGP 8x / ATI Radeon 9250 AGP Needs AGPMode 4 (lp #133192) */
+       { 0x1849, 0x3189, PCI_VENDOR_ID_ATI, 0x5960,
+               0x1787, 0x5960, 4},
+       /* VIA K8M800 Host Bridge / RV280 [Radeon 9200 PRO] Needs AGPMode 4 (fdo #12544) */
+       { PCI_VENDOR_ID_VIA, 0x0204, PCI_VENDOR_ID_ATI, 0x5960,
+               0x17af, 0x2020, 4},
+       /* VIA KT880 Host Bridge / RV350 [Radeon 9550] Needs AGPMode 4 (fdo #19981) */
+       { PCI_VENDOR_ID_VIA, 0x0269, PCI_VENDOR_ID_ATI, 0x4153,
+               PCI_VENDOR_ID_ASUSTEK, 0x003c, 4},
+       /* VIA VT8363 Host Bridge / R200 QL [Radeon 8500] Needs AGPMode 2 (lp #141551) */
+       { PCI_VENDOR_ID_VIA, 0x0305, PCI_VENDOR_ID_ATI, 0x514c,
+               PCI_VENDOR_ID_ATI, 0x013a, 2},
+       /* VIA VT82C693A Host Bridge / RV280 [Radeon 9200 PRO] Needs AGPMode 2 (deb #515512) */
+       { PCI_VENDOR_ID_VIA, 0x0691, PCI_VENDOR_ID_ATI, 0x5960,
+               PCI_VENDOR_ID_ASUSTEK, 0x004c, 2},
+       /* VIA VT82C693A Host Bridge / RV280 [Radeon 9200 PRO] Needs AGPMode 2 */
+       { PCI_VENDOR_ID_VIA, 0x0691, PCI_VENDOR_ID_ATI, 0x5960,
+               PCI_VENDOR_ID_ASUSTEK, 0x0054, 2},
+       /* VIA VT8377 Host Bridge / R200 QM [Radeon 9100] Needs AGPMode 4 (deb #461144) */
+       { PCI_VENDOR_ID_VIA, 0x3189, PCI_VENDOR_ID_ATI, 0x514d,
+               0x174b, 0x7149, 4},
+       /* VIA VT8377 Host Bridge / RV280 [Radeon 9200 PRO] Needs AGPMode 4 (lp #312693) */
+       { PCI_VENDOR_ID_VIA, 0x3189, PCI_VENDOR_ID_ATI, 0x5960,
+               0x1462, 0x0380, 4},
+       /* VIA VT8377 Host Bridge / RV280 Needs AGPMode 4 (ati ML) */
+       { PCI_VENDOR_ID_VIA, 0x3189, PCI_VENDOR_ID_ATI, 0x5964,
+               0x148c, 0x2073, 4},
+       /* ATI Host Bridge / RV280 [M9+] Needs AGPMode 1 (phoronix forum) */
+       { PCI_VENDOR_ID_ATI, 0xcbb2, PCI_VENDOR_ID_ATI, 0x5c61,
+               PCI_VENDOR_ID_SONY, 0x8175, 1},
+       /* HP Host Bridge / R300 [FireGL X1] Needs AGPMode 2 (fdo #7770) */
+       { PCI_VENDOR_ID_HP, 0x122e, PCI_VENDOR_ID_ATI, 0x4e47,
+               PCI_VENDOR_ID_ATI, 0x0152, 2},
+       { 0, 0, 0, 0, 0, 0, 0 },
+};
+#endif
+
+int radeon_agp_init(struct radeon_device *rdev)
+{
+#if __OS_HAS_AGP
+       struct radeon_agpmode_quirk *p = radeon_agpmode_quirk_list;
+       struct drm_agp_mode mode;
+       struct drm_agp_info info;
+       uint32_t agp_status;
+       int default_mode;
+       bool is_v3;
+       int ret;
+
+       /* Acquire AGP. */
+       if (!rdev->ddev->agp->acquired) {
+               ret = drm_agp_acquire(rdev->ddev);
+               if (ret) {
+                       DRM_ERROR("Unable to acquire AGP: %d\n", ret);
+                       return ret;
+               }
+       }
+
+       ret = drm_agp_info(rdev->ddev, &info);
+       if (ret) {
+               DRM_ERROR("Unable to get AGP info: %d\n", ret);
+               return ret;
+       }
+       mode.mode = info.mode;
+       agp_status = (RREG32(RADEON_AGP_STATUS) | RADEON_AGPv3_MODE) & mode.mode;
+       is_v3 = !!(agp_status & RADEON_AGPv3_MODE);
+
+       if (is_v3) {
+               default_mode = (agp_status & RADEON_AGPv3_8X_MODE) ? 8 : 4;
+       } else {
+               if (agp_status & RADEON_AGP_4X_MODE) {
+                       default_mode = 4;
+               } else if (agp_status & RADEON_AGP_2X_MODE) {
+                       default_mode = 2;
+               } else {
+                       default_mode = 1;
+               }
+       }
+
+       /* Apply AGPMode Quirks */
+       while (p && p->chip_device != 0) {
+               if (info.id_vendor == p->hostbridge_vendor &&
+                   info.id_device == p->hostbridge_device &&
+                   rdev->pdev->vendor == p->chip_vendor &&
+                   rdev->pdev->device == p->chip_device &&
+                   rdev->pdev->subsystem_vendor == p->subsys_vendor &&
+                   rdev->pdev->subsystem_device == p->subsys_device) {
+                       default_mode = p->default_mode;
+               }
+               ++p;
+       }
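+       /* Validate the user-requested radeon_agpmode setting: a requested
+        * mode must be a power of two and fall inside the range the bridge
+        * can do (4 or 8 for AGPv3, 1/2/4 otherwise); anything else falls
+        * back to the detected/quirked default.
+        */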
+
+       if (radeon_agpmode > 0) {
+               if ((radeon_agpmode < (is_v3 ? 4 : 1)) ||
+                   (radeon_agpmode > (is_v3 ? 8 : 4)) ||
+                   (radeon_agpmode & (radeon_agpmode - 1))) {
+                       DRM_ERROR("Illegal AGP Mode: %d (valid %s), leaving at %d\n",
+                                 radeon_agpmode, is_v3 ? "4, 8" : "1, 2, 4",
+                                 default_mode);
+                       radeon_agpmode = default_mode;
+               } else {
+                       DRM_INFO("AGP mode requested: %d\n", radeon_agpmode);
+               }
+       } else {
+               radeon_agpmode = default_mode;
+       }
+
+       mode.mode &= ~RADEON_AGP_MODE_MASK;
+       if (is_v3) {
+               switch (radeon_agpmode) {
+               case 8:
+                       mode.mode |= RADEON_AGPv3_8X_MODE;
+                       break;
+               case 4:
+               default:
+                       mode.mode |= RADEON_AGPv3_4X_MODE;
+                       break;
+               }
+       } else {
+               switch (radeon_agpmode) {
+               case 4:
+                       mode.mode |= RADEON_AGP_4X_MODE;
+                       break;
+               case 2:
+                       mode.mode |= RADEON_AGP_2X_MODE;
+                       break;
+               case 1:
+               default:
+                       mode.mode |= RADEON_AGP_1X_MODE;
+                       break;
+               }
+       }
+
+       mode.mode &= ~RADEON_AGP_FW_MODE; /* disable fw */
+       ret = drm_agp_enable(rdev->ddev, mode);
+       if (ret) {
+               DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode);
+               return ret;
+       }
+
+       rdev->mc.agp_base = rdev->ddev->agp->agp_info.aper_base;
+       rdev->mc.gtt_size = rdev->ddev->agp->agp_info.aper_size << 20;
+
+       /* workaround some hw issues */
+       if (rdev->family < CHIP_R200) {
+               WREG32(RADEON_AGP_CNTL, RREG32(RADEON_AGP_CNTL) | 0x000e0000);
+       }
+       return 0;
+#else
+       return 0;
+#endif
+}
+
+void radeon_agp_fini(struct radeon_device *rdev)
+{
+#if __OS_HAS_AGP
+       if (rdev->flags & RADEON_IS_AGP) {
+               if (rdev->ddev->agp && rdev->ddev->agp->acquired) {
+                       drm_agp_release(rdev->ddev);
+               }
+       }
+#endif
+}
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
new file mode 100644 (file)
index 0000000..e57d8a7
--- /dev/null
@@ -0,0 +1,405 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __RADEON_ASIC_H__
+#define __RADEON_ASIC_H__
+
+/*
+ * common functions
+ */
+void radeon_legacy_set_engine_clock(struct radeon_device *rdev, uint32_t eng_clock);
+void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
+
+void radeon_atom_set_engine_clock(struct radeon_device *rdev, uint32_t eng_clock);
+void radeon_atom_set_memory_clock(struct radeon_device *rdev, uint32_t mem_clock);
+void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
+
+/*
+ * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
+ */
+uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg);
+void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+void r100_errata(struct radeon_device *rdev);
+void r100_vram_info(struct radeon_device *rdev);
+int r100_gpu_reset(struct radeon_device *rdev);
+int r100_mc_init(struct radeon_device *rdev);
+void r100_mc_fini(struct radeon_device *rdev);
+int r100_wb_init(struct radeon_device *rdev);
+void r100_wb_fini(struct radeon_device *rdev);
+int r100_gart_enable(struct radeon_device *rdev);
+void r100_pci_gart_disable(struct radeon_device *rdev);
+void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
+int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
+int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
+void r100_cp_fini(struct radeon_device *rdev);
+void r100_cp_disable(struct radeon_device *rdev);
+void r100_ring_start(struct radeon_device *rdev);
+int r100_irq_set(struct radeon_device *rdev);
+int r100_irq_process(struct radeon_device *rdev);
+void r100_fence_ring_emit(struct radeon_device *rdev,
+                         struct radeon_fence *fence);
+int r100_cs_parse(struct radeon_cs_parser *p);
+void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg);
+int r100_copy_blit(struct radeon_device *rdev,
+                  uint64_t src_offset,
+                  uint64_t dst_offset,
+                  unsigned num_pages,
+                  struct radeon_fence *fence);
+
+static struct radeon_asic r100_asic = {
+       .errata = &r100_errata,
+       .vram_info = &r100_vram_info,
+       .gpu_reset = &r100_gpu_reset,
+       .mc_init = &r100_mc_init,
+       .mc_fini = &r100_mc_fini,
+       .wb_init = &r100_wb_init,
+       .wb_fini = &r100_wb_fini,
+       .gart_enable = &r100_gart_enable,
+       .gart_disable = &r100_pci_gart_disable,
+       .gart_tlb_flush = &r100_pci_gart_tlb_flush,
+       .gart_set_page = &r100_pci_gart_set_page,
+       .cp_init = &r100_cp_init,
+       .cp_fini = &r100_cp_fini,
+       .cp_disable = &r100_cp_disable,
+       .ring_start = &r100_ring_start,
+       .irq_set = &r100_irq_set,
+       .irq_process = &r100_irq_process,
+       .fence_ring_emit = &r100_fence_ring_emit,
+       .cs_parse = &r100_cs_parse,
+       .copy_blit = &r100_copy_blit,
+       .copy_dma = NULL,
+       .copy = &r100_copy_blit,
+       .set_engine_clock = &radeon_legacy_set_engine_clock,
+       .set_memory_clock = NULL,
+       .set_pcie_lanes = NULL,
+       .set_clock_gating = &radeon_legacy_set_clock_gating,
+};
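+/* The per-family tables that follow reuse the r100/r300 hooks wherever
+ * the hardware block is unchanged (writeback, CP setup, IRQs, blit
+ * copies) and only override what actually differs, typically the GART,
+ * memory-controller and clock callbacks.
+ */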
+
+
+/*
+ * r300,r350,rv350,rv380
+ */
+void r300_errata(struct radeon_device *rdev);
+void r300_vram_info(struct radeon_device *rdev);
+int r300_gpu_reset(struct radeon_device *rdev);
+int r300_mc_init(struct radeon_device *rdev);
+void r300_mc_fini(struct radeon_device *rdev);
+void r300_ring_start(struct radeon_device *rdev);
+void r300_fence_ring_emit(struct radeon_device *rdev,
+                         struct radeon_fence *fence);
+int r300_cs_parse(struct radeon_cs_parser *p);
+int r300_gart_enable(struct radeon_device *rdev);
+void rv370_pcie_gart_disable(struct radeon_device *rdev);
+void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
+int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
+uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
+void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
+int r300_copy_dma(struct radeon_device *rdev,
+                 uint64_t src_offset,
+                 uint64_t dst_offset,
+                 unsigned num_pages,
+                 struct radeon_fence *fence);
+static struct radeon_asic r300_asic = {
+       .errata = &r300_errata,
+       .vram_info = &r300_vram_info,
+       .gpu_reset = &r300_gpu_reset,
+       .mc_init = &r300_mc_init,
+       .mc_fini = &r300_mc_fini,
+       .wb_init = &r100_wb_init,
+       .wb_fini = &r100_wb_fini,
+       .gart_enable = &r300_gart_enable,
+       .gart_disable = &r100_pci_gart_disable,
+       .gart_tlb_flush = &r100_pci_gart_tlb_flush,
+       .gart_set_page = &r100_pci_gart_set_page,
+       .cp_init = &r100_cp_init,
+       .cp_fini = &r100_cp_fini,
+       .cp_disable = &r100_cp_disable,
+       .ring_start = &r300_ring_start,
+       .irq_set = &r100_irq_set,
+       .irq_process = &r100_irq_process,
+       .fence_ring_emit = &r300_fence_ring_emit,
+       .cs_parse = &r300_cs_parse,
+       .copy_blit = &r100_copy_blit,
+       .copy_dma = &r300_copy_dma,
+       .copy = &r100_copy_blit,
+       .set_engine_clock = &radeon_legacy_set_engine_clock,
+       .set_memory_clock = NULL,
+       .set_pcie_lanes = &rv370_set_pcie_lanes,
+       .set_clock_gating = &radeon_legacy_set_clock_gating,
+};
+
+/*
+ * r420,r423,rv410
+ */
+void r420_errata(struct radeon_device *rdev);
+void r420_vram_info(struct radeon_device *rdev);
+int r420_mc_init(struct radeon_device *rdev);
+void r420_mc_fini(struct radeon_device *rdev);
+static struct radeon_asic r420_asic = {
+       .errata = &r420_errata,
+       .vram_info = &r420_vram_info,
+       .gpu_reset = &r300_gpu_reset,
+       .mc_init = &r420_mc_init,
+       .mc_fini = &r420_mc_fini,
+       .wb_init = &r100_wb_init,
+       .wb_fini = &r100_wb_fini,
+       .gart_enable = &r300_gart_enable,
+       .gart_disable = &rv370_pcie_gart_disable,
+       .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
+       .gart_set_page = &rv370_pcie_gart_set_page,
+       .cp_init = &r100_cp_init,
+       .cp_fini = &r100_cp_fini,
+       .cp_disable = &r100_cp_disable,
+       .ring_start = &r300_ring_start,
+       .irq_set = &r100_irq_set,
+       .irq_process = &r100_irq_process,
+       .fence_ring_emit = &r300_fence_ring_emit,
+       .cs_parse = &r300_cs_parse,
+       .copy_blit = &r100_copy_blit,
+       .copy_dma = &r300_copy_dma,
+       .copy = &r100_copy_blit,
+       .set_engine_clock = &radeon_atom_set_engine_clock,
+       .set_memory_clock = &radeon_atom_set_memory_clock,
+       .set_pcie_lanes = &rv370_set_pcie_lanes,
+       .set_clock_gating = &radeon_atom_set_clock_gating,
+};
+
+
+/*
+ * rs400,rs480
+ */
+void rs400_errata(struct radeon_device *rdev);
+void rs400_vram_info(struct radeon_device *rdev);
+int rs400_mc_init(struct radeon_device *rdev);
+void rs400_mc_fini(struct radeon_device *rdev);
+int rs400_gart_enable(struct radeon_device *rdev);
+void rs400_gart_disable(struct radeon_device *rdev);
+void rs400_gart_tlb_flush(struct radeon_device *rdev);
+int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
+uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
+void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+static struct radeon_asic rs400_asic = {
+       .errata = &rs400_errata,
+       .vram_info = &rs400_vram_info,
+       .gpu_reset = &r300_gpu_reset,
+       .mc_init = &rs400_mc_init,
+       .mc_fini = &rs400_mc_fini,
+       .wb_init = &r100_wb_init,
+       .wb_fini = &r100_wb_fini,
+       .gart_enable = &rs400_gart_enable,
+       .gart_disable = &rs400_gart_disable,
+       .gart_tlb_flush = &rs400_gart_tlb_flush,
+       .gart_set_page = &rs400_gart_set_page,
+       .cp_init = &r100_cp_init,
+       .cp_fini = &r100_cp_fini,
+       .cp_disable = &r100_cp_disable,
+       .ring_start = &r300_ring_start,
+       .irq_set = &r100_irq_set,
+       .irq_process = &r100_irq_process,
+       .fence_ring_emit = &r300_fence_ring_emit,
+       .cs_parse = &r300_cs_parse,
+       .copy_blit = &r100_copy_blit,
+       .copy_dma = &r300_copy_dma,
+       .copy = &r100_copy_blit,
+       .set_engine_clock = &radeon_legacy_set_engine_clock,
+       .set_memory_clock = NULL,
+       .set_pcie_lanes = NULL,
+       .set_clock_gating = &radeon_legacy_set_clock_gating,
+};
+
+
+/*
+ * rs600.
+ */
+void rs600_errata(struct radeon_device *rdev);
+void rs600_vram_info(struct radeon_device *rdev);
+int rs600_mc_init(struct radeon_device *rdev);
+void rs600_mc_fini(struct radeon_device *rdev);
+int rs600_irq_set(struct radeon_device *rdev);
+int rs600_gart_enable(struct radeon_device *rdev);
+void rs600_gart_disable(struct radeon_device *rdev);
+void rs600_gart_tlb_flush(struct radeon_device *rdev);
+int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
+uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
+void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+static struct radeon_asic rs600_asic = {
+       .errata = &rs600_errata,
+       .vram_info = &rs600_vram_info,
+       .gpu_reset = &r300_gpu_reset,
+       .mc_init = &rs600_mc_init,
+       .mc_fini = &rs600_mc_fini,
+       .wb_init = &r100_wb_init,
+       .wb_fini = &r100_wb_fini,
+       .gart_enable = &rs600_gart_enable,
+       .gart_disable = &rs600_gart_disable,
+       .gart_tlb_flush = &rs600_gart_tlb_flush,
+       .gart_set_page = &rs600_gart_set_page,
+       .cp_init = &r100_cp_init,
+       .cp_fini = &r100_cp_fini,
+       .cp_disable = &r100_cp_disable,
+       .ring_start = &r300_ring_start,
+       .irq_set = &rs600_irq_set,
+       .irq_process = &r100_irq_process,
+       .fence_ring_emit = &r300_fence_ring_emit,
+       .cs_parse = &r300_cs_parse,
+       .copy_blit = &r100_copy_blit,
+       .copy_dma = &r300_copy_dma,
+       .copy = &r100_copy_blit,
+       .set_engine_clock = &radeon_atom_set_engine_clock,
+       .set_memory_clock = &radeon_atom_set_memory_clock,
+       .set_pcie_lanes = NULL,
+       .set_clock_gating = &radeon_atom_set_clock_gating,
+};
+
+
+/*
+ * rs690,rs740
+ */
+void rs690_errata(struct radeon_device *rdev);
+void rs690_vram_info(struct radeon_device *rdev);
+int rs690_mc_init(struct radeon_device *rdev);
+void rs690_mc_fini(struct radeon_device *rdev);
+uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg);
+void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+static struct radeon_asic rs690_asic = {
+       .errata = &rs690_errata,
+       .vram_info = &rs690_vram_info,
+       .gpu_reset = &r300_gpu_reset,
+       .mc_init = &rs690_mc_init,
+       .mc_fini = &rs690_mc_fini,
+       .wb_init = &r100_wb_init,
+       .wb_fini = &r100_wb_fini,
+       .gart_enable = &rs400_gart_enable,
+       .gart_disable = &rs400_gart_disable,
+       .gart_tlb_flush = &rs400_gart_tlb_flush,
+       .gart_set_page = &rs400_gart_set_page,
+       .cp_init = &r100_cp_init,
+       .cp_fini = &r100_cp_fini,
+       .cp_disable = &r100_cp_disable,
+       .ring_start = &r300_ring_start,
+       .irq_set = &rs600_irq_set,
+       .irq_process = &r100_irq_process,
+       .fence_ring_emit = &r300_fence_ring_emit,
+       .cs_parse = &r300_cs_parse,
+       .copy_blit = &r100_copy_blit,
+       .copy_dma = &r300_copy_dma,
+       .copy = &r300_copy_dma,
+       .set_engine_clock = &radeon_atom_set_engine_clock,
+       .set_memory_clock = &radeon_atom_set_memory_clock,
+       .set_pcie_lanes = NULL,
+       .set_clock_gating = &radeon_atom_set_clock_gating,
+};
+
+
+/*
+ * rv515
+ */
+void rv515_errata(struct radeon_device *rdev);
+void rv515_vram_info(struct radeon_device *rdev);
+int rv515_gpu_reset(struct radeon_device *rdev);
+int rv515_mc_init(struct radeon_device *rdev);
+void rv515_mc_fini(struct radeon_device *rdev);
+uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg);
+void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+void rv515_ring_start(struct radeon_device *rdev);
+uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
+void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+static struct radeon_asic rv515_asic = {
+       .errata = &rv515_errata,
+       .vram_info = &rv515_vram_info,
+       .gpu_reset = &rv515_gpu_reset,
+       .mc_init = &rv515_mc_init,
+       .mc_fini = &rv515_mc_fini,
+       .wb_init = &r100_wb_init,
+       .wb_fini = &r100_wb_fini,
+       .gart_enable = &r300_gart_enable,
+       .gart_disable = &rv370_pcie_gart_disable,
+       .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
+       .gart_set_page = &rv370_pcie_gart_set_page,
+       .cp_init = &r100_cp_init,
+       .cp_fini = &r100_cp_fini,
+       .cp_disable = &r100_cp_disable,
+       .ring_start = &rv515_ring_start,
+       .irq_set = &r100_irq_set,
+       .irq_process = &r100_irq_process,
+       .fence_ring_emit = &r300_fence_ring_emit,
+       .cs_parse = &r100_cs_parse,
+       .copy_blit = &r100_copy_blit,
+       .copy_dma = &r300_copy_dma,
+       .copy = &r100_copy_blit,
+       .set_engine_clock = &radeon_atom_set_engine_clock,
+       .set_memory_clock = &radeon_atom_set_memory_clock,
+       .set_pcie_lanes = &rv370_set_pcie_lanes,
+       .set_clock_gating = &radeon_atom_set_clock_gating,
+};
+
+
+/*
+ * r520,rv530,rv560,rv570,r580
+ */
+void r520_errata(struct radeon_device *rdev);
+void r520_vram_info(struct radeon_device *rdev);
+int r520_mc_init(struct radeon_device *rdev);
+void r520_mc_fini(struct radeon_device *rdev);
+static struct radeon_asic r520_asic = {
+       .errata = &r520_errata,
+       .vram_info = &r520_vram_info,
+       .gpu_reset = &rv515_gpu_reset,
+       .mc_init = &r520_mc_init,
+       .mc_fini = &r520_mc_fini,
+       .wb_init = &r100_wb_init,
+       .wb_fini = &r100_wb_fini,
+       .gart_enable = &r300_gart_enable,
+       .gart_disable = &rv370_pcie_gart_disable,
+       .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
+       .gart_set_page = &rv370_pcie_gart_set_page,
+       .cp_init = &r100_cp_init,
+       .cp_fini = &r100_cp_fini,
+       .cp_disable = &r100_cp_disable,
+       .ring_start = &rv515_ring_start,
+       .irq_set = &r100_irq_set,
+       .irq_process = &r100_irq_process,
+       .fence_ring_emit = &r300_fence_ring_emit,
+       .cs_parse = &r100_cs_parse,
+       .copy_blit = &r100_copy_blit,
+       .copy_dma = &r300_copy_dma,
+       .copy = &r100_copy_blit,
+       .set_engine_clock = &radeon_atom_set_engine_clock,
+       .set_memory_clock = &radeon_atom_set_memory_clock,
+       .set_pcie_lanes = &rv370_set_pcie_lanes,
+       .set_clock_gating = &radeon_atom_set_clock_gating,
+};
+
+/*
+ * r600,rv610,rv630,rv620,rv635,rv670,rs780,rv770,rv730,rv710
+ */
+uint32_t r600_pciep_rreg(struct radeon_device *rdev, uint32_t reg);
+void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+
+#endif
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
new file mode 100644 (file)
index 0000000..786632d
--- /dev/null
@@ -0,0 +1,1298 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ */
+#include "drmP.h"
+#include "radeon_drm.h"
+#include "radeon.h"
+
+#include "atom.h"
+#include "atom-bits.h"
+
+/* from radeon_encoder.c */
+extern uint32_t
+radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device,
+                     uint8_t dac);
+extern void radeon_link_encoder_connector(struct drm_device *dev);
+extern void
+radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id,
+                       uint32_t supported_device);
+
+/* from radeon_connector.c */
+extern void
+radeon_add_atom_connector(struct drm_device *dev,
+                         uint32_t connector_id,
+                         uint32_t supported_device,
+                         int connector_type,
+                         struct radeon_i2c_bus_rec *i2c_bus,
+                         bool linkb, uint32_t igp_lane_info);
+
+/* from radeon_legacy_encoder.c */
+extern void
+radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id,
+                         uint32_t supported_device);
+
+union atom_supported_devices {
+       struct _ATOM_SUPPORTED_DEVICES_INFO info;
+       struct _ATOM_SUPPORTED_DEVICES_INFO_2 info_2;
+       struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 info_2d1;
+};
+
+static inline struct radeon_i2c_bus_rec radeon_lookup_gpio(struct drm_device
+                                                          *dev, uint8_t id)
+{
+       struct radeon_device *rdev = dev->dev_private;
+       struct atom_context *ctx = rdev->mode_info.atom_context;
+       ATOM_GPIO_I2C_ASSIGMENT gpio;
+       struct radeon_i2c_bus_rec i2c;
+       int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info);
+       struct _ATOM_GPIO_I2C_INFO *i2c_info;
+       uint16_t data_offset;
+
+       memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
+       i2c.valid = false;
+
+       atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset);
+
+       i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
+
+       gpio = i2c_info->asGPIO_Info[id];
+
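+       /* Register indices in the ATOM GPIO table are stored in dword
+        * units; multiplying by 4 turns them into the byte offsets the
+        * MMIO register accessors expect. */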
+       i2c.mask_clk_reg = le16_to_cpu(gpio.usClkMaskRegisterIndex) * 4;
+       i2c.mask_data_reg = le16_to_cpu(gpio.usDataMaskRegisterIndex) * 4;
+       i2c.put_clk_reg = le16_to_cpu(gpio.usClkEnRegisterIndex) * 4;
+       i2c.put_data_reg = le16_to_cpu(gpio.usDataEnRegisterIndex) * 4;
+       i2c.get_clk_reg = le16_to_cpu(gpio.usClkY_RegisterIndex) * 4;
+       i2c.get_data_reg = le16_to_cpu(gpio.usDataY_RegisterIndex) * 4;
+       i2c.a_clk_reg = le16_to_cpu(gpio.usClkA_RegisterIndex) * 4;
+       i2c.a_data_reg = le16_to_cpu(gpio.usDataA_RegisterIndex) * 4;
+       i2c.mask_clk_mask = (1 << gpio.ucClkMaskShift);
+       i2c.mask_data_mask = (1 << gpio.ucDataMaskShift);
+       i2c.put_clk_mask = (1 << gpio.ucClkEnShift);
+       i2c.put_data_mask = (1 << gpio.ucDataEnShift);
+       i2c.get_clk_mask = (1 << gpio.ucClkY_Shift);
+       i2c.get_data_mask = (1 << gpio.ucDataY_Shift);
+       i2c.a_clk_mask = (1 << gpio.ucClkA_Shift);
+       i2c.a_data_mask = (1 << gpio.ucDataA_Shift);
+       i2c.valid = true;
+
+       return i2c;
+}
+
+static bool radeon_atom_apply_quirks(struct drm_device *dev,
+                                    uint32_t supported_device,
+                                    int *connector_type,
+                                    struct radeon_i2c_bus_rec *i2c_bus)
+{
+
+       /* Asus M2A-VM HDMI board lists the DVI port as HDMI */
+       if ((dev->pdev->device == 0x791e) &&
+           (dev->pdev->subsystem_vendor == 0x1043) &&
+           (dev->pdev->subsystem_device == 0x826d)) {
+               if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
+                   (supported_device == ATOM_DEVICE_DFP3_SUPPORT))
+                       *connector_type = DRM_MODE_CONNECTOR_DVID;
+       }
+
+       /* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */
+       if ((dev->pdev->device == 0x7941) &&
+           (dev->pdev->subsystem_vendor == 0x147b) &&
+           (dev->pdev->subsystem_device == 0x2412)) {
+               if (*connector_type == DRM_MODE_CONNECTOR_DVII)
+                       return false;
+       }
+
+       /* Falcon NW laptop lists vga ddc line for LVDS */
+       if ((dev->pdev->device == 0x5653) &&
+           (dev->pdev->subsystem_vendor == 0x1462) &&
+           (dev->pdev->subsystem_device == 0x0291)) {
+               if (*connector_type == DRM_MODE_CONNECTOR_LVDS)
+                       i2c_bus->valid = false;
+       }
+
+       /* Funky macbooks */
+       if ((dev->pdev->device == 0x71C5) &&
+           (dev->pdev->subsystem_vendor == 0x106b) &&
+           (dev->pdev->subsystem_device == 0x0080)) {
+               if ((supported_device == ATOM_DEVICE_CRT1_SUPPORT) ||
+                   (supported_device == ATOM_DEVICE_DFP2_SUPPORT))
+                       return false;
+       }
+
+       /* some BIOSes seem to report DAC on HDMI - they hurt me with their lies */
+       if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) ||
+           (*connector_type == DRM_MODE_CONNECTOR_HDMIB)) {
+               if (supported_device & (ATOM_DEVICE_CRT_SUPPORT)) {
+                       return false;
+               }
+       }
+
+       /* ASUS HD 3600 XT board lists the DVI port as HDMI */
+       if ((dev->pdev->device == 0x9598) &&
+           (dev->pdev->subsystem_vendor == 0x1043) &&
+           (dev->pdev->subsystem_device == 0x01da)) {
+               if (*connector_type == DRM_MODE_CONNECTOR_HDMIB) {
+                       *connector_type = DRM_MODE_CONNECTOR_DVID;
+               }
+       }
+
+       return true;
+}
+
+const int supported_devices_connector_convert[] = {
+       DRM_MODE_CONNECTOR_Unknown,
+       DRM_MODE_CONNECTOR_VGA,
+       DRM_MODE_CONNECTOR_DVII,
+       DRM_MODE_CONNECTOR_DVID,
+       DRM_MODE_CONNECTOR_DVIA,
+       DRM_MODE_CONNECTOR_SVIDEO,
+       DRM_MODE_CONNECTOR_Composite,
+       DRM_MODE_CONNECTOR_LVDS,
+       DRM_MODE_CONNECTOR_Unknown,
+       DRM_MODE_CONNECTOR_Unknown,
+       DRM_MODE_CONNECTOR_HDMIA,
+       DRM_MODE_CONNECTOR_HDMIB,
+       DRM_MODE_CONNECTOR_Unknown,
+       DRM_MODE_CONNECTOR_Unknown,
+       DRM_MODE_CONNECTOR_9PinDIN,
+       DRM_MODE_CONNECTOR_DisplayPort
+};
+
+const int object_connector_convert[] = {
+       DRM_MODE_CONNECTOR_Unknown,
+       DRM_MODE_CONNECTOR_DVII,
+       DRM_MODE_CONNECTOR_DVII,
+       DRM_MODE_CONNECTOR_DVID,
+       DRM_MODE_CONNECTOR_DVID,
+       DRM_MODE_CONNECTOR_VGA,
+       DRM_MODE_CONNECTOR_Composite,
+       DRM_MODE_CONNECTOR_SVIDEO,
+       DRM_MODE_CONNECTOR_Unknown,
+       DRM_MODE_CONNECTOR_9PinDIN,
+       DRM_MODE_CONNECTOR_Unknown,
+       DRM_MODE_CONNECTOR_HDMIA,
+       DRM_MODE_CONNECTOR_HDMIB,
+       DRM_MODE_CONNECTOR_HDMIB,
+       DRM_MODE_CONNECTOR_LVDS,
+       DRM_MODE_CONNECTOR_9PinDIN,
+       DRM_MODE_CONNECTOR_Unknown,
+       DRM_MODE_CONNECTOR_Unknown,
+       DRM_MODE_CONNECTOR_Unknown,
+       DRM_MODE_CONNECTOR_DisplayPort
+};
+
+bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
+{
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_mode_info *mode_info = &rdev->mode_info;
+       struct atom_context *ctx = mode_info->atom_context;
+       int index = GetIndexIntoMasterTable(DATA, Object_Header);
+       uint16_t size, data_offset;
+       uint8_t frev, crev, line_mux = 0;
+       ATOM_CONNECTOR_OBJECT_TABLE *con_obj;
+       ATOM_DISPLAY_OBJECT_PATH_TABLE *path_obj;
+       ATOM_OBJECT_HEADER *obj_header;
+       int i, j, path_size, device_support;
+       int connector_type;
+       uint16_t igp_lane_info;
+       bool linkb;
+       struct radeon_i2c_bus_rec ddc_bus;
+
+       atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
+
+       if (data_offset == 0)
+               return false;
+
+       if (crev < 2)
+               return false;
+
+       obj_header = (ATOM_OBJECT_HEADER *) (ctx->bios + data_offset);
+       path_obj = (ATOM_DISPLAY_OBJECT_PATH_TABLE *)
+           (ctx->bios + data_offset +
+            le16_to_cpu(obj_header->usDisplayPathTableOffset));
+       con_obj = (ATOM_CONNECTOR_OBJECT_TABLE *)
+           (ctx->bios + data_offset +
+            le16_to_cpu(obj_header->usConnectorObjectTableOffset));
+       device_support = le16_to_cpu(obj_header->usDeviceSupport);
+
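+       /* Walk every display path in the object header: each path ties a
+        * connector object to the encoder objects that can feed it, so the
+        * loop below registers those encoders, looks up the DDC GPIO line
+        * and finally adds the connector itself. */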
+       path_size = 0;
+       for (i = 0; i < path_obj->ucNumOfDispPath; i++) {
+               uint8_t *addr = (uint8_t *) path_obj->asDispPath;
+               ATOM_DISPLAY_OBJECT_PATH *path;
+               addr += path_size;
+               path = (ATOM_DISPLAY_OBJECT_PATH *) addr;
+               path_size += le16_to_cpu(path->usSize);
+               linkb = false;
+
+               if (device_support & le16_to_cpu(path->usDeviceTag)) {
+                       uint8_t con_obj_id, con_obj_num, con_obj_type;
+
+                       con_obj_id =
+                           (le16_to_cpu(path->usConnObjectId) & OBJECT_ID_MASK)
+                           >> OBJECT_ID_SHIFT;
+                       con_obj_num =
+                           (le16_to_cpu(path->usConnObjectId) & ENUM_ID_MASK)
+                           >> ENUM_ID_SHIFT;
+                       con_obj_type =
+                           (le16_to_cpu(path->usConnObjectId) &
+                            OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
+
+                       if ((le16_to_cpu(path->usDeviceTag) ==
+                            ATOM_DEVICE_TV1_SUPPORT)
+                           || (le16_to_cpu(path->usDeviceTag) ==
+                               ATOM_DEVICE_TV2_SUPPORT)
+                           || (le16_to_cpu(path->usDeviceTag) ==
+                               ATOM_DEVICE_CV_SUPPORT))
+                               continue;
+
+                       if ((rdev->family == CHIP_RS780) &&
+                           (con_obj_id ==
+                            CONNECTOR_OBJECT_ID_PCIE_CONNECTOR)) {
+                               uint16_t igp_offset = 0;
+                               ATOM_INTEGRATED_SYSTEM_INFO_V2 *igp_obj;
+
+                               index =
+                                   GetIndexIntoMasterTable(DATA,
+                                                           IntegratedSystemInfo);
+
+                               atom_parse_data_header(ctx, index, &size, &frev,
+                                                      &crev, &igp_offset);
+
+                               if (crev >= 2) {
+                                       igp_obj =
+                                           (ATOM_INTEGRATED_SYSTEM_INFO_V2
+                                            *) (ctx->bios + igp_offset);
+
+                                       if (igp_obj) {
+                                               uint32_t slot_config, ct;
+
+                                               if (con_obj_num == 1)
+                                                       slot_config =
+                                                           igp_obj->
+                                                           ulDDISlot1Config;
+                                               else
+                                                       slot_config =
+                                                           igp_obj->
+                                                           ulDDISlot2Config;
+
+                                               ct = (slot_config >> 16) & 0xff;
+                                               connector_type =
+                                                   object_connector_convert
+                                                   [ct];
+                                               igp_lane_info =
+                                                   slot_config & 0xffff;
+                                       } else
+                                               continue;
+                               } else
+                                       continue;
+                       } else {
+                               igp_lane_info = 0;
+                               connector_type =
+                                   object_connector_convert[con_obj_id];
+                       }
+
+                       if (connector_type == DRM_MODE_CONNECTOR_Unknown)
+                               continue;
+
+                       for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2);
+                            j++) {
+                               uint8_t enc_obj_id, enc_obj_num, enc_obj_type;
+
+                               enc_obj_id =
+                                   (le16_to_cpu(path->usGraphicObjIds[j]) &
+                                    OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+                               enc_obj_num =
+                                   (le16_to_cpu(path->usGraphicObjIds[j]) &
+                                    ENUM_ID_MASK) >> ENUM_ID_SHIFT;
+                               enc_obj_type =
+                                   (le16_to_cpu(path->usGraphicObjIds[j]) &
+                                    OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
+
+                               /* FIXME: add support for router objects */
+                               if (enc_obj_type == GRAPH_OBJECT_TYPE_ENCODER) {
+                                       if (enc_obj_num == 2)
+                                               linkb = true;
+                                       else
+                                               linkb = false;
+
+                                       radeon_add_atom_encoder(dev,
+                                                               enc_obj_id,
+                                                               le16_to_cpu
+                                                               (path->
+                                                                usDeviceTag));
+
+                               }
+                       }
+
+                       /* look up gpio for ddc */
+                       if ((le16_to_cpu(path->usDeviceTag) &
+                            (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
+                           == 0) {
+                               for (j = 0; j < con_obj->ucNumberOfObjects; j++) {
+                                       if (le16_to_cpu(path->usConnObjectId) ==
+                                           le16_to_cpu(con_obj->asObjects[j].
+                                                       usObjectID)) {
+                                               ATOM_COMMON_RECORD_HEADER
+                                                   *record =
+                                                   (ATOM_COMMON_RECORD_HEADER
+                                                    *)
+                                                   (ctx->bios + data_offset +
+                                                    le16_to_cpu(con_obj->
+                                                                asObjects[j].
+                                                                usRecordOffset));
+                                               ATOM_I2C_RECORD *i2c_record;
+
+                                               while (record->ucRecordType > 0
+                                                      && record->
+                                                      ucRecordType <=
+                                                      ATOM_MAX_OBJECT_RECORD_NUMBER) {
+                                                       DRM_ERROR
+                                                           ("record type %d\n",
+                                                            record->
+                                                            ucRecordType);
+                                                       switch (record->
+                                                               ucRecordType) {
+                                                       case ATOM_I2C_RECORD_TYPE:
+                                                               i2c_record =
+                                                                   (ATOM_I2C_RECORD
+                                                                    *) record;
+                                                               line_mux =
+                                                                   i2c_record->
+                                                                   sucI2cId.
+                                                                   bfI2C_LineMux;
+                                                               break;
+                                                       }
+                                                       record =
+                                                           (ATOM_COMMON_RECORD_HEADER
+                                                            *) ((char *)record
+                                                                +
+                                                                record->
+                                                                ucRecordSize);
+                                               }
+                                               break;
+                                       }
+                               }
+                       } else
+                               line_mux = 0;
+
+                       if ((le16_to_cpu(path->usDeviceTag) ==
+                            ATOM_DEVICE_TV1_SUPPORT)
+                           || (le16_to_cpu(path->usDeviceTag) ==
+                               ATOM_DEVICE_TV2_SUPPORT)
+                           || (le16_to_cpu(path->usDeviceTag) ==
+                               ATOM_DEVICE_CV_SUPPORT))
+                               ddc_bus.valid = false;
+                       else
+                               ddc_bus = radeon_lookup_gpio(dev, line_mux);
+
+                       radeon_add_atom_connector(dev,
+                                                 le16_to_cpu(path->
+                                                             usConnObjectId),
+                                                 le16_to_cpu(path->
+                                                             usDeviceTag),
+                                                 connector_type, &ddc_bus,
+                                                 linkb, igp_lane_info);
+
+               }
+       }
+
+       radeon_link_encoder_connector(dev);
+
+       return true;
+}
+
+struct bios_connector {
+       bool valid;
+       uint8_t line_mux;
+       uint16_t devices;
+       int connector_type;
+       struct radeon_i2c_bus_rec ddc_bus;
+};
+
+bool radeon_get_atom_connector_info_from_supported_devices_table(struct
+                                                                drm_device
+                                                                *dev)
+{
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_mode_info *mode_info = &rdev->mode_info;
+       struct atom_context *ctx = mode_info->atom_context;
+       int index = GetIndexIntoMasterTable(DATA, SupportedDevicesInfo);
+       uint16_t size, data_offset;
+       uint8_t frev, crev;
+       uint16_t device_support;
+       uint8_t dac;
+       union atom_supported_devices *supported_devices;
+       int i, j;
+       struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
+
+       atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
+
+       supported_devices =
+           (union atom_supported_devices *)(ctx->bios + data_offset);
+
+       device_support = le16_to_cpu(supported_devices->info.usDeviceSupport);
+
+       for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
+               ATOM_CONNECTOR_INFO_I2C ci =
+                   supported_devices->info.asConnInfo[i];
+
+               bios_connectors[i].valid = false;
+
+               if (!(device_support & (1 << i))) {
+                       continue;
+               }
+
+               if (i == ATOM_DEVICE_CV_INDEX) {
+                       DRM_DEBUG("Skipping Component Video\n");
+                       continue;
+               }
+
+               if (i == ATOM_DEVICE_TV1_INDEX) {
+                       DRM_DEBUG("Skipping TV Out\n");
+                       continue;
+               }
+
+               bios_connectors[i].connector_type =
+                   supported_devices_connector_convert[ci.sucConnectorInfo.
+                                                       sbfAccess.
+                                                       bfConnectorType];
+
+               if (bios_connectors[i].connector_type ==
+                   DRM_MODE_CONNECTOR_Unknown)
+                       continue;
+
+               dac = ci.sucConnectorInfo.sbfAccess.bfAssociatedDAC;
+
+               if ((rdev->family == CHIP_RS690) ||
+                   (rdev->family == CHIP_RS740)) {
+                       if ((i == ATOM_DEVICE_DFP2_INDEX)
+                           && (ci.sucI2cId.sbfAccess.bfI2C_LineMux == 2))
+                               bios_connectors[i].line_mux =
+                                   ci.sucI2cId.sbfAccess.bfI2C_LineMux + 1;
+                       else if ((i == ATOM_DEVICE_DFP3_INDEX)
+                                && (ci.sucI2cId.sbfAccess.bfI2C_LineMux == 1))
+                               bios_connectors[i].line_mux =
+                                   ci.sucI2cId.sbfAccess.bfI2C_LineMux + 1;
+                       else
+                               bios_connectors[i].line_mux =
+                                   ci.sucI2cId.sbfAccess.bfI2C_LineMux;
+               } else
+                       bios_connectors[i].line_mux =
+                           ci.sucI2cId.sbfAccess.bfI2C_LineMux;
+
+               /* give tv unique connector ids */
+               if (i == ATOM_DEVICE_TV1_INDEX) {
+                       bios_connectors[i].ddc_bus.valid = false;
+                       bios_connectors[i].line_mux = 50;
+               } else if (i == ATOM_DEVICE_TV2_INDEX) {
+                       bios_connectors[i].ddc_bus.valid = false;
+                       bios_connectors[i].line_mux = 51;
+               } else if (i == ATOM_DEVICE_CV_INDEX) {
+                       bios_connectors[i].ddc_bus.valid = false;
+                       bios_connectors[i].line_mux = 52;
+               } else
+                       bios_connectors[i].ddc_bus =
+                           radeon_lookup_gpio(dev,
+                                              bios_connectors[i].line_mux);
+
+               /* Always set the connector type to VGA for CRT1/CRT2.  If they are
+                * shared with a DVI port, we'll pick up the DVI connector when we
+                * merge the outputs.  Some BIOSes incorrectly list VGA ports as DVI.
+                */
+               if (i == ATOM_DEVICE_CRT1_INDEX || i == ATOM_DEVICE_CRT2_INDEX)
+                       bios_connectors[i].connector_type =
+                           DRM_MODE_CONNECTOR_VGA;
+
+               if (!radeon_atom_apply_quirks(dev, (1 << i),
+                                             &bios_connectors[i].connector_type,
+                                             &bios_connectors[i].ddc_bus))
+                       continue;
+
+               bios_connectors[i].valid = true;
+               bios_connectors[i].devices = (1 << i);
+
+               if (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom)
+                       radeon_add_atom_encoder(dev,
+                                               radeon_get_encoder_id(dev,
+                                                                     (1 << i),
+                                                                     dac),
+                                               (1 << i));
+               else
+                       radeon_add_legacy_encoder(dev,
+                                                 radeon_get_encoder_id(dev,
+                                                                       (1 << i),
+                                                                       dac),
+                                                 (1 << i));
+       }
+
+       /* combine shared connectors */
+       for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
+               if (!bios_connectors[i].valid)
+                       continue;
+
+               for (j = 0; j < ATOM_MAX_SUPPORTED_DEVICE; j++) {
+                       if (!bios_connectors[j].valid || (i == j))
+                               continue;
+
+                       if (bios_connectors[i].line_mux !=
+                           bios_connectors[j].line_mux)
+                               continue;
+
+                       if (((bios_connectors[i].devices & ATOM_DEVICE_DFP_SUPPORT) &&
+                            (bios_connectors[j].devices & ATOM_DEVICE_CRT_SUPPORT)) ||
+                           ((bios_connectors[j].devices & ATOM_DEVICE_DFP_SUPPORT) &&
+                            (bios_connectors[i].devices & ATOM_DEVICE_CRT_SUPPORT))) {
+                               bios_connectors[i].devices |=
+                                   bios_connectors[j].devices;
+                               bios_connectors[i].connector_type =
+                                   DRM_MODE_CONNECTOR_DVII;
+                               bios_connectors[j].valid = false;
+                       }
+               }
+       }
+
+       /* add the connectors */
+       for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
+               if (bios_connectors[i].valid)
+                       radeon_add_atom_connector(dev,
+                                                 bios_connectors[i].line_mux,
+                                                 bios_connectors[i].devices,
+                                                 bios_connectors[i].connector_type,
+                                                 &bios_connectors[i].ddc_bus,
+                                                 false, 0);
+       }
+
+       radeon_link_encoder_connector(dev);
+
+       return true;
+}
+
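+/*
+ * The FirmwareInfo data table layout changed across ATOM revisions; overlay
+ * the known variants so one pointer can address whichever the BIOS provides.
+ */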
+union firmware_info {
+       ATOM_FIRMWARE_INFO info;
+       ATOM_FIRMWARE_INFO_V1_2 info_12;
+       ATOM_FIRMWARE_INFO_V1_3 info_13;
+       ATOM_FIRMWARE_INFO_V1_4 info_14;
+};
+
+bool radeon_atom_get_clock_info(struct drm_device *dev)
+{
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_mode_info *mode_info = &rdev->mode_info;
+       int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
+       union firmware_info *firmware_info;
+       uint8_t frev, crev;
+       struct radeon_pll *p1pll = &rdev->clock.p1pll;
+       struct radeon_pll *p2pll = &rdev->clock.p2pll;
+       struct radeon_pll *spll = &rdev->clock.spll;
+       struct radeon_pll *mpll = &rdev->clock.mpll;
+       uint16_t data_offset;
+
+       atom_parse_data_header(mode_info->atom_context, index, NULL, &frev,
+                              &crev, &data_offset);
+
+       firmware_info =
+           (union firmware_info *)(mode_info->atom_context->bios +
+                                   data_offset);
+
+       if (firmware_info) {
+               /* pixel clocks */
+               p1pll->reference_freq =
+                   le16_to_cpu(firmware_info->info.usReferenceClock);
+               p1pll->reference_div = 0;
+
+               p1pll->pll_out_min =
+                   le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output);
+               p1pll->pll_out_max =
+                   le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output);
+
+               if (p1pll->pll_out_min == 0) {
+                       if (ASIC_IS_AVIVO(rdev))
+                               p1pll->pll_out_min = 64800;
+                       else
+                               p1pll->pll_out_min = 20000;
+               }
+
+               p1pll->pll_in_min =
+                   le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Input);
+               p1pll->pll_in_max =
+                   le16_to_cpu(firmware_info->info.usMaxPixelClockPLL_Input);
+
+               *p2pll = *p1pll;
+
+               /* system clock */
+               spll->reference_freq =
+                   le16_to_cpu(firmware_info->info.usReferenceClock);
+               spll->reference_div = 0;
+
+               spll->pll_out_min =
+                   le16_to_cpu(firmware_info->info.usMinEngineClockPLL_Output);
+               spll->pll_out_max =
+                   le32_to_cpu(firmware_info->info.ulMaxEngineClockPLL_Output);
+
+               /* some tables report a zero minimum; fall back to a sane default */
+               if (spll->pll_out_min == 0) {
+                       if (ASIC_IS_AVIVO(rdev))
+                               spll->pll_out_min = 64800;
+                       else
+                               spll->pll_out_min = 20000;
+               }
+
+               spll->pll_in_min =
+                   le16_to_cpu(firmware_info->info.usMinEngineClockPLL_Input);
+               spll->pll_in_max =
+                   le16_to_cpu(firmware_info->info.usMaxEngineClockPLL_Input);
+
+               /* memory clock */
+               mpll->reference_freq =
+                   le16_to_cpu(firmware_info->info.usReferenceClock);
+               mpll->reference_div = 0;
+
+               mpll->pll_out_min =
+                   le16_to_cpu(firmware_info->info.usMinMemoryClockPLL_Output);
+               mpll->pll_out_max =
+                   le32_to_cpu(firmware_info->info.ulMaxMemoryClockPLL_Output);
+
+               /* some tables report a zero minimum; fall back to a sane default */
+               if (mpll->pll_out_min == 0) {
+                       if (ASIC_IS_AVIVO(rdev))
+                               mpll->pll_out_min = 64800;
+                       else
+                               mpll->pll_out_min = 20000;
+               }
+
+               mpll->pll_in_min =
+                   le16_to_cpu(firmware_info->info.usMinMemoryClockPLL_Input);
+               mpll->pll_in_max =
+                   le16_to_cpu(firmware_info->info.usMaxMemoryClockPLL_Input);
+
+               rdev->clock.default_sclk =
+                   le32_to_cpu(firmware_info->info.ulDefaultEngineClock);
+               rdev->clock.default_mclk =
+                   le32_to_cpu(firmware_info->info.ulDefaultMemoryClock);
+
+               return true;
+       }
+       return false;
+}
+
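+/* build the internal TMDS PLL table from the ATOM TMDS_Info data table */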
+struct radeon_encoder_int_tmds *
+radeon_atombios_get_tmds_info(struct radeon_encoder *encoder)
+{
+       struct drm_device *dev = encoder->base.dev;
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_mode_info *mode_info = &rdev->mode_info;
+       int index = GetIndexIntoMasterTable(DATA, TMDS_Info);
+       uint16_t data_offset;
+       struct _ATOM_TMDS_INFO *tmds_info;
+       uint8_t frev, crev;
+       uint16_t maxfreq;
+       int i;
+       struct radeon_encoder_int_tmds *tmds = NULL;
+
+       atom_parse_data_header(mode_info->atom_context, index, NULL, &frev,
+                              &crev, &data_offset);
+
+       tmds_info =
+           (struct _ATOM_TMDS_INFO *)(mode_info->atom_context->bios +
+                                      data_offset);
+
+       if (tmds_info) {
+               tmds =
+                   kzalloc(sizeof(struct radeon_encoder_int_tmds), GFP_KERNEL);
+
+               if (!tmds)
+                       return NULL;
+
+               maxfreq = le16_to_cpu(tmds_info->usMaxFrequency);
+               for (i = 0; i < 4; i++) {
+                       tmds->tmds_pll[i].freq =
+                           le16_to_cpu(tmds_info->asMiscInfo[i].usFrequency);
+                       tmds->tmds_pll[i].value =
+                           tmds_info->asMiscInfo[i].ucPLL_ChargePump & 0x3f;
+                       tmds->tmds_pll[i].value |=
+                           (tmds_info->asMiscInfo[i].ucPLL_VCO_Gain & 0x3f) << 6;
+                       tmds->tmds_pll[i].value |=
+                           (tmds_info->asMiscInfo[i].ucPLL_DutyCycle & 0xf) << 12;
+                       tmds->tmds_pll[i].value |=
+                           (tmds_info->asMiscInfo[i].ucPLL_VoltageSwing & 0xf) << 16;
+
+                       DRM_DEBUG("TMDS PLL From ATOMBIOS %u %x\n",
+                                 tmds->tmds_pll[i].freq,
+                                 tmds->tmds_pll[i].value);
+
+                       if (maxfreq == tmds->tmds_pll[i].freq) {
+                               tmds->tmds_pll[i].freq = 0xffffffff;
+                               break;
+                       }
+               }
+       }
+       return tmds;
+}
+
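+/* LVDS_Info also exists in more than one revision; overlay the variants */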
+union lvds_info {
+       struct _ATOM_LVDS_INFO info;
+       struct _ATOM_LVDS_INFO_V12 info_12;
+};
+
+struct radeon_encoder_atom_dig *
+radeon_atombios_get_lvds_info(struct radeon_encoder *encoder)
+{
+       struct drm_device *dev = encoder->base.dev;
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_mode_info *mode_info = &rdev->mode_info;
+       int index = GetIndexIntoMasterTable(DATA, LVDS_Info);
+       uint16_t data_offset;
+       union lvds_info *lvds_info;
+       uint8_t frev, crev;
+       struct radeon_encoder_atom_dig *lvds = NULL;
+
+       atom_parse_data_header(mode_info->atom_context, index, NULL, &frev,
+                              &crev, &data_offset);
+
+       lvds_info =
+           (union lvds_info *)(mode_info->atom_context->bios + data_offset);
+
+       if (lvds_info) {
+               lvds =
+                   kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL);
+
+               if (!lvds)
+                       return NULL;
+
+               lvds->native_mode.dotclock =
+                   le16_to_cpu(lvds_info->info.sLCDTiming.usPixClk) * 10;
+               lvds->native_mode.panel_xres =
+                   le16_to_cpu(lvds_info->info.sLCDTiming.usHActive);
+               lvds->native_mode.panel_yres =
+                   le16_to_cpu(lvds_info->info.sLCDTiming.usVActive);
+               lvds->native_mode.hblank =
+                   le16_to_cpu(lvds_info->info.sLCDTiming.usHBlanking_Time);
+               lvds->native_mode.hoverplus =
+                   le16_to_cpu(lvds_info->info.sLCDTiming.usHSyncOffset);
+               lvds->native_mode.hsync_width =
+                   le16_to_cpu(lvds_info->info.sLCDTiming.usHSyncWidth);
+               lvds->native_mode.vblank =
+                   le16_to_cpu(lvds_info->info.sLCDTiming.usVBlanking_Time);
+               lvds->native_mode.voverplus =
+                   le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncOffset);
+               lvds->native_mode.vsync_width =
+                   le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth);
+               lvds->panel_pwr_delay =
+                   le16_to_cpu(lvds_info->info.usOffDelayInMs);
+               lvds->lvds_misc = lvds_info->info.ucLVDS_Misc;
+
+               encoder->native_mode = lvds->native_mode;
+       }
+       return lvds;
+}
+
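+/* primary DAC bg/dac adjustment values come from the CompassionateData table */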
+struct radeon_encoder_primary_dac *
+radeon_atombios_get_primary_dac_info(struct radeon_encoder *encoder)
+{
+       struct drm_device *dev = encoder->base.dev;
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_mode_info *mode_info = &rdev->mode_info;
+       int index = GetIndexIntoMasterTable(DATA, CompassionateData);
+       uint16_t data_offset;
+       struct _COMPASSIONATE_DATA *dac_info;
+       uint8_t frev, crev;
+       uint8_t bg, dac;
+       struct radeon_encoder_primary_dac *p_dac = NULL;
+
+       atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset);
+
+       dac_info = (struct _COMPASSIONATE_DATA *)(mode_info->atom_context->bios + data_offset);
+
+       if (dac_info) {
+               p_dac = kzalloc(sizeof(struct radeon_encoder_primary_dac), GFP_KERNEL);
+
+               if (!p_dac)
+                       return NULL;
+
+               bg = dac_info->ucDAC1_BG_Adjustment;
+               dac = dac_info->ucDAC1_DAC_Adjustment;
+               p_dac->ps2_pdac_adj = (bg << 8) | (dac);
+
+       }
+       return p_dac;
+}
+
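+/* TV DAC bg/dac adjustments (PS2, PAL, NTSC) also live in CompassionateData */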
+struct radeon_encoder_tv_dac *
+radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder)
+{
+       struct drm_device *dev = encoder->base.dev;
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_mode_info *mode_info = &rdev->mode_info;
+       int index = GetIndexIntoMasterTable(DATA, CompassionateData);
+       uint16_t data_offset;
+       struct _COMPASSIONATE_DATA *dac_info;
+       uint8_t frev, crev;
+       uint8_t bg, dac;
+       struct radeon_encoder_tv_dac *tv_dac = NULL;
+
+       atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset);
+
+       dac_info = (struct _COMPASSIONATE_DATA *)(mode_info->atom_context->bios + data_offset);
+
+       if (dac_info) {
+               tv_dac = kzalloc(sizeof(struct radeon_encoder_tv_dac), GFP_KERNEL);
+
+               if (!tv_dac)
+                       return NULL;
+
+               bg = dac_info->ucDAC2_CRT2_BG_Adjustment;
+               dac = dac_info->ucDAC2_CRT2_DAC_Adjustment;
+               tv_dac->ps2_tvdac_adj = (bg << 16) | (dac << 20);
+
+               bg = dac_info->ucDAC2_PAL_BG_Adjustment;
+               dac = dac_info->ucDAC2_PAL_DAC_Adjustment;
+               tv_dac->pal_tvdac_adj = (bg << 16) | (dac << 20);
+
+               bg = dac_info->ucDAC2_NTSC_BG_Adjustment;
+               dac = dac_info->ucDAC2_NTSC_DAC_Adjustment;
+               tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
+
+       }
+       return tv_dac;
+}
+
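+/*
+ * The helpers below execute ATOM command tables rather than parse data
+ * tables: clock gating, static power management and the engine/memory
+ * clock setters all go through atom_execute_table().
+ */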
+void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable)
+{
+       DYNAMIC_CLOCK_GATING_PS_ALLOCATION args;
+       int index = GetIndexIntoMasterTable(COMMAND, DynamicClockGating);
+
+       args.ucEnable = enable;
+
+       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+void radeon_atom_static_pwrmgt_setup(struct radeon_device *rdev, int enable)
+{
+       ENABLE_ASIC_STATIC_PWR_MGT_PS_ALLOCATION args;
+       int index = GetIndexIntoMasterTable(COMMAND, EnableASIC_StaticPwrMgt);
+
+       args.ucEnable = enable;
+
+       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+void radeon_atom_set_engine_clock(struct radeon_device *rdev,
+                                 uint32_t eng_clock)
+{
+       SET_ENGINE_CLOCK_PS_ALLOCATION args;
+       int index = GetIndexIntoMasterTable(COMMAND, SetEngineClock);
+
+       args.ulTargetEngineClock = eng_clock;   /* 10 khz */
+
+       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+void radeon_atom_set_memory_clock(struct radeon_device *rdev,
+                                 uint32_t mem_clock)
+{
+       SET_MEMORY_CLOCK_PS_ALLOCATION args;
+       int index = GetIndexIntoMasterTable(COMMAND, SetMemoryClock);
+
+       if (rdev->flags & RADEON_IS_IGP)
+               return;
+
+       args.ulTargetMemoryClock = mem_clock;   /* 10 khz */
+
+       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
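+/* set up the BIOS scratch registers that coordinate ownership of the
+ * display hardware between the VBIOS and the driver.
+ */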
+void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev)
+{
+       struct radeon_device *rdev = dev->dev_private;
+       uint32_t bios_2_scratch, bios_6_scratch;
+
+       if (rdev->family >= CHIP_R600) {
+               bios_2_scratch = RREG32(R600_BIOS_0_SCRATCH);
+               bios_6_scratch = RREG32(R600_BIOS_6_SCRATCH);
+       } else {
+               bios_2_scratch = RREG32(RADEON_BIOS_0_SCRATCH);
+               bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
+       }
+
+       /* let the bios control the backlight */
+       bios_2_scratch &= ~ATOM_S2_VRI_BRIGHT_ENABLE;
+
+       /* tell the bios not to handle mode switching */
+       bios_6_scratch |= (ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH | ATOM_S6_ACC_MODE);
+
+       if (rdev->family >= CHIP_R600) {
+               WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch);
+               WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch);
+       } else {
+               WREG32(RADEON_BIOS_2_SCRATCH, bios_2_scratch);
+               WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch);
+       }
+
+}
+
+void radeon_atom_output_lock(struct drm_encoder *encoder, bool lock)
+{
+       struct drm_device *dev = encoder->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       uint32_t bios_6_scratch;
+
+       if (rdev->family >= CHIP_R600)
+               bios_6_scratch = RREG32(R600_BIOS_6_SCRATCH);
+       else
+               bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
+
+       if (lock)
+               bios_6_scratch |= ATOM_S6_CRITICAL_STATE;
+       else
+               bios_6_scratch &= ~ATOM_S6_CRITICAL_STATE;
+
+       if (rdev->family >= CHIP_R600)
+               WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch);
+       else
+               WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch);
+}
+
+/* at some point we may want to break this out into individual functions */
+void
+radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
+                                      struct drm_encoder *encoder,
+                                      bool connected)
+{
+       struct drm_device *dev = connector->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_connector *radeon_connector =
+           to_radeon_connector(connector);
+       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+       uint32_t bios_0_scratch, bios_3_scratch, bios_6_scratch;
+
+       if (rdev->family >= CHIP_R600) {
+               bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH);
+               bios_3_scratch = RREG32(R600_BIOS_3_SCRATCH);
+               bios_6_scratch = RREG32(R600_BIOS_6_SCRATCH);
+       } else {
+               bios_0_scratch = RREG32(RADEON_BIOS_0_SCRATCH);
+               bios_3_scratch = RREG32(RADEON_BIOS_3_SCRATCH);
+               bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
+       }
+
+       if ((radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) &&
+           (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT)) {
+               if (connected) {
+                       DRM_DEBUG("TV1 connected\n");
+                       bios_3_scratch |= ATOM_S3_TV1_ACTIVE;
+                       bios_6_scratch |= ATOM_S6_ACC_REQ_TV1;
+               } else {
+                       DRM_DEBUG("TV1 disconnected\n");
+                       bios_0_scratch &= ~ATOM_S0_TV1_MASK;
+                       bios_3_scratch &= ~ATOM_S3_TV1_ACTIVE;
+                       bios_6_scratch &= ~ATOM_S6_ACC_REQ_TV1;
+               }
+       }
+       if ((radeon_encoder->devices & ATOM_DEVICE_CV_SUPPORT) &&
+           (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT)) {
+               if (connected) {
+                       DRM_DEBUG("CV connected\n");
+                       bios_3_scratch |= ATOM_S3_CV_ACTIVE;
+                       bios_6_scratch |= ATOM_S6_ACC_REQ_CV;
+               } else {
+                       DRM_DEBUG("CV disconnected\n");
+                       bios_0_scratch &= ~ATOM_S0_CV_MASK;
+                       bios_3_scratch &= ~ATOM_S3_CV_ACTIVE;
+                       bios_6_scratch &= ~ATOM_S6_ACC_REQ_CV;
+               }
+       }
+       if ((radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) &&
+           (radeon_connector->devices & ATOM_DEVICE_LCD1_SUPPORT)) {
+               if (connected) {
+                       DRM_DEBUG("LCD1 connected\n");
+                       bios_0_scratch |= ATOM_S0_LCD1;
+                       bios_3_scratch |= ATOM_S3_LCD1_ACTIVE;
+                       bios_6_scratch |= ATOM_S6_ACC_REQ_LCD1;
+               } else {
+                       DRM_DEBUG("LCD1 disconnected\n");
+                       bios_0_scratch &= ~ATOM_S0_LCD1;
+                       bios_3_scratch &= ~ATOM_S3_LCD1_ACTIVE;
+                       bios_6_scratch &= ~ATOM_S6_ACC_REQ_LCD1;
+               }
+       }
+       if ((radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) &&
+           (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT)) {
+               if (connected) {
+                       DRM_DEBUG("CRT1 connected\n");
+                       bios_0_scratch |= ATOM_S0_CRT1_COLOR;
+                       bios_3_scratch |= ATOM_S3_CRT1_ACTIVE;
+                       bios_6_scratch |= ATOM_S6_ACC_REQ_CRT1;
+               } else {
+                       DRM_DEBUG("CRT1 disconnected\n");
+                       bios_0_scratch &= ~ATOM_S0_CRT1_MASK;
+                       bios_3_scratch &= ~ATOM_S3_CRT1_ACTIVE;
+                       bios_6_scratch &= ~ATOM_S6_ACC_REQ_CRT1;
+               }
+       }
+       if ((radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) &&
+           (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT)) {
+               if (connected) {
+                       DRM_DEBUG("CRT2 connected\n");
+                       bios_0_scratch |= ATOM_S0_CRT2_COLOR;
+                       bios_3_scratch |= ATOM_S3_CRT2_ACTIVE;
+                       bios_6_scratch |= ATOM_S6_ACC_REQ_CRT2;
+               } else {
+                       DRM_DEBUG("CRT2 disconnected\n");
+                       bios_0_scratch &= ~ATOM_S0_CRT2_MASK;
+                       bios_3_scratch &= ~ATOM_S3_CRT2_ACTIVE;
+                       bios_6_scratch &= ~ATOM_S6_ACC_REQ_CRT2;
+               }
+       }
+       if ((radeon_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) &&
+           (radeon_connector->devices & ATOM_DEVICE_DFP1_SUPPORT)) {
+               if (connected) {
+                       DRM_DEBUG("DFP1 connected\n");
+                       bios_0_scratch |= ATOM_S0_DFP1;
+                       bios_3_scratch |= ATOM_S3_DFP1_ACTIVE;
+                       bios_6_scratch |= ATOM_S6_ACC_REQ_DFP1;
+               } else {
+                       DRM_DEBUG("DFP1 disconnected\n");
+                       bios_0_scratch &= ~ATOM_S0_DFP1;
+                       bios_3_scratch &= ~ATOM_S3_DFP1_ACTIVE;
+                       bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP1;
+               }
+       }
+       if ((radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) &&
+           (radeon_connector->devices & ATOM_DEVICE_DFP2_SUPPORT)) {
+               if (connected) {
+                       DRM_DEBUG("DFP2 connected\n");
+                       bios_0_scratch |= ATOM_S0_DFP2;
+                       bios_3_scratch |= ATOM_S3_DFP2_ACTIVE;
+                       bios_6_scratch |= ATOM_S6_ACC_REQ_DFP2;
+               } else {
+                       DRM_DEBUG("DFP2 disconnected\n");
+                       bios_0_scratch &= ~ATOM_S0_DFP2;
+                       bios_3_scratch &= ~ATOM_S3_DFP2_ACTIVE;
+                       bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP2;
+               }
+       }
+       if ((radeon_encoder->devices & ATOM_DEVICE_DFP3_SUPPORT) &&
+           (radeon_connector->devices & ATOM_DEVICE_DFP3_SUPPORT)) {
+               if (connected) {
+                       DRM_DEBUG("DFP3 connected\n");
+                       bios_0_scratch |= ATOM_S0_DFP3;
+                       bios_3_scratch |= ATOM_S3_DFP3_ACTIVE;
+                       bios_6_scratch |= ATOM_S6_ACC_REQ_DFP3;
+               } else {
+                       DRM_DEBUG("DFP3 disconnected\n");
+                       bios_0_scratch &= ~ATOM_S0_DFP3;
+                       bios_3_scratch &= ~ATOM_S3_DFP3_ACTIVE;
+                       bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP3;
+               }
+       }
+       if ((radeon_encoder->devices & ATOM_DEVICE_DFP4_SUPPORT) &&
+           (radeon_connector->devices & ATOM_DEVICE_DFP4_SUPPORT)) {
+               if (connected) {
+                       DRM_DEBUG("DFP4 connected\n");
+                       bios_0_scratch |= ATOM_S0_DFP4;
+                       bios_3_scratch |= ATOM_S3_DFP4_ACTIVE;
+                       bios_6_scratch |= ATOM_S6_ACC_REQ_DFP4;
+               } else {
+                       DRM_DEBUG("DFP4 disconnected\n");
+                       bios_0_scratch &= ~ATOM_S0_DFP4;
+                       bios_3_scratch &= ~ATOM_S3_DFP4_ACTIVE;
+                       bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP4;
+               }
+       }
+       if ((radeon_encoder->devices & ATOM_DEVICE_DFP5_SUPPORT) &&
+           (radeon_connector->devices & ATOM_DEVICE_DFP5_SUPPORT)) {
+               if (connected) {
+                       DRM_DEBUG("DFP5 connected\n");
+                       bios_0_scratch |= ATOM_S0_DFP5;
+                       bios_3_scratch |= ATOM_S3_DFP5_ACTIVE;
+                       bios_6_scratch |= ATOM_S6_ACC_REQ_DFP5;
+               } else {
+                       DRM_DEBUG("DFP5 disconnected\n");
+                       bios_0_scratch &= ~ATOM_S0_DFP5;
+                       bios_3_scratch &= ~ATOM_S3_DFP5_ACTIVE;
+                       bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP5;
+               }
+       }
+
+       if (rdev->family >= CHIP_R600) {
+               WREG32(R600_BIOS_0_SCRATCH, bios_0_scratch);
+               WREG32(R600_BIOS_3_SCRATCH, bios_3_scratch);
+               WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch);
+       } else {
+               WREG32(RADEON_BIOS_0_SCRATCH, bios_0_scratch);
+               WREG32(RADEON_BIOS_3_SCRATCH, bios_3_scratch);
+               WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch);
+       }
+}
+
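+/* record which CRTC each active output is routed to in BIOS scratch register 3 */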
+void
+radeon_atombios_encoder_crtc_scratch_regs(struct drm_encoder *encoder, int crtc)
+{
+       struct drm_device *dev = encoder->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+       uint32_t bios_3_scratch;
+
+       if (rdev->family >= CHIP_R600)
+               bios_3_scratch = RREG32(R600_BIOS_3_SCRATCH);
+       else
+               bios_3_scratch = RREG32(RADEON_BIOS_3_SCRATCH);
+
+       if (radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) {
+               bios_3_scratch &= ~ATOM_S3_TV1_CRTC_ACTIVE;
+               bios_3_scratch |= (crtc << 18);
+       }
+       if (radeon_encoder->devices & ATOM_DEVICE_CV_SUPPORT) {
+               bios_3_scratch &= ~ATOM_S3_CV_CRTC_ACTIVE;
+               bios_3_scratch |= (crtc << 24);
+       }
+       if (radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) {
+               bios_3_scratch &= ~ATOM_S3_CRT1_CRTC_ACTIVE;
+               bios_3_scratch |= (crtc << 16);
+       }
+       if (radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) {
+               bios_3_scratch &= ~ATOM_S3_CRT2_CRTC_ACTIVE;
+               bios_3_scratch |= (crtc << 20);
+       }
+       if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) {
+               bios_3_scratch &= ~ATOM_S3_LCD1_CRTC_ACTIVE;
+               bios_3_scratch |= (crtc << 17);
+       }
+       if (radeon_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) {
+               bios_3_scratch &= ~ATOM_S3_DFP1_CRTC_ACTIVE;
+               bios_3_scratch |= (crtc << 19);
+       }
+       if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) {
+               bios_3_scratch &= ~ATOM_S3_DFP2_CRTC_ACTIVE;
+               bios_3_scratch |= (crtc << 23);
+       }
+       if (radeon_encoder->devices & ATOM_DEVICE_DFP3_SUPPORT) {
+               bios_3_scratch &= ~ATOM_S3_DFP3_CRTC_ACTIVE;
+               bios_3_scratch |= (crtc << 25);
+       }
+
+       if (rdev->family >= CHIP_R600)
+               WREG32(R600_BIOS_3_SCRATCH, bios_3_scratch);
+       else
+               WREG32(RADEON_BIOS_3_SCRATCH, bios_3_scratch);
+}
+
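+/* mirror each output's DPMS on/off state into BIOS scratch register 2 */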
+void
+radeon_atombios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on)
+{
+       struct drm_device *dev = encoder->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+       uint32_t bios_2_scratch;
+
+       if (rdev->family >= CHIP_R600)
+               bios_2_scratch = RREG32(R600_BIOS_2_SCRATCH);
+       else
+               bios_2_scratch = RREG32(RADEON_BIOS_2_SCRATCH);
+
+       if (radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) {
+               if (on)
+                       bios_2_scratch &= ~ATOM_S2_TV1_DPMS_STATE;
+               else
+                       bios_2_scratch |= ATOM_S2_TV1_DPMS_STATE;
+       }
+       if (radeon_encoder->devices & ATOM_DEVICE_CV_SUPPORT) {
+               if (on)
+                       bios_2_scratch &= ~ATOM_S2_CV_DPMS_STATE;
+               else
+                       bios_2_scratch |= ATOM_S2_CV_DPMS_STATE;
+       }
+       if (radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) {
+               if (on)
+                       bios_2_scratch &= ~ATOM_S2_CRT1_DPMS_STATE;
+               else
+                       bios_2_scratch |= ATOM_S2_CRT1_DPMS_STATE;
+       }
+       if (radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) {
+               if (on)
+                       bios_2_scratch &= ~ATOM_S2_CRT2_DPMS_STATE;
+               else
+                       bios_2_scratch |= ATOM_S2_CRT2_DPMS_STATE;
+       }
+       if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) {
+               if (on)
+                       bios_2_scratch &= ~ATOM_S2_LCD1_DPMS_STATE;
+               else
+                       bios_2_scratch |= ATOM_S2_LCD1_DPMS_STATE;
+       }
+       if (radeon_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) {
+               if (on)
+                       bios_2_scratch &= ~ATOM_S2_DFP1_DPMS_STATE;
+               else
+                       bios_2_scratch |= ATOM_S2_DFP1_DPMS_STATE;
+       }
+       if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) {
+               if (on)
+                       bios_2_scratch &= ~ATOM_S2_DFP2_DPMS_STATE;
+               else
+                       bios_2_scratch |= ATOM_S2_DFP2_DPMS_STATE;
+       }
+       if (radeon_encoder->devices & ATOM_DEVICE_DFP3_SUPPORT) {
+               if (on)
+                       bios_2_scratch &= ~ATOM_S2_DFP3_DPMS_STATE;
+               else
+                       bios_2_scratch |= ATOM_S2_DFP3_DPMS_STATE;
+       }
+       if (radeon_encoder->devices & ATOM_DEVICE_DFP4_SUPPORT) {
+               if (on)
+                       bios_2_scratch &= ~ATOM_S2_DFP4_DPMS_STATE;
+               else
+                       bios_2_scratch |= ATOM_S2_DFP4_DPMS_STATE;
+       }
+       if (radeon_encoder->devices & ATOM_DEVICE_DFP5_SUPPORT) {
+               if (on)
+                       bios_2_scratch &= ~ATOM_S2_DFP5_DPMS_STATE;
+               else
+                       bios_2_scratch |= ATOM_S2_DFP5_DPMS_STATE;
+       }
+
+       if (rdev->family >= CHIP_R600)
+               WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch);
+       else
+               WREG32(RADEON_BIOS_2_SCRATCH, bios_2_scratch);
+}
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
new file mode 100644 (file)
index 0000000..c44403a
--- /dev/null
@@ -0,0 +1,133 @@
+/*
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Jerome Glisse
+ */
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+
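+/* copy a buffer between the source and destination domains, first with the
+ * DMA engine and then with the blitter, and report the throughput of each.
+ */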
+void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
+                          unsigned sdomain, unsigned ddomain)
+{
+       struct radeon_object *dobj = NULL;
+       struct radeon_object *sobj = NULL;
+       struct radeon_fence *fence = NULL;
+       uint64_t saddr, daddr;
+       unsigned long start_jiffies;
+       unsigned long end_jiffies;
+       unsigned long time;
+       unsigned i, n, size;
+       int r;
+
+       size = bsize;
+       n = 1024;
+       r = radeon_object_create(rdev, NULL, size, true, sdomain, false, &sobj);
+       if (r) {
+               goto out_cleanup;
+       }
+       r = radeon_object_pin(sobj, sdomain, &saddr);
+       if (r) {
+               goto out_cleanup;
+       }
+       r = radeon_object_create(rdev, NULL, size, true, ddomain, false, &dobj);
+       if (r) {
+               goto out_cleanup;
+       }
+       r = radeon_object_pin(dobj, ddomain, &daddr);
+       if (r) {
+               goto out_cleanup;
+       }
+       start_jiffies = jiffies;
+       for (i = 0; i < n; i++) {
+               r = radeon_fence_create(rdev, &fence);
+               if (r) {
+                       goto out_cleanup;
+               }
+               r = radeon_copy_dma(rdev, saddr, daddr, size >> 14, fence);
+               if (r) {
+                       goto out_cleanup;
+               }
+               r = radeon_fence_wait(fence, false);
+               if (r) {
+                       goto out_cleanup;
+               }
+               radeon_fence_unref(&fence);
+       }
+       end_jiffies = jiffies;
+       time = end_jiffies - start_jiffies;
+       time = jiffies_to_msecs(time);
+       if (time > 0) {
+               i = ((n * size) >> 10) / time;
+               printk(KERN_INFO "radeon: dma %u bo moves of %ukb from %d to %d"
+                      " in %lums (%ukb/ms %ukb/s %uM/s)\n", n, size >> 10,
+                      sdomain, ddomain, time, i, i * 1000, (i * 1000) / 1024);
+       }
+       start_jiffies = jiffies;
+       for (i = 0; i < n; i++) {
+               r = radeon_fence_create(rdev, &fence);
+               if (r) {
+                       goto out_cleanup;
+               }
+               r = radeon_copy_blit(rdev, saddr, daddr, size >> 14, fence);
+               if (r) {
+                       goto out_cleanup;
+               }
+               r = radeon_fence_wait(fence, false);
+               if (r) {
+                       goto out_cleanup;
+               }
+               radeon_fence_unref(&fence);
+       }
+       end_jiffies = jiffies;
+       time = end_jiffies - start_jiffies;
+       time = jiffies_to_msecs(time);
+       if (time > 0) {
+               i = ((n * size) >> 10) / time;
+               printk(KERN_INFO "radeon: blit %u bo moves of %ukb from %d to %d"
+                      " in %lums (%ukb/ms %ukb/s %uM/s)\n", n, size >> 10,
+                      sdomain, ddomain, time, i, i * 1000, (i * 1000) / 1024);
+       }
+out_cleanup:
+       if (sobj) {
+               radeon_object_unpin(sobj);
+               radeon_object_unref(&sobj);
+       }
+       if (dobj) {
+               radeon_object_unpin(dobj);
+               radeon_object_unref(&dobj);
+       }
+       if (fence) {
+               radeon_fence_unref(&fence);
+       }
+       if (r) {
+               printk(KERN_WARNING "Error while benchmarking BO move.\n");
+       }
+}
+
+void radeon_benchmark(struct radeon_device *rdev)
+{
+       radeon_benchmark_move(rdev, 1024*1024, RADEON_GEM_DOMAIN_GTT,
+                             RADEON_GEM_DOMAIN_VRAM);
+       radeon_benchmark_move(rdev, 1024*1024, RADEON_GEM_DOMAIN_VRAM,
+                             RADEON_GEM_DOMAIN_GTT);
+}
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
new file mode 100644 (file)
index 0000000..96e37a6
--- /dev/null
@@ -0,0 +1,390 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include "drmP.h"
+#include "radeon_reg.h"
+#include "radeon.h"
+#include "atom.h"
+
+/*
+ * BIOS.
+ */
+static bool radeon_read_bios(struct radeon_device *rdev)
+{
+       uint8_t __iomem *bios;
+       size_t size;
+
+       rdev->bios = NULL;
+       bios = pci_map_rom(rdev->pdev, &size);
+       if (!bios) {
+               return false;
+       }
+
+       if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
+               pci_unmap_rom(rdev->pdev, bios);
+               return false;
+       }
+       rdev->bios = kmalloc(size, GFP_KERNEL);
+       if (rdev->bios == NULL) {
+               pci_unmap_rom(rdev->pdev, bios);
+               return false;
+       }
+       memcpy(rdev->bios, bios, size);
+       pci_unmap_rom(rdev->pdev, bios);
+       return true;
+}
+
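+/* On cards whose ROM decoding is disabled (e.g. a non-POSTed secondary GPU)
+ * the ROM must be re-enabled and some display state quiesced before the
+ * image can be read; each ASIC generation needs its own register sequence.
+ */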
+static bool r700_read_disabled_bios(struct radeon_device *rdev)
+{
+       uint32_t viph_control;
+       uint32_t bus_cntl;
+       uint32_t d1vga_control;
+       uint32_t d2vga_control;
+       uint32_t vga_render_control;
+       uint32_t rom_cntl;
+       uint32_t cg_spll_func_cntl = 0;
+       uint32_t cg_spll_status;
+       bool r;
+
+       viph_control = RREG32(RADEON_VIPH_CONTROL);
+       bus_cntl = RREG32(RADEON_BUS_CNTL);
+       d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
+       d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
+       vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
+       rom_cntl = RREG32(R600_ROM_CNTL);
+
+       /* disable VIP */
+       WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
+       /* enable the rom */
+       WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
+       /* Disable VGA mode */
+       WREG32(AVIVO_D1VGA_CONTROL,
+              (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+               AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+       WREG32(AVIVO_D2VGA_CONTROL,
+              (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+               AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+       WREG32(AVIVO_VGA_RENDER_CONTROL,
+              (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK));
+
+       if (rdev->family == CHIP_RV730) {
+               cg_spll_func_cntl = RREG32(R600_CG_SPLL_FUNC_CNTL);
+
+               /* enable bypass mode */
+               WREG32(R600_CG_SPLL_FUNC_CNTL, (cg_spll_func_cntl |
+                                               R600_SPLL_BYPASS_EN));
+
+               /* wait for SPLL_CHG_STATUS to change to 1 */
+               cg_spll_status = 0;
+               while (!(cg_spll_status & R600_SPLL_CHG_STATUS))
+                       cg_spll_status = RREG32(R600_CG_SPLL_STATUS);
+
+               WREG32(R600_ROM_CNTL, (rom_cntl & ~R600_SCK_OVERWRITE));
+       } else
+               WREG32(R600_ROM_CNTL, (rom_cntl | R600_SCK_OVERWRITE));
+
+       r = radeon_read_bios(rdev);
+
+       /* restore regs */
+       if (rdev->family == CHIP_RV730) {
+               WREG32(R600_CG_SPLL_FUNC_CNTL, cg_spll_func_cntl);
+
+               /* wait for SPLL_CHG_STATUS to change to 1 */
+               cg_spll_status = 0;
+               while (!(cg_spll_status & R600_SPLL_CHG_STATUS))
+                       cg_spll_status = RREG32(R600_CG_SPLL_STATUS);
+       }
+       WREG32(RADEON_VIPH_CONTROL, viph_control);
+       WREG32(RADEON_BUS_CNTL, bus_cntl);
+       WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
+       WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
+       WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
+       WREG32(R600_ROM_CNTL, rom_cntl);
+       return r;
+}
+
+static bool r600_read_disabled_bios(struct radeon_device *rdev)
+{
+       uint32_t viph_control;
+       uint32_t bus_cntl;
+       uint32_t d1vga_control;
+       uint32_t d2vga_control;
+       uint32_t vga_render_control;
+       uint32_t rom_cntl;
+       uint32_t general_pwrmgt;
+       uint32_t low_vid_lower_gpio_cntl;
+       uint32_t medium_vid_lower_gpio_cntl;
+       uint32_t high_vid_lower_gpio_cntl;
+       uint32_t ctxsw_vid_lower_gpio_cntl;
+       uint32_t lower_gpio_enable;
+       bool r;
+
+       viph_control = RREG32(RADEON_VIPH_CONTROL);
+       bus_cntl = RREG32(RADEON_BUS_CNTL);
+       d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
+       d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
+       vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
+       rom_cntl = RREG32(R600_ROM_CNTL);
+       general_pwrmgt = RREG32(R600_GENERAL_PWRMGT);
+       low_vid_lower_gpio_cntl = RREG32(R600_LOW_VID_LOWER_GPIO_CNTL);
+       medium_vid_lower_gpio_cntl = RREG32(R600_MEDIUM_VID_LOWER_GPIO_CNTL);
+       high_vid_lower_gpio_cntl = RREG32(R600_HIGH_VID_LOWER_GPIO_CNTL);
+       ctxsw_vid_lower_gpio_cntl = RREG32(R600_CTXSW_VID_LOWER_GPIO_CNTL);
+       lower_gpio_enable = RREG32(R600_LOWER_GPIO_ENABLE);
+
+       /* disable VIP */
+       WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
+       /* enable the rom */
+       WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
+       /* Disable VGA mode */
+       WREG32(AVIVO_D1VGA_CONTROL,
+              (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+               AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+       WREG32(AVIVO_D2VGA_CONTROL,
+              (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+               AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+       WREG32(AVIVO_VGA_RENDER_CONTROL,
+              (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK));
+
+       WREG32(R600_ROM_CNTL,
+              ((rom_cntl & ~R600_SCK_PRESCALE_CRYSTAL_CLK_MASK) |
+               (1 << R600_SCK_PRESCALE_CRYSTAL_CLK_SHIFT) |
+               R600_SCK_OVERWRITE));
+
+       WREG32(R600_GENERAL_PWRMGT, (general_pwrmgt & ~R600_OPEN_DRAIN_PADS));
+       WREG32(R600_LOW_VID_LOWER_GPIO_CNTL,
+              (low_vid_lower_gpio_cntl & ~0x400));
+       WREG32(R600_MEDIUM_VID_LOWER_GPIO_CNTL,
+              (medium_vid_lower_gpio_cntl & ~0x400));
+       WREG32(R600_HIGH_VID_LOWER_GPIO_CNTL,
+              (high_vid_lower_gpio_cntl & ~0x400));
+       WREG32(R600_CTXSW_VID_LOWER_GPIO_CNTL,
+              (ctxsw_vid_lower_gpio_cntl & ~0x400));
+       WREG32(R600_LOWER_GPIO_ENABLE, (lower_gpio_enable | 0x400));
+
+       r = radeon_read_bios(rdev);
+
+       /* restore regs */
+       WREG32(RADEON_VIPH_CONTROL, viph_control);
+       WREG32(RADEON_BUS_CNTL, bus_cntl);
+       WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
+       WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
+       WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
+       WREG32(R600_ROM_CNTL, rom_cntl);
+       WREG32(R600_GENERAL_PWRMGT, general_pwrmgt);
+       WREG32(R600_LOW_VID_LOWER_GPIO_CNTL, low_vid_lower_gpio_cntl);
+       WREG32(R600_MEDIUM_VID_LOWER_GPIO_CNTL, medium_vid_lower_gpio_cntl);
+       WREG32(R600_HIGH_VID_LOWER_GPIO_CNTL, high_vid_lower_gpio_cntl);
+       WREG32(R600_CTXSW_VID_LOWER_GPIO_CNTL, ctxsw_vid_lower_gpio_cntl);
+       WREG32(R600_LOWER_GPIO_ENABLE, lower_gpio_enable);
+       return r;
+}
+
+static bool avivo_read_disabled_bios(struct radeon_device *rdev)
+{
+       uint32_t seprom_cntl1;
+       uint32_t viph_control;
+       uint32_t bus_cntl;
+       uint32_t d1vga_control;
+       uint32_t d2vga_control;
+       uint32_t vga_render_control;
+       uint32_t gpiopad_a;
+       uint32_t gpiopad_en;
+       uint32_t gpiopad_mask;
+       bool r;
+
+       seprom_cntl1 = RREG32(RADEON_SEPROM_CNTL1);
+       viph_control = RREG32(RADEON_VIPH_CONTROL);
+       bus_cntl = RREG32(RADEON_BUS_CNTL);
+       d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
+       d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
+       vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
+       gpiopad_a = RREG32(RADEON_GPIOPAD_A);
+       gpiopad_en = RREG32(RADEON_GPIOPAD_EN);
+       gpiopad_mask = RREG32(RADEON_GPIOPAD_MASK);
+
+       WREG32(RADEON_SEPROM_CNTL1,
+              ((seprom_cntl1 & ~RADEON_SCK_PRESCALE_MASK) |
+               (0xc << RADEON_SCK_PRESCALE_SHIFT)));
+       WREG32(RADEON_GPIOPAD_A, 0);
+       WREG32(RADEON_GPIOPAD_EN, 0);
+       WREG32(RADEON_GPIOPAD_MASK, 0);
+
+       /* disable VIP */
+       WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
+
+       /* enable the rom */
+       WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
+
+       /* Disable VGA mode */
+       WREG32(AVIVO_D1VGA_CONTROL,
+              (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+               AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+       WREG32(AVIVO_D2VGA_CONTROL,
+              (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+               AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+       WREG32(AVIVO_VGA_RENDER_CONTROL,
+              (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK));
+
+       r = radeon_read_bios(rdev);
+
+       /* restore regs */
+       WREG32(RADEON_SEPROM_CNTL1, seprom_cntl1);
+       WREG32(RADEON_VIPH_CONTROL, viph_control);
+       WREG32(RADEON_BUS_CNTL, bus_cntl);
+       WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
+       WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
+       WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
+       WREG32(RADEON_GPIOPAD_A, gpiopad_a);
+       WREG32(RADEON_GPIOPAD_EN, gpiopad_en);
+       WREG32(RADEON_GPIOPAD_MASK, gpiopad_mask);
+       return r;
+}
+
+static bool legacy_read_disabled_bios(struct radeon_device *rdev)
+{
+       uint32_t seprom_cntl1;
+       uint32_t viph_control;
+       uint32_t bus_cntl;
+       uint32_t crtc_gen_cntl;
+       uint32_t crtc2_gen_cntl;
+       uint32_t crtc_ext_cntl;
+       uint32_t fp2_gen_cntl;
+       bool r;
+
+       seprom_cntl1 = RREG32(RADEON_SEPROM_CNTL1);
+       viph_control = RREG32(RADEON_VIPH_CONTROL);
+       bus_cntl = RREG32(RADEON_BUS_CNTL);
+       crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);
+       crtc2_gen_cntl = 0;
+       crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
+       fp2_gen_cntl = 0;
+
+       if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) {
+               fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
+       }
+
+       if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
+               crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
+       }
+
+       WREG32(RADEON_SEPROM_CNTL1,
+              ((seprom_cntl1 & ~RADEON_SCK_PRESCALE_MASK) |
+               (0xc << RADEON_SCK_PRESCALE_SHIFT)));
+
+       /* disable VIP */
+       WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
+
+       /* enable the rom */
+       WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
+
+       /* Turn off mem requests and CRTC for both controllers */
+       WREG32(RADEON_CRTC_GEN_CNTL,
+              ((crtc_gen_cntl & ~RADEON_CRTC_EN) |
+               (RADEON_CRTC_DISP_REQ_EN_B |
+                RADEON_CRTC_EXT_DISP_EN)));
+       if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
+               WREG32(RADEON_CRTC2_GEN_CNTL,
+                      ((crtc2_gen_cntl & ~RADEON_CRTC2_EN) |
+                       RADEON_CRTC2_DISP_REQ_EN_B));
+       }
+       /* Turn off CRTC */
+       WREG32(RADEON_CRTC_EXT_CNTL,
+              ((crtc_ext_cntl & ~RADEON_CRTC_CRT_ON) |
+               (RADEON_CRTC_SYNC_TRISTAT |
+                RADEON_CRTC_DISPLAY_DIS)));
+
+       if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) {
+               WREG32(RADEON_FP2_GEN_CNTL, (fp2_gen_cntl & ~RADEON_FP2_ON));
+       }
+
+       r = radeon_read_bios(rdev);
+
+       /* restore regs */
+       WREG32(RADEON_SEPROM_CNTL1, seprom_cntl1);
+       WREG32(RADEON_VIPH_CONTROL, viph_control);
+       WREG32(RADEON_BUS_CNTL, bus_cntl);
+       WREG32(RADEON_CRTC_GEN_CNTL, crtc_gen_cntl);
+       if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
+               WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
+       }
+       WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
+       if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) {
+               WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
+       }
+       return r;
+}
+
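+/* pick the disabled-BIOS read path that matches the ASIC generation */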
+static bool radeon_read_disabled_bios(struct radeon_device *rdev)
+{
+       if (rdev->family >= CHIP_RV770)
+               return r700_read_disabled_bios(rdev);
+       else if (rdev->family >= CHIP_R600)
+               return r600_read_disabled_bios(rdev);
+       else if (rdev->family >= CHIP_RS600)
+               return avivo_read_disabled_bios(rdev);
+       else
+               return legacy_read_disabled_bios(rdev);
+}
+
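+/* fetch the ROM image, falling back to the disabled-BIOS paths if needed,
+ * and detect whether it is an ATOM or a legacy COMBIOS image.
+ */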
+bool radeon_get_bios(struct radeon_device *rdev)
+{
+       bool r;
+       uint16_t tmp;
+
+       r = radeon_read_bios(rdev);
+       if (!r) {
+               r = radeon_read_disabled_bios(rdev);
+       }
+       if (!r || rdev->bios == NULL) {
+               DRM_ERROR("Unable to locate a BIOS ROM\n");
+               rdev->bios = NULL;
+               return false;
+       }
+       if (rdev->bios[0] != 0x55 || rdev->bios[1] != 0xaa) {
+               goto free_bios;
+       }
+
+       rdev->bios_header_start = RBIOS16(0x48);
+       if (!rdev->bios_header_start) {
+               goto free_bios;
+       }
+       tmp = rdev->bios_header_start + 4;
+       if (!memcmp(rdev->bios + tmp, "ATOM", 4) ||
+           !memcmp(rdev->bios + tmp, "MOTA", 4)) {
+               rdev->is_atom_bios = true;
+       } else {
+               rdev->is_atom_bios = false;
+       }
+
+       DRM_DEBUG("%sBIOS detected\n", rdev->is_atom_bios ? "ATOM" : "COM");
+       return true;
+free_bios:
+       kfree(rdev->bios);
+       rdev->bios = NULL;
+       return false;
+}
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
new file mode 100644 (file)
index 0000000..a37cbce
--- /dev/null
@@ -0,0 +1,833 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include "drmP.h"
+#include "radeon_drm.h"
+#include "radeon_reg.h"
+#include "radeon.h"
+#include "atom.h"
+
+/* 10 khz */
+static uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev)
+{
+       struct radeon_pll *spll = &rdev->clock.spll;
+       uint32_t fb_div, ref_div, post_div, sclk;
+
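+       /* sclk = 2 * fb_div * reference_freq / ref_div, reduced by the post divider below */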
+       fb_div = RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV);
+       fb_div = (fb_div >> RADEON_SPLL_FB_DIV_SHIFT) & RADEON_SPLL_FB_DIV_MASK;
+       fb_div <<= 1;
+       fb_div *= spll->reference_freq;
+
+       ref_div =
+           RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) & RADEON_M_SPLL_REF_DIV_MASK;
+       sclk = fb_div / ref_div;
+
+       post_div = RREG32_PLL(RADEON_SCLK_CNTL) & RADEON_SCLK_SRC_SEL_MASK;
+       if (post_div == 2)
+               sclk >>= 1;
+       else if (post_div == 3)
+               sclk >>= 2;
+       else if (post_div == 4)
+               sclk >>= 4;
+
+       return sclk;
+}
+
+/* 10 khz */
+static uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev)
+{
+       struct radeon_pll *mpll = &rdev->clock.mpll;
+       uint32_t fb_div, ref_div, post_div, mclk;
+
+       fb_div = RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV);
+       fb_div = (fb_div >> RADEON_MPLL_FB_DIV_SHIFT) & RADEON_MPLL_FB_DIV_MASK;
+       fb_div <<= 1;
+       fb_div *= mpll->reference_freq;
+
+       ref_div =
+           RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) & RADEON_M_SPLL_REF_DIV_MASK;
+       mclk = fb_div / ref_div;
+
+       post_div = RREG32_PLL(RADEON_MCLK_CNTL) & 0x7;
+       if (post_div == 2)
+               mclk >>= 1;
+       else if (post_div == 3)
+               mclk >>= 2;
+       else if (post_div == 4)
+               mclk >>= 4;
+
+       return mclk;
+}
+
+void radeon_get_clock_info(struct drm_device *dev)
+{
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_pll *p1pll = &rdev->clock.p1pll;
+       struct radeon_pll *p2pll = &rdev->clock.p2pll;
+       struct radeon_pll *spll = &rdev->clock.spll;
+       struct radeon_pll *mpll = &rdev->clock.mpll;
+       int ret;
+
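+       /* take PLL limits from the video BIOS when available, otherwise fall back to generic per-family values */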
+       if (rdev->is_atom_bios)
+               ret = radeon_atom_get_clock_info(dev);
+       else
+               ret = radeon_combios_get_clock_info(dev);
+
+       if (ret) {
+               if (p1pll->reference_div < 2)
+                       p1pll->reference_div = 12;
+               if (p2pll->reference_div < 2)
+                       p2pll->reference_div = 12;
+               if (spll->reference_div < 2)
+                       spll->reference_div =
+                           RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) &
+                           RADEON_M_SPLL_REF_DIV_MASK;
+               if (mpll->reference_div < 2)
+                       mpll->reference_div = spll->reference_div;
+       } else {
+               if (ASIC_IS_AVIVO(rdev)) {
+                       /* TODO FALLBACK */
+               } else {
+                       DRM_INFO("Using generic clock info\n");
+
+                       if (rdev->flags & RADEON_IS_IGP) {
+                               p1pll->reference_freq = 1432;
+                               p2pll->reference_freq = 1432;
+                               spll->reference_freq = 1432;
+                               mpll->reference_freq = 1432;
+                       } else {
+                               p1pll->reference_freq = 2700;
+                               p2pll->reference_freq = 2700;
+                               spll->reference_freq = 2700;
+                               mpll->reference_freq = 2700;
+                       }
+                       p1pll->reference_div =
+                           RREG32_PLL(RADEON_PPLL_REF_DIV) & 0x3ff;
+                       if (p1pll->reference_div < 2)
+                               p1pll->reference_div = 12;
+                       p2pll->reference_div = p1pll->reference_div;
+
+                       if (rdev->family >= CHIP_R420) {
+                               p1pll->pll_in_min = 100;
+                               p1pll->pll_in_max = 1350;
+                               p1pll->pll_out_min = 20000;
+                               p1pll->pll_out_max = 50000;
+                               p2pll->pll_in_min = 100;
+                               p2pll->pll_in_max = 1350;
+                               p2pll->pll_out_min = 20000;
+                               p2pll->pll_out_max = 50000;
+                       } else {
+                               p1pll->pll_in_min = 40;
+                               p1pll->pll_in_max = 500;
+                               p1pll->pll_out_min = 12500;
+                               p1pll->pll_out_max = 35000;
+                               p2pll->pll_in_min = 40;
+                               p2pll->pll_in_max = 500;
+                               p2pll->pll_out_min = 12500;
+                               p2pll->pll_out_max = 35000;
+                       }
+
+                       spll->reference_div =
+                           RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) &
+                           RADEON_M_SPLL_REF_DIV_MASK;
+                       mpll->reference_div = spll->reference_div;
+                       rdev->clock.default_sclk =
+                           radeon_legacy_get_engine_clock(rdev);
+                       rdev->clock.default_mclk =
+                           radeon_legacy_get_memory_clock(rdev);
+               }
+       }
+
+       /* pixel clocks */
+       if (ASIC_IS_AVIVO(rdev)) {
+               p1pll->min_post_div = 2;
+               p1pll->max_post_div = 0x7f;
+               p1pll->min_frac_feedback_div = 0;
+               p1pll->max_frac_feedback_div = 9;
+               p2pll->min_post_div = 2;
+               p2pll->max_post_div = 0x7f;
+               p2pll->min_frac_feedback_div = 0;
+               p2pll->max_frac_feedback_div = 9;
+       } else {
+               p1pll->min_post_div = 1;
+               p1pll->max_post_div = 16;
+               p1pll->min_frac_feedback_div = 0;
+               p1pll->max_frac_feedback_div = 0;
+               p2pll->min_post_div = 1;
+               p2pll->max_post_div = 12;
+               p2pll->min_frac_feedback_div = 0;
+               p2pll->max_frac_feedback_div = 0;
+       }
+
+       p1pll->min_ref_div = 2;
+       p1pll->max_ref_div = 0x3ff;
+       p1pll->min_feedback_div = 4;
+       p1pll->max_feedback_div = 0x7ff;
+       p1pll->best_vco = 0;
+
+       p2pll->min_ref_div = 2;
+       p2pll->max_ref_div = 0x3ff;
+       p2pll->min_feedback_div = 4;
+       p2pll->max_feedback_div = 0x7ff;
+       p2pll->best_vco = 0;
+
+       /* system clock */
+       spll->min_post_div = 1;
+       spll->max_post_div = 1;
+       spll->min_ref_div = 2;
+       spll->max_ref_div = 0xff;
+       spll->min_feedback_div = 4;
+       spll->max_feedback_div = 0xff;
+       spll->best_vco = 0;
+
+       /* memory clock */
+       mpll->min_post_div = 1;
+       mpll->max_post_div = 1;
+       mpll->min_ref_div = 2;
+       mpll->max_ref_div = 0xff;
+       mpll->min_feedback_div = 4;
+       mpll->max_feedback_div = 0xff;
+       mpll->best_vco = 0;
+}
+
+/* 10 khz */
+static uint32_t calc_eng_mem_clock(struct radeon_device *rdev,
+                                  uint32_t req_clock,
+                                  int *fb_div, int *post_div)
+{
+       struct radeon_pll *spll = &rdev->clock.spll;
+       int ref_div = spll->reference_div;
+
+       if (!ref_div)
+               ref_div =
+                   RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) &
+                   RADEON_M_SPLL_REF_DIV_MASK;
+
+       if (req_clock < 15000) {
+               *post_div = 8;
+               req_clock *= 8;
+       } else if (req_clock < 30000) {
+               *post_div = 4;
+               req_clock *= 4;
+       } else if (req_clock < 60000) {
+               *post_div = 2;
+               req_clock *= 2;
+       } else
+               *post_div = 1;
+
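+       /* fb_div = round(req_clock * ref_div / (2 * reference_freq)) */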
+       req_clock *= ref_div;
+       req_clock += spll->reference_freq;
+       req_clock /= (2 * spll->reference_freq);
+
+       *fb_div = req_clock & 0xff;
+
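+       /* recompute the clock the chosen dividers will actually produce */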
+       req_clock = (req_clock & 0xffff) << 1;
+       req_clock *= spll->reference_freq;
+       req_clock /= ref_div;
+       req_clock /= *post_div;
+
+       return req_clock;
+}
+
+/* 10 khz */
+void radeon_legacy_set_engine_clock(struct radeon_device *rdev,
+                                   uint32_t eng_clock)
+{
+       uint32_t tmp;
+       int fb_div, post_div;
+
+       /* XXX: wait for idle */
+
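+       /*
+        * Run the engine clock from the reference clock while the SPLL is
+        * reprogrammed: put the SPLL to sleep and in reset, load the new
+        * feedback divider and PVG setting, release it, then reselect the
+        * post divider.
+        */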
+       eng_clock = calc_eng_mem_clock(rdev, eng_clock, &fb_div, &post_div);
+
+       tmp = RREG32_PLL(RADEON_CLK_PIN_CNTL);
+       tmp &= ~RADEON_DONT_USE_XTALIN;
+       WREG32_PLL(RADEON_CLK_PIN_CNTL, tmp);
+
+       tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+       tmp &= ~RADEON_SCLK_SRC_SEL_MASK;
+       WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+
+       udelay(10);
+
+       tmp = RREG32_PLL(RADEON_SPLL_CNTL);
+       tmp |= RADEON_SPLL_SLEEP;
+       WREG32_PLL(RADEON_SPLL_CNTL, tmp);
+
+       udelay(2);
+
+       tmp = RREG32_PLL(RADEON_SPLL_CNTL);
+       tmp |= RADEON_SPLL_RESET;
+       WREG32_PLL(RADEON_SPLL_CNTL, tmp);
+
+       udelay(200);
+
+       tmp = RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV);
+       tmp &= ~(RADEON_SPLL_FB_DIV_MASK << RADEON_SPLL_FB_DIV_SHIFT);
+       tmp |= (fb_div & RADEON_SPLL_FB_DIV_MASK) << RADEON_SPLL_FB_DIV_SHIFT;
+       WREG32_PLL(RADEON_M_SPLL_REF_FB_DIV, tmp);
+
+       /* XXX: verify on different asics */
+       tmp = RREG32_PLL(RADEON_SPLL_CNTL);
+       tmp &= ~RADEON_SPLL_PVG_MASK;
+       if ((eng_clock * post_div) >= 90000)
+               tmp |= (0x7 << RADEON_SPLL_PVG_SHIFT);
+       else
+               tmp |= (0x4 << RADEON_SPLL_PVG_SHIFT);
+       WREG32_PLL(RADEON_SPLL_CNTL, tmp);
+
+       tmp = RREG32_PLL(RADEON_SPLL_CNTL);
+       tmp &= ~RADEON_SPLL_SLEEP;
+       WREG32_PLL(RADEON_SPLL_CNTL, tmp);
+
+       udelay(2);
+
+       tmp = RREG32_PLL(RADEON_SPLL_CNTL);
+       tmp &= ~RADEON_SPLL_RESET;
+       WREG32_PLL(RADEON_SPLL_CNTL, tmp);
+
+       udelay(200);
+
+       tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+       tmp &= ~RADEON_SCLK_SRC_SEL_MASK;
+       switch (post_div) {
+       case 1:
+       default:
+               tmp |= 1;
+               break;
+       case 2:
+               tmp |= 2;
+               break;
+       case 4:
+               tmp |= 3;
+               break;
+       case 8:
+               tmp |= 4;
+               break;
+       }
+       WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+
+       udelay(20);
+
+       tmp = RREG32_PLL(RADEON_CLK_PIN_CNTL);
+       tmp |= RADEON_DONT_USE_XTALIN;
+       WREG32_PLL(RADEON_CLK_PIN_CNTL, tmp);
+
+       udelay(10);
+}
+
+void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable)
+{
+       uint32_t tmp;
+
+       if (enable) {
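+               /* enable: release the per-block SCLK force bits (and set the
+                * *_ALWAYS_ONb bits where present) so the clocks can gate
+                * dynamically */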
+               if (rdev->flags & RADEON_SINGLE_CRTC) {
+                       tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+                       if ((RREG32(RADEON_CONFIG_CNTL) &
+                            RADEON_CFG_ATI_REV_ID_MASK) >
+                           RADEON_CFG_ATI_REV_A13) {
+                               tmp &=
+                                   ~(RADEON_SCLK_FORCE_CP |
+                                     RADEON_SCLK_FORCE_RB);
+                       }
+                       tmp &=
+                           ~(RADEON_SCLK_FORCE_HDP | RADEON_SCLK_FORCE_DISP1 |
+                             RADEON_SCLK_FORCE_TOP | RADEON_SCLK_FORCE_SE |
+                             RADEON_SCLK_FORCE_IDCT | RADEON_SCLK_FORCE_RE |
+                             RADEON_SCLK_FORCE_PB | RADEON_SCLK_FORCE_TAM |
+                             RADEON_SCLK_FORCE_TDM);
+                       WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+               } else if (ASIC_IS_R300(rdev)) {
+                       if ((rdev->family == CHIP_RS400) ||
+                           (rdev->family == CHIP_RS480)) {
+                               tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+                               tmp &=
+                                   ~(RADEON_SCLK_FORCE_DISP2 |
+                                     RADEON_SCLK_FORCE_CP |
+                                     RADEON_SCLK_FORCE_HDP |
+                                     RADEON_SCLK_FORCE_DISP1 |
+                                     RADEON_SCLK_FORCE_TOP |
+                                     RADEON_SCLK_FORCE_E2 | R300_SCLK_FORCE_VAP
+                                     | RADEON_SCLK_FORCE_IDCT |
+                                     RADEON_SCLK_FORCE_VIP | R300_SCLK_FORCE_SR
+                                     | R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX
+                                     | R300_SCLK_FORCE_US |
+                                     RADEON_SCLK_FORCE_TV_SCLK |
+                                     R300_SCLK_FORCE_SU |
+                                     RADEON_SCLK_FORCE_OV0);
+                               tmp |= RADEON_DYN_STOP_LAT_MASK;
+                               tmp |=
+                                   RADEON_SCLK_FORCE_TOP |
+                                   RADEON_SCLK_FORCE_VIP;
+                               WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+
+                               tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
+                               tmp &= ~RADEON_SCLK_MORE_FORCEON;
+                               tmp |= RADEON_SCLK_MORE_MAX_DYN_STOP_LAT;
+                               WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
+
+                               tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
+                               tmp |= (RADEON_PIXCLK_ALWAYS_ONb |
+                                       RADEON_PIXCLK_DAC_ALWAYS_ONb);
+                               WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
+
+                               tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
+                               tmp |= (RADEON_PIX2CLK_ALWAYS_ONb |
+                                       RADEON_PIX2CLK_DAC_ALWAYS_ONb |
+                                       RADEON_DISP_TVOUT_PIXCLK_TV_ALWAYS_ONb |
+                                       R300_DVOCLK_ALWAYS_ONb |
+                                       RADEON_PIXCLK_BLEND_ALWAYS_ONb |
+                                       RADEON_PIXCLK_GV_ALWAYS_ONb |
+                                       R300_PIXCLK_DVO_ALWAYS_ONb |
+                                       RADEON_PIXCLK_LVDS_ALWAYS_ONb |
+                                       RADEON_PIXCLK_TMDS_ALWAYS_ONb |
+                                       R300_PIXCLK_TRANS_ALWAYS_ONb |
+                                       R300_PIXCLK_TVO_ALWAYS_ONb |
+                                       R300_P2G2CLK_ALWAYS_ONb);
+                               WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
+                       } else if (rdev->family >= CHIP_RV350) {
+                               tmp = RREG32_PLL(R300_SCLK_CNTL2);
+                               tmp &= ~(R300_SCLK_FORCE_TCL |
+                                        R300_SCLK_FORCE_GA |
+                                        R300_SCLK_FORCE_CBA);
+                               tmp |= (R300_SCLK_TCL_MAX_DYN_STOP_LAT |
+                                       R300_SCLK_GA_MAX_DYN_STOP_LAT |
+                                       R300_SCLK_CBA_MAX_DYN_STOP_LAT);
+                               WREG32_PLL(R300_SCLK_CNTL2, tmp);
+
+                               tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+                               tmp &=
+                                   ~(RADEON_SCLK_FORCE_DISP2 |
+                                     RADEON_SCLK_FORCE_CP |
+                                     RADEON_SCLK_FORCE_HDP |
+                                     RADEON_SCLK_FORCE_DISP1 |
+                                     RADEON_SCLK_FORCE_TOP |
+                                     RADEON_SCLK_FORCE_E2 | R300_SCLK_FORCE_VAP
+                                     | RADEON_SCLK_FORCE_IDCT |
+                                     RADEON_SCLK_FORCE_VIP | R300_SCLK_FORCE_SR
+                                     | R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX
+                                     | R300_SCLK_FORCE_US |
+                                     RADEON_SCLK_FORCE_TV_SCLK |
+                                     R300_SCLK_FORCE_SU |
+                                     RADEON_SCLK_FORCE_OV0);
+                               tmp |= RADEON_DYN_STOP_LAT_MASK;
+                               WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+
+                               tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
+                               tmp &= ~RADEON_SCLK_MORE_FORCEON;
+                               tmp |= RADEON_SCLK_MORE_MAX_DYN_STOP_LAT;
+                               WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
+
+                               tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
+                               tmp |= (RADEON_PIXCLK_ALWAYS_ONb |
+                                       RADEON_PIXCLK_DAC_ALWAYS_ONb);
+                               WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
+
+                               tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
+                               tmp |= (RADEON_PIX2CLK_ALWAYS_ONb |
+                                       RADEON_PIX2CLK_DAC_ALWAYS_ONb |
+                                       RADEON_DISP_TVOUT_PIXCLK_TV_ALWAYS_ONb |
+                                       R300_DVOCLK_ALWAYS_ONb |
+                                       RADEON_PIXCLK_BLEND_ALWAYS_ONb |
+                                       RADEON_PIXCLK_GV_ALWAYS_ONb |
+                                       R300_PIXCLK_DVO_ALWAYS_ONb |
+                                       RADEON_PIXCLK_LVDS_ALWAYS_ONb |
+                                       RADEON_PIXCLK_TMDS_ALWAYS_ONb |
+                                       R300_PIXCLK_TRANS_ALWAYS_ONb |
+                                       R300_PIXCLK_TVO_ALWAYS_ONb |
+                                       R300_P2G2CLK_ALWAYS_ONb);
+                               WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
+
+                               tmp = RREG32_PLL(RADEON_MCLK_MISC);
+                               tmp |= (RADEON_MC_MCLK_DYN_ENABLE |
+                                       RADEON_IO_MCLK_DYN_ENABLE);
+                               WREG32_PLL(RADEON_MCLK_MISC, tmp);
+
+                               tmp = RREG32_PLL(RADEON_MCLK_CNTL);
+                               tmp |= (RADEON_FORCEON_MCLKA |
+                                       RADEON_FORCEON_MCLKB);
+
+                               tmp &= ~(RADEON_FORCEON_YCLKA |
+                                        RADEON_FORCEON_YCLKB |
+                                        RADEON_FORCEON_MC);
+
+                               /*
+                                * Some releases of vbios have set DISABLE_MC_MCLKA
+                                * and DISABLE_MC_MCLKB bits in the vbios table.
+                                * Setting these bits will cause H/W hang when
+                                * reading video memory with dynamic clocking
+                                * enabled.
+                                */
+                               if ((tmp & R300_DISABLE_MC_MCLKA) &&
+                                   (tmp & R300_DISABLE_MC_MCLKB)) {
+                                       /* If both bits are set, then check the active channels */
+                                       tmp = RREG32_PLL(RADEON_MCLK_CNTL);
+                                       if (rdev->mc.vram_width == 64) {
+                                               if (RREG32(RADEON_MEM_CNTL) &
+                                                   R300_MEM_USE_CD_CH_ONLY)
+                                                       tmp &=
+                                                           ~R300_DISABLE_MC_MCLKB;
+                                               else
+                                                       tmp &=
+                                                           ~R300_DISABLE_MC_MCLKA;
+                                       } else {
+                                               tmp &= ~(R300_DISABLE_MC_MCLKA |
+                                                        R300_DISABLE_MC_MCLKB);
+                                       }
+                               }
+
+                               WREG32_PLL(RADEON_MCLK_CNTL, tmp);
+                       } else {
+                               tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+                               tmp &= ~(R300_SCLK_FORCE_VAP);
+                               tmp |= RADEON_SCLK_FORCE_CP;
+                               WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+                               udelay(15000);
+
+                               tmp = RREG32_PLL(R300_SCLK_CNTL2);
+                               tmp &= ~(R300_SCLK_FORCE_TCL |
+                                        R300_SCLK_FORCE_GA |
+                                        R300_SCLK_FORCE_CBA);
+                               WREG32_PLL(R300_SCLK_CNTL2, tmp);
+                       }
+               } else {
+                       tmp = RREG32_PLL(RADEON_CLK_PWRMGT_CNTL);
+
+                       tmp &= ~(RADEON_ACTIVE_HILO_LAT_MASK |
+                                RADEON_DISP_DYN_STOP_LAT_MASK |
+                                RADEON_DYN_STOP_MODE_MASK);
+
+                       tmp |= (RADEON_ENGIN_DYNCLK_MODE |
+                               (0x01 << RADEON_ACTIVE_HILO_LAT_SHIFT));
+                       WREG32_PLL(RADEON_CLK_PWRMGT_CNTL, tmp);
+                       udelay(15000);
+
+                       tmp = RREG32_PLL(RADEON_CLK_PIN_CNTL);
+                       tmp |= RADEON_SCLK_DYN_START_CNTL;
+                       WREG32_PLL(RADEON_CLK_PIN_CNTL, tmp);
+                       udelay(15000);
+
+                       /*
+                        * When DRI is enabled, setting DYN_STOP_LAT to zero can
+                        * cause some R200 chips to lock up randomly; leave it as
+                        * set by the BIOS.
+                        */
+                       tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+                       /*tmp &= RADEON_SCLK_SRC_SEL_MASK; */
+                       tmp &= ~RADEON_SCLK_FORCEON_MASK;
+
+                       /*RAGE_6::A11 A12 A12N1 A13, RV250::A11 A12, R300 */
+                       if (((rdev->family == CHIP_RV250) &&
+                            ((RREG32(RADEON_CONFIG_CNTL) &
+                              RADEON_CFG_ATI_REV_ID_MASK) <
+                             RADEON_CFG_ATI_REV_A13))
+                           || ((rdev->family == CHIP_RV100)
+                               &&
+                               ((RREG32(RADEON_CONFIG_CNTL) &
+                                 RADEON_CFG_ATI_REV_ID_MASK) <=
+                                RADEON_CFG_ATI_REV_A13))) {
+                               tmp |= RADEON_SCLK_FORCE_CP;
+                               tmp |= RADEON_SCLK_FORCE_VIP;
+                       }
+
+                       WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+
+                       if ((rdev->family == CHIP_RV200) ||
+                           (rdev->family == CHIP_RV250) ||
+                           (rdev->family == CHIP_RV280)) {
+                               tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
+                               tmp &= ~RADEON_SCLK_MORE_FORCEON;
+
+                               /* RV200::A11 A12 RV250::A11 A12 */
+                               if (((rdev->family == CHIP_RV200) ||
+                                    (rdev->family == CHIP_RV250)) &&
+                                   ((RREG32(RADEON_CONFIG_CNTL) &
+                                     RADEON_CFG_ATI_REV_ID_MASK) <
+                                    RADEON_CFG_ATI_REV_A13)) {
+                                       tmp |= RADEON_SCLK_MORE_FORCEON;
+                               }
+                               WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
+                               udelay(15000);
+                       }
+
+                       /* RV200::A11 A12, RV250::A11 A12 */
+                       if (((rdev->family == CHIP_RV200) ||
+                            (rdev->family == CHIP_RV250)) &&
+                           ((RREG32(RADEON_CONFIG_CNTL) &
+                             RADEON_CFG_ATI_REV_ID_MASK) <
+                            RADEON_CFG_ATI_REV_A13)) {
+                               tmp = RREG32_PLL(RADEON_PLL_PWRMGT_CNTL);
+                               tmp |= RADEON_TCL_BYPASS_DISABLE;
+                               WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp);
+                       }
+                       udelay(15000);
+
+                       /*enable dynamic mode for display clocks (PIXCLK and PIX2CLK) */
+                       tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
+                       tmp |= (RADEON_PIX2CLK_ALWAYS_ONb |
+                               RADEON_PIX2CLK_DAC_ALWAYS_ONb |
+                               RADEON_PIXCLK_BLEND_ALWAYS_ONb |
+                               RADEON_PIXCLK_GV_ALWAYS_ONb |
+                               RADEON_PIXCLK_DIG_TMDS_ALWAYS_ONb |
+                               RADEON_PIXCLK_LVDS_ALWAYS_ONb |
+                               RADEON_PIXCLK_TMDS_ALWAYS_ONb);
+
+                       WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
+                       udelay(15000);
+
+                       tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
+                       tmp |= (RADEON_PIXCLK_ALWAYS_ONb |
+                               RADEON_PIXCLK_DAC_ALWAYS_ONb);
+
+                       WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
+                       udelay(15000);
+               }
+       } else {
+               /* Turn everything OFF (ForceON to everything) */
+               if (rdev->flags & RADEON_SINGLE_CRTC) {
+                       tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+                       tmp |= (RADEON_SCLK_FORCE_CP | RADEON_SCLK_FORCE_HDP |
+                               RADEON_SCLK_FORCE_DISP1 | RADEON_SCLK_FORCE_TOP
+                               | RADEON_SCLK_FORCE_E2 | RADEON_SCLK_FORCE_SE |
+                               RADEON_SCLK_FORCE_IDCT | RADEON_SCLK_FORCE_VIP |
+                               RADEON_SCLK_FORCE_RE | RADEON_SCLK_FORCE_PB |
+                               RADEON_SCLK_FORCE_TAM | RADEON_SCLK_FORCE_TDM |
+                               RADEON_SCLK_FORCE_RB);
+                       WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+               } else if ((rdev->family == CHIP_RS400) ||
+                          (rdev->family == CHIP_RS480)) {
+                       tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+                       tmp |= (RADEON_SCLK_FORCE_DISP2 | RADEON_SCLK_FORCE_CP |
+                               RADEON_SCLK_FORCE_HDP | RADEON_SCLK_FORCE_DISP1
+                               | RADEON_SCLK_FORCE_TOP | RADEON_SCLK_FORCE_E2 |
+                               R300_SCLK_FORCE_VAP | RADEON_SCLK_FORCE_IDCT |
+                               RADEON_SCLK_FORCE_VIP | R300_SCLK_FORCE_SR |
+                               R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX |
+                               R300_SCLK_FORCE_US | RADEON_SCLK_FORCE_TV_SCLK |
+                               R300_SCLK_FORCE_SU | RADEON_SCLK_FORCE_OV0);
+                       WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+
+                       tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
+                       tmp |= RADEON_SCLK_MORE_FORCEON;
+                       WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
+
+                       tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
+                       tmp &= ~(RADEON_PIXCLK_ALWAYS_ONb |
+                                RADEON_PIXCLK_DAC_ALWAYS_ONb |
+                                R300_DISP_DAC_PIXCLK_DAC_BLANK_OFF);
+                       WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
+
+                       tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
+                       tmp &= ~(RADEON_PIX2CLK_ALWAYS_ONb |
+                                RADEON_PIX2CLK_DAC_ALWAYS_ONb |
+                                RADEON_DISP_TVOUT_PIXCLK_TV_ALWAYS_ONb |
+                                R300_DVOCLK_ALWAYS_ONb |
+                                RADEON_PIXCLK_BLEND_ALWAYS_ONb |
+                                RADEON_PIXCLK_GV_ALWAYS_ONb |
+                                R300_PIXCLK_DVO_ALWAYS_ONb |
+                                RADEON_PIXCLK_LVDS_ALWAYS_ONb |
+                                RADEON_PIXCLK_TMDS_ALWAYS_ONb |
+                                R300_PIXCLK_TRANS_ALWAYS_ONb |
+                                R300_PIXCLK_TVO_ALWAYS_ONb |
+                                R300_P2G2CLK_ALWAYS_ONb |
+                                R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF);
+                       WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
+               } else if (rdev->family >= CHIP_RV350) {
+                       /* for RV350/M10, no delays are required. */
+                       tmp = RREG32_PLL(R300_SCLK_CNTL2);
+                       tmp |= (R300_SCLK_FORCE_TCL |
+                               R300_SCLK_FORCE_GA | R300_SCLK_FORCE_CBA);
+                       WREG32_PLL(R300_SCLK_CNTL2, tmp);
+
+                       tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+                       tmp |= (RADEON_SCLK_FORCE_DISP2 | RADEON_SCLK_FORCE_CP |
+                               RADEON_SCLK_FORCE_HDP | RADEON_SCLK_FORCE_DISP1
+                               | RADEON_SCLK_FORCE_TOP | RADEON_SCLK_FORCE_E2 |
+                               R300_SCLK_FORCE_VAP | RADEON_SCLK_FORCE_IDCT |
+                               RADEON_SCLK_FORCE_VIP | R300_SCLK_FORCE_SR |
+                               R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX |
+                               R300_SCLK_FORCE_US | RADEON_SCLK_FORCE_TV_SCLK |
+                               R300_SCLK_FORCE_SU | RADEON_SCLK_FORCE_OV0);
+                       WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+
+                       tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
+                       tmp |= RADEON_SCLK_MORE_FORCEON;
+                       WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
+
+                       tmp = RREG32_PLL(RADEON_MCLK_CNTL);
+                       tmp |= (RADEON_FORCEON_MCLKA |
+                               RADEON_FORCEON_MCLKB |
+                               RADEON_FORCEON_YCLKA |
+                               RADEON_FORCEON_YCLKB | RADEON_FORCEON_MC);
+                       WREG32_PLL(RADEON_MCLK_CNTL, tmp);
+
+                       tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
+                       tmp &= ~(RADEON_PIXCLK_ALWAYS_ONb |
+                                RADEON_PIXCLK_DAC_ALWAYS_ONb |
+                                R300_DISP_DAC_PIXCLK_DAC_BLANK_OFF);
+                       WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
+
+                       tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
+                       tmp &= ~(RADEON_PIX2CLK_ALWAYS_ONb |
+                                RADEON_PIX2CLK_DAC_ALWAYS_ONb |
+                                RADEON_DISP_TVOUT_PIXCLK_TV_ALWAYS_ONb |
+                                R300_DVOCLK_ALWAYS_ONb |
+                                RADEON_PIXCLK_BLEND_ALWAYS_ONb |
+                                RADEON_PIXCLK_GV_ALWAYS_ONb |
+                                R300_PIXCLK_DVO_ALWAYS_ONb |
+                                RADEON_PIXCLK_LVDS_ALWAYS_ONb |
+                                RADEON_PIXCLK_TMDS_ALWAYS_ONb |
+                                R300_PIXCLK_TRANS_ALWAYS_ONb |
+                                R300_PIXCLK_TVO_ALWAYS_ONb |
+                                R300_P2G2CLK_ALWAYS_ONb |
+                                R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF);
+                       WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
+               } else {
+                       tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+                       tmp |= (RADEON_SCLK_FORCE_CP | RADEON_SCLK_FORCE_E2);
+                       tmp |= RADEON_SCLK_FORCE_SE;
+
+                       if (rdev->flags & RADEON_SINGLE_CRTC) {
+                               tmp |= (RADEON_SCLK_FORCE_RB |
+                                       RADEON_SCLK_FORCE_TDM |
+                                       RADEON_SCLK_FORCE_TAM |
+                                       RADEON_SCLK_FORCE_PB |
+                                       RADEON_SCLK_FORCE_RE |
+                                       RADEON_SCLK_FORCE_VIP |
+                                       RADEON_SCLK_FORCE_IDCT |
+                                       RADEON_SCLK_FORCE_TOP |
+                                       RADEON_SCLK_FORCE_DISP1 |
+                                       RADEON_SCLK_FORCE_DISP2 |
+                                       RADEON_SCLK_FORCE_HDP);
+                       } else if ((rdev->family == CHIP_R300) ||
+                                  (rdev->family == CHIP_R350)) {
+                               tmp |= (RADEON_SCLK_FORCE_HDP |
+                                       RADEON_SCLK_FORCE_DISP1 |
+                                       RADEON_SCLK_FORCE_DISP2 |
+                                       RADEON_SCLK_FORCE_TOP |
+                                       RADEON_SCLK_FORCE_IDCT |
+                                       RADEON_SCLK_FORCE_VIP);
+                       }
+                       WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+
+                       udelay(16000);
+
+                       if ((rdev->family == CHIP_R300) ||
+                           (rdev->family == CHIP_R350)) {
+                               tmp = RREG32_PLL(R300_SCLK_CNTL2);
+                               tmp |= (R300_SCLK_FORCE_TCL |
+                                       R300_SCLK_FORCE_GA |
+                                       R300_SCLK_FORCE_CBA);
+                               WREG32_PLL(R300_SCLK_CNTL2, tmp);
+                               udelay(16000);
+                       }
+
+                       if (rdev->flags & RADEON_IS_IGP) {
+                               tmp = RREG32_PLL(RADEON_MCLK_CNTL);
+                               tmp &= ~(RADEON_FORCEON_MCLKA |
+                                        RADEON_FORCEON_YCLKA);
+                               WREG32_PLL(RADEON_MCLK_CNTL, tmp);
+                               udelay(16000);
+                       }
+
+                       if ((rdev->family == CHIP_RV200) ||
+                           (rdev->family == CHIP_RV250) ||
+                           (rdev->family == CHIP_RV280)) {
+                               tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
+                               tmp |= RADEON_SCLK_MORE_FORCEON;
+                               WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
+                               udelay(16000);
+                       }
+
+                       tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
+                       tmp &= ~(RADEON_PIX2CLK_ALWAYS_ONb |
+                                RADEON_PIX2CLK_DAC_ALWAYS_ONb |
+                                RADEON_PIXCLK_BLEND_ALWAYS_ONb |
+                                RADEON_PIXCLK_GV_ALWAYS_ONb |
+                                RADEON_PIXCLK_DIG_TMDS_ALWAYS_ONb |
+                                RADEON_PIXCLK_LVDS_ALWAYS_ONb |
+                                RADEON_PIXCLK_TMDS_ALWAYS_ONb);
+
+                       WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
+                       udelay(16000);
+
+                       tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
+                       tmp &= ~(RADEON_PIXCLK_ALWAYS_ONb |
+                                RADEON_PIXCLK_DAC_ALWAYS_ONb);
+                       WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
+               }
+       }
+}
+
+static void radeon_apply_clock_quirks(struct radeon_device *rdev)
+{
+       uint32_t tmp;
+
+       /* XXX make sure engine is idle */
+
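+       /* per-family quirks: keep a few blocks force-clocked instead of letting them gate */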
+       if (rdev->family < CHIP_RS600) {
+               tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+               if (ASIC_IS_R300(rdev) || ASIC_IS_RV100(rdev))
+                       tmp |= RADEON_SCLK_FORCE_CP | RADEON_SCLK_FORCE_VIP;
+               if ((rdev->family == CHIP_RV250)
+                   || (rdev->family == CHIP_RV280))
+                       tmp |=
+                           RADEON_SCLK_FORCE_DISP1 | RADEON_SCLK_FORCE_DISP2;
+               if ((rdev->family == CHIP_RV350)
+                   || (rdev->family == CHIP_RV380))
+                       tmp |= R300_SCLK_FORCE_VAP;
+               if (rdev->family == CHIP_R420)
+                       tmp |= R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX;
+               WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+       } else if (rdev->family < CHIP_R600) {
+               tmp = RREG32_PLL(AVIVO_CP_DYN_CNTL);
+               tmp |= AVIVO_CP_FORCEON;
+               WREG32_PLL(AVIVO_CP_DYN_CNTL, tmp);
+
+               tmp = RREG32_PLL(AVIVO_E2_DYN_CNTL);
+               tmp |= AVIVO_E2_FORCEON;
+               WREG32_PLL(AVIVO_E2_DYN_CNTL, tmp);
+
+               tmp = RREG32_PLL(AVIVO_IDCT_DYN_CNTL);
+               tmp |= AVIVO_IDCT_FORCEON;
+               WREG32_PLL(AVIVO_IDCT_DYN_CNTL, tmp);
+       }
+}
+
+int radeon_static_clocks_init(struct drm_device *dev)
+{
+       struct radeon_device *rdev = dev->dev_private;
+
+       /* XXX make sure engine is idle */
+
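+       /* radeon_dynclks is the "dynclks" module parameter; its default of -1 leaves clock gating untouched */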
+       if (radeon_dynclks != -1) {
+               if (radeon_dynclks)
+                       radeon_set_clock_gating(rdev, 1);
+       }
+       radeon_apply_clock_quirks(rdev);
+       return 0;
+}
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
new file mode 100644 (file)
index 0000000..06e8038
--- /dev/null
@@ -0,0 +1,2481 @@
+/*
+ * Copyright 2004 ATI Technologies Inc., Markham, Ontario
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ */
+#include "drmP.h"
+#include "radeon_drm.h"
+#include "radeon.h"
+#include "atom.h"
+
+#ifdef CONFIG_PPC_PMAC
+/* not sure which of these are needed */
+#include <asm/machdep.h>
+#include <asm/pmac_feature.h>
+#include <asm/prom.h>
+#include <asm/pci-bridge.h>
+#endif /* CONFIG_PPC_PMAC */
+
+/* from radeon_encoder.c */
+extern uint32_t
+radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device,
+                     uint8_t dac);
+extern void radeon_link_encoder_connector(struct drm_device *dev);
+
+/* from radeon_connector.c */
+extern void
+radeon_add_legacy_connector(struct drm_device *dev,
+                           uint32_t connector_id,
+                           uint32_t supported_device,
+                           int connector_type,
+                           struct radeon_i2c_bus_rec *i2c_bus);
+
+/* from radeon_legacy_encoder.c */
+extern void
+radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id,
+                         uint32_t supported_device);
+
+/* old legacy ATI BIOS routines */
+
+/* COMBIOS table offsets */
+enum radeon_combios_table_offset {
+       /* absolute offset tables */
+       COMBIOS_ASIC_INIT_1_TABLE,
+       COMBIOS_BIOS_SUPPORT_TABLE,
+       COMBIOS_DAC_PROGRAMMING_TABLE,
+       COMBIOS_MAX_COLOR_DEPTH_TABLE,
+       COMBIOS_CRTC_INFO_TABLE,
+       COMBIOS_PLL_INFO_TABLE,
+       COMBIOS_TV_INFO_TABLE,
+       COMBIOS_DFP_INFO_TABLE,
+       COMBIOS_HW_CONFIG_INFO_TABLE,
+       COMBIOS_MULTIMEDIA_INFO_TABLE,
+       COMBIOS_TV_STD_PATCH_TABLE,
+       COMBIOS_LCD_INFO_TABLE,
+       COMBIOS_MOBILE_INFO_TABLE,
+       COMBIOS_PLL_INIT_TABLE,
+       COMBIOS_MEM_CONFIG_TABLE,
+       COMBIOS_SAVE_MASK_TABLE,
+       COMBIOS_HARDCODED_EDID_TABLE,
+       COMBIOS_ASIC_INIT_2_TABLE,
+       COMBIOS_CONNECTOR_INFO_TABLE,
+       COMBIOS_DYN_CLK_1_TABLE,
+       COMBIOS_RESERVED_MEM_TABLE,
+       COMBIOS_EXT_TMDS_INFO_TABLE,
+       COMBIOS_MEM_CLK_INFO_TABLE,
+       COMBIOS_EXT_DAC_INFO_TABLE,
+       COMBIOS_MISC_INFO_TABLE,
+       COMBIOS_CRT_INFO_TABLE,
+       COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE,
+       COMBIOS_COMPONENT_VIDEO_INFO_TABLE,
+       COMBIOS_FAN_SPEED_INFO_TABLE,
+       COMBIOS_OVERDRIVE_INFO_TABLE,
+       COMBIOS_OEM_INFO_TABLE,
+       COMBIOS_DYN_CLK_2_TABLE,
+       COMBIOS_POWER_CONNECTOR_INFO_TABLE,
+       COMBIOS_I2C_INFO_TABLE,
+       /* relative offset tables */
+       COMBIOS_ASIC_INIT_3_TABLE,      /* offset from misc info */
+       COMBIOS_ASIC_INIT_4_TABLE,      /* offset from misc info */
+       COMBIOS_DETECTED_MEM_TABLE,     /* offset from misc info */
+       COMBIOS_ASIC_INIT_5_TABLE,      /* offset from misc info */
+       COMBIOS_RAM_RESET_TABLE,        /* offset from mem config */
+       COMBIOS_POWERPLAY_INFO_TABLE,   /* offset from mobile info */
+       COMBIOS_GPIO_INFO_TABLE,        /* offset from mobile info */
+       COMBIOS_LCD_DDC_INFO_TABLE,     /* offset from mobile info */
+       COMBIOS_TMDS_POWER_TABLE,       /* offset from mobile info */
+       COMBIOS_TMDS_POWER_ON_TABLE,    /* offset from tmds power */
+       COMBIOS_TMDS_POWER_OFF_TABLE,   /* offset from tmds power */
+};
+
+enum radeon_combios_ddc {
+       DDC_NONE_DETECTED,
+       DDC_MONID,
+       DDC_DVI,
+       DDC_VGA,
+       DDC_CRT2,
+       DDC_LCD,
+       DDC_GPIO,
+};
+
+enum radeon_combios_connector {
+       CONNECTOR_NONE_LEGACY,
+       CONNECTOR_PROPRIETARY_LEGACY,
+       CONNECTOR_CRT_LEGACY,
+       CONNECTOR_DVI_I_LEGACY,
+       CONNECTOR_DVI_D_LEGACY,
+       CONNECTOR_CTV_LEGACY,
+       CONNECTOR_STV_LEGACY,
+       CONNECTOR_UNSUPPORTED_LEGACY
+};
+
+const int legacy_connector_convert[] = {
+       DRM_MODE_CONNECTOR_Unknown,
+       DRM_MODE_CONNECTOR_DVID,
+       DRM_MODE_CONNECTOR_VGA,
+       DRM_MODE_CONNECTOR_DVII,
+       DRM_MODE_CONNECTOR_DVID,
+       DRM_MODE_CONNECTOR_Composite,
+       DRM_MODE_CONNECTOR_SVIDEO,
+       DRM_MODE_CONNECTOR_Unknown,
+};
+
+static uint16_t combios_get_table_offset(struct drm_device *dev,
+                                        enum radeon_combios_table_offset table)
+{
+       struct radeon_device *rdev = dev->dev_private;
+       int rev;
+       uint16_t offset = 0, check_offset;
+
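+       /* each table pointer is read from a fixed offset in the COMBIOS header; a zero entry means the table is not present */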
+       switch (table) {
+               /* absolute offset tables */
+       case COMBIOS_ASIC_INIT_1_TABLE:
+               check_offset = RBIOS16(rdev->bios_header_start + 0xc);
+               if (check_offset)
+                       offset = check_offset;
+               break;
+       case COMBIOS_BIOS_SUPPORT_TABLE:
+               check_offset = RBIOS16(rdev->bios_header_start + 0x14);
+               if (check_offset)
+                       offset = check_offset;
+               break;
+       case COMBIOS_DAC_PROGRAMMING_TABLE:
+               check_offset = RBIOS16(rdev->bios_header_start + 0x2a);
+               if (check_offset)
+                       offset = check_offset;
+               break;
+       case COMBIOS_MAX_COLOR_DEPTH_TABLE:
+               check_offset = RBIOS16(rdev->bios_header_start + 0x2c);
+               if (check_offset)
+                       offset = check_offset;
+               break;
+       case COMBIOS_CRTC_INFO_TABLE:
+               check_offset = RBIOS16(rdev->bios_header_start + 0x2e);
+               if (check_offset)
+                       offset = check_offset;
+               break;
+       case COMBIOS_PLL_INFO_TABLE:
+               check_offset = RBIOS16(rdev->bios_header_start + 0x30);
+               if (check_offset)
+                       offset = check_offset;
+               break;
+       case COMBIOS_TV_INFO_TABLE:
+               check_offset = RBIOS16(rdev->bios_header_start + 0x32);
+               if (check_offset)
+                       offset = check_offset;
+               break;
+       case COMBIOS_DFP_INFO_TABLE:
+               check_offset = RBIOS16(rdev->bios_header_start + 0x34);
+               if (check_offset)
+                       offset = check_offset;
+               break;
+       case COMBIOS_HW_CONFIG_INFO_TABLE:
+               check_offset = RBIOS16(rdev->bios_header_start + 0x36);
+               if (check_offset)
+                       offset = check_offset;
+               break;
+       case COMBIOS_MULTIMEDIA_INFO_TABLE:
+               check_offset = RBIOS16(rdev->bios_header_start + 0x38);
+               if (check_offset)
+                       offset = check_offset;
+               break;
+       case COMBIOS_TV_STD_PATCH_TABLE:
+               check_offset = RBIOS16(rdev->bios_header_start + 0x3e);
+               if (check_offset)
+                       offset = check_offset;
+               break;
+       case COMBIOS_LCD_INFO_TABLE:
+               check_offset = RBIOS16(rdev->bios_header_start + 0x40);
+               if (check_offset)
+                       offset = check_offset;
+               break;
+       case COMBIOS_MOBILE_INFO_TABLE:
+               check_offset = RBIOS16(rdev->bios_header_start + 0x42);
+               if (check_offset)
+                       offset = check_offset;
+               break;
+       case COMBIOS_PLL_INIT_TABLE:
+               check_offset = RBIOS16(rdev->bios_header_start + 0x46);
+               if (check_offset)
+                       offset = check_offset;
+               break;
+       case COMBIOS_MEM_CONFIG_TABLE:
+               check_offset = RBIOS16(rdev->bios_header_start + 0x48);
+               if (check_offset)
+                       offset = check_offset;
+               break;
+       case COMBIOS_SAVE_MASK_TABLE:
+               check_offset = RBIOS16(rdev->bios_header_start + 0x4a);
+               if (check_offset)
+                       offset = check_offset;
+               break;
+       case COMBIOS_HARDCODED_EDID_TABLE:
+               check_offset = RBIOS16(rdev->bios_header_start + 0x4c);
+               if (check_offset)
+                       offset = check_offset;
+               break;
+       case COMBIOS_ASIC_INIT_2_TABLE:
+               check_offset = RBIOS16(rdev->bios_header_start + 0x4e);
+               if (check_offset)
+                       offset = check_offset;
+               break;
+       case COMBIOS_CONNECTOR_INFO_TABLE:
+               check_offset = RBIOS16(rdev->bios_header_start + 0x50);
+               if (check_offset)
+                       offset = check_offset;
+               break;
+       case COMBIOS_DYN_CLK_1_TABLE:
+               check_offset = RBIOS16(rdev->bios_header_start + 0x52);
+               if (check_offset)
+                       offset = check_offset;
+               break;
+       case COMBIOS_RESERVED_MEM_TABLE:
+               check_offset = RBIOS16(rdev->bios_header_start + 0x54);
+               if (check_offset)
+                       offset = check_offset;
+               break;
+       case COMBIOS_EXT_TMDS_INFO_TABLE:
+               check_offset = RBIOS16(rdev->bios_header_start + 0x58);
+               if (check_offset)
+                       offset = check_offset;
+               break;
+       case COMBIOS_MEM_CLK_INFO_TABLE:
+               check_offset = RBIOS16(rdev->bios_header_start + 0x5a);
+               if (check_offset)
+                       offset = check_offset;
+               break;
+       case COMBIOS_EXT_DAC_INFO_TABLE:
+               check_offset = RBIOS16(rdev->bios_header_start + 0x5c);
+               if (check_offset)
+                       offset = check_offset;
+               break;
+       case COMBIOS_MISC_INFO_TABLE:
+               check_offset = RBIOS16(rdev->bios_header_start + 0x5e);
+               if (check_offset)
+                       offset = check_offset;
+               break;
+       case COMBIOS_CRT_INFO_TABLE:
+               check_offset = RBIOS16(rdev->bios_header_start + 0x60);
+               if (check_offset)
+                       offset = check_offset;
+               break;
+       case COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE:
+               check_offset = RBIOS16(rdev->bios_header_start + 0x62);
+               if (check_offset)
+                       offset = check_offset;
+               break;
+       case COMBIOS_COMPONENT_VIDEO_INFO_TABLE:
+               check_offset = RBIOS16(rdev->bios_header_start + 0x64);
+               if (check_offset)
+                       offset = check_offset;
+               break;
+       case COMBIOS_FAN_SPEED_INFO_TABLE:
+               check_offset = RBIOS16(rdev->bios_header_start + 0x66);
+               if (check_offset)
+                       offset = check_offset;
+               break;
+       case COMBIOS_OVERDRIVE_INFO_TABLE:
+               check_offset = RBIOS16(rdev->bios_header_start + 0x68);
+               if (check_offset)
+                       offset = check_offset;
+               break;
+       case COMBIOS_OEM_INFO_TABLE:
+               check_offset = RBIOS16(rdev->bios_header_start + 0x6a);
+               if (check_offset)
+                       offset = check_offset;
+               break;
+       case COMBIOS_DYN_CLK_2_TABLE:
+               check_offset = RBIOS16(rdev->bios_header_start + 0x6c);
+               if (check_offset)
+                       offset = check_offset;
+               break;
+       case COMBIOS_POWER_CONNECTOR_INFO_TABLE:
+               check_offset = RBIOS16(rdev->bios_header_start + 0x6e);
+               if (check_offset)
+                       offset = check_offset;
+               break;
+       case COMBIOS_I2C_INFO_TABLE:
+               check_offset = RBIOS16(rdev->bios_header_start + 0x70);
+               if (check_offset)
+                       offset = check_offset;
+               break;
+               /* relative offset tables */
+       case COMBIOS_ASIC_INIT_3_TABLE: /* offset from misc info */
+               check_offset =
+                   combios_get_table_offset(dev, COMBIOS_MISC_INFO_TABLE);
+               if (check_offset) {
+                       rev = RBIOS8(check_offset);
+                       if (rev > 0) {
+                               check_offset = RBIOS16(check_offset + 0x3);
+                               if (check_offset)
+                                       offset = check_offset;
+                       }
+               }
+               break;
+       case COMBIOS_ASIC_INIT_4_TABLE: /* offset from misc info */
+               check_offset =
+                   combios_get_table_offset(dev, COMBIOS_MISC_INFO_TABLE);
+               if (check_offset) {
+                       rev = RBIOS8(check_offset);
+                       if (rev > 0) {
+                               check_offset = RBIOS16(check_offset + 0x5);
+                               if (check_offset)
+                                       offset = check_offset;
+                       }
+               }
+               break;
+       case COMBIOS_DETECTED_MEM_TABLE:        /* offset from misc info */
+               check_offset =
+                   combios_get_table_offset(dev, COMBIOS_MISC_INFO_TABLE);
+               if (check_offset) {
+                       rev = RBIOS8(check_offset);
+                       if (rev > 0) {
+                               check_offset = RBIOS16(check_offset + 0x7);
+                               if (check_offset)
+                                       offset = check_offset;
+                       }
+               }
+               break;
+       case COMBIOS_ASIC_INIT_5_TABLE: /* offset from misc info */
+               check_offset =
+                   combios_get_table_offset(dev, COMBIOS_MISC_INFO_TABLE);
+               if (check_offset) {
+                       rev = RBIOS8(check_offset);
+                       if (rev == 2) {
+                               check_offset = RBIOS16(check_offset + 0x9);
+                               if (check_offset)
+                                       offset = check_offset;
+                       }
+               }
+               break;
+       case COMBIOS_RAM_RESET_TABLE:   /* offset from mem config */
+               check_offset =
+                   combios_get_table_offset(dev, COMBIOS_MEM_CONFIG_TABLE);
+               if (check_offset) {
+                       while (RBIOS8(check_offset++));
+                       check_offset += 2;
+                       if (check_offset)
+                               offset = check_offset;
+               }
+               break;
+       case COMBIOS_POWERPLAY_INFO_TABLE:      /* offset from mobile info */
+               check_offset =
+                   combios_get_table_offset(dev, COMBIOS_MOBILE_INFO_TABLE);
+               if (check_offset) {
+                       check_offset = RBIOS16(check_offset + 0x11);
+                       if (check_offset)
+                               offset = check_offset;
+               }
+               break;
+       case COMBIOS_GPIO_INFO_TABLE:   /* offset from mobile info */
+               check_offset =
+                   combios_get_table_offset(dev, COMBIOS_MOBILE_INFO_TABLE);
+               if (check_offset) {
+                       check_offset = RBIOS16(check_offset + 0x13);
+                       if (check_offset)
+                               offset = check_offset;
+               }
+               break;
+       case COMBIOS_LCD_DDC_INFO_TABLE:        /* offset from mobile info */
+               check_offset =
+                   combios_get_table_offset(dev, COMBIOS_MOBILE_INFO_TABLE);
+               if (check_offset) {
+                       check_offset = RBIOS16(check_offset + 0x15);
+                       if (check_offset)
+                               offset = check_offset;
+               }
+               break;
+       case COMBIOS_TMDS_POWER_TABLE:  /* offset from mobile info */
+               check_offset =
+                   combios_get_table_offset(dev, COMBIOS_MOBILE_INFO_TABLE);
+               if (check_offset) {
+                       check_offset = RBIOS16(check_offset + 0x17);
+                       if (check_offset)
+                               offset = check_offset;
+               }
+               break;
+       case COMBIOS_TMDS_POWER_ON_TABLE:       /* offset from tmds power */
+               check_offset =
+                   combios_get_table_offset(dev, COMBIOS_TMDS_POWER_TABLE);
+               if (check_offset) {
+                       check_offset = RBIOS16(check_offset + 0x2);
+                       if (check_offset)
+                               offset = check_offset;
+               }
+               break;
+       case COMBIOS_TMDS_POWER_OFF_TABLE:      /* offset from tmds power */
+               check_offset =
+                   combios_get_table_offset(dev, COMBIOS_TMDS_POWER_TABLE);
+               if (check_offset) {
+                       check_offset = RBIOS16(check_offset + 0x4);
+                       if (check_offset)
+                               offset = check_offset;
+               }
+               break;
+       default:
+               break;
+       }
+
+       return offset;
+}
+
+struct radeon_i2c_bus_rec combios_setup_i2c_bus(int ddc_line)
+{
+       struct radeon_i2c_bus_rec i2c;
+
+       i2c.mask_clk_mask = RADEON_GPIO_EN_1;
+       i2c.mask_data_mask = RADEON_GPIO_EN_0;
+       i2c.a_clk_mask = RADEON_GPIO_A_1;
+       i2c.a_data_mask = RADEON_GPIO_A_0;
+       i2c.put_clk_mask = RADEON_GPIO_EN_1;
+       i2c.put_data_mask = RADEON_GPIO_EN_0;
+       i2c.get_clk_mask = RADEON_GPIO_Y_1;
+       i2c.get_data_mask = RADEON_GPIO_Y_0;
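+       /* for these GPIO pads the readback (get) register sits 4 bytes after the enable register */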
+       if ((ddc_line == RADEON_LCD_GPIO_MASK) ||
+           (ddc_line == RADEON_MDGPIO_EN_REG)) {
+               i2c.mask_clk_reg = ddc_line;
+               i2c.mask_data_reg = ddc_line;
+               i2c.a_clk_reg = ddc_line;
+               i2c.a_data_reg = ddc_line;
+               i2c.put_clk_reg = ddc_line;
+               i2c.put_data_reg = ddc_line;
+               i2c.get_clk_reg = ddc_line + 4;
+               i2c.get_data_reg = ddc_line + 4;
+       } else {
+               i2c.mask_clk_reg = ddc_line;
+               i2c.mask_data_reg = ddc_line;
+               i2c.a_clk_reg = ddc_line;
+               i2c.a_data_reg = ddc_line;
+               i2c.put_clk_reg = ddc_line;
+               i2c.put_data_reg = ddc_line;
+               i2c.get_clk_reg = ddc_line;
+               i2c.get_data_reg = ddc_line;
+       }
+
+       if (ddc_line)
+               i2c.valid = true;
+       else
+               i2c.valid = false;
+
+       return i2c;
+}
+
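+/*
+ * Parse the COMBIOS PLL info table: pixel (P1/P2), system and memory
+ * PLL reference and output limits plus the default engine and memory
+ * clocks.  Older table revisions do not carry input limits, so the
+ * conservative 40..500 defaults are used instead.
+ */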
+bool radeon_combios_get_clock_info(struct drm_device *dev)
+{
+       struct radeon_device *rdev = dev->dev_private;
+       uint16_t pll_info;
+       struct radeon_pll *p1pll = &rdev->clock.p1pll;
+       struct radeon_pll *p2pll = &rdev->clock.p2pll;
+       struct radeon_pll *spll = &rdev->clock.spll;
+       struct radeon_pll *mpll = &rdev->clock.mpll;
+       int8_t rev;
+       uint16_t sclk, mclk;
+
+       if (rdev->bios == NULL)
+               return false;
+
+       pll_info = combios_get_table_offset(dev, COMBIOS_PLL_INFO_TABLE);
+       if (pll_info) {
+               rev = RBIOS8(pll_info);
+
+               /* pixel clocks */
+               p1pll->reference_freq = RBIOS16(pll_info + 0xe);
+               p1pll->reference_div = RBIOS16(pll_info + 0x10);
+               p1pll->pll_out_min = RBIOS32(pll_info + 0x12);
+               p1pll->pll_out_max = RBIOS32(pll_info + 0x16);
+
+               if (rev > 9) {
+                       p1pll->pll_in_min = RBIOS32(pll_info + 0x36);
+                       p1pll->pll_in_max = RBIOS32(pll_info + 0x3a);
+               } else {
+                       p1pll->pll_in_min = 40;
+                       p1pll->pll_in_max = 500;
+               }
+               *p2pll = *p1pll;
+
+               /* system clock */
+               spll->reference_freq = RBIOS16(pll_info + 0x1a);
+               spll->reference_div = RBIOS16(pll_info + 0x1c);
+               spll->pll_out_min = RBIOS32(pll_info + 0x1e);
+               spll->pll_out_max = RBIOS32(pll_info + 0x22);
+
+               if (rev > 10) {
+                       spll->pll_in_min = RBIOS32(pll_info + 0x48);
+                       spll->pll_in_max = RBIOS32(pll_info + 0x4c);
+               } else {
+                       /* ??? */
+                       spll->pll_in_min = 40;
+                       spll->pll_in_max = 500;
+               }
+
+               /* memory clock */
+               mpll->reference_freq = RBIOS16(pll_info + 0x26);
+               mpll->reference_div = RBIOS16(pll_info + 0x28);
+               mpll->pll_out_min = RBIOS32(pll_info + 0x2a);
+               mpll->pll_out_max = RBIOS32(pll_info + 0x2e);
+
+               if (rev > 10) {
+                       mpll->pll_in_min = RBIOS32(pll_info + 0x5a);
+                       mpll->pll_in_max = RBIOS32(pll_info + 0x5e);
+               } else {
+                       /* ??? */
+                       mpll->pll_in_min = 40;
+                       mpll->pll_in_max = 500;
+               }
+
+               /* default sclk/mclk */
+               sclk = RBIOS16(pll_info + 0xa);
+               mclk = RBIOS16(pll_info + 0x8);
+               if (sclk == 0)
+                       sclk = 200 * 100;
+               if (mclk == 0)
+                       mclk = 200 * 100;
+
+               rdev->clock.default_sclk = sclk;
+               rdev->clock.default_mclk = mclk;
+
+               return true;
+       }
+       return false;
+}
+
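+/*
+ * Primary DAC bg/dac adjust values from the COMBIOS CRT info table;
+ * the byte layout depends on the table revision.  Returns NULL if the
+ * BIOS or the table is missing.
+ */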
+struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
+                                                                      radeon_encoder
+                                                                      *encoder)
+{
+       struct drm_device *dev = encoder->base.dev;
+       struct radeon_device *rdev = dev->dev_private;
+       uint16_t dac_info;
+       uint8_t rev, bg, dac;
+       struct radeon_encoder_primary_dac *p_dac = NULL;
+
+       if (rdev->bios == NULL)
+               return NULL;
+
+       /* check CRT table */
+       dac_info = combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE);
+       if (dac_info) {
+               p_dac =
+                   kzalloc(sizeof(struct radeon_encoder_primary_dac),
+                           GFP_KERNEL);
+
+               if (!p_dac)
+                       return NULL;
+
+               rev = RBIOS8(dac_info) & 0x3;
+               if (rev < 2) {
+                       bg = RBIOS8(dac_info + 0x2) & 0xf;
+                       dac = (RBIOS8(dac_info + 0x2) >> 4) & 0xf;
+                       p_dac->ps2_pdac_adj = (bg << 8) | (dac);
+               } else {
+                       bg = RBIOS8(dac_info + 0x2) & 0xf;
+                       dac = RBIOS8(dac_info + 0x3) & 0xf;
+                       p_dac->ps2_pdac_adj = (bg << 8) | (dac);
+               }
+       }
+
+       return p_dac;
+}
+
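+/* Determine the default TV standard from the COMBIOS TV info table. */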
+static enum radeon_tv_std
+radeon_combios_get_tv_info(struct radeon_encoder *encoder)
+{
+       struct drm_device *dev = encoder->base.dev;
+       struct radeon_device *rdev = dev->dev_private;
+       uint16_t tv_info;
+       enum radeon_tv_std tv_std = TV_STD_NTSC;
+
+       tv_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE);
+       if (tv_info) {
+               if (RBIOS8(tv_info + 6) == 'T') {
+                       switch (RBIOS8(tv_info + 7) & 0xf) {
+                       case 1:
+                               tv_std = TV_STD_NTSC;
+                               DRM_INFO("Default TV standard: NTSC\n");
+                               break;
+                       case 2:
+                               tv_std = TV_STD_PAL;
+                               DRM_INFO("Default TV standard: PAL\n");
+                               break;
+                       case 3:
+                               tv_std = TV_STD_PAL_M;
+                               DRM_INFO("Default TV standard: PAL-M\n");
+                               break;
+                       case 4:
+                               tv_std = TV_STD_PAL_60;
+                               DRM_INFO("Default TV standard: PAL-60\n");
+                               break;
+                       case 5:
+                               tv_std = TV_STD_NTSC_J;
+                               DRM_INFO("Default TV standard: NTSC-J\n");
+                               break;
+                       case 6:
+                               tv_std = TV_STD_SCART_PAL;
+                               DRM_INFO("Default TV standard: SCART-PAL\n");
+                               break;
+                       default:
+                               tv_std = TV_STD_NTSC;
+                               DRM_INFO
+                                   ("Unknown TV standard; defaulting to NTSC\n");
+                               break;
+                       }
+
+                       switch ((RBIOS8(tv_info + 9) >> 2) & 0x3) {
+                       case 0:
+                               DRM_INFO("29.498928713 MHz TV ref clk\n");
+                               break;
+                       case 1:
+                               DRM_INFO("28.636360000 MHz TV ref clk\n");
+                               break;
+                       case 2:
+                               DRM_INFO("14.318180000 MHz TV ref clk\n");
+                               break;
+                       case 3:
+                               DRM_INFO("27.000000000 MHz TV ref clk\n");
+                               break;
+                       default:
+                               break;
+                       }
+               }
+       }
+       return tv_std;
+}
+
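+/*
+ * Per-family fallback TV DAC adjust values, used when no usable BIOS
+ * table is available.
+ */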
+static const uint32_t default_tvdac_adj[CHIP_LAST] = {
+       0x00000000,             /* r100  */
+       0x00280000,             /* rv100 */
+       0x00000000,             /* rs100 */
+       0x00880000,             /* rv200 */
+       0x00000000,             /* rs200 */
+       0x00000000,             /* r200  */
+       0x00770000,             /* rv250 */
+       0x00290000,             /* rs300 */
+       0x00560000,             /* rv280 */
+       0x00780000,             /* r300  */
+       0x00770000,             /* r350  */
+       0x00780000,             /* rv350 */
+       0x00780000,             /* rv380 */
+       0x01080000,             /* r420  */
+       0x01080000,             /* r423  */
+       0x01080000,             /* rv410 */
+       0x00780000,             /* rs400 */
+       0x00780000,             /* rs480 */
+};
+
+static struct radeon_encoder_tv_dac
+    *radeon_legacy_get_tv_dac_info_from_table(struct radeon_device *rdev)
+{
+       struct radeon_encoder_tv_dac *tv_dac = NULL;
+
+       tv_dac = kzalloc(sizeof(struct radeon_encoder_tv_dac), GFP_KERNEL);
+
+       if (!tv_dac)
+               return NULL;
+
+       tv_dac->ps2_tvdac_adj = default_tvdac_adj[rdev->family];
+       if ((rdev->flags & RADEON_IS_MOBILITY) && (rdev->family == CHIP_RV250))
+               tv_dac->ps2_tvdac_adj = 0x00880000;
+       tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj;
+       tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj;
+
+       return tv_dac;
+}
+
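+/*
+ * TV DAC adjust values: prefer the TV info table, fall back to the
+ * CRT info table, and finally to the per-family defaults above.
+ */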
+struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
+                                                            radeon_encoder
+                                                            *encoder)
+{
+       struct drm_device *dev = encoder->base.dev;
+       struct radeon_device *rdev = dev->dev_private;
+       uint16_t dac_info;
+       uint8_t rev, bg, dac;
+       struct radeon_encoder_tv_dac *tv_dac = NULL;
+
+       if (rdev->bios == NULL)
+               return radeon_legacy_get_tv_dac_info_from_table(rdev);
+
+       /* first check TV table */
+       dac_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE);
+       if (dac_info) {
+               tv_dac =
+                   kzalloc(sizeof(struct radeon_encoder_tv_dac), GFP_KERNEL);
+
+               if (!tv_dac)
+                       return NULL;
+
+               rev = RBIOS8(dac_info + 0x3);
+               if (rev > 4) {
+                       bg = RBIOS8(dac_info + 0xc) & 0xf;
+                       dac = RBIOS8(dac_info + 0xd) & 0xf;
+                       tv_dac->ps2_tvdac_adj = (bg << 16) | (dac << 20);
+
+                       bg = RBIOS8(dac_info + 0xe) & 0xf;
+                       dac = RBIOS8(dac_info + 0xf) & 0xf;
+                       tv_dac->pal_tvdac_adj = (bg << 16) | (dac << 20);
+
+                       bg = RBIOS8(dac_info + 0x10) & 0xf;
+                       dac = RBIOS8(dac_info + 0x11) & 0xf;
+                       tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
+               } else if (rev > 1) {
+                       bg = RBIOS8(dac_info + 0xc) & 0xf;
+                       dac = (RBIOS8(dac_info + 0xc) >> 4) & 0xf;
+                       tv_dac->ps2_tvdac_adj = (bg << 16) | (dac << 20);
+
+                       bg = RBIOS8(dac_info + 0xd) & 0xf;
+                       dac = (RBIOS8(dac_info + 0xd) >> 4) & 0xf;
+                       tv_dac->pal_tvdac_adj = (bg << 16) | (dac << 20);
+
+                       bg = RBIOS8(dac_info + 0xe) & 0xf;
+                       dac = (RBIOS8(dac_info + 0xe) >> 4) & 0xf;
+                       tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
+               }
+
+               tv_dac->tv_std = radeon_combios_get_tv_info(encoder);
+       } else {
+               /* then check CRT table */
+               dac_info =
+                   combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE);
+               if (dac_info) {
+                       tv_dac =
+                           kzalloc(sizeof(struct radeon_encoder_tv_dac),
+                                   GFP_KERNEL);
+
+                       if (!tv_dac)
+                               return NULL;
+
+                       rev = RBIOS8(dac_info) & 0x3;
+                       if (rev < 2) {
+                               bg = RBIOS8(dac_info + 0x3) & 0xf;
+                               dac = (RBIOS8(dac_info + 0x3) >> 4) & 0xf;
+                               tv_dac->ps2_tvdac_adj =
+                                   (bg << 16) | (dac << 20);
+                               tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj;
+                               tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj;
+                       } else {
+                               bg = RBIOS8(dac_info + 0x4) & 0xf;
+                               dac = RBIOS8(dac_info + 0x5) & 0xf;
+                               tv_dac->ps2_tvdac_adj =
+                                   (bg << 16) | (dac << 20);
+                               tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj;
+                               tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj;
+                       }
+               } else {
+                       DRM_INFO("No TV DAC info found in BIOS\n");
+                       return radeon_legacy_get_tv_dac_info_from_table(rdev);
+               }
+       }
+
+       return tv_dac;
+}
+
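+/*
+ * No usable BIOS image: derive the panel size from the FP stretch
+ * registers and the panel dividers from the currently programmed
+ * PPLL, trusting the dividers only if they look sane.
+ */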
+static struct radeon_encoder_lvds *radeon_legacy_get_lvds_info_from_regs(struct
+                                                                        radeon_device
+                                                                        *rdev)
+{
+       struct radeon_encoder_lvds *lvds = NULL;
+       uint32_t fp_vert_stretch, fp_horz_stretch;
+       uint32_t ppll_div_sel, ppll_val;
+
+       lvds = kzalloc(sizeof(struct radeon_encoder_lvds), GFP_KERNEL);
+
+       if (!lvds)
+               return NULL;
+
+       fp_vert_stretch = RREG32(RADEON_FP_VERT_STRETCH);
+       fp_horz_stretch = RREG32(RADEON_FP_HORZ_STRETCH);
+
+       if (fp_vert_stretch & RADEON_VERT_STRETCH_ENABLE)
+               lvds->native_mode.panel_yres =
+                   ((fp_vert_stretch & RADEON_VERT_PANEL_SIZE) >>
+                    RADEON_VERT_PANEL_SHIFT) + 1;
+       else
+               lvds->native_mode.panel_yres =
+                   (RREG32(RADEON_CRTC_V_TOTAL_DISP) >> 16) + 1;
+
+       if (fp_horz_stretch & RADEON_HORZ_STRETCH_ENABLE)
+               lvds->native_mode.panel_xres =
+                   (((fp_horz_stretch & RADEON_HORZ_PANEL_SIZE) >>
+                     RADEON_HORZ_PANEL_SHIFT) + 1) * 8;
+       else
+               lvds->native_mode.panel_xres =
+                   ((RREG32(RADEON_CRTC_H_TOTAL_DISP) >> 16) + 1) * 8;
+
+       if ((lvds->native_mode.panel_xres < 640) ||
+           (lvds->native_mode.panel_yres < 480)) {
+               lvds->native_mode.panel_xres = 640;
+               lvds->native_mode.panel_yres = 480;
+       }
+
+       ppll_div_sel = RREG8(RADEON_CLOCK_CNTL_INDEX + 1) & 0x3;
+       ppll_val = RREG32_PLL(RADEON_PPLL_DIV_0 + ppll_div_sel);
+       if ((ppll_val & 0x000707ff) == 0x1bb) {
+               lvds->use_bios_dividers = false;
+       } else {
+               lvds->panel_ref_divider =
+                   RREG32_PLL(RADEON_PPLL_REF_DIV) & 0x3ff;
+               lvds->panel_post_divider = (ppll_val >> 16) & 0x7;
+               lvds->panel_fb_divider = ppll_val & 0x7ff;
+
+               if ((lvds->panel_ref_divider != 0) &&
+                   (lvds->panel_fb_divider > 3))
+                       lvds->use_bios_dividers = true;
+       }
+       lvds->panel_vcc_delay = 200;
+
+       DRM_INFO("Panel info derived from registers\n");
+       DRM_INFO("Panel Size %dx%d\n", lvds->native_mode.panel_xres,
+                lvds->native_mode.panel_yres);
+
+       return lvds;
+}
+
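+/*
+ * Panel information (size, power sequencing delays, dividers and
+ * LVDS_GEN_CNTL setup) from the COMBIOS LCD info table, including the
+ * native mode timings when a matching mode entry is present.
+ */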
+struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
+                                                        *encoder)
+{
+       struct drm_device *dev = encoder->base.dev;
+       struct radeon_device *rdev = dev->dev_private;
+       uint16_t lcd_info;
+       uint32_t panel_setup;
+       char stmp[30];
+       int tmp, i;
+       struct radeon_encoder_lvds *lvds = NULL;
+
+       if (rdev->bios == NULL)
+               return radeon_legacy_get_lvds_info_from_regs(rdev);
+
+       lcd_info = combios_get_table_offset(dev, COMBIOS_LCD_INFO_TABLE);
+
+       if (lcd_info) {
+               lvds = kzalloc(sizeof(struct radeon_encoder_lvds), GFP_KERNEL);
+
+               if (!lvds)
+                       return NULL;
+
+               for (i = 0; i < 24; i++)
+                       stmp[i] = RBIOS8(lcd_info + i + 1);
+               stmp[24] = 0;
+
+               DRM_INFO("Panel ID String: %s\n", stmp);
+
+               lvds->native_mode.panel_xres = RBIOS16(lcd_info + 0x19);
+               lvds->native_mode.panel_yres = RBIOS16(lcd_info + 0x1b);
+
+               DRM_INFO("Panel Size %dx%d\n", lvds->native_mode.panel_xres,
+                        lvds->native_mode.panel_yres);
+
+               lvds->panel_vcc_delay = RBIOS16(lcd_info + 0x2c);
+               if (lvds->panel_vcc_delay > 2000 || lvds->panel_vcc_delay < 0)
+                       lvds->panel_vcc_delay = 2000;
+
+               lvds->panel_pwr_delay = RBIOS8(lcd_info + 0x24);
+               lvds->panel_digon_delay = RBIOS16(lcd_info + 0x38) & 0xf;
+               lvds->panel_blon_delay = (RBIOS16(lcd_info + 0x38) >> 4) & 0xf;
+
+               lvds->panel_ref_divider = RBIOS16(lcd_info + 0x2e);
+               lvds->panel_post_divider = RBIOS8(lcd_info + 0x30);
+               lvds->panel_fb_divider = RBIOS16(lcd_info + 0x31);
+               if ((lvds->panel_ref_divider != 0) &&
+                   (lvds->panel_fb_divider > 3))
+                       lvds->use_bios_dividers = true;
+
+               panel_setup = RBIOS32(lcd_info + 0x39);
+               lvds->lvds_gen_cntl = 0xff00;
+               if (panel_setup & 0x1)
+                       lvds->lvds_gen_cntl |= RADEON_LVDS_PANEL_FORMAT;
+
+               if ((panel_setup >> 4) & 0x1)
+                       lvds->lvds_gen_cntl |= RADEON_LVDS_PANEL_TYPE;
+
+               switch ((panel_setup >> 8) & 0x7) {
+               case 0:
+                       lvds->lvds_gen_cntl |= RADEON_LVDS_NO_FM;
+                       break;
+               case 1:
+                       lvds->lvds_gen_cntl |= RADEON_LVDS_2_GREY;
+                       break;
+               case 2:
+                       lvds->lvds_gen_cntl |= RADEON_LVDS_4_GREY;
+                       break;
+               default:
+                       break;
+               }
+
+               if ((panel_setup >> 16) & 0x1)
+                       lvds->lvds_gen_cntl |= RADEON_LVDS_FP_POL_LOW;
+
+               if ((panel_setup >> 17) & 0x1)
+                       lvds->lvds_gen_cntl |= RADEON_LVDS_LP_POL_LOW;
+
+               if ((panel_setup >> 18) & 0x1)
+                       lvds->lvds_gen_cntl |= RADEON_LVDS_DTM_POL_LOW;
+
+               if ((panel_setup >> 23) & 0x1)
+                       lvds->lvds_gen_cntl |= RADEON_LVDS_BL_CLK_SEL;
+
+               lvds->lvds_gen_cntl |= (panel_setup & 0xf0000000);
+
+               for (i = 0; i < 32; i++) {
+                       tmp = RBIOS16(lcd_info + 64 + i * 2);
+                       if (tmp == 0)
+                               break;
+
+                       if ((RBIOS16(tmp) == lvds->native_mode.panel_xres) &&
+                           (RBIOS16(tmp + 2) ==
+                            lvds->native_mode.panel_yres)) {
+                               lvds->native_mode.hblank =
+                                   (RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8;
+                               lvds->native_mode.hoverplus =
+                                   (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) -
+                                    1) * 8;
+                               lvds->native_mode.hsync_width =
+                                   RBIOS8(tmp + 23) * 8;
+
+                               lvds->native_mode.vblank = (RBIOS16(tmp + 24) -
+                                                           RBIOS16(tmp + 26));
+                               lvds->native_mode.voverplus =
+                                   ((RBIOS16(tmp + 28) & 0x7ff) -
+                                    RBIOS16(tmp + 26));
+                               lvds->native_mode.vsync_width =
+                                   ((RBIOS16(tmp + 28) & 0xf800) >> 11);
+                               lvds->native_mode.dotclock =
+                                   RBIOS16(tmp + 9) * 10;
+                               lvds->native_mode.flags = 0;
+                       }
+               }
+               encoder->native_mode = lvds->native_mode;
+       } else {
+               DRM_INFO("No panel info found in BIOS\n");
+               return radeon_legacy_get_lvds_info_from_regs(rdev);
+       }
+       return lvds;
+}
+
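+/*
+ * Per-family fallback TMDS PLL settings ({freq, value} pairs), used
+ * when the BIOS DFP info table is unavailable.
+ */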
+static const struct radeon_tmds_pll default_tmds_pll[CHIP_LAST][4] = {
+       {{12000, 0xa1b}, {0xffffffff, 0xa3f}, {0, 0}, {0, 0}},  /* CHIP_R100  */
+       {{12000, 0xa1b}, {0xffffffff, 0xa3f}, {0, 0}, {0, 0}},  /* CHIP_RV100 */
+       {{0, 0}, {0, 0}, {0, 0}, {0, 0}},       /* CHIP_RS100 */
+       {{15000, 0xa1b}, {0xffffffff, 0xa3f}, {0, 0}, {0, 0}},  /* CHIP_RV200 */
+       {{12000, 0xa1b}, {0xffffffff, 0xa3f}, {0, 0}, {0, 0}},  /* CHIP_RS200 */
+       {{15000, 0xa1b}, {0xffffffff, 0xa3f}, {0, 0}, {0, 0}},  /* CHIP_R200  */
+       {{15500, 0x81b}, {0xffffffff, 0x83f}, {0, 0}, {0, 0}},  /* CHIP_RV250 */
+       {{0, 0}, {0, 0}, {0, 0}, {0, 0}},       /* CHIP_RS300 */
+       {{13000, 0x400f4}, {15000, 0x400f7}, {0xffffffff, 0x40111}, {0, 0}},    /* CHIP_RV280 */
+       {{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}},        /* CHIP_R300  */
+       {{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}},        /* CHIP_R350  */
+       {{15000, 0xb0155}, {0xffffffff, 0xb01cb}, {0, 0}, {0, 0}},      /* CHIP_RV350 */
+       {{15000, 0xb0155}, {0xffffffff, 0xb01cb}, {0, 0}, {0, 0}},      /* CHIP_RV380 */
+       {{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}},        /* CHIP_R420  */
+       {{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}},        /* CHIP_R423  */
+       {{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}},        /* CHIP_RV410 */
+       {{15000, 0xb0155}, {0xffffffff, 0xb01cb}, {0, 0}, {0, 0}},      /* CHIP_RS400 */
+       {{15000, 0xb0155}, {0xffffffff, 0xb01cb}, {0, 0}, {0, 0}},      /* CHIP_RS480 */
+};
+
+static struct radeon_encoder_int_tmds
+    *radeon_legacy_get_tmds_info_from_table(struct radeon_device *rdev)
+{
+       int i;
+       struct radeon_encoder_int_tmds *tmds = NULL;
+
+       tmds = kzalloc(sizeof(struct radeon_encoder_int_tmds), GFP_KERNEL);
+
+       if (!tmds)
+               return NULL;
+
+       for (i = 0; i < 4; i++) {
+               tmds->tmds_pll[i].value =
+                   default_tmds_pll[rdev->family][i].value;
+               tmds->tmds_pll[i].freq = default_tmds_pll[rdev->family][i].freq;
+       }
+
+       return tmds;
+}
+
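+/*
+ * Internal TMDS PLL settings from the COMBIOS DFP info table.
+ * Revision 3 uses a fixed 10-byte stride per entry, revision 4 packs
+ * entries after the first one into 6 bytes each.
+ */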
+struct radeon_encoder_int_tmds *radeon_combios_get_tmds_info(struct
+                                                            radeon_encoder
+                                                            *encoder)
+{
+       struct drm_device *dev = encoder->base.dev;
+       struct radeon_device *rdev = dev->dev_private;
+       uint16_t tmds_info;
+       int i, n;
+       uint8_t ver;
+       struct radeon_encoder_int_tmds *tmds = NULL;
+
+       if (rdev->bios == NULL)
+               return radeon_legacy_get_tmds_info_from_table(rdev);
+
+       tmds_info = combios_get_table_offset(dev, COMBIOS_DFP_INFO_TABLE);
+
+       if (tmds_info) {
+               tmds =
+                   kzalloc(sizeof(struct radeon_encoder_int_tmds), GFP_KERNEL);
+
+               if (!tmds)
+                       return NULL;
+
+               ver = RBIOS8(tmds_info);
+               DRM_INFO("DFP table revision: %d\n", ver);
+               if (ver == 3) {
+                       n = RBIOS8(tmds_info + 5) + 1;
+                       if (n > 4)
+                               n = 4;
+                       for (i = 0; i < n; i++) {
+                               tmds->tmds_pll[i].value =
+                                   RBIOS32(tmds_info + i * 10 + 0x08);
+                               tmds->tmds_pll[i].freq =
+                                   RBIOS16(tmds_info + i * 10 + 0x10);
+                               DRM_DEBUG("TMDS PLL From COMBIOS %u %x\n",
+                                         tmds->tmds_pll[i].freq,
+                                         tmds->tmds_pll[i].value);
+                       }
+               } else if (ver == 4) {
+                       int stride = 0;
+                       n = RBIOS8(tmds_info + 5) + 1;
+                       if (n > 4)
+                               n = 4;
+                       for (i = 0; i < n; i++) {
+                               tmds->tmds_pll[i].value =
+                                   RBIOS32(tmds_info + stride + 0x08);
+                               tmds->tmds_pll[i].freq =
+                                   RBIOS16(tmds_info + stride + 0x10);
+                               if (i == 0)
+                                       stride += 10;
+                               else
+                                       stride += 6;
+                               DRM_DEBUG("TMDS PLL From COMBIOS %u %x\n",
+                                         tmds->tmds_pll[i].freq,
+                                         tmds->tmds_pll[i].value);
+                       }
+               }
+       } else
+               DRM_INFO("No TMDS info found in BIOS\n");
+       return tmds;
+}
+
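+/* The external TMDS info table is only identified for now; parsing is TODO. */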
+void radeon_combios_get_ext_tmds_info(struct radeon_encoder *encoder)
+{
+       struct drm_device *dev = encoder->base.dev;
+       struct radeon_device *rdev = dev->dev_private;
+       uint16_t ext_tmds_info;
+       uint8_t ver;
+
+       if (rdev->bios == NULL)
+               return;
+
+       ext_tmds_info =
+           combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE);
+       if (ext_tmds_info) {
+               ver = RBIOS8(ext_tmds_info);
+               DRM_INFO("External TMDS Table revision: %d\n", ver);
+               /* TODO */
+       }
+}
+
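+/*
+ * Hardcoded connector tables for boards without usable connector info
+ * in the BIOS, selected by machine_is_compatible() on CONFIG_PPC_PMAC
+ * unless radeon_connector_table names a table explicitly; everything
+ * else gets the generic table.
+ */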
+bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
+{
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_i2c_bus_rec ddc_i2c;
+
+       rdev->mode_info.connector_table = radeon_connector_table;
+       if (rdev->mode_info.connector_table == CT_NONE) {
+#ifdef CONFIG_PPC_PMAC
+               if (machine_is_compatible("PowerBook3,3")) {
+                       /* powerbook with VGA */
+                       rdev->mode_info.connector_table = CT_POWERBOOK_VGA;
+               } else if (machine_is_compatible("PowerBook3,4") ||
+                          machine_is_compatible("PowerBook3,5")) {
+                       /* powerbook with internal tmds */
+                       rdev->mode_info.connector_table = CT_POWERBOOK_INTERNAL;
+               } else if (machine_is_compatible("PowerBook5,1") ||
+                          machine_is_compatible("PowerBook5,2") ||
+                          machine_is_compatible("PowerBook5,3") ||
+                          machine_is_compatible("PowerBook5,4") ||
+                          machine_is_compatible("PowerBook5,5")) {
+                       /* powerbook with external single link tmds (sil164) */
+                       rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL;
+               } else if (machine_is_compatible("PowerBook5,6")) {
+                       /* powerbook with external dual or single link tmds */
+                       rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL;
+               } else if (machine_is_compatible("PowerBook5,7") ||
+                          machine_is_compatible("PowerBook5,8") ||
+                          machine_is_compatible("PowerBook5,9")) {
+                       /* PowerBook6,2 ? */
+                       /* powerbook with external dual link tmds (sil1178?) */
+                       rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL;
+               } else if (machine_is_compatible("PowerBook4,1") ||
+                          machine_is_compatible("PowerBook4,2") ||
+                          machine_is_compatible("PowerBook4,3") ||
+                          machine_is_compatible("PowerBook6,3") ||
+                          machine_is_compatible("PowerBook6,5") ||
+                          machine_is_compatible("PowerBook6,7")) {
+                       /* ibook */
+                       rdev->mode_info.connector_table = CT_IBOOK;
+               } else if (machine_is_compatible("PowerMac4,4")) {
+                       /* emac */
+                       rdev->mode_info.connector_table = CT_EMAC;
+               } else if (machine_is_compatible("PowerMac10,1")) {
+                       /* mini with internal tmds */
+                       rdev->mode_info.connector_table = CT_MINI_INTERNAL;
+               } else if (machine_is_compatible("PowerMac10,2")) {
+                       /* mini with external tmds */
+                       rdev->mode_info.connector_table = CT_MINI_EXTERNAL;
+               } else if (machine_is_compatible("PowerMac12,1")) {
+                       /* PowerMac8,1 ? */
+                       /* imac g5 isight */
+                       rdev->mode_info.connector_table = CT_IMAC_G5_ISIGHT;
+               } else
+#endif /* CONFIG_PPC_PMAC */
+                       rdev->mode_info.connector_table = CT_GENERIC;
+       }
+
+       switch (rdev->mode_info.connector_table) {
+       case CT_GENERIC:
+               DRM_INFO("Connector Table: %d (generic)\n",
+                        rdev->mode_info.connector_table);
+               /* these are the most common settings */
+               if (rdev->flags & RADEON_SINGLE_CRTC) {
+                       /* VGA - primary dac */
+                       ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+                       radeon_add_legacy_encoder(dev,
+                                                 radeon_get_encoder_id(dev,
+                                                                       ATOM_DEVICE_CRT1_SUPPORT,
+                                                                       1),
+                                                 ATOM_DEVICE_CRT1_SUPPORT);
+                       radeon_add_legacy_connector(dev, 0,
+                                                   ATOM_DEVICE_CRT1_SUPPORT,
+                                                   DRM_MODE_CONNECTOR_VGA,
+                                                   &ddc_i2c);
+               } else if (rdev->flags & RADEON_IS_MOBILITY) {
+                       /* LVDS */
+                       ddc_i2c = combios_setup_i2c_bus(RADEON_LCD_GPIO_MASK);
+                       radeon_add_legacy_encoder(dev,
+                                                 radeon_get_encoder_id(dev,
+                                                                       ATOM_DEVICE_LCD1_SUPPORT,
+                                                                       0),
+                                                 ATOM_DEVICE_LCD1_SUPPORT);
+                       radeon_add_legacy_connector(dev, 0,
+                                                   ATOM_DEVICE_LCD1_SUPPORT,
+                                                   DRM_MODE_CONNECTOR_LVDS,
+                                                   &ddc_i2c);
+
+                       /* VGA - primary dac */
+                       ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+                       radeon_add_legacy_encoder(dev,
+                                                 radeon_get_encoder_id(dev,
+                                                                       ATOM_DEVICE_CRT1_SUPPORT,
+                                                                       1),
+                                                 ATOM_DEVICE_CRT1_SUPPORT);
+                       radeon_add_legacy_connector(dev, 1,
+                                                   ATOM_DEVICE_CRT1_SUPPORT,
+                                                   DRM_MODE_CONNECTOR_VGA,
+                                                   &ddc_i2c);
+               } else {
+                       /* DVI-I - tv dac, int tmds */
+                       ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
+                       radeon_add_legacy_encoder(dev,
+                                                 radeon_get_encoder_id(dev,
+                                                                       ATOM_DEVICE_DFP1_SUPPORT,
+                                                                       0),
+                                                 ATOM_DEVICE_DFP1_SUPPORT);
+                       radeon_add_legacy_encoder(dev,
+                                                 radeon_get_encoder_id(dev,
+                                                                       ATOM_DEVICE_CRT2_SUPPORT,
+                                                                       2),
+                                                 ATOM_DEVICE_CRT2_SUPPORT);
+                       radeon_add_legacy_connector(dev, 0,
+                                                   ATOM_DEVICE_DFP1_SUPPORT |
+                                                   ATOM_DEVICE_CRT2_SUPPORT,
+                                                   DRM_MODE_CONNECTOR_DVII,
+                                                   &ddc_i2c);
+
+                       /* VGA - primary dac */
+                       ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+                       radeon_add_legacy_encoder(dev,
+                                                 radeon_get_encoder_id(dev,
+                                                                       ATOM_DEVICE_CRT1_SUPPORT,
+                                                                       1),
+                                                 ATOM_DEVICE_CRT1_SUPPORT);
+                       radeon_add_legacy_connector(dev, 1,
+                                                   ATOM_DEVICE_CRT1_SUPPORT,
+                                                   DRM_MODE_CONNECTOR_VGA,
+                                                   &ddc_i2c);
+               }
+
+               if (rdev->family != CHIP_R100 && rdev->family != CHIP_R200) {
+                       /* TV - tv dac */
+                       radeon_add_legacy_encoder(dev,
+                                                 radeon_get_encoder_id(dev,
+                                                                       ATOM_DEVICE_TV1_SUPPORT,
+                                                                       2),
+                                                 ATOM_DEVICE_TV1_SUPPORT);
+                       radeon_add_legacy_connector(dev, 2,
+                                                   ATOM_DEVICE_TV1_SUPPORT,
+                                                   DRM_MODE_CONNECTOR_SVIDEO,
+                                                   &ddc_i2c);
+               }
+               break;
+       case CT_IBOOK:
+               DRM_INFO("Connector Table: %d (ibook)\n",
+                        rdev->mode_info.connector_table);
+               /* LVDS */
+               ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
+               radeon_add_legacy_encoder(dev,
+                                         radeon_get_encoder_id(dev,
+                                                               ATOM_DEVICE_LCD1_SUPPORT,
+                                                               0),
+                                         ATOM_DEVICE_LCD1_SUPPORT);
+               radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
+                                           DRM_MODE_CONNECTOR_LVDS, &ddc_i2c);
+               /* VGA - TV DAC */
+               ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+               radeon_add_legacy_encoder(dev,
+                                         radeon_get_encoder_id(dev,
+                                                               ATOM_DEVICE_CRT2_SUPPORT,
+                                                               2),
+                                         ATOM_DEVICE_CRT2_SUPPORT);
+               radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
+                                           DRM_MODE_CONNECTOR_VGA, &ddc_i2c);
+               /* TV - TV DAC */
+               radeon_add_legacy_encoder(dev,
+                                         radeon_get_encoder_id(dev,
+                                                               ATOM_DEVICE_TV1_SUPPORT,
+                                                               2),
+                                         ATOM_DEVICE_TV1_SUPPORT);
+               radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
+                                           DRM_MODE_CONNECTOR_SVIDEO,
+                                           &ddc_i2c);
+               break;
+       case CT_POWERBOOK_EXTERNAL:
+               DRM_INFO("Connector Table: %d (powerbook external tmds)\n",
+                        rdev->mode_info.connector_table);
+               /* LVDS */
+               ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
+               radeon_add_legacy_encoder(dev,
+                                         radeon_get_encoder_id(dev,
+                                                               ATOM_DEVICE_LCD1_SUPPORT,
+                                                               0),
+                                         ATOM_DEVICE_LCD1_SUPPORT);
+               radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
+                                           DRM_MODE_CONNECTOR_LVDS, &ddc_i2c);
+               /* DVI-I - primary dac, ext tmds */
+               ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+               radeon_add_legacy_encoder(dev,
+                                         radeon_get_encoder_id(dev,
+                                                               ATOM_DEVICE_DFP2_SUPPORT,
+                                                               0),
+                                         ATOM_DEVICE_DFP2_SUPPORT);
+               radeon_add_legacy_encoder(dev,
+                                         radeon_get_encoder_id(dev,
+                                                               ATOM_DEVICE_CRT1_SUPPORT,
+                                                               1),
+                                         ATOM_DEVICE_CRT1_SUPPORT);
+               radeon_add_legacy_connector(dev, 1,
+                                           ATOM_DEVICE_DFP2_SUPPORT |
+                                           ATOM_DEVICE_CRT1_SUPPORT,
+                                           DRM_MODE_CONNECTOR_DVII, &ddc_i2c);
+               /* TV - TV DAC */
+               radeon_add_legacy_encoder(dev,
+                                         radeon_get_encoder_id(dev,
+                                                               ATOM_DEVICE_TV1_SUPPORT,
+                                                               2),
+                                         ATOM_DEVICE_TV1_SUPPORT);
+               radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
+                                           DRM_MODE_CONNECTOR_SVIDEO,
+                                           &ddc_i2c);
+               break;
+       case CT_POWERBOOK_INTERNAL:
+               DRM_INFO("Connector Table: %d (powerbook internal tmds)\n",
+                        rdev->mode_info.connector_table);
+               /* LVDS */
+               ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
+               radeon_add_legacy_encoder(dev,
+                                         radeon_get_encoder_id(dev,
+                                                               ATOM_DEVICE_LCD1_SUPPORT,
+                                                               0),
+                                         ATOM_DEVICE_LCD1_SUPPORT);
+               radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
+                                           DRM_MODE_CONNECTOR_LVDS, &ddc_i2c);
+               /* DVI-I - primary dac, int tmds */
+               ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+               radeon_add_legacy_encoder(dev,
+                                         radeon_get_encoder_id(dev,
+                                                               ATOM_DEVICE_DFP1_SUPPORT,
+                                                               0),
+                                         ATOM_DEVICE_DFP1_SUPPORT);
+               radeon_add_legacy_encoder(dev,
+                                         radeon_get_encoder_id(dev,
+                                                               ATOM_DEVICE_CRT1_SUPPORT,
+                                                               1),
+                                         ATOM_DEVICE_CRT1_SUPPORT);
+               radeon_add_legacy_connector(dev, 1,
+                                           ATOM_DEVICE_DFP1_SUPPORT |
+                                           ATOM_DEVICE_CRT1_SUPPORT,
+                                           DRM_MODE_CONNECTOR_DVII, &ddc_i2c);
+               /* TV - TV DAC */
+               radeon_add_legacy_encoder(dev,
+                                         radeon_get_encoder_id(dev,
+                                                               ATOM_DEVICE_TV1_SUPPORT,
+                                                               2),
+                                         ATOM_DEVICE_TV1_SUPPORT);
+               radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
+                                           DRM_MODE_CONNECTOR_SVIDEO,
+                                           &ddc_i2c);
+               break;
+       case CT_POWERBOOK_VGA:
+               DRM_INFO("Connector Table: %d (powerbook vga)\n",
+                        rdev->mode_info.connector_table);
+               /* LVDS */
+               ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
+               radeon_add_legacy_encoder(dev,
+                                         radeon_get_encoder_id(dev,
+                                                               ATOM_DEVICE_LCD1_SUPPORT,
+                                                               0),
+                                         ATOM_DEVICE_LCD1_SUPPORT);
+               radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
+                                           DRM_MODE_CONNECTOR_LVDS, &ddc_i2c);
+               /* VGA - primary dac */
+               ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+               radeon_add_legacy_encoder(dev,
+                                         radeon_get_encoder_id(dev,
+                                                               ATOM_DEVICE_CRT1_SUPPORT,
+                                                               1),
+                                         ATOM_DEVICE_CRT1_SUPPORT);
+               radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT1_SUPPORT,
+                                           DRM_MODE_CONNECTOR_VGA, &ddc_i2c);
+               /* TV - TV DAC */
+               radeon_add_legacy_encoder(dev,
+                                         radeon_get_encoder_id(dev,
+                                                               ATOM_DEVICE_TV1_SUPPORT,
+                                                               2),
+                                         ATOM_DEVICE_TV1_SUPPORT);
+               radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
+                                           DRM_MODE_CONNECTOR_SVIDEO,
+                                           &ddc_i2c);
+               break;
+       case CT_MINI_EXTERNAL:
+               DRM_INFO("Connector Table: %d (mini external tmds)\n",
+                        rdev->mode_info.connector_table);
+               /* DVI-I - tv dac, ext tmds */
+               ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC);
+               radeon_add_legacy_encoder(dev,
+                                         radeon_get_encoder_id(dev,
+                                                               ATOM_DEVICE_DFP2_SUPPORT,
+                                                               0),
+                                         ATOM_DEVICE_DFP2_SUPPORT);
+               radeon_add_legacy_encoder(dev,
+                                         radeon_get_encoder_id(dev,
+                                                               ATOM_DEVICE_CRT2_SUPPORT,
+                                                               2),
+                                         ATOM_DEVICE_CRT2_SUPPORT);
+               radeon_add_legacy_connector(dev, 0,
+                                           ATOM_DEVICE_DFP2_SUPPORT |
+                                           ATOM_DEVICE_CRT2_SUPPORT,
+                                           DRM_MODE_CONNECTOR_DVII, &ddc_i2c);
+               /* TV - TV DAC */
+               radeon_add_legacy_encoder(dev,
+                                         radeon_get_encoder_id(dev,
+                                                               ATOM_DEVICE_TV1_SUPPORT,
+                                                               2),
+                                         ATOM_DEVICE_TV1_SUPPORT);
+               radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_TV1_SUPPORT,
+                                           DRM_MODE_CONNECTOR_SVIDEO,
+                                           &ddc_i2c);
+               break;
+       case CT_MINI_INTERNAL:
+               DRM_INFO("Connector Table: %d (mini internal tmds)\n",
+                        rdev->mode_info.connector_table);
+               /* DVI-I - tv dac, int tmds */
+               ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC);
+               radeon_add_legacy_encoder(dev,
+                                         radeon_get_encoder_id(dev,
+                                                               ATOM_DEVICE_DFP1_SUPPORT,
+                                                               0),
+                                         ATOM_DEVICE_DFP1_SUPPORT);
+               radeon_add_legacy_encoder(dev,
+                                         radeon_get_encoder_id(dev,
+                                                               ATOM_DEVICE_CRT2_SUPPORT,
+                                                               2),
+                                         ATOM_DEVICE_CRT2_SUPPORT);
+               radeon_add_legacy_connector(dev, 0,
+                                           ATOM_DEVICE_DFP1_SUPPORT |
+                                           ATOM_DEVICE_CRT2_SUPPORT,
+                                           DRM_MODE_CONNECTOR_DVII, &ddc_i2c);
+               /* TV - TV DAC */
+               radeon_add_legacy_encoder(dev,
+                                         radeon_get_encoder_id(dev,
+                                                               ATOM_DEVICE_TV1_SUPPORT,
+                                                               2),
+                                         ATOM_DEVICE_TV1_SUPPORT);
+               radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_TV1_SUPPORT,
+                                           DRM_MODE_CONNECTOR_SVIDEO,
+                                           &ddc_i2c);
+               break;
+       case CT_IMAC_G5_ISIGHT:
+               DRM_INFO("Connector Table: %d (imac g5 isight)\n",
+                        rdev->mode_info.connector_table);
+               /* DVI-D - int tmds */
+               ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_MONID);
+               radeon_add_legacy_encoder(dev,
+                                         radeon_get_encoder_id(dev,
+                                                               ATOM_DEVICE_DFP1_SUPPORT,
+                                                               0),
+                                         ATOM_DEVICE_DFP1_SUPPORT);
+               radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_DFP1_SUPPORT,
+                                           DRM_MODE_CONNECTOR_DVID, &ddc_i2c);
+               /* VGA - tv dac */
+               ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
+               radeon_add_legacy_encoder(dev,
+                                         radeon_get_encoder_id(dev,
+                                                               ATOM_DEVICE_CRT2_SUPPORT,
+                                                               2),
+                                         ATOM_DEVICE_CRT2_SUPPORT);
+               radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
+                                           DRM_MODE_CONNECTOR_VGA, &ddc_i2c);
+               /* TV - TV DAC */
+               radeon_add_legacy_encoder(dev,
+                                         radeon_get_encoder_id(dev,
+                                                               ATOM_DEVICE_TV1_SUPPORT,
+                                                               2),
+                                         ATOM_DEVICE_TV1_SUPPORT);
+               radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
+                                           DRM_MODE_CONNECTOR_SVIDEO,
+                                           &ddc_i2c);
+               break;
+       case CT_EMAC:
+               DRM_INFO("Connector Table: %d (emac)\n",
+                        rdev->mode_info.connector_table);
+               /* VGA - primary dac */
+               ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+               radeon_add_legacy_encoder(dev,
+                                         radeon_get_encoder_id(dev,
+                                                               ATOM_DEVICE_CRT1_SUPPORT,
+                                                               1),
+                                         ATOM_DEVICE_CRT1_SUPPORT);
+               radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_CRT1_SUPPORT,
+                                           DRM_MODE_CONNECTOR_VGA, &ddc_i2c);
+               /* VGA - tv dac */
+               ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC);
+               radeon_add_legacy_encoder(dev,
+                                         radeon_get_encoder_id(dev,
+                                                               ATOM_DEVICE_CRT2_SUPPORT,
+                                                               2),
+                                         ATOM_DEVICE_CRT2_SUPPORT);
+               radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
+                                           DRM_MODE_CONNECTOR_VGA, &ddc_i2c);
+               /* TV - TV DAC */
+               radeon_add_legacy_encoder(dev,
+                                         radeon_get_encoder_id(dev,
+                                                               ATOM_DEVICE_TV1_SUPPORT,
+                                                               2),
+                                         ATOM_DEVICE_TV1_SUPPORT);
+               radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
+                                           DRM_MODE_CONNECTOR_SVIDEO,
+                                           &ddc_i2c);
+               break;
+       default:
+               DRM_INFO("Connector Table: %d (invalid)\n",
+                        rdev->mode_info.connector_table);
+               return false;
+       }
+
+       radeon_link_encoder_connector(dev);
+
+       return true;
+}
+
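+/*
+ * Fix up known-bad BIOS connector entries: XPRESS (RS400/RS480) DDC
+ * line remapping, plus bogus connectors on specific RN50, RV100 and
+ * X300 boards.  Returns false for entries that should be ignored.
+ */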
+static bool radeon_apply_legacy_quirks(struct drm_device *dev,
+                                      int bios_index,
+                                      enum radeon_combios_connector
+                                      *legacy_connector,
+                                      struct radeon_i2c_bus_rec *ddc_i2c)
+{
+       struct radeon_device *rdev = dev->dev_private;
+
+       /* XPRESS DDC quirks */
+       if ((rdev->family == CHIP_RS400 ||
+            rdev->family == CHIP_RS480) &&
+           ddc_i2c->mask_clk_reg == RADEON_GPIO_CRT2_DDC)
+               *ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_MONID);
+       else if ((rdev->family == CHIP_RS400 ||
+                 rdev->family == CHIP_RS480) &&
+                ddc_i2c->mask_clk_reg == RADEON_GPIO_MONID) {
+               ddc_i2c->valid = true;
+               ddc_i2c->mask_clk_mask = (0x20 << 8);
+               ddc_i2c->mask_data_mask = 0x80;
+               ddc_i2c->a_clk_mask = (0x20 << 8);
+               ddc_i2c->a_data_mask = 0x80;
+               ddc_i2c->put_clk_mask = (0x20 << 8);
+               ddc_i2c->put_data_mask = 0x80;
+               ddc_i2c->get_clk_mask = (0x20 << 8);
+               ddc_i2c->get_data_mask = 0x80;
+               ddc_i2c->mask_clk_reg = RADEON_GPIOPAD_MASK;
+               ddc_i2c->mask_data_reg = RADEON_GPIOPAD_MASK;
+               ddc_i2c->a_clk_reg = RADEON_GPIOPAD_A;
+               ddc_i2c->a_data_reg = RADEON_GPIOPAD_A;
+               ddc_i2c->put_clk_reg = RADEON_GPIOPAD_EN;
+               ddc_i2c->put_data_reg = RADEON_GPIOPAD_EN;
+               ddc_i2c->get_clk_reg = RADEON_LCD_GPIO_Y_REG;
+               ddc_i2c->get_data_reg = RADEON_LCD_GPIO_Y_REG;
+       }
+
+       /* Certain IBM chipset RN50s have a BIOS reporting two VGAs,
+          one with VGA DDC and one with CRT2 DDC. - kill the CRT2 DDC one */
+       if (dev->pdev->device == 0x515e &&
+           dev->pdev->subsystem_vendor == 0x1014) {
+               if (*legacy_connector == CONNECTOR_CRT_LEGACY &&
+                   ddc_i2c->mask_clk_reg == RADEON_GPIO_CRT2_DDC)
+                       return false;
+       }
+
+       /* Some RV100 cards with 2 VGA ports show up with DVI+VGA */
+       if (dev->pdev->device == 0x5159 &&
+           dev->pdev->subsystem_vendor == 0x1002 &&
+           dev->pdev->subsystem_device == 0x013a) {
+               if (*legacy_connector == CONNECTOR_DVI_I_LEGACY)
+                       *legacy_connector = CONNECTOR_CRT_LEGACY;
+       }
+
+       /* X300 card with extra non-existent DVI port */
+       if (dev->pdev->device == 0x5B60 &&
+           dev->pdev->subsystem_vendor == 0x17af &&
+           dev->pdev->subsystem_device == 0x201e && bios_index == 2) {
+               if (*legacy_connector == CONNECTOR_DVI_I_LEGACY)
+                       return false;
+       }
+
+       return true;
+}
+
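+/*
+ * Parse the COMBIOS connector info table: up to four 16-bit entries,
+ * connector type in bits 12-15 and DDC line in bits 8-11.  Each entry
+ * is run through the quirk fixups above before the matching encoders
+ * and connectors are registered.
+ */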
+bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
+{
+       struct radeon_device *rdev = dev->dev_private;
+       uint32_t conn_info, entry, devices;
+       uint16_t tmp;
+       enum radeon_combios_ddc ddc_type;
+       enum radeon_combios_connector connector;
+       int i = 0;
+       struct radeon_i2c_bus_rec ddc_i2c;
+
+       if (rdev->bios == NULL)
+               return false;
+
+       conn_info = combios_get_table_offset(dev, COMBIOS_CONNECTOR_INFO_TABLE);
+       if (conn_info) {
+               for (i = 0; i < 4; i++) {
+                       entry = conn_info + 2 + i * 2;
+
+                       if (!RBIOS16(entry))
+                               break;
+
+                       tmp = RBIOS16(entry);
+
+                       connector = (tmp >> 12) & 0xf;
+
+                       ddc_type = (tmp >> 8) & 0xf;
+                       switch (ddc_type) {
+                       case DDC_MONID:
+                               ddc_i2c =
+                                   combios_setup_i2c_bus(RADEON_GPIO_MONID);
+                               break;
+                       case DDC_DVI:
+                               ddc_i2c =
+                                   combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
+                               break;
+                       case DDC_VGA:
+                               ddc_i2c =
+                                   combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+                               break;
+                       case DDC_CRT2:
+                               ddc_i2c =
+                                   combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC);
+                               break;
+                       default:
+                               break;
+                       }
+
+                       if (!radeon_apply_legacy_quirks(dev, i, &connector,
+                                                       &ddc_i2c))
+                               continue;
+
+                       switch (connector) {
+                       case CONNECTOR_PROPRIETARY_LEGACY:
+                               if ((tmp >> 4) & 0x1)
+                                       devices = ATOM_DEVICE_DFP2_SUPPORT;
+                               else
+                                       devices = ATOM_DEVICE_DFP1_SUPPORT;
+                               radeon_add_legacy_encoder(dev,
+                                                         radeon_get_encoder_id
+                                                         (dev, devices, 0),
+                                                         devices);
+                               radeon_add_legacy_connector(dev, i, devices,
+                                                           legacy_connector_convert
+                                                           [connector],
+                                                           &ddc_i2c);
+                               break;
+                       case CONNECTOR_CRT_LEGACY:
+                               if (tmp & 0x1) {
+                                       devices = ATOM_DEVICE_CRT2_SUPPORT;
+                                       radeon_add_legacy_encoder(dev,
+                                                                 radeon_get_encoder_id
+                                                                 (dev,
+                                                                  ATOM_DEVICE_CRT2_SUPPORT,
+                                                                  2),
+                                                                 ATOM_DEVICE_CRT2_SUPPORT);
+                               } else {
+                                       devices = ATOM_DEVICE_CRT1_SUPPORT;
+                                       radeon_add_legacy_encoder(dev,
+                                                                 radeon_get_encoder_id
+                                                                 (dev,
+                                                                  ATOM_DEVICE_CRT1_SUPPORT,
+                                                                  1),
+                                                                 ATOM_DEVICE_CRT1_SUPPORT);
+                               }
+                               radeon_add_legacy_connector(dev,
+                                                           i,
+                                                           devices,
+                                                           legacy_connector_convert
+                                                           [connector],
+                                                           &ddc_i2c);
+                               break;
+                       case CONNECTOR_DVI_I_LEGACY:
+                               devices = 0;
+                               if (tmp & 0x1) {
+                                       devices |= ATOM_DEVICE_CRT2_SUPPORT;
+                                       radeon_add_legacy_encoder(dev,
+                                                                 radeon_get_encoder_id
+                                                                 (dev,
+                                                                  ATOM_DEVICE_CRT2_SUPPORT,
+                                                                  2),
+                                                                 ATOM_DEVICE_CRT2_SUPPORT);
+                               } else {
+                                       devices |= ATOM_DEVICE_CRT1_SUPPORT;
+                                       radeon_add_legacy_encoder(dev,
+                                                                 radeon_get_encoder_id
+                                                                 (dev,
+                                                                  ATOM_DEVICE_CRT1_SUPPORT,
+                                                                  1),
+                                                                 ATOM_DEVICE_CRT1_SUPPORT);
+                               }
+                               if ((tmp >> 4) & 0x1) {
+                                       devices |= ATOM_DEVICE_DFP2_SUPPORT;
+                                       radeon_add_legacy_encoder(dev,
+                                                                 radeon_get_encoder_id
+                                                                 (dev,
+                                                                  ATOM_DEVICE_DFP2_SUPPORT,
+                                                                  0),
+                                                                 ATOM_DEVICE_DFP2_SUPPORT);
+                               } else {
+                                       devices |= ATOM_DEVICE_DFP1_SUPPORT;
+                                       radeon_add_legacy_encoder(dev,
+                                                                 radeon_get_encoder_id
+                                                                 (dev,
+                                                                  ATOM_DEVICE_DFP1_SUPPORT,
+                                                                  0),
+                                                                 ATOM_DEVICE_DFP1_SUPPORT);
+                               }
+                               radeon_add_legacy_connector(dev,
+                                                           i,
+                                                           devices,
+                                                           legacy_connector_convert
+                                                           [connector],
+                                                           &ddc_i2c);
+                               break;
+                       case CONNECTOR_DVI_D_LEGACY:
+                               if ((tmp >> 4) & 0x1)
+                                       devices = ATOM_DEVICE_DFP2_SUPPORT;
+                               else
+                                       devices = ATOM_DEVICE_DFP1_SUPPORT;
+                               radeon_add_legacy_encoder(dev,
+                                                         radeon_get_encoder_id
+                                                         (dev, devices, 0),
+                                                         devices);
+                               radeon_add_legacy_connector(dev, i, devices,
+                                                           legacy_connector_convert
+                                                           [connector],
+                                                           &ddc_i2c);
+                               break;
+                       case CONNECTOR_CTV_LEGACY:
+                       case CONNECTOR_STV_LEGACY:
+                               radeon_add_legacy_encoder(dev,
+                                                         radeon_get_encoder_id
+                                                         (dev,
+                                                          ATOM_DEVICE_TV1_SUPPORT,
+                                                          2),
+                                                         ATOM_DEVICE_TV1_SUPPORT);
+                               radeon_add_legacy_connector(dev, i,
+                                                           ATOM_DEVICE_TV1_SUPPORT,
+                                                           legacy_connector_convert
+                                                           [connector],
+                                                           &ddc_i2c);
+                               break;
+                       default:
+                               DRM_ERROR("Unknown connector type: %d\n",
+                                         connector);
+                               continue;
+                       }
+
+               }
+       } else {
+               uint16_t tmds_info =
+                   combios_get_table_offset(dev, COMBIOS_DFP_INFO_TABLE);
+               if (tmds_info) {
+                       DRM_DEBUG("Found DFP table, assuming DVI connector\n");
+
+                       radeon_add_legacy_encoder(dev,
+                                                 radeon_get_encoder_id(dev,
+                                                                       ATOM_DEVICE_CRT1_SUPPORT,
+                                                                       1),
+                                                 ATOM_DEVICE_CRT1_SUPPORT);
+                       radeon_add_legacy_encoder(dev,
+                                                 radeon_get_encoder_id(dev,
+                                                                       ATOM_DEVICE_DFP1_SUPPORT,
+                                                                       0),
+                                                 ATOM_DEVICE_DFP1_SUPPORT);
+
+                       ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
+                       radeon_add_legacy_connector(dev,
+                                                   0,
+                                                   ATOM_DEVICE_CRT1_SUPPORT |
+                                                   ATOM_DEVICE_DFP1_SUPPORT,
+                                                   DRM_MODE_CONNECTOR_DVII,
+                                                   &ddc_i2c);
+               } else {
+                       DRM_DEBUG("No connector info found\n");
+                       return false;
+               }
+       }
+
+       if (rdev->flags & RADEON_IS_MOBILITY || rdev->flags & RADEON_IS_IGP) {
+               uint16_t lcd_info =
+                   combios_get_table_offset(dev, COMBIOS_LCD_INFO_TABLE);
+               if (lcd_info) {
+                       uint16_t lcd_ddc_info =
+                           combios_get_table_offset(dev,
+                                                    COMBIOS_LCD_DDC_INFO_TABLE);
+
+                       radeon_add_legacy_encoder(dev,
+                                                 radeon_get_encoder_id(dev,
+                                                                       ATOM_DEVICE_LCD1_SUPPORT,
+                                                                       0),
+                                                 ATOM_DEVICE_LCD1_SUPPORT);
+
+                       if (lcd_ddc_info) {
+                               ddc_type = RBIOS8(lcd_ddc_info + 2);
+                               switch (ddc_type) {
+                               case DDC_MONID:
+                                       ddc_i2c =
+                                           combios_setup_i2c_bus
+                                           (RADEON_GPIO_MONID);
+                                       break;
+                               case DDC_DVI:
+                                       ddc_i2c =
+                                           combios_setup_i2c_bus
+                                           (RADEON_GPIO_DVI_DDC);
+                                       break;
+                               case DDC_VGA:
+                                       ddc_i2c =
+                                           combios_setup_i2c_bus
+                                           (RADEON_GPIO_VGA_DDC);
+                                       break;
+                               case DDC_CRT2:
+                                       ddc_i2c =
+                                           combios_setup_i2c_bus
+                                           (RADEON_GPIO_CRT2_DDC);
+                                       break;
+                               case DDC_LCD:
+                                       ddc_i2c =
+                                           combios_setup_i2c_bus
+                                           (RADEON_LCD_GPIO_MASK);
+                                       ddc_i2c.mask_clk_mask =
+                                           RBIOS32(lcd_ddc_info + 3);
+                                       ddc_i2c.mask_data_mask =
+                                           RBIOS32(lcd_ddc_info + 7);
+                                       ddc_i2c.a_clk_mask =
+                                           RBIOS32(lcd_ddc_info + 3);
+                                       ddc_i2c.a_data_mask =
+                                           RBIOS32(lcd_ddc_info + 7);
+                                       ddc_i2c.put_clk_mask =
+                                           RBIOS32(lcd_ddc_info + 3);
+                                       ddc_i2c.put_data_mask =
+                                           RBIOS32(lcd_ddc_info + 7);
+                                       ddc_i2c.get_clk_mask =
+                                           RBIOS32(lcd_ddc_info + 3);
+                                       ddc_i2c.get_data_mask =
+                                           RBIOS32(lcd_ddc_info + 7);
+                                       break;
+                               case DDC_GPIO:
+                                       ddc_i2c =
+                                           combios_setup_i2c_bus
+                                           (RADEON_MDGPIO_EN_REG);
+                                       ddc_i2c.mask_clk_mask =
+                                           RBIOS32(lcd_ddc_info + 3);
+                                       ddc_i2c.mask_data_mask =
+                                           RBIOS32(lcd_ddc_info + 7);
+                                       ddc_i2c.a_clk_mask =
+                                           RBIOS32(lcd_ddc_info + 3);
+                                       ddc_i2c.a_data_mask =
+                                           RBIOS32(lcd_ddc_info + 7);
+                                       ddc_i2c.put_clk_mask =
+                                           RBIOS32(lcd_ddc_info + 3);
+                                       ddc_i2c.put_data_mask =
+                                           RBIOS32(lcd_ddc_info + 7);
+                                       ddc_i2c.get_clk_mask =
+                                           RBIOS32(lcd_ddc_info + 3);
+                                       ddc_i2c.get_data_mask =
+                                           RBIOS32(lcd_ddc_info + 7);
+                                       break;
+                               default:
+                                       ddc_i2c.valid = false;
+                                       break;
+                               }
+                               DRM_DEBUG("LCD DDC Info Table found!\n");
+                       } else
+                               ddc_i2c.valid = false;
+
+                       radeon_add_legacy_connector(dev,
+                                                   5,
+                                                   ATOM_DEVICE_LCD1_SUPPORT,
+                                                   DRM_MODE_CONNECTOR_LVDS,
+                                                   &ddc_i2c);
+               }
+       }
+
+       /* check TV table */
+       if (rdev->family != CHIP_R100 && rdev->family != CHIP_R200) {
+               uint32_t tv_info =
+                   combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE);
+               if (tv_info) {
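+                       /* only add the TV DAC when the table flags TV-out
+                          with a 'T' at offset 6 */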
+                       if (RBIOS8(tv_info + 6) == 'T') {
+                               radeon_add_legacy_encoder(dev,
+                                                         radeon_get_encoder_id
+                                                         (dev,
+                                                          ATOM_DEVICE_TV1_SUPPORT,
+                                                          2),
+                                                         ATOM_DEVICE_TV1_SUPPORT);
+                               radeon_add_legacy_connector(dev, 6,
+                                                           ATOM_DEVICE_TV1_SUPPORT,
+                                                           DRM_MODE_CONNECTOR_SVIDEO,
+                                                           &ddc_i2c);
+                       }
+               }
+       }
+
+       radeon_link_encoder_connector(dev);
+
+       return true;
+}
+
+static void combios_parse_mmio_table(struct drm_device *dev, uint16_t offset)
+{
+       struct radeon_device *rdev = dev->dev_private;
+
+       if (offset) {
+               while (RBIOS16(offset)) {
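+                       /* each op starts with a 16-bit word: bits 13-15 are
+                          the command, bits 0-12 the MMIO register offset */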
+                       uint16_t cmd = ((RBIOS16(offset) & 0xe000) >> 13);
+                       uint32_t addr = (RBIOS16(offset) & 0x1fff);
+                       uint32_t val, and_mask, or_mask;
+                       uint32_t tmp;
+
+                       offset += 2;
+                       switch (cmd) {
+                       case 0:
+                               val = RBIOS32(offset);
+                               offset += 4;
+                               WREG32(addr, val);
+                               break;
+                       case 1:
+                               val = RBIOS32(offset);
+                               offset += 4;
+                               WREG32(addr, val);
+                               break;
+                       case 2:
+                               and_mask = RBIOS32(offset);
+                               offset += 4;
+                               or_mask = RBIOS32(offset);
+                               offset += 4;
+                               tmp = RREG32(addr);
+                               tmp &= and_mask;
+                               tmp |= or_mask;
+                               WREG32(addr, tmp);
+                               break;
+                       case 3:
+                               and_mask = RBIOS32(offset);
+                               offset += 4;
+                               or_mask = RBIOS32(offset);
+                               offset += 4;
+                               tmp = RREG32(addr);
+                               tmp &= and_mask;
+                               tmp |= or_mask;
+                               WREG32(addr, tmp);
+                               break;
+                       case 4:
+                               val = RBIOS16(offset);
+                               offset += 2;
+                               udelay(val);
+                               break;
+                       case 5:
+                               val = RBIOS16(offset);
+                               offset += 2;
+                               switch (addr) {
+                               case 8:
+                                       while (val--) {
+                                               if (!
+                                                   (RREG32_PLL
+                                                    (RADEON_CLK_PWRMGT_CNTL) &
+                                                    RADEON_MC_BUSY))
+                                                       break;
+                                       }
+                                       break;
+                               case 9:
+                                       while (val--) {
+                                               if ((RREG32(RADEON_MC_STATUS) &
+                                                    RADEON_MC_IDLE))
+                                                       break;
+                                       }
+                                       break;
+                               default:
+                                       break;
+                               }
+                               break;
+                       default:
+                               break;
+                       }
+               }
+       }
+}
+
+static void combios_parse_pll_table(struct drm_device *dev, uint16_t offset)
+{
+       struct radeon_device *rdev = dev->dev_private;
+
+       if (offset) {
+               while (RBIOS8(offset)) {
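+                       /* each op starts with one byte: bits 6-7 are the
+                          command, bits 0-5 the PLL register index */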
+                       uint8_t cmd = ((RBIOS8(offset) & 0xc0) >> 6);
+                       uint8_t addr = (RBIOS8(offset) & 0x3f);
+                       uint32_t val, shift, tmp;
+                       uint32_t and_mask, or_mask;
+
+                       offset++;
+                       switch (cmd) {
+                       case 0:
+                               val = RBIOS32(offset);
+                               offset += 4;
+                               WREG32_PLL(addr, val);
+                               break;
+                       case 1:
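+                               /* read-modify-write one byte lane of the PLL
+                                  register: the table supplies the byte index,
+                                  an AND byte and an OR byte */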
+                               shift = RBIOS8(offset) * 8;
+                               offset++;
+                               and_mask = RBIOS8(offset) << shift;
+                               and_mask |= ~(0xff << shift);
+                               offset++;
+                               or_mask = RBIOS8(offset) << shift;
+                               offset++;
+                               tmp = RREG32_PLL(addr);
+                               tmp &= and_mask;
+                               tmp |= or_mask;
+                               WREG32_PLL(addr, tmp);
+                               break;
+                       case 2:
+                       case 3:
+                               tmp = 1000;
+                               switch (addr) {
+                               case 1:
+                                       udelay(150);
+                                       break;
+                               case 2:
+                                       udelay(1000);
+                                       break;
+                               case 3:
+                                       while (tmp--) {
+                                               if (!
+                                                   (RREG32_PLL
+                                                    (RADEON_CLK_PWRMGT_CNTL) &
+                                                    RADEON_MC_BUSY))
+                                                       break;
+                                       }
+                                       break;
+                               case 4:
+                                       while (tmp--) {
+                                               if (RREG32_PLL
+                                                   (RADEON_CLK_PWRMGT_CNTL) &
+                                                   RADEON_DLL_READY)
+                                                       break;
+                                       }
+                                       break;
+                               case 5:
+                                       tmp =
+                                           RREG32_PLL(RADEON_CLK_PWRMGT_CNTL);
+                                       if (tmp & RADEON_CG_NO1_DEBUG_0) {
+#if 0
+                                               uint32_t mclk_cntl =
+                                                   RREG32_PLL
+                                                   (RADEON_MCLK_CNTL);
+                                               mclk_cntl &= 0xffff0000;
+                                               /*mclk_cntl |= 0x00001111;*//* ??? */
+                                               WREG32_PLL(RADEON_MCLK_CNTL,
+                                                          mclk_cntl);
+                                               udelay(10000);
+#endif
+                                               WREG32_PLL
+                                                   (RADEON_CLK_PWRMGT_CNTL,
+                                                    tmp &
+                                                    ~RADEON_CG_NO1_DEBUG_0);
+                                               udelay(10000);
+                                       }
+                                       break;
+                               default:
+                                       break;
+                               }
+                               break;
+                       default:
+                               break;
+                       }
+               }
+       }
+}
+
+static void combios_parse_ram_reset_table(struct drm_device *dev,
+                                         uint16_t offset)
+{
+       struct radeon_device *rdev = dev->dev_private;
+       uint32_t tmp;
+
+       if (offset) {
+               uint8_t val = RBIOS8(offset);
+               while (val != 0xff) {
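+                       /* byte stream terminated by 0xff: 0x0f waits for
+                          memory power-up to complete, any other value
+                          programs the SDRAM mode register */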
+                       offset++;
+
+                       if (val == 0x0f) {
+                               uint32_t channel_complete_mask;
+
+                               if (ASIC_IS_R300(rdev))
+                                       channel_complete_mask =
+                                           R300_MEM_PWRUP_COMPLETE;
+                               else
+                                       channel_complete_mask =
+                                           RADEON_MEM_PWRUP_COMPLETE;
+                               tmp = 20000;
+                               while (tmp--) {
+                                       if ((RREG32(RADEON_MEM_STR_CNTL) &
+                                            channel_complete_mask) ==
+                                           channel_complete_mask)
+                                               break;
+                               }
+                       } else {
+                               uint32_t or_mask = RBIOS16(offset);
+                               offset += 2;
+
+                               tmp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
+                               tmp &= RADEON_SDRAM_MODE_MASK;
+                               tmp |= or_mask;
+                               WREG32(RADEON_MEM_SDRAM_MODE_REG, tmp);
+
+                               or_mask = val << 24;
+                               tmp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
+                               tmp &= RADEON_B3MEM_RESET_MASK;
+                               tmp |= or_mask;
+                               WREG32(RADEON_MEM_SDRAM_MODE_REG, tmp);
+                       }
+                       val = RBIOS8(offset);
+               }
+       }
+}
+
+static uint32_t combios_detect_ram(struct drm_device *dev, int ram,
+                                  int mem_addr_mapping)
+{
+       struct radeon_device *rdev = dev->dev_private;
+       uint32_t mem_cntl;
+       uint32_t mem_size;
+       uint32_t addr = 0;
+
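+       /* program the candidate address mapping, then write a test pattern to
+          the first dword of every megabyte and read it back; any mismatch
+          means the candidate size is wrong and 0 is returned */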
+       mem_cntl = RREG32(RADEON_MEM_CNTL);
+       if (mem_cntl & RV100_HALF_MODE)
+               ram /= 2;
+       mem_size = ram;
+       mem_cntl &= ~(0xff << 8);
+       mem_cntl |= (mem_addr_mapping & 0xff) << 8;
+       WREG32(RADEON_MEM_CNTL, mem_cntl);
+       RREG32(RADEON_MEM_CNTL);
+
+       /* sdram reset ? */
+
+       /* something like this????  */
+       while (ram--) {
+               addr = ram * 1024 * 1024;
+               /* write to each page */
+               WREG32(RADEON_MM_INDEX, (addr) | RADEON_MM_APER);
+               WREG32(RADEON_MM_DATA, 0xdeadbeef);
+               /* read back and verify */
+               WREG32(RADEON_MM_INDEX, (addr) | RADEON_MM_APER);
+               if (RREG32(RADEON_MM_DATA) != 0xdeadbeef)
+                       return 0;
+       }
+
+       return mem_size;
+}
+
+static void combios_write_ram_size(struct drm_device *dev)
+{
+       struct radeon_device *rdev = dev->dev_private;
+       uint8_t rev;
+       uint16_t offset;
+       uint32_t mem_size = 0;
+       uint32_t mem_cntl = 0;
+
+       /* should do something smarter here I guess... */
+       if (rdev->flags & RADEON_IS_IGP)
+               return;
+
+       /* first check detected mem table */
+       offset = combios_get_table_offset(dev, COMBIOS_DETECTED_MEM_TABLE);
+       if (offset) {
+               rev = RBIOS8(offset);
+               if (rev < 3) {
+                       mem_cntl = RBIOS32(offset + 1);
+                       mem_size = RBIOS16(offset + 5);
+                       if (((rdev->flags & RADEON_FAMILY_MASK) < CHIP_R200) &&
+                           ((dev->pdev->device != 0x515e)
+                            && (dev->pdev->device != 0x5969)))
+                               WREG32(RADEON_MEM_CNTL, mem_cntl);
+               }
+       }
+
+       if (!mem_size) {
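+               /* no usable detected-memory entry: fall back to the memory
+                  config table */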
+               offset =
+                   combios_get_table_offset(dev, COMBIOS_MEM_CONFIG_TABLE);
+               if (offset) {
+                       rev = RBIOS8(offset - 1);
+                       if (rev < 1) {
+                               if (((rdev->flags & RADEON_FAMILY_MASK) <
+                                    CHIP_R200)
+                                   && ((dev->pdev->device != 0x515e)
+                                       && (dev->pdev->device != 0x5969))) {
+                                       int ram = 0;
+                                       int mem_addr_mapping = 0;
+
+                                       while (RBIOS8(offset)) {
+                                               ram = RBIOS8(offset);
+                                               mem_addr_mapping =
+                                                   RBIOS8(offset + 1);
+                                               if (mem_addr_mapping != 0x25)
+                                                       ram *= 2;
+                                               mem_size =
+                                                   combios_detect_ram(dev, ram,
+                                                                      mem_addr_mapping);
+                                               if (mem_size)
+                                                       break;
+                                               offset += 2;
+                                       }
+                               } else
+                                       mem_size = RBIOS8(offset);
+                       } else {
+                               mem_size = RBIOS8(offset);
+                               mem_size *= 2;  /* convert to MB */
+                       }
+               }
+       }
+
+       mem_size *= (1024 * 1024);      /* convert to bytes */
+       WREG32(RADEON_CONFIG_MEMSIZE, mem_size);
+}
+
+void radeon_combios_dyn_clk_setup(struct drm_device *dev, int enable)
+{
+       uint16_t dyn_clk_info =
+           combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
+
+       if (dyn_clk_info)
+               combios_parse_pll_table(dev, dyn_clk_info);
+}
+
+void radeon_combios_asic_init(struct drm_device *dev)
+{
+       struct radeon_device *rdev = dev->dev_private;
+       uint16_t table;
+
+       /* port hardcoded mac stuff from radeonfb */
+       if (rdev->bios == NULL)
+               return;
+
+       /* ASIC INIT 1 */
+       table = combios_get_table_offset(dev, COMBIOS_ASIC_INIT_1_TABLE);
+       if (table)
+               combios_parse_mmio_table(dev, table);
+
+       /* PLL INIT */
+       table = combios_get_table_offset(dev, COMBIOS_PLL_INIT_TABLE);
+       if (table)
+               combios_parse_pll_table(dev, table);
+
+       /* ASIC INIT 2 */
+       table = combios_get_table_offset(dev, COMBIOS_ASIC_INIT_2_TABLE);
+       if (table)
+               combios_parse_mmio_table(dev, table);
+
+       if (!(rdev->flags & RADEON_IS_IGP)) {
+               /* ASIC INIT 4 */
+               table =
+                   combios_get_table_offset(dev, COMBIOS_ASIC_INIT_4_TABLE);
+               if (table)
+                       combios_parse_mmio_table(dev, table);
+
+               /* RAM RESET */
+               table = combios_get_table_offset(dev, COMBIOS_RAM_RESET_TABLE);
+               if (table)
+                       combios_parse_ram_reset_table(dev, table);
+
+               /* ASIC INIT 3 */
+               table =
+                   combios_get_table_offset(dev, COMBIOS_ASIC_INIT_3_TABLE);
+               if (table)
+                       combios_parse_mmio_table(dev, table);
+
+               /* write CONFIG_MEMSIZE */
+               combios_write_ram_size(dev);
+       }
+
+       /* DYN CLK 1 */
+       table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
+       if (table)
+               combios_parse_pll_table(dev, table);
+}
+
+void radeon_combios_initialize_bios_scratch_regs(struct drm_device *dev)
+{
+       struct radeon_device *rdev = dev->dev_private;
+       uint32_t bios_0_scratch, bios_6_scratch, bios_7_scratch;
+
+       bios_0_scratch = RREG32(RADEON_BIOS_0_SCRATCH);
+       bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
+       bios_7_scratch = RREG32(RADEON_BIOS_7_SCRATCH);
+
+       /* let the bios control the backlight */
+       bios_0_scratch &= ~RADEON_DRIVER_BRIGHTNESS_EN;
+
+       /* tell the bios not to handle mode switching */
+       bios_6_scratch |= (RADEON_DISPLAY_SWITCHING_DIS |
+                          RADEON_ACC_MODE_CHANGE);
+
+       /* tell the bios a driver is loaded */
+       bios_7_scratch |= RADEON_DRV_LOADED;
+
+       WREG32(RADEON_BIOS_0_SCRATCH, bios_0_scratch);
+       WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch);
+       WREG32(RADEON_BIOS_7_SCRATCH, bios_7_scratch);
+}
+
+void radeon_combios_output_lock(struct drm_encoder *encoder, bool lock)
+{
+       struct drm_device *dev = encoder->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       uint32_t bios_6_scratch;
+
+       bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
+
+       if (lock)
+               bios_6_scratch |= RADEON_DRIVER_CRITICAL;
+       else
+               bios_6_scratch &= ~RADEON_DRIVER_CRITICAL;
+
+       WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch);
+}
+
+void
+radeon_combios_connected_scratch_regs(struct drm_connector *connector,
+                                     struct drm_encoder *encoder,
+                                     bool connected)
+{
+       struct drm_device *dev = connector->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_connector *radeon_connector =
+           to_radeon_connector(connector);
+       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+       uint32_t bios_4_scratch = RREG32(RADEON_BIOS_4_SCRATCH);
+       uint32_t bios_5_scratch = RREG32(RADEON_BIOS_5_SCRATCH);
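+       /* scratch 4 holds the per-output attached flags, scratch 5 the
+          on/requested flags; update them to match the detected state */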
+
+       if ((radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) &&
+           (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT)) {
+               if (connected) {
+                       DRM_DEBUG("TV1 connected\n");
+                       /* fix me */
+                       bios_4_scratch |= RADEON_TV1_ATTACHED_SVIDEO;
+                       /*save->bios_4_scratch |= RADEON_TV1_ATTACHED_COMP; */
+                       bios_5_scratch |= RADEON_TV1_ON;
+                       bios_5_scratch |= RADEON_ACC_REQ_TV1;
+               } else {
+                       DRM_DEBUG("TV1 disconnected\n");
+                       bios_4_scratch &= ~RADEON_TV1_ATTACHED_MASK;
+                       bios_5_scratch &= ~RADEON_TV1_ON;
+                       bios_5_scratch &= ~RADEON_ACC_REQ_TV1;
+               }
+       }
+       if ((radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) &&
+           (radeon_connector->devices & ATOM_DEVICE_LCD1_SUPPORT)) {
+               if (connected) {
+                       DRM_DEBUG("LCD1 connected\n");
+                       bios_4_scratch |= RADEON_LCD1_ATTACHED;
+                       bios_5_scratch |= RADEON_LCD1_ON;
+                       bios_5_scratch |= RADEON_ACC_REQ_LCD1;
+               } else {
+                       DRM_DEBUG("LCD1 disconnected\n");
+                       bios_4_scratch &= ~RADEON_LCD1_ATTACHED;
+                       bios_5_scratch &= ~RADEON_LCD1_ON;
+                       bios_5_scratch &= ~RADEON_ACC_REQ_LCD1;
+               }
+       }
+       if ((radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) &&
+           (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT)) {
+               if (connected) {
+                       DRM_DEBUG("CRT1 connected\n");
+                       bios_4_scratch |= RADEON_CRT1_ATTACHED_COLOR;
+                       bios_5_scratch |= RADEON_CRT1_ON;
+                       bios_5_scratch |= RADEON_ACC_REQ_CRT1;
+               } else {
+                       DRM_DEBUG("CRT1 disconnected\n");
+                       bios_4_scratch &= ~RADEON_CRT1_ATTACHED_MASK;
+                       bios_5_scratch &= ~RADEON_CRT1_ON;
+                       bios_5_scratch &= ~RADEON_ACC_REQ_CRT1;
+               }
+       }
+       if ((radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) &&
+           (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT)) {
+               if (connected) {
+                       DRM_DEBUG("CRT2 connected\n");
+                       bios_4_scratch |= RADEON_CRT2_ATTACHED_COLOR;
+                       bios_5_scratch |= RADEON_CRT2_ON;
+                       bios_5_scratch |= RADEON_ACC_REQ_CRT2;
+               } else {
+                       DRM_DEBUG("CRT2 disconnected\n");
+                       bios_4_scratch &= ~RADEON_CRT2_ATTACHED_MASK;
+                       bios_5_scratch &= ~RADEON_CRT2_ON;
+                       bios_5_scratch &= ~RADEON_ACC_REQ_CRT2;
+               }
+       }
+       if ((radeon_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) &&
+           (radeon_connector->devices & ATOM_DEVICE_DFP1_SUPPORT)) {
+               if (connected) {
+                       DRM_DEBUG("DFP1 connected\n");
+                       bios_4_scratch |= RADEON_DFP1_ATTACHED;
+                       bios_5_scratch |= RADEON_DFP1_ON;
+                       bios_5_scratch |= RADEON_ACC_REQ_DFP1;
+               } else {
+                       DRM_DEBUG("DFP1 disconnected\n");
+                       bios_4_scratch &= ~RADEON_DFP1_ATTACHED;
+                       bios_5_scratch &= ~RADEON_DFP1_ON;
+                       bios_5_scratch &= ~RADEON_ACC_REQ_DFP1;
+               }
+       }
+       if ((radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) &&
+           (radeon_connector->devices & ATOM_DEVICE_DFP2_SUPPORT)) {
+               if (connected) {
+                       DRM_DEBUG("DFP2 connected\n");
+                       bios_4_scratch |= RADEON_DFP2_ATTACHED;
+                       bios_5_scratch |= RADEON_DFP2_ON;
+                       bios_5_scratch |= RADEON_ACC_REQ_DFP2;
+               } else {
+                       DRM_DEBUG("DFP2 disconnected\n");
+                       bios_4_scratch &= ~RADEON_DFP2_ATTACHED;
+                       bios_5_scratch &= ~RADEON_DFP2_ON;
+                       bios_5_scratch &= ~RADEON_ACC_REQ_DFP2;
+               }
+       }
+       WREG32(RADEON_BIOS_4_SCRATCH, bios_4_scratch);
+       WREG32(RADEON_BIOS_5_SCRATCH, bios_5_scratch);
+}
+
+void
+radeon_combios_encoder_crtc_scratch_regs(struct drm_encoder *encoder, int crtc)
+{
+       struct drm_device *dev = encoder->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+       uint32_t bios_5_scratch = RREG32(RADEON_BIOS_5_SCRATCH);
+
+       if (radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) {
+               bios_5_scratch &= ~RADEON_TV1_CRTC_MASK;
+               bios_5_scratch |= (crtc << RADEON_TV1_CRTC_SHIFT);
+       }
+       if (radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) {
+               bios_5_scratch &= ~RADEON_CRT1_CRTC_MASK;
+               bios_5_scratch |= (crtc << RADEON_CRT1_CRTC_SHIFT);
+       }
+       if (radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) {
+               bios_5_scratch &= ~RADEON_CRT2_CRTC_MASK;
+               bios_5_scratch |= (crtc << RADEON_CRT2_CRTC_SHIFT);
+       }
+       if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) {
+               bios_5_scratch &= ~RADEON_LCD1_CRTC_MASK;
+               bios_5_scratch |= (crtc << RADEON_LCD1_CRTC_SHIFT);
+       }
+       if (radeon_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) {
+               bios_5_scratch &= ~RADEON_DFP1_CRTC_MASK;
+               bios_5_scratch |= (crtc << RADEON_DFP1_CRTC_SHIFT);
+       }
+       if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) {
+               bios_5_scratch &= ~RADEON_DFP2_CRTC_MASK;
+               bios_5_scratch |= (crtc << RADEON_DFP2_CRTC_SHIFT);
+       }
+       WREG32(RADEON_BIOS_5_SCRATCH, bios_5_scratch);
+}
+
+void
+radeon_combios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on)
+{
+       struct drm_device *dev = encoder->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+       uint32_t bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
+
+       if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT)) {
+               if (on)
+                       bios_6_scratch |= RADEON_TV_DPMS_ON;
+               else
+                       bios_6_scratch &= ~RADEON_TV_DPMS_ON;
+       }
+       if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
+               if (on)
+                       bios_6_scratch |= RADEON_CRT_DPMS_ON;
+               else
+                       bios_6_scratch &= ~RADEON_CRT_DPMS_ON;
+       }
+       if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+               if (on)
+                       bios_6_scratch |= RADEON_LCD_DPMS_ON;
+               else
+                       bios_6_scratch &= ~RADEON_LCD_DPMS_ON;
+       }
+       if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+               if (on)
+                       bios_6_scratch |= RADEON_DFP_DPMS_ON;
+               else
+                       bios_6_scratch &= ~RADEON_DFP_DPMS_ON;
+       }
+       WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch);
+}
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
new file mode 100644 (file)
index 0000000..70ede6a
--- /dev/null
@@ -0,0 +1,603 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ */
+#include "drmP.h"
+#include "drm_edid.h"
+#include "drm_crtc_helper.h"
+#include "radeon_drm.h"
+#include "radeon.h"
+
+extern void
+radeon_combios_connected_scratch_regs(struct drm_connector *connector,
+                                     struct drm_encoder *encoder,
+                                     bool connected);
+extern void
+radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
+                                      struct drm_encoder *encoder,
+                                      bool connected);
+
+static void
+radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_connector_status status)
+{
+       struct drm_device *dev = connector->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       struct drm_encoder *best_encoder = NULL;
+       struct drm_encoder *encoder = NULL;
+       struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
+       struct drm_mode_object *obj;
+       bool connected;
+       int i;
+
+       best_encoder = connector_funcs->best_encoder(connector);
+
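+       /* flag an encoder as connected only if it is the connector's active
+          (best) encoder and the connector itself is connected */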
+       for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+               if (connector->encoder_ids[i] == 0)
+                       break;
+
+               obj = drm_mode_object_find(connector->dev,
+                                          connector->encoder_ids[i],
+                                          DRM_MODE_OBJECT_ENCODER);
+               if (!obj)
+                       continue;
+
+               encoder = obj_to_encoder(obj);
+
+               if ((encoder == best_encoder) && (status == connector_status_connected))
+                       connected = true;
+               else
+                       connected = false;
+
+               if (rdev->is_atom_bios)
+                       radeon_atombios_connected_scratch_regs(connector, encoder, connected);
+               else
+                       radeon_combios_connected_scratch_regs(connector, encoder, connected);
+
+       }
+}
+
+struct drm_encoder *radeon_best_single_encoder(struct drm_connector *connector)
+{
+       int enc_id = connector->encoder_ids[0];
+       struct drm_mode_object *obj;
+       struct drm_encoder *encoder;
+
+       /* pick the first encoder id */
+       if (enc_id) {
+               obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
+               if (!obj)
+                       return NULL;
+               encoder = obj_to_encoder(obj);
+               return encoder;
+       }
+       return NULL;
+}
+
+static struct drm_display_mode *radeon_fp_native_mode(struct drm_encoder *encoder)
+{
+       struct drm_device *dev = encoder->dev;
+       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+       struct drm_display_mode *mode = NULL;
+       struct radeon_native_mode *native_mode = &radeon_encoder->native_mode;
+
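+       /* only build a mode if the BIOS provided sane native panel timings */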
+       if (native_mode->panel_xres != 0 &&
+           native_mode->panel_yres != 0 &&
+           native_mode->dotclock != 0) {
+               mode = drm_mode_create(dev);
+               if (!mode)
+                       return NULL;
+
+               mode->hdisplay = native_mode->panel_xres;
+               mode->vdisplay = native_mode->panel_yres;
+
+               mode->htotal = mode->hdisplay + native_mode->hblank;
+               mode->hsync_start = mode->hdisplay + native_mode->hoverplus;
+               mode->hsync_end = mode->hsync_start + native_mode->hsync_width;
+               mode->vtotal = mode->vdisplay + native_mode->vblank;
+               mode->vsync_start = mode->vdisplay + native_mode->voverplus;
+               mode->vsync_end = mode->vsync_start + native_mode->vsync_width;
+               mode->clock = native_mode->dotclock;
+               mode->flags = 0;
+
+               mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
+               drm_mode_set_name(mode);
+
+               DRM_DEBUG("Adding native panel mode %s\n", mode->name);
+       }
+       return mode;
+}
+
+int radeon_connector_set_property(struct drm_connector *connector, struct drm_property *property,
+                                 uint64_t val)
+{
+       return 0;
+}
+
+static int radeon_lvds_get_modes(struct drm_connector *connector)
+{
+       struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+       struct drm_encoder *encoder;
+       int ret = 0;
+       struct drm_display_mode *mode;
+
+       if (radeon_connector->ddc_bus) {
+               ret = radeon_ddc_get_modes(radeon_connector);
+               if (ret > 0)
+                       return ret;
+       }
+
+       encoder = radeon_best_single_encoder(connector);
+       if (!encoder)
+               return 0;
+
+       /* we have no EDID modes */
+       mode = radeon_fp_native_mode(encoder);
+       if (mode) {
+               ret = 1;
+               drm_mode_probed_add(connector, mode);
+       }
+       return ret;
+}
+
+static int radeon_lvds_mode_valid(struct drm_connector *connector,
+                                 struct drm_display_mode *mode)
+{
+       return MODE_OK;
+}
+
+static enum drm_connector_status radeon_lvds_detect(struct drm_connector *connector)
+{
+       enum drm_connector_status ret = connector_status_connected;
+       /* check acpi lid status ??? */
+       radeon_connector_update_scratch_regs(connector, ret);
+       return ret;
+}
+
+static void radeon_connector_destroy(struct drm_connector *connector)
+{
+       struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+       if (radeon_connector->ddc_bus)
+               radeon_i2c_destroy(radeon_connector->ddc_bus);
+       kfree(radeon_connector->con_priv);
+       drm_sysfs_connector_remove(connector);
+       drm_connector_cleanup(connector);
+       kfree(connector);
+}
+
+struct drm_connector_helper_funcs radeon_lvds_connector_helper_funcs = {
+       .get_modes = radeon_lvds_get_modes,
+       .mode_valid = radeon_lvds_mode_valid,
+       .best_encoder = radeon_best_single_encoder,
+};
+
+struct drm_connector_funcs radeon_lvds_connector_funcs = {
+       .dpms = drm_helper_connector_dpms,
+       .detect = radeon_lvds_detect,
+       .fill_modes = drm_helper_probe_single_connector_modes,
+       .destroy = radeon_connector_destroy,
+       .set_property = radeon_connector_set_property,
+};
+
+static int radeon_vga_get_modes(struct drm_connector *connector)
+{
+       struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+       int ret;
+
+       ret = radeon_ddc_get_modes(radeon_connector);
+
+       return ret;
+}
+
+static int radeon_vga_mode_valid(struct drm_connector *connector,
+                                 struct drm_display_mode *mode)
+{
+       return MODE_OK;
+}
+
+static enum drm_connector_status radeon_vga_detect(struct drm_connector *connector)
+{
+       struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+       struct drm_encoder *encoder;
+       struct drm_encoder_helper_funcs *encoder_funcs;
+       bool dret;
+       enum drm_connector_status ret = connector_status_disconnected;
+
+       radeon_i2c_do_lock(radeon_connector, 1);
+       dret = radeon_ddc_probe(radeon_connector);
+       radeon_i2c_do_lock(radeon_connector, 0);
+       if (dret)
+               ret = connector_status_connected;
+       else {
+               /* if the EDID probe fails, fall back to load detection */
+               encoder = radeon_best_single_encoder(connector);
+               if (!encoder)
+                       ret = connector_status_disconnected;
+               else {
+                       encoder_funcs = encoder->helper_private;
+                       ret = encoder_funcs->detect(encoder, connector);
+               }
+       }
+
+       radeon_connector_update_scratch_regs(connector, ret);
+       return ret;
+}
+
+struct drm_connector_helper_funcs radeon_vga_connector_helper_funcs = {
+       .get_modes = radeon_vga_get_modes,
+       .mode_valid = radeon_vga_mode_valid,
+       .best_encoder = radeon_best_single_encoder,
+};
+
+struct drm_connector_funcs radeon_vga_connector_funcs = {
+       .dpms = drm_helper_connector_dpms,
+       .detect = radeon_vga_detect,
+       .fill_modes = drm_helper_probe_single_connector_modes,
+       .destroy = radeon_connector_destroy,
+       .set_property = radeon_connector_set_property,
+};
+
+static int radeon_dvi_get_modes(struct drm_connector *connector)
+{
+       struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+       int ret;
+
+       ret = radeon_ddc_get_modes(radeon_connector);
+       /* reset scratch regs here since radeon_dvi_detect doesn't check digital bit */
+       radeon_connector_update_scratch_regs(connector, connector_status_connected);
+       return ret;
+}
+
+static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connector)
+{
+       struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+       struct drm_encoder *encoder;
+       struct drm_encoder_helper_funcs *encoder_funcs;
+       struct drm_mode_object *obj;
+       int i;
+       enum drm_connector_status ret = connector_status_disconnected;
+       bool dret;
+
+       radeon_i2c_do_lock(radeon_connector, 1);
+       dret = radeon_ddc_probe(radeon_connector);
+       radeon_i2c_do_lock(radeon_connector, 0);
+       if (dret)
+               ret = connector_status_connected;
+       else {
+               for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+                       if (connector->encoder_ids[i] == 0)
+                               break;
+
+                       obj = drm_mode_object_find(connector->dev,
+                                                  connector->encoder_ids[i],
+                                                  DRM_MODE_OBJECT_ENCODER);
+                       if (!obj)
+                               continue;
+
+                       encoder = obj_to_encoder(obj);
+
+                       encoder_funcs = encoder->helper_private;
+                       if (encoder_funcs->detect) {
+                               ret = encoder_funcs->detect(encoder, connector);
+                               if (ret == connector_status_connected) {
+                                       radeon_connector->use_digital = 0;
+                                       break;
+                               }
+                       }
+               }
+       }
+
+       /* updated in get modes as well since we need to know if it's analog or digital */
+       radeon_connector_update_scratch_regs(connector, ret);
+       return ret;
+}
+
+/* okay need to be smart in here about which encoder to pick */
+struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
+{
+       int enc_id = connector->encoder_ids[0];
+       struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+       struct drm_mode_object *obj;
+       struct drm_encoder *encoder;
+       int i;
+       for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+               if (connector->encoder_ids[i] == 0)
+                       break;
+
+               obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER);
+               if (!obj)
+                       continue;
+
+               encoder = obj_to_encoder(obj);
+
+               if (radeon_connector->use_digital) {
+                       if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)
+                               return encoder;
+               } else {
+                       if (encoder->encoder_type == DRM_MODE_ENCODER_DAC ||
+                           encoder->encoder_type == DRM_MODE_ENCODER_TVDAC)
+                               return encoder;
+               }
+       }
+
+       /* see if we have a default encoder  TODO */
+
+       /* then check use digital */
+       /* pick the first one */
+       if (enc_id) {
+               obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
+               if (!obj)
+                       return NULL;
+               encoder = obj_to_encoder(obj);
+               return encoder;
+       }
+       return NULL;
+}
+
+struct drm_connector_helper_funcs radeon_dvi_connector_helper_funcs = {
+       .get_modes = radeon_dvi_get_modes,
+       .mode_valid = radeon_vga_mode_valid,
+       .best_encoder = radeon_dvi_encoder,
+};
+
+struct drm_connector_funcs radeon_dvi_connector_funcs = {
+       .dpms = drm_helper_connector_dpms,
+       .detect = radeon_dvi_detect,
+       .fill_modes = drm_helper_probe_single_connector_modes,
+       .set_property = radeon_connector_set_property,
+       .destroy = radeon_connector_destroy,
+};
+
+void
+radeon_add_atom_connector(struct drm_device *dev,
+                         uint32_t connector_id,
+                         uint32_t supported_device,
+                         int connector_type,
+                         struct radeon_i2c_bus_rec *i2c_bus,
+                         bool linkb,
+                         uint32_t igp_lane_info)
+{
+       struct drm_connector *connector;
+       struct radeon_connector *radeon_connector;
+       struct radeon_connector_atom_dig *radeon_dig_connector;
+       uint32_t subpixel_order = SubPixelNone;
+
+       /* fixme - tv/cv/din */
+       if ((connector_type == DRM_MODE_CONNECTOR_Unknown) ||
+           (connector_type == DRM_MODE_CONNECTOR_SVIDEO) ||
+           (connector_type == DRM_MODE_CONNECTOR_Composite) ||
+           (connector_type == DRM_MODE_CONNECTOR_9PinDIN))
+               return;
+
+       /* see if we already added it */
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               radeon_connector = to_radeon_connector(connector);
+               if (radeon_connector->connector_id == connector_id) {
+                       radeon_connector->devices |= supported_device;
+                       return;
+               }
+       }
+
+       radeon_connector = kzalloc(sizeof(struct radeon_connector), GFP_KERNEL);
+       if (!radeon_connector)
+               return;
+
+       connector = &radeon_connector->base;
+
+       radeon_connector->connector_id = connector_id;
+       radeon_connector->devices = supported_device;
+       switch (connector_type) {
+       case DRM_MODE_CONNECTOR_VGA:
+               drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
+               drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
+               if (i2c_bus->valid) {
+                       radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA");
+                       if (!radeon_connector->ddc_bus)
+                               goto failed;
+               }
+               break;
+       case DRM_MODE_CONNECTOR_DVIA:
+               drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
+               drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
+               if (i2c_bus->valid) {
+                       radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
+                       if (!radeon_connector->ddc_bus)
+                               goto failed;
+               }
+               break;
+       case DRM_MODE_CONNECTOR_DVII:
+       case DRM_MODE_CONNECTOR_DVID:
+               radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
+               if (!radeon_dig_connector)
+                       goto failed;
+               radeon_dig_connector->linkb = linkb;
+               radeon_dig_connector->igp_lane_info = igp_lane_info;
+               radeon_connector->con_priv = radeon_dig_connector;
+               drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
+               drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
+               if (i2c_bus->valid) {
+                       radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
+                       if (!radeon_connector->ddc_bus)
+                               goto failed;
+               }
+               subpixel_order = SubPixelHorizontalRGB;
+               break;
+       case DRM_MODE_CONNECTOR_HDMIA:
+       case DRM_MODE_CONNECTOR_HDMIB:
+               radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
+               if (!radeon_dig_connector)
+                       goto failed;
+               radeon_dig_connector->linkb = linkb;
+               radeon_dig_connector->igp_lane_info = igp_lane_info;
+               radeon_connector->con_priv = radeon_dig_connector;
+               drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
+               drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
+               if (i2c_bus->valid) {
+                       radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "HDMI");
+                       if (!radeon_connector->ddc_bus)
+                               goto failed;
+               }
+               subpixel_order = SubPixelHorizontalRGB;
+               break;
+       case DRM_MODE_CONNECTOR_DisplayPort:
+               radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
+               if (!radeon_dig_connector)
+                       goto failed;
+               radeon_dig_connector->linkb = linkb;
+               radeon_dig_connector->igp_lane_info = igp_lane_info;
+               radeon_connector->con_priv = radeon_dig_connector;
+               drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
+               drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
+               if (i2c_bus->valid) {
+                       radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DP");
+                       if (!radeon_connector->ddc_bus)
+                               goto failed;
+               }
+               subpixel_order = SubPixelHorizontalRGB;
+               break;
+       case DRM_MODE_CONNECTOR_SVIDEO:
+       case DRM_MODE_CONNECTOR_Composite:
+       case DRM_MODE_CONNECTOR_9PinDIN:
+               break;
+       case DRM_MODE_CONNECTOR_LVDS:
+               radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
+               if (!radeon_dig_connector)
+                       goto failed;
+               radeon_dig_connector->linkb = linkb;
+               radeon_dig_connector->igp_lane_info = igp_lane_info;
+               radeon_connector->con_priv = radeon_dig_connector;
+               drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
+               drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
+               if (i2c_bus->valid) {
+                       radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS");
+                       if (!radeon_connector->ddc_bus)
+                               goto failed;
+               }
+               subpixel_order = SubPixelHorizontalRGB;
+               break;
+       }
+
+       connector->display_info.subpixel_order = subpixel_order;
+       drm_sysfs_connector_add(connector);
+       return;
+
+failed:
+       if (radeon_connector->ddc_bus)
+               radeon_i2c_destroy(radeon_connector->ddc_bus);
+       drm_connector_cleanup(connector);
+       kfree(connector);
+}
+
+void
+radeon_add_legacy_connector(struct drm_device *dev,
+                           uint32_t connector_id,
+                           uint32_t supported_device,
+                           int connector_type,
+                           struct radeon_i2c_bus_rec *i2c_bus)
+{
+       struct drm_connector *connector;
+       struct radeon_connector *radeon_connector;
+       uint32_t subpixel_order = SubPixelNone;
+
+       /* fixme - tv/cv/din */
+       if ((connector_type == DRM_MODE_CONNECTOR_Unknown) ||
+           (connector_type == DRM_MODE_CONNECTOR_SVIDEO) ||
+           (connector_type == DRM_MODE_CONNECTOR_Composite) ||
+           (connector_type == DRM_MODE_CONNECTOR_9PinDIN))
+               return;
+
+       /* see if we already added it */
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               radeon_connector = to_radeon_connector(connector);
+               if (radeon_connector->connector_id == connector_id) {
+                       radeon_connector->devices |= supported_device;
+                       return;
+               }
+       }
+
+       radeon_connector = kzalloc(sizeof(struct radeon_connector), GFP_KERNEL);
+       if (!radeon_connector)
+               return;
+
+       connector = &radeon_connector->base;
+
+       radeon_connector->connector_id = connector_id;
+       radeon_connector->devices = supported_device;
+       switch (connector_type) {
+       case DRM_MODE_CONNECTOR_VGA:
+               drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
+               drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
+               if (i2c_bus->valid) {
+                       radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA");
+                       if (!radeon_connector->ddc_bus)
+                               goto failed;
+               }
+               break;
+       case DRM_MODE_CONNECTOR_DVIA:
+               drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
+               drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
+               if (i2c_bus->valid) {
+                       radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
+                       if (!radeon_connector->ddc_bus)
+                               goto failed;
+               }
+               break;
+       case DRM_MODE_CONNECTOR_DVII:
+       case DRM_MODE_CONNECTOR_DVID:
+               drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
+               drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
+               if (i2c_bus->valid) {
+                       radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
+                       if (!radeon_connector->ddc_bus)
+                               goto failed;
+               }
+               subpixel_order = SubPixelHorizontalRGB;
+               break;
+       case DRM_MODE_CONNECTOR_SVIDEO:
+       case DRM_MODE_CONNECTOR_Composite:
+       case DRM_MODE_CONNECTOR_9PinDIN:
+               break;
+       case DRM_MODE_CONNECTOR_LVDS:
+               drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
+               drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
+               if (i2c_bus->valid) {
+                       radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS");
+                       if (!radeon_connector->ddc_bus)
+                               goto failed;
+               }
+               subpixel_order = SubPixelHorizontalRGB;
+               break;
+       }
+
+       connector->display_info.subpixel_order = subpixel_order;
+       drm_sysfs_connector_add(connector);
+       return;
+
+failed:
+       if (radeon_connector->ddc_bus)
+               radeon_i2c_destroy(radeon_connector->ddc_bus);
+       drm_connector_cleanup(connector);
+       kfree(connector);
+}
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
new file mode 100644 (file)
index 0000000..b843f9b
--- /dev/null
@@ -0,0 +1,249 @@
+/*
+ * Copyright 2008 Jerome Glisse.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Jerome Glisse <glisse@freedesktop.org>
+ */
+#include "drmP.h"
+#include "radeon_drm.h"
+#include "radeon_reg.h"
+#include "radeon.h"
+
+void r100_cs_dump_packet(struct radeon_cs_parser *p,
+                        struct radeon_cs_packet *pkt);
+
+int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
+{
+       struct drm_device *ddev = p->rdev->ddev;
+       struct radeon_cs_chunk *chunk;
+       unsigned i, j;
+       bool duplicate;
+
+       if (p->chunk_relocs_idx == -1) {
+               return 0;
+       }
+       chunk = &p->chunks[p->chunk_relocs_idx];
+       /* FIXME: we assume that each reloc uses 4 dwords */
+       p->nrelocs = chunk->length_dw / 4;
+       p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL);
+       if (p->relocs_ptr == NULL) {
+               return -ENOMEM;
+       }
+       p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_cs_reloc), GFP_KERNEL);
+       if (p->relocs == NULL) {
+               return -ENOMEM;
+       }
+       for (i = 0; i < p->nrelocs; i++) {
+               struct drm_radeon_cs_reloc *r;
+
+               duplicate = false;
+               r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
+               for (j = 0; j < p->nrelocs; j++) {
+                       if (r->handle == p->relocs[j].handle) {
+                               p->relocs_ptr[i] = &p->relocs[j];
+                               duplicate = true;
+                               break;
+                       }
+               }
+               if (!duplicate) {
+                       p->relocs[i].gobj = drm_gem_object_lookup(ddev,
+                                                                 p->filp,
+                                                                 r->handle);
+                       if (p->relocs[i].gobj == NULL) {
+                               DRM_ERROR("gem object lookup failed 0x%x\n",
+                                         r->handle);
+                               return -EINVAL;
+                       }
+                       p->relocs_ptr[i] = &p->relocs[i];
+                       p->relocs[i].robj = p->relocs[i].gobj->driver_private;
+                       p->relocs[i].lobj.robj = p->relocs[i].robj;
+                       p->relocs[i].lobj.rdomain = r->read_domains;
+                       p->relocs[i].lobj.wdomain = r->write_domain;
+                       p->relocs[i].handle = r->handle;
+                       p->relocs[i].flags = r->flags;
+                       INIT_LIST_HEAD(&p->relocs[i].lobj.list);
+                       radeon_object_list_add_object(&p->relocs[i].lobj,
+                                                     &p->validated);
+               }
+       }
+       return radeon_object_list_validate(&p->validated, p->ib->fence);
+}
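
The FIXME above assumes each relocation record occupies four dwords, which matches the four 32-bit fields the loop reads (handle, read_domains, write_domain, flags). A minimal sketch of the implied record layout; the struct name is hypothetical and the real uapi definition lives in radeon_drm.h:

#include <stdint.h>

/* Hypothetical mirror of the 4-dword relocation record the parser
 * expects; record i starts at kdata[i * 4], as the cast above shows. */
struct cs_reloc_sketch {
	uint32_t handle;        /* GEM handle of the buffer object */
	uint32_t read_domains;  /* domains the command stream reads from */
	uint32_t write_domain;  /* domain the command stream writes to */
	uint32_t flags;
};
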
+
+int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
+{
+       struct drm_radeon_cs *cs = data;
+       uint64_t *chunk_array_ptr;
+       unsigned size, i;
+
+       if (!cs->num_chunks) {
+               return 0;
+       }
+       /* get chunks */
+       INIT_LIST_HEAD(&p->validated);
+       p->idx = 0;
+       p->chunk_ib_idx = -1;
+       p->chunk_relocs_idx = -1;
+       p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
+       if (p->chunks_array == NULL) {
+               return -ENOMEM;
+       }
+       chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
+       if (DRM_COPY_FROM_USER(p->chunks_array, chunk_array_ptr,
+                              sizeof(uint64_t)*cs->num_chunks)) {
+               return -EFAULT;
+       }
+       p->nchunks = cs->num_chunks;
+       p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
+       if (p->chunks == NULL) {
+               return -ENOMEM;
+       }
+       for (i = 0; i < p->nchunks; i++) {
+               struct drm_radeon_cs_chunk __user **chunk_ptr = NULL;
+               struct drm_radeon_cs_chunk user_chunk;
+               uint32_t __user *cdata;
+
+               chunk_ptr = (void __user*)(unsigned long)p->chunks_array[i];
+               if (DRM_COPY_FROM_USER(&user_chunk, chunk_ptr,
+                                      sizeof(struct drm_radeon_cs_chunk))) {
+                       return -EFAULT;
+               }
+               p->chunks[i].chunk_id = user_chunk.chunk_id;
+               if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
+                       p->chunk_relocs_idx = i;
+               }
+               if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
+                       p->chunk_ib_idx = i;
+               }
+               p->chunks[i].length_dw = user_chunk.length_dw;
+               cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data;
+
+               p->chunks[i].kdata = NULL;
+               size = p->chunks[i].length_dw * sizeof(uint32_t);
+               p->chunks[i].kdata = kzalloc(size, GFP_KERNEL);
+               if (p->chunks[i].kdata == NULL) {
+                       return -ENOMEM;
+               }
+               if (DRM_COPY_FROM_USER(p->chunks[i].kdata, cdata, size)) {
+                       return -EFAULT;
+               }
+       }
+       if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) {
+               DRM_ERROR("cs IB too big: %d\n",
+                         p->chunks[p->chunk_ib_idx].length_dw);
+               return -EINVAL;
+       }
+       return 0;
+}
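
For orientation, the submission argument is doubly indirect: cs->chunks is a user pointer to an array of cs->num_chunks 64-bit values, each itself a user pointer to a chunk descriptor (chunk_id, length_dw, chunk_data), and chunk_data finally points at the dwords to copy in. A hedged userspace-side sketch of assembling one IB chunk; the struct and identifier names here are illustrative, not the real radeon_drm.h uapi:

#include <stdint.h>

struct cs_chunk_sketch {        /* mirrors the fields the kernel copies */
	uint32_t chunk_id;      /* relocs or IB chunk identifier */
	uint32_t length_dw;     /* payload length in dwords (IB: <= 16K) */
	uint64_t chunk_data;    /* user pointer to length_dw dwords */
};

static void build_ib_chunk(uint32_t *ib, uint32_t ib_dw, uint32_t ib_id,
			   struct cs_chunk_sketch *chunk,
			   uint64_t *chunk_array)
{
	chunk->chunk_id = ib_id;                     /* e.g. the IB chunk id */
	chunk->length_dw = ib_dw;
	chunk->chunk_data = (uint64_t)(uintptr_t)ib;
	chunk_array[0] = (uint64_t)(uintptr_t)chunk; /* what cs->chunks points at */
}
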
+
+/**
+ * radeon_cs_parser_fini() - clean parser states
+ * @parser:    parser structure holding parsing context.
+ * @error:     error number
+ *
+ * If error is set, then unvalidate the buffers, otherwise just free the
+ * memory used by the parsing context.
+ **/
+static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
+{
+       unsigned i;
+
+       if (error) {
+               radeon_object_list_unvalidate(&parser->validated);
+       } else {
+               radeon_object_list_clean(&parser->validated);
+       }
+       for (i = 0; i < parser->nrelocs; i++) {
+               if (parser->relocs[i].gobj) {
+                       mutex_lock(&parser->rdev->ddev->struct_mutex);
+                       drm_gem_object_unreference(parser->relocs[i].gobj);
+                       mutex_unlock(&parser->rdev->ddev->struct_mutex);
+               }
+       }
+       kfree(parser->relocs);
+       kfree(parser->relocs_ptr);
+       for (i = 0; i < parser->nchunks; i++) {
+               kfree(parser->chunks[i].kdata);
+       }
+       kfree(parser->chunks);
+       kfree(parser->chunks_array);
+       radeon_ib_free(parser->rdev, &parser->ib);
+}
+
+int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+{
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_cs_parser parser;
+       struct radeon_cs_chunk *ib_chunk;
+       int r;
+
+       mutex_lock(&rdev->cs_mutex);
+       if (rdev->gpu_lockup) {
+               mutex_unlock(&rdev->cs_mutex);
+               return -EINVAL;
+       }
+       /* initialize parser */
+       memset(&parser, 0, sizeof(struct radeon_cs_parser));
+       parser.filp = filp;
+       parser.rdev = rdev;
+       r = radeon_cs_parser_init(&parser, data);
+       if (r) {
+               DRM_ERROR("Failed to initialize parser !\n");
+               radeon_cs_parser_fini(&parser, r);
+               mutex_unlock(&rdev->cs_mutex);
+               return r;
+       }
+       r =  radeon_ib_get(rdev, &parser.ib);
+       if (r) {
+               DRM_ERROR("Failed to get ib !\n");
+               radeon_cs_parser_fini(&parser, r);
+               mutex_unlock(&rdev->cs_mutex);
+               return r;
+       }
+       r = radeon_cs_parser_relocs(&parser);
+       if (r) {
+               DRM_ERROR("Failed to parse relocation !\n");
+               radeon_cs_parser_fini(&parser, r);
+               mutex_unlock(&rdev->cs_mutex);
+               return r;
+       }
+       /* Copy the packet into the IB; the parser will read from the
+        * input memory (cached) and write to the IB (which can be
+        * uncached). */
+       ib_chunk = &parser.chunks[parser.chunk_ib_idx];
+       parser.ib->length_dw = ib_chunk->length_dw;
+       memcpy((void *)parser.ib->ptr, ib_chunk->kdata, ib_chunk->length_dw*4);
+       r = radeon_cs_parse(&parser);
+       if (r) {
+               DRM_ERROR("Invalid command stream !\n");
+               radeon_cs_parser_fini(&parser, r);
+               mutex_unlock(&rdev->cs_mutex);
+               return r;
+       }
+       r = radeon_ib_schedule(rdev, parser.ib);
+       if (r) {
+               DRM_ERROR("Failed to schedule IB !\n");
+       }
+       radeon_cs_parser_fini(&parser, r);
+       mutex_unlock(&rdev->cs_mutex);
+       return r;
+}
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
new file mode 100644 (file)
index 0000000..5232441
--- /dev/null
@@ -0,0 +1,252 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ */
+#include "drmP.h"
+#include "radeon_drm.h"
+#include "radeon.h"
+
+#define CURSOR_WIDTH 64
+#define CURSOR_HEIGHT 64
+
+static void radeon_lock_cursor(struct drm_crtc *crtc, bool lock)
+{
+       struct radeon_device *rdev = crtc->dev->dev_private;
+       struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+       uint32_t cur_lock;
+
+       if (ASIC_IS_AVIVO(rdev)) {
+               cur_lock = RREG32(AVIVO_D1CUR_UPDATE + radeon_crtc->crtc_offset);
+               if (lock)
+                       cur_lock |= AVIVO_D1CURSOR_UPDATE_LOCK;
+               else
+                       cur_lock &= ~AVIVO_D1CURSOR_UPDATE_LOCK;
+               WREG32(AVIVO_D1CUR_UPDATE + radeon_crtc->crtc_offset, cur_lock);
+       } else {
+               cur_lock = RREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset);
+               if (lock)
+                       cur_lock |= RADEON_CUR_LOCK;
+               else
+                       cur_lock &= ~RADEON_CUR_LOCK;
+               WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, cur_lock);
+       }
+}
+
+static void radeon_hide_cursor(struct drm_crtc *crtc)
+{
+       struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+       struct radeon_device *rdev = crtc->dev->dev_private;
+
+       if (ASIC_IS_AVIVO(rdev)) {
+               WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
+               WREG32(RADEON_MM_DATA, (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
+       } else {
+               switch (radeon_crtc->crtc_id) {
+               case 0:
+                       WREG32(RADEON_MM_INDEX, RADEON_CRTC_GEN_CNTL);
+                       break;
+               case 1:
+                       WREG32(RADEON_MM_INDEX, RADEON_CRTC2_GEN_CNTL);
+                       break;
+               default:
+                       return;
+               }
+               WREG32_P(RADEON_MM_DATA, 0, ~RADEON_CRTC_CUR_EN);
+       }
+}
+
+static void radeon_show_cursor(struct drm_crtc *crtc)
+{
+       struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+       struct radeon_device *rdev = crtc->dev->dev_private;
+
+       if (ASIC_IS_AVIVO(rdev)) {
+               WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
+               WREG32(RADEON_MM_DATA, AVIVO_D1CURSOR_EN |
+                            (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
+       } else {
+               switch (radeon_crtc->crtc_id) {
+               case 0:
+                       WREG32(RADEON_MM_INDEX, RADEON_CRTC_GEN_CNTL);
+                       break;
+               case 1:
+                       WREG32(RADEON_MM_INDEX, RADEON_CRTC2_GEN_CNTL);
+                       break;
+               default:
+                       return;
+               }
+
+               WREG32_P(RADEON_MM_DATA, (RADEON_CRTC_CUR_EN |
+                                         (RADEON_CRTC_CUR_MODE_24BPP << RADEON_CRTC_CUR_MODE_SHIFT)),
+                        ~(RADEON_CRTC_CUR_EN | RADEON_CRTC_CUR_MODE_MASK));
+       }
+}
+
+static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj,
+                             uint32_t gpu_addr)
+{
+       struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+       struct radeon_device *rdev = crtc->dev->dev_private;
+
+       if (ASIC_IS_AVIVO(rdev))
+               WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr);
+       else
+               /* offset is from DISP(2)_BASE_ADDRESS */
+               WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, gpu_addr);
+}
+
+int radeon_crtc_cursor_set(struct drm_crtc *crtc,
+                          struct drm_file *file_priv,
+                          uint32_t handle,
+                          uint32_t width,
+                          uint32_t height)
+{
+       struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+       struct drm_gem_object *obj;
+       uint64_t gpu_addr;
+       int ret;
+
+       if (!handle) {
+               /* turn off cursor */
+               radeon_hide_cursor(crtc);
+               obj = NULL;
+               goto unpin;
+       }
+
+       if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
+               DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
+               return -EINVAL;
+       }
+
+       radeon_crtc->cursor_width = width;
+       radeon_crtc->cursor_height = height;
+
+       obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
+       if (!obj) {
+               DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, radeon_crtc->crtc_id);
+               return -EINVAL;
+       }
+
+       ret = radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
+       if (ret)
+               goto fail;
+
+       radeon_lock_cursor(crtc, true);
+       /* XXX only 27 bit offset for legacy cursor */
+       radeon_set_cursor(crtc, obj, gpu_addr);
+       radeon_show_cursor(crtc);
+       radeon_lock_cursor(crtc, false);
+
+unpin:
+       if (radeon_crtc->cursor_bo) {
+               radeon_gem_object_unpin(radeon_crtc->cursor_bo);
+               mutex_lock(&crtc->dev->struct_mutex);
+               drm_gem_object_unreference(radeon_crtc->cursor_bo);
+               mutex_unlock(&crtc->dev->struct_mutex);
+       }
+
+       radeon_crtc->cursor_bo = obj;
+       return 0;
+fail:
+       mutex_lock(&crtc->dev->struct_mutex);
+       drm_gem_object_unreference(obj);
+       mutex_unlock(&crtc->dev->struct_mutex);
+
+       return ret;
+}
+
+int radeon_crtc_cursor_move(struct drm_crtc *crtc,
+                           int x, int y)
+{
+       struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+       struct radeon_device *rdev = crtc->dev->dev_private;
+       int xorigin = 0, yorigin = 0;
+
+       if (x < 0)
+               xorigin = -x + 1;
+       if (y < 0)
+               yorigin = -y + 1;
+       if (xorigin >= CURSOR_WIDTH)
+               xorigin = CURSOR_WIDTH - 1;
+       if (yorigin >= CURSOR_HEIGHT)
+               yorigin = CURSOR_HEIGHT - 1;
+
+       radeon_lock_cursor(crtc, true);
+       if (ASIC_IS_AVIVO(rdev)) {
+               int w = radeon_crtc->cursor_width;
+               int i = 0;
+               struct drm_crtc *crtc_p;
+
+               /* avivo cursors are offset into the total surface */
+               x += crtc->x;
+               y += crtc->y;
+               DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
+
+               /* avivo cursor images can't end on a 128 pixel boundary or
+                * go past the end of the frame if both crtcs are enabled
+                */
+               list_for_each_entry(crtc_p, &crtc->dev->mode_config.crtc_list, head) {
+                       if (crtc_p->enabled)
+                               i++;
+               }
+               if (i > 1) {
+                       int cursor_end, frame_end;
+
+                       cursor_end = x - xorigin + w;
+                       frame_end = crtc->x + crtc->mode.crtc_hdisplay;
+                       if (cursor_end >= frame_end) {
+                               w = w - (cursor_end - frame_end);
+                               if (!(frame_end & 0x7f))
+                                       w--;
+                       } else {
+                               if (!(cursor_end & 0x7f))
+                                       w--;
+                       }
+                       if (w <= 0)
+                               w = 1;
+               }
+
+               WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset,
+                            ((xorigin ? 0 : x) << 16) |
+                            (yorigin ? 0 : y));
+               WREG32(AVIVO_D1CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
+               WREG32(AVIVO_D1CUR_SIZE + radeon_crtc->crtc_offset,
+                      ((w - 1) << 16) | (radeon_crtc->cursor_height - 1));
+       } else {
+               if (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)
+                       y *= 2;
+
+               WREG32(RADEON_CUR_HORZ_VERT_OFF + radeon_crtc->crtc_offset,
+                      (RADEON_CUR_LOCK
+                       | (xorigin << 16)
+                       | yorigin));
+               WREG32(RADEON_CUR_HORZ_VERT_POSN + radeon_crtc->crtc_offset,
+                      (RADEON_CUR_LOCK
+                       | ((xorigin ? 0 : x) << 16)
+                       | (yorigin ? 0 : y)));
+       }
+       radeon_lock_cursor(crtc, false);
+
+       return 0;
+}
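
The width clamp in the AVIVO branch above works around hardware that misbehaves when a cursor image ends exactly on a 128-pixel boundary, or runs past the frame edge while both CRTCs are scanning out. A stand-alone sketch of the same arithmetic with one worked example; the helper name is illustrative:

/* Same trimming logic as above, isolated for clarity. */
static int avivo_cursor_width(int x, int xorigin, int w, int frame_end)
{
	int cursor_end = x - xorigin + w;

	if (cursor_end >= frame_end) {
		w -= cursor_end - frame_end;    /* pull the cursor back inside the frame */
		if (!(frame_end & 0x7f))        /* frame edge sits on a 128px boundary */
			w--;
	} else if (!(cursor_end & 0x7f)) {      /* cursor edge sits on a 128px boundary */
		w--;
	}
	return w > 0 ? w : 1;
}

/* Example: x = 1216, xorigin = 0, w = 64, frame_end = 1280.
 * cursor_end = 1280 >= frame_end, so nothing is trimmed for overlap,
 * but 1280 & 0x7f == 0, so the programmed width drops to 63. */
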
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
new file mode 100644 (file)
index 0000000..5fd2b63
--- /dev/null
@@ -0,0 +1,813 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <linux/console.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/radeon_drm.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "atom.h"
+
+/*
+ * GPU scratch register helper functions.
+ */
+static void radeon_scratch_init(struct radeon_device *rdev)
+{
+       int i;
+
+       /* FIXME: check this out */
+       if (rdev->family < CHIP_R300) {
+               rdev->scratch.num_reg = 5;
+       } else {
+               rdev->scratch.num_reg = 7;
+       }
+       for (i = 0; i < rdev->scratch.num_reg; i++) {
+               rdev->scratch.free[i] = true;
+               rdev->scratch.reg[i] = RADEON_SCRATCH_REG0 + (i * 4);
+       }
+}
+
+int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
+{
+       int i;
+
+       for (i = 0; i < rdev->scratch.num_reg; i++) {
+               if (rdev->scratch.free[i]) {
+                       rdev->scratch.free[i] = false;
+                       *reg = rdev->scratch.reg[i];
+                       return 0;
+               }
+       }
+       return -EINVAL;
+}
+
+void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
+{
+       int i;
+
+       for (i = 0; i < rdev->scratch.num_reg; i++) {
+               if (rdev->scratch.reg[i] == reg) {
+                       rdev->scratch.free[i] = true;
+                       return;
+               }
+       }
+}
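
The two functions above implement a small fixed-size free list over the scratch register range. A hedged in-kernel usage sketch, assuming the declarations added elsewhere in this patch (radeon.h, WREG32); the value written is only a placeholder test pattern:

static int scratch_demo(struct radeon_device *rdev)
{
	uint32_t reg;
	int r;

	r = radeon_scratch_get(rdev, &reg);     /* grab a free scratch register */
	if (r)
		return r;                       /* pool exhausted */
	WREG32(reg, 0xCAFEDEAD);                /* placeholder marker value */
	/* ... let the CP or a test write the register back ... */
	radeon_scratch_free(rdev, reg);         /* return it to the pool */
	return 0;
}
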
+
+/*
+ * MC common functions
+ */
+int radeon_mc_setup(struct radeon_device *rdev)
+{
+       uint32_t tmp;
+
+       /* Some chips have an "issue" with the memory controller, the
+        * location must be aligned to the size. We just align it down;
+        * too bad if we walk over the top of system memory, we don't
+        * use DMA without remapping anyway.
+        * Affected chips are rv280, all r3xx, and all r4xx, but not IGP.
+        */
+       /* FGLRX seems to set it up like this: VRAM at 0, then GART.
+        */
+       /*
+        * Note: from R6xx the address space is 40 bits, but here we only
+        * use 32 bits (we have yet to see a card that would exhaust a 4G
+        * address space).
+        */
+       if (rdev->mc.vram_location != 0xFFFFFFFFUL) {
+               /* vram location was already set up, try to put gtt after
+                * it if it fits */
+               tmp = rdev->mc.vram_location + rdev->mc.vram_size;
+               tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1);
+               if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
+                       rdev->mc.gtt_location = tmp;
+               } else {
+                       if (rdev->mc.gtt_size >= rdev->mc.vram_location) {
+                               printk(KERN_ERR "[drm] GTT too big to fit "
+                                      "before or after vram location.\n");
+                               return -EINVAL;
+                       }
+                       rdev->mc.gtt_location = 0;
+               }
+       } else if (rdev->mc.gtt_location != 0xFFFFFFFFUL) {
+               /* gtt location was already set up, try to put vram before
+                * it if it fits */
+               if (rdev->mc.vram_size < rdev->mc.gtt_location) {
+                       rdev->mc.vram_location = 0;
+               } else {
+                       tmp = rdev->mc.gtt_location + rdev->mc.gtt_size;
+                       tmp += (rdev->mc.vram_size - 1);
+                       tmp &= ~(rdev->mc.vram_size - 1);
+                       if ((0xFFFFFFFFUL - tmp) >= rdev->mc.vram_size) {
+                               rdev->mc.vram_location = tmp;
+                       } else {
+                               printk(KERN_ERR "[drm] vram too big to fit "
+                                      "before or after GTT location.\n");
+                               return -EINVAL;
+                       }
+               }
+       } else {
+               rdev->mc.vram_location = 0;
+               rdev->mc.gtt_location = rdev->mc.vram_size;
+       }
+       DRM_INFO("radeon: VRAM %uM\n", rdev->mc.vram_size >> 20);
+       DRM_INFO("radeon: VRAM from 0x%08X to 0x%08X\n",
+                rdev->mc.vram_location,
+                rdev->mc.vram_location + rdev->mc.vram_size - 1);
+       DRM_INFO("radeon: GTT %uM\n", rdev->mc.gtt_size >> 20);
+       DRM_INFO("radeon: GTT from 0x%08X to 0x%08X\n",
+                rdev->mc.gtt_location,
+                rdev->mc.gtt_location + rdev->mc.gtt_size - 1);
+       return 0;
+}
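
The placement above leans on the usual power-of-two round-up idiom, (addr + size - 1) & ~(size - 1), so it implicitly assumes gtt_size and vram_size are powers of two. A short worked example with assumed sizes (256M of VRAM at offset 0, 512M of GTT):

/*
 * tmp = vram_location + vram_size             = 0x10000000 (256M)
 * tmp = (tmp + gtt_size - 1) & ~(gtt_size - 1)
 *     = (0x10000000 + 0x1FFFFFFF) & 0xE0000000
 *     = 0x20000000 (512M)
 * so the GTT lands at the first gtt_size-aligned address at or above
 * the end of VRAM, and it fits because 0xFFFFFFFF - 0x20000000 >= 512M.
 */
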
+
+
+/*
+ * GPU helper functions.
+ */
+static bool radeon_card_posted(struct radeon_device *rdev)
+{
+       uint32_t reg;
+
+       /* first check CRTCs */
+       if (ASIC_IS_AVIVO(rdev)) {
+               reg = RREG32(AVIVO_D1CRTC_CONTROL) |
+                     RREG32(AVIVO_D2CRTC_CONTROL);
+               if (reg & AVIVO_CRTC_EN) {
+                       return true;
+               }
+       } else {
+               reg = RREG32(RADEON_CRTC_GEN_CNTL) |
+                     RREG32(RADEON_CRTC2_GEN_CNTL);
+               if (reg & RADEON_CRTC_EN) {
+                       return true;
+               }
+       }
+
+       /* then check MEM_SIZE, in case the crtcs are off */
+       if (rdev->family >= CHIP_R600)
+               reg = RREG32(R600_CONFIG_MEMSIZE);
+       else
+               reg = RREG32(RADEON_CONFIG_MEMSIZE);
+
+       if (reg)
+               return true;
+
+       return false;
+
+}
+
+
+/*
+ * Register accessor functions.
+ */
+uint32_t radeon_invalid_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+       DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
+       BUG_ON(1);
+       return 0;
+}
+
+void radeon_invalid_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+       DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
+                 reg, v);
+       BUG_ON(1);
+}
+
+void radeon_register_accessor_init(struct radeon_device *rdev)
+{
+       rdev->mm_rreg = &r100_mm_rreg;
+       rdev->mm_wreg = &r100_mm_wreg;
+       rdev->mc_rreg = &radeon_invalid_rreg;
+       rdev->mc_wreg = &radeon_invalid_wreg;
+       rdev->pll_rreg = &radeon_invalid_rreg;
+       rdev->pll_wreg = &radeon_invalid_wreg;
+       rdev->pcie_rreg = &radeon_invalid_rreg;
+       rdev->pcie_wreg = &radeon_invalid_wreg;
+       rdev->pciep_rreg = &radeon_invalid_rreg;
+       rdev->pciep_wreg = &radeon_invalid_wreg;
+
+       /* Don't change order as we are overriding accessors. */
+       if (rdev->family < CHIP_RV515) {
+               rdev->pcie_rreg = &rv370_pcie_rreg;
+               rdev->pcie_wreg = &rv370_pcie_wreg;
+       }
+       if (rdev->family >= CHIP_RV515) {
+               rdev->pcie_rreg = &rv515_pcie_rreg;
+               rdev->pcie_wreg = &rv515_pcie_wreg;
+       }
+       /* FIXME: not sure here */
+       if (rdev->family <= CHIP_R580) {
+               rdev->pll_rreg = &r100_pll_rreg;
+               rdev->pll_wreg = &r100_pll_wreg;
+       }
+       if (rdev->family >= CHIP_RV515) {
+               rdev->mc_rreg = &rv515_mc_rreg;
+               rdev->mc_wreg = &rv515_mc_wreg;
+       }
+       if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) {
+               rdev->mc_rreg = &rs400_mc_rreg;
+               rdev->mc_wreg = &rs400_mc_wreg;
+       }
+       if (rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
+               rdev->mc_rreg = &rs690_mc_rreg;
+               rdev->mc_wreg = &rs690_mc_wreg;
+       }
+       if (rdev->family == CHIP_RS600) {
+               rdev->mc_rreg = &rs600_mc_rreg;
+               rdev->mc_wreg = &rs600_mc_wreg;
+       }
+       if (rdev->family >= CHIP_R600) {
+               rdev->pciep_rreg = &r600_pciep_rreg;
+               rdev->pciep_wreg = &r600_pciep_wreg;
+       }
+}
+
+
+/*
+ * ASIC
+ */
+int radeon_asic_init(struct radeon_device *rdev)
+{
+       radeon_register_accessor_init(rdev);
+       switch (rdev->family) {
+       case CHIP_R100:
+       case CHIP_RV100:
+       case CHIP_RS100:
+       case CHIP_RV200:
+       case CHIP_RS200:
+       case CHIP_R200:
+       case CHIP_RV250:
+       case CHIP_RS300:
+       case CHIP_RV280:
+               rdev->asic = &r100_asic;
+               break;
+       case CHIP_R300:
+       case CHIP_R350:
+       case CHIP_RV350:
+       case CHIP_RV380:
+               rdev->asic = &r300_asic;
+               break;
+       case CHIP_R420:
+       case CHIP_R423:
+       case CHIP_RV410:
+               rdev->asic = &r420_asic;
+               break;
+       case CHIP_RS400:
+       case CHIP_RS480:
+               rdev->asic = &rs400_asic;
+               break;
+       case CHIP_RS600:
+               rdev->asic = &rs600_asic;
+               break;
+       case CHIP_RS690:
+       case CHIP_RS740:
+               rdev->asic = &rs690_asic;
+               break;
+       case CHIP_RV515:
+               rdev->asic = &rv515_asic;
+               break;
+       case CHIP_R520:
+       case CHIP_RV530:
+       case CHIP_RV560:
+       case CHIP_RV570:
+       case CHIP_R580:
+               rdev->asic = &r520_asic;
+               break;
+       case CHIP_R600:
+       case CHIP_RV610:
+       case CHIP_RV630:
+       case CHIP_RV620:
+       case CHIP_RV635:
+       case CHIP_RV670:
+       case CHIP_RS780:
+       case CHIP_RV770:
+       case CHIP_RV730:
+       case CHIP_RV710:
+       default:
+               /* FIXME: not supported yet */
+               return -EINVAL;
+       }
+       return 0;
+}
+
+
+/*
+ * Wrapper around modesetting bits.
+ */
+int radeon_clocks_init(struct radeon_device *rdev)
+{
+       int r;
+
+       radeon_get_clock_info(rdev->ddev);
+       r = radeon_static_clocks_init(rdev->ddev);
+       if (r) {
+               return r;
+       }
+       DRM_INFO("Clocks initialized !\n");
+       return 0;
+}
+
+void radeon_clocks_fini(struct radeon_device *rdev)
+{
+}
+
+/* ATOM accessor methods */
+static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
+{
+       struct radeon_device *rdev = info->dev->dev_private;
+       uint32_t r;
+
+       r = rdev->pll_rreg(rdev, reg);
+       return r;
+}
+
+static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
+{
+       struct radeon_device *rdev = info->dev->dev_private;
+
+       rdev->pll_wreg(rdev, reg, val);
+}
+
+static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
+{
+       struct radeon_device *rdev = info->dev->dev_private;
+       uint32_t r;
+
+       r = rdev->mc_rreg(rdev, reg);
+       return r;
+}
+
+static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
+{
+       struct radeon_device *rdev = info->dev->dev_private;
+
+       rdev->mc_wreg(rdev, reg, val);
+}
+
+static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
+{
+       struct radeon_device *rdev = info->dev->dev_private;
+
+       WREG32(reg*4, val);
+}
+
+static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
+{
+       struct radeon_device *rdev = info->dev->dev_private;
+       uint32_t r;
+
+       r = RREG32(reg*4);
+       return r;
+}
+
+static struct card_info atom_card_info = {
+       .dev = NULL,
+       .reg_read = cail_reg_read,
+       .reg_write = cail_reg_write,
+       .mc_read = cail_mc_read,
+       .mc_write = cail_mc_write,
+       .pll_read = cail_pll_read,
+       .pll_write = cail_pll_write,
+};
+
+int radeon_atombios_init(struct radeon_device *rdev)
+{
+       atom_card_info.dev = rdev->ddev;
+       rdev->mode_info.atom_context = atom_parse(&atom_card_info, rdev->bios);
+       radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
+       return 0;
+}
+
+void radeon_atombios_fini(struct radeon_device *rdev)
+{
+       kfree(rdev->mode_info.atom_context);
+}
+
+int radeon_combios_init(struct radeon_device *rdev)
+{
+       radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
+       return 0;
+}
+
+void radeon_combios_fini(struct radeon_device *rdev)
+{
+}
+
+int radeon_modeset_init(struct radeon_device *rdev);
+void radeon_modeset_fini(struct radeon_device *rdev);
+
+
+/*
+ * Radeon device.
+ */
+int radeon_device_init(struct radeon_device *rdev,
+                      struct drm_device *ddev,
+                      struct pci_dev *pdev,
+                      uint32_t flags)
+{
+       int r, ret;
+
+       DRM_INFO("radeon: Initializing kernel modesetting.\n");
+       rdev->shutdown = false;
+       rdev->ddev = ddev;
+       rdev->pdev = pdev;
+       rdev->flags = flags;
+       rdev->family = flags & RADEON_FAMILY_MASK;
+       rdev->is_atom_bios = false;
+       rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
+       rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
+       rdev->gpu_lockup = false;
+       /* mutex initialization is all done here so we
+        * can call these functions again without locking issues */
+       mutex_init(&rdev->cs_mutex);
+       mutex_init(&rdev->ib_pool.mutex);
+       mutex_init(&rdev->cp.mutex);
+       rwlock_init(&rdev->fence_drv.lock);
+
+       if (radeon_agpmode == -1) {
+               rdev->flags &= ~RADEON_IS_AGP;
+               if (rdev->family > CHIP_RV515 ||
+                   rdev->family == CHIP_RV380 ||
+                   rdev->family == CHIP_RV410 ||
+                   rdev->family == CHIP_R423) {
+                       DRM_INFO("Forcing AGP to PCIE mode\n");
+                       rdev->flags |= RADEON_IS_PCIE;
+               } else {
+                       DRM_INFO("Forcing AGP to PCI mode\n");
+                       rdev->flags |= RADEON_IS_PCI;
+               }
+       }
+
+       /* Set asic functions */
+       r = radeon_asic_init(rdev);
+       if (r) {
+               return r;
+       }
+
+       /* Report DMA addressing limitation */
+       r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
+       if (r) {
+               printk(KERN_WARNING "radeon: No suitable DMA available.\n");
+       }
+
+       /* Registers mapping */
+       /* TODO: block userspace mapping of io register */
+       rdev->rmmio_base = drm_get_resource_start(rdev->ddev, 2);
+       rdev->rmmio_size = drm_get_resource_len(rdev->ddev, 2);
+       rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
+       if (rdev->rmmio == NULL) {
+               return -ENOMEM;
+       }
+       DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
+       DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);
+
+       /* Setup errata flags */
+       radeon_errata(rdev);
+       /* Initialize scratch registers */
+       radeon_scratch_init(rdev);
+
+       /* TODO: disable VGA need to use VGA request */
+       /* BIOS*/
+       if (!radeon_get_bios(rdev)) {
+               if (ASIC_IS_AVIVO(rdev))
+                       return -EINVAL;
+       }
+       if (rdev->is_atom_bios) {
+               r = radeon_atombios_init(rdev);
+               if (r) {
+                       return r;
+               }
+       } else {
+               r = radeon_combios_init(rdev);
+               if (r) {
+                       return r;
+               }
+       }
+       /* Reset gpu before posting otherwise ATOM will enter infinite loop */
+       if (radeon_gpu_reset(rdev)) {
+               /* FIXME: what do we want to do here ? */
+       }
+       /* check if cards are posted or not */
+       if (!radeon_card_posted(rdev) && rdev->bios) {
+               DRM_INFO("GPU not posted. Posting now...\n");
+               if (rdev->is_atom_bios) {
+                       atom_asic_init(rdev->mode_info.atom_context);
+               } else {
+                       radeon_combios_asic_init(rdev->ddev);
+               }
+       }
+       /* Get vram information */
+       radeon_vram_info(rdev);
+       /* Device is severely broken if aper size > vram size.
+        * for RN50/M6/M7 - Novell bug 204882 ?
+        */
+       if (rdev->mc.vram_size < rdev->mc.aper_size) {
+               rdev->mc.aper_size = rdev->mc.vram_size;
+       }
+       /* Add an MTRR for the VRAM */
+       rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
+                                     MTRR_TYPE_WRCOMB, 1);
+       DRM_INFO("Detected VRAM RAM=%uM, BAR=%uM\n",
+                rdev->mc.vram_size >> 20,
+                (unsigned)rdev->mc.aper_size >> 20);
+       DRM_INFO("RAM width %dbits %cDR\n",
+                rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
+       /* Initialize clocks */
+       r = radeon_clocks_init(rdev);
+       if (r) {
+               return r;
+       }
+       /* Initialize memory controller (also test AGP) */
+       r = radeon_mc_init(rdev);
+       if (r) {
+               return r;
+       }
+       /* Fence driver */
+       r = radeon_fence_driver_init(rdev);
+       if (r) {
+               return r;
+       }
+       r = radeon_irq_kms_init(rdev);
+       if (r) {
+               return r;
+       }
+       /* Memory manager */
+       r = radeon_object_init(rdev);
+       if (r) {
+               return r;
+       }
+       /* Initialize GART (initialize after TTM so we can allocate
+        * memory through TTM but finalize after TTM) */
+       r = radeon_gart_enable(rdev);
+       if (!r) {
+               r = radeon_gem_init(rdev);
+       }
+
+       /* 1M ring buffer */
+       if (!r) {
+               r = radeon_cp_init(rdev, 1024 * 1024);
+       }
+       if (!r) {
+               r = radeon_wb_init(rdev);
+               if (r) {
+                       DRM_ERROR("radeon: failed initializing WB (%d).\n", r);
+                       return r;
+               }
+       }
+       if (!r) {
+               r = radeon_ib_pool_init(rdev);
+               if (r) {
+                       DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
+                       return r;
+               }
+       }
+       if (!r) {
+               r = radeon_ib_test(rdev);
+               if (r) {
+                       DRM_ERROR("radeon: failed testing IB (%d).\n", r);
+                       return r;
+               }
+       }
+       ret = r;
+       r = radeon_modeset_init(rdev);
+       if (r) {
+               return r;
+       }
+       if (rdev->fbdev_rfb && rdev->fbdev_rfb->obj) {
+               rdev->fbdev_robj = rdev->fbdev_rfb->obj->driver_private;
+       }
+       if (!ret) {
+               DRM_INFO("radeon: kernel modesetting successfully initialized.\n");
+       }
+       if (radeon_benchmarking) {
+               radeon_benchmark(rdev);
+       }
+       return ret;
+}
+
+void radeon_device_fini(struct radeon_device *rdev)
+{
+       if (rdev == NULL || rdev->rmmio == NULL) {
+               return;
+       }
+       DRM_INFO("radeon: finishing device.\n");
+       rdev->shutdown = true;
+       /* Order matters, so be careful if you rearrange anything */
+       radeon_modeset_fini(rdev);
+       radeon_ib_pool_fini(rdev);
+       radeon_cp_fini(rdev);
+       radeon_wb_fini(rdev);
+       radeon_gem_fini(rdev);
+       radeon_object_fini(rdev);
+       /* mc_fini must be after object_fini */
+       radeon_mc_fini(rdev);
+#if __OS_HAS_AGP
+       radeon_agp_fini(rdev);
+#endif
+       radeon_irq_kms_fini(rdev);
+       radeon_fence_driver_fini(rdev);
+       radeon_clocks_fini(rdev);
+       if (rdev->is_atom_bios) {
+               radeon_atombios_fini(rdev);
+       } else {
+               radeon_combios_fini(rdev);
+       }
+       kfree(rdev->bios);
+       rdev->bios = NULL;
+       iounmap(rdev->rmmio);
+       rdev->rmmio = NULL;
+}
+
+
+/*
+ * Suspend & resume.
+ */
+int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
+{
+       struct radeon_device *rdev = dev->dev_private;
+       struct drm_crtc *crtc;
+
+       if (dev == NULL || rdev == NULL) {
+               return -ENODEV;
+       }
+       if (state.event == PM_EVENT_PRETHAW) {
+               return 0;
+       }
+       /* unpin the front buffers */
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
+               struct radeon_object *robj;
+
+               if (rfb == NULL || rfb->obj == NULL) {
+                       continue;
+               }
+               robj = rfb->obj->driver_private;
+               if (robj != rdev->fbdev_robj) {
+                       radeon_object_unpin(robj);
+               }
+       }
+       /* evict vram memory */
+       radeon_object_evict_vram(rdev);
+       /* wait for gpu to finish processing current batch */
+       radeon_fence_wait_last(rdev);
+
+       radeon_cp_disable(rdev);
+       radeon_gart_disable(rdev);
+
+       /* evict remaining vram memory */
+       radeon_object_evict_vram(rdev);
+
+       rdev->irq.sw_int = false;
+       radeon_irq_set(rdev);
+
+       pci_save_state(dev->pdev);
+       if (state.event == PM_EVENT_SUSPEND) {
+               /* Shut down the device */
+               pci_disable_device(dev->pdev);
+               pci_set_power_state(dev->pdev, PCI_D3hot);
+       }
+       acquire_console_sem();
+       fb_set_suspend(rdev->fbdev_info, 1);
+       release_console_sem();
+       return 0;
+}
+
+int radeon_resume_kms(struct drm_device *dev)
+{
+       struct radeon_device *rdev = dev->dev_private;
+       int r;
+
+       acquire_console_sem();
+       pci_set_power_state(dev->pdev, PCI_D0);
+       pci_restore_state(dev->pdev);
+       if (pci_enable_device(dev->pdev)) {
+               release_console_sem();
+               return -1;
+       }
+       pci_set_master(dev->pdev);
+       /* Reset gpu before posting otherwise ATOM will enter infinite loop */
+       if (radeon_gpu_reset(rdev)) {
+               /* FIXME: what do we want to do here ? */
+       }
+       /* post card */
+       if (rdev->is_atom_bios) {
+               atom_asic_init(rdev->mode_info.atom_context);
+       } else {
+               radeon_combios_asic_init(rdev->ddev);
+       }
+       /* Initialize clocks */
+       r = radeon_clocks_init(rdev);
+       if (r) {
+               release_console_sem();
+               return r;
+       }
+       /* Enable IRQ */
+       rdev->irq.sw_int = true;
+       radeon_irq_set(rdev);
+       /* Initialize GPU Memory Controller */
+       r = radeon_mc_init(rdev);
+       if (r) {
+               goto out;
+       }
+       r = radeon_gart_enable(rdev);
+       if (r) {
+               goto out;
+       }
+       r = radeon_cp_init(rdev, rdev->cp.ring_size);
+       if (r) {
+               goto out;
+       }
+out:
+       fb_set_suspend(rdev->fbdev_info, 0);
+       release_console_sem();
+
+       /* blat the mode back in */
+       drm_helper_resume_force_mode(dev);
+       return 0;
+}
+
+
+/*
+ * Debugfs
+ */
+struct radeon_debugfs {
+       struct drm_info_list    *files;
+       unsigned                num_files;
+};
+static struct radeon_debugfs _radeon_debugfs[RADEON_DEBUGFS_MAX_NUM_FILES];
+static unsigned _radeon_debugfs_count = 0;
+
+int radeon_debugfs_add_files(struct radeon_device *rdev,
+                            struct drm_info_list *files,
+                            unsigned nfiles)
+{
+       unsigned i;
+
+       for (i = 0; i < _radeon_debugfs_count; i++) {
+               if (_radeon_debugfs[i].files == files) {
+                       /* Already registered */
+                       return 0;
+               }
+       }
+       if ((_radeon_debugfs_count + nfiles) > RADEON_DEBUGFS_MAX_NUM_FILES) {
+               DRM_ERROR("Reached maximum number of debugfs files.\n");
+               DRM_ERROR("Report this so we can increase RADEON_DEBUGFS_MAX_NUM_FILES.\n");
+               return -EINVAL;
+       }
+       _radeon_debugfs[_radeon_debugfs_count].files = files;
+       _radeon_debugfs[_radeon_debugfs_count].num_files = nfiles;
+       _radeon_debugfs_count++;
+#if defined(CONFIG_DEBUG_FS)
+       drm_debugfs_create_files(files, nfiles,
+                                rdev->ddev->control->debugfs_root,
+                                rdev->ddev->control);
+       drm_debugfs_create_files(files, nfiles,
+                                rdev->ddev->primary->debugfs_root,
+                                rdev->ddev->primary);
+#endif
+       return 0;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+int radeon_debugfs_init(struct drm_minor *minor)
+{
+       return 0;
+}
+
+void radeon_debugfs_cleanup(struct drm_minor *minor)
+{
+       unsigned i;
+
+       for (i = 0; i < _radeon_debugfs_count; i++) {
+               drm_debugfs_remove_files(_radeon_debugfs[i].files,
+                                        _radeon_debugfs[i].num_files, minor);
+       }
+}
+#endif
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
new file mode 100644 (file)
index 0000000..5452bb9
--- /dev/null
@@ -0,0 +1,692 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ */
+#include "drmP.h"
+#include "radeon_drm.h"
+#include "radeon.h"
+
+#include "atom.h"
+#include <asm/div64.h>
+
+#include "drm_crtc_helper.h"
+#include "drm_edid.h"
+
+static int radeon_ddc_dump(struct drm_connector *connector);
+
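+/*
+ * Program the 256-entry, 10-bits-per-channel gamma LUT of an AVIVO
+ * CRTC from the radeon_crtc lut_r/g/b tables.
+ */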
+static void avivo_crtc_load_lut(struct drm_crtc *crtc)
+{
+       struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       int i;
+
+       DRM_DEBUG("%d\n", radeon_crtc->crtc_id);
+       WREG32(AVIVO_DC_LUTA_CONTROL + radeon_crtc->crtc_offset, 0);
+
+       WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
+       WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0);
+       WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0);
+
+       WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff);
+       WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
+       WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);
+
+       WREG32(AVIVO_DC_LUT_RW_SELECT, radeon_crtc->crtc_id);
+       WREG32(AVIVO_DC_LUT_RW_MODE, 0);
+       WREG32(AVIVO_DC_LUT_WRITE_EN_MASK, 0x0000003f);
+
+       WREG8(AVIVO_DC_LUT_RW_INDEX, 0);
+       for (i = 0; i < 256; i++) {
+               WREG32(AVIVO_DC_LUT_30_COLOR,
+                            (radeon_crtc->lut_r[i] << 20) |
+                            (radeon_crtc->lut_g[i] << 10) |
+                            (radeon_crtc->lut_b[i] << 0));
+       }
+
+       WREG32(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id);
+}
+
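+/*
+ * Legacy (pre-AVIVO) LUT load: select the palette for this CRTC via
+ * DAC_CNTL2 and write all 256 entries through PALETTE_30_DATA.
+ */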
+static void legacy_crtc_load_lut(struct drm_crtc *crtc)
+{
+       struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       int i;
+       uint32_t dac2_cntl;
+
+       dac2_cntl = RREG32(RADEON_DAC_CNTL2);
+       if (radeon_crtc->crtc_id == 0)
+               dac2_cntl &= (uint32_t)~RADEON_DAC2_PALETTE_ACC_CTL;
+       else
+               dac2_cntl |= RADEON_DAC2_PALETTE_ACC_CTL;
+       WREG32(RADEON_DAC_CNTL2, dac2_cntl);
+
+       WREG8(RADEON_PALETTE_INDEX, 0);
+       for (i = 0; i < 256; i++) {
+               WREG32(RADEON_PALETTE_30_DATA,
+                            (radeon_crtc->lut_r[i] << 20) |
+                            (radeon_crtc->lut_g[i] << 10) |
+                            (radeon_crtc->lut_b[i] << 0));
+       }
+}
+
+void radeon_crtc_load_lut(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       struct radeon_device *rdev = dev->dev_private;
+
+       if (!crtc->enabled)
+               return;
+
+       if (ASIC_IS_AVIVO(rdev))
+               avivo_crtc_load_lut(crtc);
+       else
+               legacy_crtc_load_lut(crtc);
+}
+
+/** Sets the color ramps on behalf of RandR */
+void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+                             u16 blue, int regno)
+{
+       struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
+       if (regno == 0)
+               DRM_DEBUG("gamma set %d\n", radeon_crtc->crtc_id);
+       radeon_crtc->lut_r[regno] = red >> 6;
+       radeon_crtc->lut_g[regno] = green >> 6;
+       radeon_crtc->lut_b[regno] = blue >> 6;
+}
+
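+/*
+ * drm gamma_set hook.  For 16 bpp framebuffers the 32 red/blue and
+ * 64 green ramp entries are replicated to fill the 256-entry hardware
+ * LUT; other depths map the 256 entries directly.
+ */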
+static void radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+                                 u16 *blue, uint32_t size)
+{
+       struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+       int i, j;
+
+       if (size != 256) {
+               return;
+       }
+       if (crtc->fb == NULL) {
+               return;
+       }
+
+       if (crtc->fb->depth == 16) {
+               for (i = 0; i < 64; i++) {
+                       if (i <= 31) {
+                               for (j = 0; j < 8; j++) {
+                                       radeon_crtc->lut_r[i * 8 + j] = red[i] >> 6;
+                                       radeon_crtc->lut_b[i * 8 + j] = blue[i] >> 6;
+                               }
+                       }
+                       for (j = 0; j < 4; j++)
+                               radeon_crtc->lut_g[i * 4 + j] = green[i] >> 6;
+               }
+       } else {
+               for (i = 0; i < 256; i++) {
+                       radeon_crtc->lut_r[i] = red[i] >> 6;
+                       radeon_crtc->lut_g[i] = green[i] >> 6;
+                       radeon_crtc->lut_b[i] = blue[i] >> 6;
+               }
+       }
+
+       radeon_crtc_load_lut(crtc);
+}
+
+static void radeon_crtc_destroy(struct drm_crtc *crtc)
+{
+       struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
+       if (radeon_crtc->mode_set.mode) {
+               drm_mode_destroy(crtc->dev, radeon_crtc->mode_set.mode);
+       }
+       drm_crtc_cleanup(crtc);
+       kfree(radeon_crtc);
+}
+
+static const struct drm_crtc_funcs radeon_crtc_funcs = {
+       .cursor_set = radeon_crtc_cursor_set,
+       .cursor_move = radeon_crtc_cursor_move,
+       .gamma_set = radeon_crtc_gamma_set,
+       .set_config = drm_crtc_helper_set_config,
+       .destroy = radeon_crtc_destroy,
+};
+
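+/*
+ * Allocate and initialise a CRTC: the connector pointer array for the
+ * fb helper is placed right behind the radeon_crtc allocation, a linear
+ * gamma ramp is installed, and setup is handed to the ATOM or legacy path.
+ */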
+static void radeon_crtc_init(struct drm_device *dev, int index)
+{
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_crtc *radeon_crtc;
+       int i;
+
+       radeon_crtc = kzalloc(sizeof(struct radeon_crtc) + (RADEONFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
+       if (radeon_crtc == NULL)
+               return;
+
+       drm_crtc_init(dev, &radeon_crtc->base, &radeon_crtc_funcs);
+
+       drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256);
+       radeon_crtc->crtc_id = index;
+
+       radeon_crtc->mode_set.crtc = &radeon_crtc->base;
+       radeon_crtc->mode_set.connectors = (struct drm_connector **)(radeon_crtc + 1);
+       radeon_crtc->mode_set.num_connectors = 0;
+
+       for (i = 0; i < 256; i++) {
+               radeon_crtc->lut_r[i] = i << 2;
+               radeon_crtc->lut_g[i] = i << 2;
+               radeon_crtc->lut_b[i] = i << 2;
+       }
+
+       if (rdev->is_atom_bios && (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom))
+               radeon_atombios_init_crtc(dev, radeon_crtc);
+       else
+               radeon_legacy_init_crtc(dev, radeon_crtc);
+}
+
+static const char *encoder_names[34] = {
+       "NONE",
+       "INTERNAL_LVDS",
+       "INTERNAL_TMDS1",
+       "INTERNAL_TMDS2",
+       "INTERNAL_DAC1",
+       "INTERNAL_DAC2",
+       "INTERNAL_SDVOA",
+       "INTERNAL_SDVOB",
+       "SI170B",
+       "CH7303",
+       "CH7301",
+       "INTERNAL_DVO1",
+       "EXTERNAL_SDVOA",
+       "EXTERNAL_SDVOB",
+       "TITFP513",
+       "INTERNAL_LVTM1",
+       "VT1623",
+       "HDMI_SI1930",
+       "HDMI_INTERNAL",
+       "INTERNAL_KLDSCP_TMDS1",
+       "INTERNAL_KLDSCP_DVO1",
+       "INTERNAL_KLDSCP_DAC1",
+       "INTERNAL_KLDSCP_DAC2",
+       "SI178",
+       "MVPU_FPGA",
+       "INTERNAL_DDI",
+       "VT1625",
+       "HDMI_SI1932",
+       "DP_AN9801",
+       "DP_DP501",
+       "INTERNAL_UNIPHY",
+       "INTERNAL_KLDSCP_LVTMA",
+       "INTERNAL_UNIPHY1",
+       "INTERNAL_UNIPHY2",
+};
+
+static const char *connector_names[13] = {
+       "Unknown",
+       "VGA",
+       "DVI-I",
+       "DVI-D",
+       "DVI-A",
+       "Composite",
+       "S-video",
+       "LVDS",
+       "Component",
+       "DIN",
+       "DisplayPort",
+       "HDMI-A",
+       "HDMI-B",
+};
+
+static void radeon_print_display_setup(struct drm_device *dev)
+{
+       struct drm_connector *connector;
+       struct radeon_connector *radeon_connector;
+       struct drm_encoder *encoder;
+       struct radeon_encoder *radeon_encoder;
+       uint32_t devices;
+       int i = 0;
+
+       DRM_INFO("Radeon Display Connectors\n");
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               radeon_connector = to_radeon_connector(connector);
+               DRM_INFO("Connector %d:\n", i);
+               DRM_INFO("  %s\n", connector_names[connector->connector_type]);
+               if (radeon_connector->ddc_bus)
+                       DRM_INFO("  DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+                                radeon_connector->ddc_bus->rec.mask_clk_reg,
+                                radeon_connector->ddc_bus->rec.mask_data_reg,
+                                radeon_connector->ddc_bus->rec.a_clk_reg,
+                                radeon_connector->ddc_bus->rec.a_data_reg,
+                                radeon_connector->ddc_bus->rec.put_clk_reg,
+                                radeon_connector->ddc_bus->rec.put_data_reg,
+                                radeon_connector->ddc_bus->rec.get_clk_reg,
+                                radeon_connector->ddc_bus->rec.get_data_reg);
+               DRM_INFO("  Encoders:\n");
+               list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+                       radeon_encoder = to_radeon_encoder(encoder);
+                       devices = radeon_encoder->devices & radeon_connector->devices;
+                       if (devices) {
+                               if (devices & ATOM_DEVICE_CRT1_SUPPORT)
+                                       DRM_INFO("    CRT1: %s\n", encoder_names[radeon_encoder->encoder_id]);
+                               if (devices & ATOM_DEVICE_CRT2_SUPPORT)
+                                       DRM_INFO("    CRT2: %s\n", encoder_names[radeon_encoder->encoder_id]);
+                               if (devices & ATOM_DEVICE_LCD1_SUPPORT)
+                                       DRM_INFO("    LCD1: %s\n", encoder_names[radeon_encoder->encoder_id]);
+                               if (devices & ATOM_DEVICE_DFP1_SUPPORT)
+                                       DRM_INFO("    DFP1: %s\n", encoder_names[radeon_encoder->encoder_id]);
+                               if (devices & ATOM_DEVICE_DFP2_SUPPORT)
+                                       DRM_INFO("    DFP2: %s\n", encoder_names[radeon_encoder->encoder_id]);
+                               if (devices & ATOM_DEVICE_DFP3_SUPPORT)
+                                       DRM_INFO("    DFP3: %s\n", encoder_names[radeon_encoder->encoder_id]);
+                               if (devices & ATOM_DEVICE_DFP4_SUPPORT)
+                                       DRM_INFO("    DFP4: %s\n", encoder_names[radeon_encoder->encoder_id]);
+                               if (devices & ATOM_DEVICE_DFP5_SUPPORT)
+                                       DRM_INFO("    DFP5: %s\n", encoder_names[radeon_encoder->encoder_id]);
+                               if (devices & ATOM_DEVICE_TV1_SUPPORT)
+                                       DRM_INFO("    TV1: %s\n", encoder_names[radeon_encoder->encoder_id]);
+                               if (devices & ATOM_DEVICE_CV_SUPPORT)
+                                       DRM_INFO("    CV: %s\n", encoder_names[radeon_encoder->encoder_id]);
+                       }
+               }
+               i++;
+       }
+}
+
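+/*
+ * Build the encoder/connector list: use the ATOM object table on R600+,
+ * the ATOM supported-devices table on older ATOM parts, COMBIOS data on
+ * legacy BIOSes, and a hardcoded table when no BIOS is present (non-AVIVO
+ * only).  On success the resulting setup is printed and DDC is probed.
+ */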
+bool radeon_setup_enc_conn(struct drm_device *dev)
+{
+       struct radeon_device *rdev = dev->dev_private;
+       struct drm_connector *drm_connector;
+       bool ret = false;
+
+       if (rdev->bios) {
+               if (rdev->is_atom_bios) {
+                       if (rdev->family >= CHIP_R600)
+                               ret = radeon_get_atom_connector_info_from_object_table(dev);
+                       else
+                               ret = radeon_get_atom_connector_info_from_supported_devices_table(dev);
+               } else
+                       ret = radeon_get_legacy_connector_info_from_bios(dev);
+       } else {
+               if (!ASIC_IS_AVIVO(rdev))
+                       ret = radeon_get_legacy_connector_info_from_table(dev);
+       }
+       if (ret) {
+               radeon_print_display_setup(dev);
+               list_for_each_entry(drm_connector, &dev->mode_config.connector_list, head)
+                       radeon_ddc_dump(drm_connector);
+       }
+
+       return ret;
+}
+
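+/*
+ * Fetch the EDID over DDC, update the connector's digital flag and EDID
+ * property, and add the probed modes.  Returns the number of modes added,
+ * or -1 when no DDC bus exists or no EDID could be read.
+ */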
+int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
+{
+       struct edid *edid;
+       int ret = 0;
+
+       if (!radeon_connector->ddc_bus)
+               return -1;
+       radeon_i2c_do_lock(radeon_connector, 1);
+       edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
+       radeon_i2c_do_lock(radeon_connector, 0);
+       if (edid) {
+               /* update digital bits here */
+               if (edid->digital)
+                       radeon_connector->use_digital = 1;
+               else
+                       radeon_connector->use_digital = 0;
+               drm_mode_connector_update_edid_property(&radeon_connector->base, edid);
+               ret = drm_add_edid_modes(&radeon_connector->base, edid);
+               kfree(edid);
+               return ret;
+       }
+       drm_mode_connector_update_edid_property(&radeon_connector->base, NULL);
+       return -1;
+}
+
+static int radeon_ddc_dump(struct drm_connector *connector)
+{
+       struct edid *edid;
+       struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+       int ret = 0;
+
+       if (!radeon_connector->ddc_bus)
+               return -1;
+       radeon_i2c_do_lock(radeon_connector, 1);
+       edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter);
+       radeon_i2c_do_lock(radeon_connector, 0);
+       if (edid) {
+               kfree(edid);
+       }
+       return ret;
+}
+
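+/* 64-bit by 32-bit division with rounding to nearest, using do_div(). */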
+static inline uint32_t radeon_div(uint64_t n, uint32_t d)
+{
+       uint64_t mod;
+
+       n += d / 2;
+
+       mod = do_div(n, d);
+       return n;
+}
+
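+/*
+ * Walk the post and reference dividers and binary-search the (fractional)
+ * feedback divider for the combination whose VCO stays within
+ * pll_out_min/max and whose output is closest to the requested frequency,
+ * honouring the RADEON_PLL_* constraint and preference flags.
+ */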
+void radeon_compute_pll(struct radeon_pll *pll,
+                       uint64_t freq,
+                       uint32_t *dot_clock_p,
+                       uint32_t *fb_div_p,
+                       uint32_t *frac_fb_div_p,
+                       uint32_t *ref_div_p,
+                       uint32_t *post_div_p,
+                       int flags)
+{
+       uint32_t min_ref_div = pll->min_ref_div;
+       uint32_t max_ref_div = pll->max_ref_div;
+       uint32_t min_fractional_feed_div = 0;
+       uint32_t max_fractional_feed_div = 0;
+       uint32_t best_vco = pll->best_vco;
+       uint32_t best_post_div = 1;
+       uint32_t best_ref_div = 1;
+       uint32_t best_feedback_div = 1;
+       uint32_t best_frac_feedback_div = 0;
+       uint32_t best_freq = -1;
+       uint32_t best_error = 0xffffffff;
+       uint32_t best_vco_diff = 1;
+       uint32_t post_div;
+
+       DRM_DEBUG("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
+       freq = freq * 1000;
+
+       if (flags & RADEON_PLL_USE_REF_DIV)
+               min_ref_div = max_ref_div = pll->reference_div;
+       else {
+               while (min_ref_div < max_ref_div-1) {
+                       uint32_t mid = (min_ref_div + max_ref_div) / 2;
+                       uint32_t pll_in = pll->reference_freq / mid;
+                       if (pll_in < pll->pll_in_min)
+                               max_ref_div = mid;
+                       else if (pll_in > pll->pll_in_max)
+                               min_ref_div = mid;
+                       else
+                               break;
+               }
+       }
+
+       if (flags & RADEON_PLL_USE_FRAC_FB_DIV) {
+               min_fractional_feed_div = pll->min_frac_feedback_div;
+               max_fractional_feed_div = pll->max_frac_feedback_div;
+       }
+
+       for (post_div = pll->min_post_div; post_div <= pll->max_post_div; ++post_div) {
+               uint32_t ref_div;
+
+               if ((flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
+                       continue;
+
+               /* legacy radeons only have a few post_divs */
+               if (flags & RADEON_PLL_LEGACY) {
+                       if ((post_div == 5) ||
+                           (post_div == 7) ||
+                           (post_div == 9) ||
+                           (post_div == 10) ||
+                           (post_div == 11) ||
+                           (post_div == 13) ||
+                           (post_div == 14) ||
+                           (post_div == 15))
+                               continue;
+               }
+
+               for (ref_div = min_ref_div; ref_div <= max_ref_div; ++ref_div) {
+                       uint32_t feedback_div, current_freq = 0, error, vco_diff;
+                       uint32_t pll_in = pll->reference_freq / ref_div;
+                       uint32_t min_feed_div = pll->min_feedback_div;
+                       uint32_t max_feed_div = pll->max_feedback_div + 1;
+
+                       if (pll_in < pll->pll_in_min || pll_in > pll->pll_in_max)
+                               continue;
+
+                       while (min_feed_div < max_feed_div) {
+                               uint32_t vco;
+                               uint32_t min_frac_feed_div = min_fractional_feed_div;
+                               uint32_t max_frac_feed_div = max_fractional_feed_div + 1;
+                               uint32_t frac_feedback_div;
+                               uint64_t tmp;
+
+                               feedback_div = (min_feed_div + max_feed_div) / 2;
+
+                               tmp = (uint64_t)pll->reference_freq * feedback_div;
+                               vco = radeon_div(tmp, ref_div);
+
+                               if (vco < pll->pll_out_min) {
+                                       min_feed_div = feedback_div + 1;
+                                       continue;
+                               } else if (vco > pll->pll_out_max) {
+                                       max_feed_div = feedback_div;
+                                       continue;
+                               }
+
+                               while (min_frac_feed_div < max_frac_feed_div) {
+                                       frac_feedback_div = (min_frac_feed_div + max_frac_feed_div) / 2;
+                                       tmp = (uint64_t)pll->reference_freq * 10000 * feedback_div;
+                                       tmp += (uint64_t)pll->reference_freq * 1000 * frac_feedback_div;
+                                       current_freq = radeon_div(tmp, ref_div * post_div);
+
+                                       error = abs(current_freq - freq);
+                                       vco_diff = abs(vco - best_vco);
+
+                                       if ((best_vco == 0 && error < best_error) ||
+                                           (best_vco != 0 &&
+                                            (error < best_error - 100 ||
+                                             (abs(error - best_error) < 100 && vco_diff < best_vco_diff)))) {
+                                               best_post_div = post_div;
+                                               best_ref_div = ref_div;
+                                               best_feedback_div = feedback_div;
+                                               best_frac_feedback_div = frac_feedback_div;
+                                               best_freq = current_freq;
+                                               best_error = error;
+                                               best_vco_diff = vco_diff;
+                                       } else if (current_freq == freq) {
+                                               if (best_freq == -1) {
+                                                       best_post_div = post_div;
+                                                       best_ref_div = ref_div;
+                                                       best_feedback_div = feedback_div;
+                                                       best_frac_feedback_div = frac_feedback_div;
+                                                       best_freq = current_freq;
+                                                       best_error = error;
+                                                       best_vco_diff = vco_diff;
+                                               } else if (((flags & RADEON_PLL_PREFER_LOW_REF_DIV) && (ref_div < best_ref_div)) ||
+                                                          ((flags & RADEON_PLL_PREFER_HIGH_REF_DIV) && (ref_div > best_ref_div)) ||
+                                                          ((flags & RADEON_PLL_PREFER_LOW_FB_DIV) && (feedback_div < best_feedback_div)) ||
+                                                          ((flags & RADEON_PLL_PREFER_HIGH_FB_DIV) && (feedback_div > best_feedback_div)) ||
+                                                          ((flags & RADEON_PLL_PREFER_LOW_POST_DIV) && (post_div < best_post_div)) ||
+                                                          ((flags & RADEON_PLL_PREFER_HIGH_POST_DIV) && (post_div > best_post_div))) {
+                                                       best_post_div = post_div;
+                                                       best_ref_div = ref_div;
+                                                       best_feedback_div = feedback_div;
+                                                       best_frac_feedback_div = frac_feedback_div;
+                                                       best_freq = current_freq;
+                                                       best_error = error;
+                                                       best_vco_diff = vco_diff;
+                                               }
+                                       }
+                                       if (current_freq < freq)
+                                               min_frac_feed_div = frac_feedback_div + 1;
+                                       else
+                                               max_frac_feed_div = frac_feedback_div;
+                               }
+                               if (current_freq < freq)
+                                       min_feed_div = feedback_div + 1;
+                               else
+                                       max_feed_div = feedback_div;
+                       }
+               }
+       }
+
+       *dot_clock_p = best_freq / 10000;
+       *fb_div_p = best_feedback_div;
+       *frac_fb_div_p = best_frac_feedback_div;
+       *ref_div_p = best_ref_div;
+       *post_div_p = best_post_div;
+}
+
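+/*
+ * Framebuffer destroy hook: tear down the fbdev emulation if this fb
+ * backs it, then unpin and drop the reference on the backing GEM object.
+ */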
+static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+       struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
+       struct drm_device *dev = fb->dev;
+
+       if (fb->fbdev)
+               radeonfb_remove(dev, fb);
+
+       if (radeon_fb->obj) {
+               radeon_gem_object_unpin(radeon_fb->obj);
+               mutex_lock(&dev->struct_mutex);
+               drm_gem_object_unreference(radeon_fb->obj);
+               mutex_unlock(&dev->struct_mutex);
+       }
+       drm_framebuffer_cleanup(fb);
+       kfree(radeon_fb);
+}
+
+static int radeon_user_framebuffer_create_handle(struct drm_framebuffer *fb,
+                                                 struct drm_file *file_priv,
+                                                 unsigned int *handle)
+{
+       struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
+
+       return drm_gem_handle_create(file_priv, radeon_fb->obj, handle);
+}
+
+static const struct drm_framebuffer_funcs radeon_fb_funcs = {
+       .destroy = radeon_user_framebuffer_destroy,
+       .create_handle = radeon_user_framebuffer_create_handle,
+};
+
+struct drm_framebuffer *
+radeon_framebuffer_create(struct drm_device *dev,
+                         struct drm_mode_fb_cmd *mode_cmd,
+                         struct drm_gem_object *obj)
+{
+       struct radeon_framebuffer *radeon_fb;
+
+       radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
+       if (radeon_fb == NULL) {
+               return NULL;
+       }
+       drm_framebuffer_init(dev, &radeon_fb->base, &radeon_fb_funcs);
+       drm_helper_mode_fill_fb_struct(&radeon_fb->base, mode_cmd);
+       radeon_fb->obj = obj;
+       return &radeon_fb->base;
+}
+
+static struct drm_framebuffer *
+radeon_user_framebuffer_create(struct drm_device *dev,
+                              struct drm_file *file_priv,
+                              struct drm_mode_fb_cmd *mode_cmd)
+{
+       struct drm_gem_object *obj;
+
+       obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
+       if (obj == NULL)
+               return NULL;
+
+       return radeon_framebuffer_create(dev, mode_cmd, obj);
+}
+
+static const struct drm_mode_config_funcs radeon_mode_funcs = {
+       .fb_create = radeon_user_framebuffer_create,
+       .fb_changed = radeonfb_probe,
+};
+
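+/*
+ * Initialise the mode setting core: set up the DRM mode config (8k x 8k
+ * limits on AVIVO, 4k x 4k otherwise), create the two CRTCs, build the
+ * connector/encoder lists from the BIOS and pick an initial configuration.
+ */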
+int radeon_modeset_init(struct radeon_device *rdev)
+{
+       int num_crtc = 2, i;
+       int ret;
+
+       drm_mode_config_init(rdev->ddev);
+       rdev->mode_info.mode_config_initialized = true;
+
+       rdev->ddev->mode_config.funcs = (void *)&radeon_mode_funcs;
+
+       if (ASIC_IS_AVIVO(rdev)) {
+               rdev->ddev->mode_config.max_width = 8192;
+               rdev->ddev->mode_config.max_height = 8192;
+       } else {
+               rdev->ddev->mode_config.max_width = 4096;
+               rdev->ddev->mode_config.max_height = 4096;
+       }
+
+       rdev->ddev->mode_config.fb_base = rdev->mc.aper_base;
+
+       /* allocate crtcs - TODO single crtc */
+       for (i = 0; i < num_crtc; i++) {
+               radeon_crtc_init(rdev->ddev, i);
+       }
+
+       /* okay we should have all the bios connectors */
+       ret = radeon_setup_enc_conn(rdev->ddev);
+       if (!ret) {
+               return ret;
+       }
+       drm_helper_initial_config(rdev->ddev);
+       return 0;
+}
+
+void radeon_modeset_fini(struct radeon_device *rdev)
+{
+       if (rdev->mode_info.mode_config_initialized) {
+               drm_mode_config_cleanup(rdev->ddev);
+               rdev->mode_info.mode_config_initialized = false;
+       }
+}
+
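+/*
+ * Collect the active mode and bytes-per-pixel of each enabled CRTC and
+ * hand them to the AVIVO or legacy display bandwidth setup.
+ */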
+void radeon_init_disp_bandwidth(struct drm_device *dev)
+{
+       struct radeon_device *rdev = dev->dev_private;
+       struct drm_display_mode *modes[2];
+       int pixel_bytes[2];
+       struct drm_crtc *crtc;
+
+       pixel_bytes[0] = pixel_bytes[1] = 0;
+       modes[0] = modes[1] = NULL;
+
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
+               if (crtc->enabled && crtc->fb) {
+                       modes[radeon_crtc->crtc_id] = &crtc->mode;
+                       pixel_bytes[radeon_crtc->crtc_id] = crtc->fb->bits_per_pixel / 8;
+               }
+       }
+
+       if (ASIC_IS_AVIVO(rdev)) {
+               radeon_init_disp_bw_avivo(dev,
+                                         modes[0],
+                                         pixel_bytes[0],
+                                         modes[1],
+                                         pixel_bytes[1]);
+       } else {
+               radeon_init_disp_bw_legacy(dev,
+                                          modes[0],
+                                          pixel_bytes[0],
+                                          modes[1],
+                                          pixel_bytes[1]);
+       }
+}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 13a60f4..c815a2c 100644 (file)
 #include "radeon_drv.h"
 
 #include "drm_pciids.h"
+#include <linux/console.h>
+
+
+#if defined(CONFIG_DRM_RADEON_KMS)
+/*
+ * KMS wrapper.
+ */
+#define KMS_DRIVER_MAJOR       2
+#define KMS_DRIVER_MINOR       0
+#define KMS_DRIVER_PATCHLEVEL  0
+int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
+int radeon_driver_unload_kms(struct drm_device *dev);
+int radeon_driver_firstopen_kms(struct drm_device *dev);
+void radeon_driver_lastclose_kms(struct drm_device *dev);
+int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
+void radeon_driver_postclose_kms(struct drm_device *dev,
+                                struct drm_file *file_priv);
+void radeon_driver_preclose_kms(struct drm_device *dev,
+                               struct drm_file *file_priv);
+int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
+int radeon_resume_kms(struct drm_device *dev);
+u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc);
+int radeon_enable_vblank_kms(struct drm_device *dev, int crtc);
+void radeon_disable_vblank_kms(struct drm_device *dev, int crtc);
+void radeon_driver_irq_preinstall_kms(struct drm_device *dev);
+int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
+void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
+irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS);
+int radeon_master_create_kms(struct drm_device *dev, struct drm_master *master);
+void radeon_master_destroy_kms(struct drm_device *dev,
+                              struct drm_master *master);
+int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv);
+int radeon_gem_object_init(struct drm_gem_object *obj);
+void radeon_gem_object_free(struct drm_gem_object *obj);
+extern struct drm_ioctl_desc radeon_ioctls_kms[];
+extern int radeon_max_kms_ioctl;
+int radeon_mmap(struct file *filp, struct vm_area_struct *vma);
+#if defined(CONFIG_DEBUG_FS)
+int radeon_debugfs_init(struct drm_minor *minor);
+void radeon_debugfs_cleanup(struct drm_minor *minor);
+#endif
+#endif
+
 
 int radeon_no_wb;
+#if defined(CONFIG_DRM_RADEON_KMS)
+int radeon_modeset = -1;
+int radeon_dynclks = -1;
+int radeon_r4xx_atom = 0;
+int radeon_agpmode = 0;
+int radeon_vram_limit = 0;
+int radeon_gart_size = 512; /* default gart size */
+int radeon_benchmarking = 0;
+int radeon_connector_table = 0;
+#endif
 
 MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
 module_param_named(no_wb, radeon_no_wb, int, 0444);
 
+#if defined(CONFIG_DRM_RADEON_KMS)
+MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
+module_param_named(modeset, radeon_modeset, int, 0400);
+
+MODULE_PARM_DESC(dynclks, "Disable/Enable dynamic clocks");
+module_param_named(dynclks, radeon_dynclks, int, 0444);
+
+MODULE_PARM_DESC(r4xx_atom, "Enable ATOMBIOS modesetting for R4xx");
+module_param_named(r4xx_atom, radeon_r4xx_atom, int, 0444);
+
+MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing");
+module_param_named(vramlimit, radeon_vram_limit, int, 0600);
+
+MODULE_PARM_DESC(agpmode, "AGP Mode (-1 == PCI)");
+module_param_named(agpmode, radeon_agpmode, int, 0444);
+
+MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to set up, in megabytes (32, 64, etc.)");
+module_param_named(gartsize, radeon_gart_size, int, 0600);
+
+MODULE_PARM_DESC(benchmark, "Run benchmark");
+module_param_named(benchmark, radeon_benchmarking, int, 0444);
+
+MODULE_PARM_DESC(connector_table, "Force connector table");
+module_param_named(connector_table, radeon_connector_table, int, 0444);
+#endif
+
 static int radeon_suspend(struct drm_device *dev, pm_message_t state)
 {
        drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -73,7 +153,11 @@ static struct pci_device_id pciidlist[] = {
        radeon_PCI_IDS
 };
 
-static struct drm_driver driver = {
+#if defined(CONFIG_DRM_RADEON_KMS)
+MODULE_DEVICE_TABLE(pci, pciidlist);
+#endif
+
+static struct drm_driver driver_old = {
        .driver_features =
            DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
            DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED,
@@ -127,18 +211,141 @@ static struct drm_driver driver = {
        .patchlevel = DRIVER_PATCHLEVEL,
 };
 
+#if defined(CONFIG_DRM_RADEON_KMS)
+static struct drm_driver kms_driver;
+
+static int __devinit
+radeon_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+       return drm_get_dev(pdev, ent, &kms_driver);
+}
+
+static void
+radeon_pci_remove(struct pci_dev *pdev)
+{
+       struct drm_device *dev = pci_get_drvdata(pdev);
+
+       drm_put_dev(dev);
+}
+
+static int
+radeon_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+       struct drm_device *dev = pci_get_drvdata(pdev);
+       return radeon_suspend_kms(dev, state);
+}
+
+static int
+radeon_pci_resume(struct pci_dev *pdev)
+{
+       struct drm_device *dev = pci_get_drvdata(pdev);
+       return radeon_resume_kms(dev);
+}
+
+static struct drm_driver kms_driver = {
+       .driver_features =
+           DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
+           DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED | DRIVER_GEM,
+       .dev_priv_size = 0,
+       .load = radeon_driver_load_kms,
+       .firstopen = radeon_driver_firstopen_kms,
+       .open = radeon_driver_open_kms,
+       .preclose = radeon_driver_preclose_kms,
+       .postclose = radeon_driver_postclose_kms,
+       .lastclose = radeon_driver_lastclose_kms,
+       .unload = radeon_driver_unload_kms,
+       .suspend = radeon_suspend_kms,
+       .resume = radeon_resume_kms,
+       .get_vblank_counter = radeon_get_vblank_counter_kms,
+       .enable_vblank = radeon_enable_vblank_kms,
+       .disable_vblank = radeon_disable_vblank_kms,
+       .master_create = radeon_master_create_kms,
+       .master_destroy = radeon_master_destroy_kms,
+#if defined(CONFIG_DEBUG_FS)
+       .debugfs_init = radeon_debugfs_init,
+       .debugfs_cleanup = radeon_debugfs_cleanup,
+#endif
+       .irq_preinstall = radeon_driver_irq_preinstall_kms,
+       .irq_postinstall = radeon_driver_irq_postinstall_kms,
+       .irq_uninstall = radeon_driver_irq_uninstall_kms,
+       .irq_handler = radeon_driver_irq_handler_kms,
+       .reclaim_buffers = drm_core_reclaim_buffers,
+       .get_map_ofs = drm_core_get_map_ofs,
+       .get_reg_ofs = drm_core_get_reg_ofs,
+       .ioctls = radeon_ioctls_kms,
+       .gem_init_object = radeon_gem_object_init,
+       .gem_free_object = radeon_gem_object_free,
+       .dma_ioctl = radeon_dma_ioctl_kms,
+       .fops = {
+                .owner = THIS_MODULE,
+                .open = drm_open,
+                .release = drm_release,
+                .ioctl = drm_ioctl,
+                .mmap = radeon_mmap,
+                .poll = drm_poll,
+                .fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+                .compat_ioctl = NULL,
+#endif
+       },
+
+       .pci_driver = {
+                .name = DRIVER_NAME,
+                .id_table = pciidlist,
+                .probe = radeon_pci_probe,
+                .remove = radeon_pci_remove,
+                .suspend = radeon_pci_suspend,
+                .resume = radeon_pci_resume,
+       },
+
+       .name = DRIVER_NAME,
+       .desc = DRIVER_DESC,
+       .date = DRIVER_DATE,
+       .major = KMS_DRIVER_MAJOR,
+       .minor = KMS_DRIVER_MINOR,
+       .patchlevel = KMS_DRIVER_PATCHLEVEL,
+};
+#endif
+
+static struct drm_driver *driver;
+
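+/*
+ * Pick the UMS or KMS drm_driver at init time based on the radeon_modeset
+ * parameter (and the VGA console state) before registering with the DRM
+ * core.
+ */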
 static int __init radeon_init(void)
 {
-       driver.num_ioctls = radeon_max_ioctl;
-       return drm_init(&driver);
+       driver = &driver_old;
+       driver->num_ioctls = radeon_max_ioctl;
+#if defined(CONFIG_DRM_RADEON_KMS) && defined(CONFIG_X86)
+       /* if the vga console is forced on, leave kernel modesetting
+        * disabled unless it was explicitly enabled via modprobe */
+#ifdef CONFIG_VGA_CONSOLE
+       if (vgacon_text_force() && radeon_modeset == -1) {
+               DRM_INFO("VGACON detected, radeon kernel modesetting disabled.\n");
+               radeon_modeset = 0;
+       }
+#endif
+
+       /* if modeset was not set on the command line, default to KMS */
+       if (radeon_modeset == -1) {
+               DRM_INFO("radeon defaulting to kernel modesetting.\n");
+               radeon_modeset = 1;
+       }
+       if (radeon_modeset == 1) {
+               DRM_INFO("radeon kernel modesetting enabled.\n");
+               driver = &kms_driver;
+               driver->driver_features |= DRIVER_MODESET;
+               driver->num_ioctls = radeon_max_kms_ioctl;
+       }
+#endif
+       return drm_init(driver);
 }
 
 static void __exit radeon_exit(void)
 {
-       drm_exit(&driver);
+       drm_exit(driver);
 }
 
-module_init(radeon_init);
+late_initcall(radeon_init);
 module_exit(radeon_exit);
 
 MODULE_AUTHOR(DRIVER_AUTHOR);
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
new file mode 100644 (file)
index 0000000..c8ef0d1
--- /dev/null
@@ -0,0 +1,1708 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ */
+#include "drmP.h"
+#include "drm_crtc_helper.h"
+#include "radeon_drm.h"
+#include "radeon.h"
+#include "atom.h"
+
+extern int atom_debug;
+
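+/*
+ * Map an ATOM supported-device bit (plus DAC selection for analog outputs)
+ * to the matching ENCODER_OBJECT_ID for this ASIC family.
+ */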
+uint32_t
+radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t dac)
+{
+       struct radeon_device *rdev = dev->dev_private;
+       uint32_t ret = 0;
+
+       switch (supported_device) {
+       case ATOM_DEVICE_CRT1_SUPPORT:
+       case ATOM_DEVICE_TV1_SUPPORT:
+       case ATOM_DEVICE_TV2_SUPPORT:
+       case ATOM_DEVICE_CRT2_SUPPORT:
+       case ATOM_DEVICE_CV_SUPPORT:
+               switch (dac) {
+               case 1: /* dac a */
+                       if ((rdev->family == CHIP_RS300) ||
+                           (rdev->family == CHIP_RS400) ||
+                           (rdev->family == CHIP_RS480))
+                               ret = ENCODER_OBJECT_ID_INTERNAL_DAC2;
+                       else if (ASIC_IS_AVIVO(rdev))
+                               ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1;
+                       else
+                               ret = ENCODER_OBJECT_ID_INTERNAL_DAC1;
+                       break;
+               case 2: /* dac b */
+                       if (ASIC_IS_AVIVO(rdev))
+                               ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2;
+                       else {
+                               /*if (rdev->family == CHIP_R200)
+                                 ret = ENCODER_OBJECT_ID_INTERNAL_DVO1;
+                                 else*/
+                               ret = ENCODER_OBJECT_ID_INTERNAL_DAC2;
+                       }
+                       break;
+               case 3: /* external dac */
+                       if (ASIC_IS_AVIVO(rdev))
+                               ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1;
+                       else
+                               ret = ENCODER_OBJECT_ID_INTERNAL_DVO1;
+                       break;
+               }
+               break;
+       case ATOM_DEVICE_LCD1_SUPPORT:
+               if (ASIC_IS_AVIVO(rdev))
+                       ret = ENCODER_OBJECT_ID_INTERNAL_LVTM1;
+               else
+                       ret = ENCODER_OBJECT_ID_INTERNAL_LVDS;
+               break;
+       case ATOM_DEVICE_DFP1_SUPPORT:
+               if ((rdev->family == CHIP_RS300) ||
+                   (rdev->family == CHIP_RS400) ||
+                   (rdev->family == CHIP_RS480))
+                       ret = ENCODER_OBJECT_ID_INTERNAL_DVO1;
+               else if (ASIC_IS_AVIVO(rdev))
+                       ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1;
+               else
+                       ret = ENCODER_OBJECT_ID_INTERNAL_TMDS1;
+               break;
+       case ATOM_DEVICE_LCD2_SUPPORT:
+       case ATOM_DEVICE_DFP2_SUPPORT:
+               if ((rdev->family == CHIP_RS600) ||
+                   (rdev->family == CHIP_RS690) ||
+                   (rdev->family == CHIP_RS740))
+                       ret = ENCODER_OBJECT_ID_INTERNAL_DDI;
+               else if (ASIC_IS_AVIVO(rdev))
+                       ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1;
+               else
+                       ret = ENCODER_OBJECT_ID_INTERNAL_DVO1;
+               break;
+       case ATOM_DEVICE_DFP3_SUPPORT:
+               ret = ENCODER_OBJECT_ID_INTERNAL_LVTM1;
+               break;
+       }
+
+       return ret;
+}
+
+void
+radeon_link_encoder_connector(struct drm_device *dev)
+{
+       struct drm_connector *connector;
+       struct radeon_connector *radeon_connector;
+       struct drm_encoder *encoder;
+       struct radeon_encoder *radeon_encoder;
+
+       /* walk the list and link encoders to connectors */
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               radeon_connector = to_radeon_connector(connector);
+               list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+                       radeon_encoder = to_radeon_encoder(encoder);
+                       if (radeon_encoder->devices & radeon_connector->devices)
+                               drm_mode_connector_attach_encoder(connector, encoder);
+               }
+       }
+}
+
+static struct drm_connector *
+radeon_get_connector_for_encoder(struct drm_encoder *encoder)
+{
+       struct drm_device *dev = encoder->dev;
+       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+       struct drm_connector *connector;
+       struct radeon_connector *radeon_connector;
+
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               radeon_connector = to_radeon_connector(connector);
+               if (radeon_encoder->devices & radeon_connector->devices)
+                       return connector;
+       }
+       return NULL;
+}
+
+/* used for both atom and legacy */
+void radeon_rmx_mode_fixup(struct drm_encoder *encoder,
+                          struct drm_display_mode *mode,
+                          struct drm_display_mode *adjusted_mode)
+{
+       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+       struct drm_device *dev = encoder->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_native_mode *native_mode = &radeon_encoder->native_mode;
+
+       if (mode->hdisplay < native_mode->panel_xres ||
+           mode->vdisplay < native_mode->panel_yres) {
+               radeon_encoder->flags |= RADEON_USE_RMX;
+               if (ASIC_IS_AVIVO(rdev)) {
+                       adjusted_mode->hdisplay = native_mode->panel_xres;
+                       adjusted_mode->vdisplay = native_mode->panel_yres;
+                       adjusted_mode->htotal = native_mode->panel_xres + native_mode->hblank;
+                       adjusted_mode->hsync_start = native_mode->panel_xres + native_mode->hoverplus;
+                       adjusted_mode->hsync_end = adjusted_mode->hsync_start + native_mode->hsync_width;
+                       adjusted_mode->vtotal = native_mode->panel_yres + native_mode->vblank;
+                       adjusted_mode->vsync_start = native_mode->panel_yres + native_mode->voverplus;
+                       adjusted_mode->vsync_end = adjusted_mode->vsync_start + native_mode->vsync_width;
+                       /* update crtc values */
+                       drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
+                       /* adjust crtc values */
+                       adjusted_mode->crtc_hdisplay = native_mode->panel_xres;
+                       adjusted_mode->crtc_vdisplay = native_mode->panel_yres;
+                       adjusted_mode->crtc_htotal = adjusted_mode->crtc_hdisplay + native_mode->hblank;
+                       adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hdisplay + native_mode->hoverplus;
+                       adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_start + native_mode->hsync_width;
+                       adjusted_mode->crtc_vtotal = adjusted_mode->crtc_vdisplay + native_mode->vblank;
+                       adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + native_mode->voverplus;
+                       adjusted_mode->crtc_vsync_end = adjusted_mode->crtc_vsync_start + native_mode->vsync_width;
+               } else {
+                       adjusted_mode->htotal = native_mode->panel_xres + native_mode->hblank;
+                       adjusted_mode->hsync_start = native_mode->panel_xres + native_mode->hoverplus;
+                       adjusted_mode->hsync_end = adjusted_mode->hsync_start + native_mode->hsync_width;
+                       adjusted_mode->vtotal = native_mode->panel_yres + native_mode->vblank;
+                       adjusted_mode->vsync_start = native_mode->panel_yres + native_mode->voverplus;
+                       adjusted_mode->vsync_end = adjusted_mode->vsync_start + native_mode->vsync_width;
+                       /* update crtc values */
+                       drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
+                       /* adjust crtc values */
+                       adjusted_mode->crtc_htotal = adjusted_mode->crtc_hdisplay + native_mode->hblank;
+                       adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hdisplay + native_mode->hoverplus;
+                       adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_start + native_mode->hsync_width;
+                       adjusted_mode->crtc_vtotal = adjusted_mode->crtc_vdisplay + native_mode->vblank;
+                       adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + native_mode->voverplus;
+                       adjusted_mode->crtc_vsync_end = adjusted_mode->crtc_vsync_start + native_mode->vsync_width;
+               }
+               adjusted_mode->flags = native_mode->flags;
+               adjusted_mode->clock = native_mode->dotclock;
+       }
+}
+
+static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
+                                  struct drm_display_mode *mode,
+                                  struct drm_display_mode *adjusted_mode)
+{
+
+       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+
+       radeon_encoder->flags &= ~RADEON_USE_RMX;
+
+       drm_mode_set_crtcinfo(adjusted_mode, 0);
+
+       if (radeon_encoder->rmx_type != RMX_OFF)
+               radeon_rmx_mode_fixup(encoder, mode, adjusted_mode);
+
+       /* hw bug */
+       if ((mode->flags & DRM_MODE_FLAG_INTERLACE)
+           && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
+               adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
+
+       return true;
+}
+
+static void
+atombios_dac_setup(struct drm_encoder *encoder, int action)
+{
+       struct drm_device *dev = encoder->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+       DAC_ENCODER_CONTROL_PS_ALLOCATION args;
+       int index = 0, num = 0;
+       /* fixme - fill in enc_priv for atom dac */
+       enum radeon_tv_std tv_std = TV_STD_NTSC;
+
+       memset(&args, 0, sizeof(args));
+
+       switch (radeon_encoder->encoder_id) {
+       case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+       case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+               index = GetIndexIntoMasterTable(COMMAND, DAC1EncoderControl);
+               num = 1;
+               break;
+       case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+       case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+               index = GetIndexIntoMasterTable(COMMAND, DAC2EncoderControl);
+               num = 2;
+               break;
+       }
+
+       args.ucAction = action;
+
+       if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
+               args.ucDacStandard = ATOM_DAC1_PS2;
+       else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT))
+               args.ucDacStandard = ATOM_DAC1_CV;
+       else {
+               switch (tv_std) {
+               case TV_STD_PAL:
+               case TV_STD_PAL_M:
+               case TV_STD_SCART_PAL:
+               case TV_STD_SECAM:
+               case TV_STD_PAL_CN:
+                       args.ucDacStandard = ATOM_DAC1_PAL;
+                       break;
+               case TV_STD_NTSC:
+               case TV_STD_NTSC_J:
+               case TV_STD_PAL_60:
+               default:
+                       args.ucDacStandard = ATOM_DAC1_NTSC;
+                       break;
+               }
+       }
+       args.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+
+       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+}
+
+static void
+atombios_tv_setup(struct drm_encoder *encoder, int action)
+{
+       struct drm_device *dev = encoder->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+       TV_ENCODER_CONTROL_PS_ALLOCATION args;
+       int index = 0;
+       /* fixme - fill in enc_priv for atom dac */
+       enum radeon_tv_std tv_std = TV_STD_NTSC;
+
+       memset(&args, 0, sizeof(args));
+
+       index = GetIndexIntoMasterTable(COMMAND, TVEncoderControl);
+
+       args.sTVEncoder.ucAction = action;
+
+       if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT))
+               args.sTVEncoder.ucTvStandard = ATOM_TV_CV;
+       else {
+               switch (tv_std) {
+               case TV_STD_NTSC:
+                       args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC;
+                       break;
+               case TV_STD_PAL:
+                       args.sTVEncoder.ucTvStandard = ATOM_TV_PAL;
+                       break;
+               case TV_STD_PAL_M:
+                       args.sTVEncoder.ucTvStandard = ATOM_TV_PALM;
+                       break;
+               case TV_STD_PAL_60:
+                       args.sTVEncoder.ucTvStandard = ATOM_TV_PAL60;
+                       break;
+               case TV_STD_NTSC_J:
+                       args.sTVEncoder.ucTvStandard = ATOM_TV_NTSCJ;
+                       break;
+               case TV_STD_SCART_PAL:
+                       args.sTVEncoder.ucTvStandard = ATOM_TV_PAL; /* ??? */
+                       break;
+               case TV_STD_SECAM:
+                       args.sTVEncoder.ucTvStandard = ATOM_TV_SECAM;
+                       break;
+               case TV_STD_PAL_CN:
+                       args.sTVEncoder.ucTvStandard = ATOM_TV_PALCN;
+                       break;
+               default:
+                       args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC;
+                       break;
+               }
+       }
+
+       args.sTVEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+
+       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+}
+
+void
+atombios_external_tmds_setup(struct drm_encoder *encoder, int action)
+{
+       struct drm_device *dev = encoder->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+       ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION args;
+       int index = 0;
+
+       memset(&args, 0, sizeof(args));
+
+       index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl);
+
+       args.sXTmdsEncoder.ucEnable = action;
+
+       if (radeon_encoder->pixel_clock > 165000)
+               args.sXTmdsEncoder.ucMisc = PANEL_ENCODER_MISC_DUAL;
+
+       /*if (pScrn->rgbBits == 8)*/
+       args.sXTmdsEncoder.ucMisc |= (1 << 1);
+
+       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+}
+
+static void
+atombios_ddia_setup(struct drm_encoder *encoder, int action)
+{
+       struct drm_device *dev = encoder->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+       DVO_ENCODER_CONTROL_PS_ALLOCATION args;
+       int index = 0;
+
+       memset(&args, 0, sizeof(args));
+
+       index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl);
+
+       args.sDVOEncoder.ucAction = action;
+       args.sDVOEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+
+       if (radeon_encoder->pixel_clock > 165000)
+               args.sDVOEncoder.usDevAttr.sDigAttrib.ucAttribute = PANEL_ENCODER_MISC_DUAL;
+
+       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+}
+
+union lvds_encoder_control {
+       LVDS_ENCODER_CONTROL_PS_ALLOCATION    v1;
+       LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 v2;
+};
+
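+/*
+ * Program an internal LVDS/TMDS encoder through the matching ATOM command
+ * table.  The table revision (frev/crev) selects the v1 or v2 parameter
+ * layout; dual link, link B, HDMI and dithering bits are derived from the
+ * panel and connector information.
+ */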
+static void
+atombios_digital_setup(struct drm_encoder *encoder, int action)
+{
+       struct drm_device *dev = encoder->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+       union lvds_encoder_control args;
+       int index = 0;
+       uint8_t frev, crev;
+       struct radeon_encoder_atom_dig *dig;
+       struct drm_connector *connector;
+       struct radeon_connector *radeon_connector;
+       struct radeon_connector_atom_dig *dig_connector;
+
+       connector = radeon_get_connector_for_encoder(encoder);
+       if (!connector)
+               return;
+
+       radeon_connector = to_radeon_connector(connector);
+
+       if (!radeon_encoder->enc_priv)
+               return;
+
+       dig = radeon_encoder->enc_priv;
+
+       if (!radeon_connector->con_priv)
+               return;
+
+       dig_connector = radeon_connector->con_priv;
+
+       memset(&args, 0, sizeof(args));
+
+       switch (radeon_encoder->encoder_id) {
+       case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+               index = GetIndexIntoMasterTable(COMMAND, LVDSEncoderControl);
+               break;
+       case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+       case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+               index = GetIndexIntoMasterTable(COMMAND, TMDS1EncoderControl);
+               break;
+       case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+               if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+                       index = GetIndexIntoMasterTable(COMMAND, LVDSEncoderControl);
+               else
+                       index = GetIndexIntoMasterTable(COMMAND, TMDS2EncoderControl);
+               break;
+       }
+
+       atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
+
+       switch (frev) {
+       case 1:
+       case 2:
+               switch (crev) {
+               case 1:
+                       args.v1.ucMisc = 0;
+                       args.v1.ucAction = action;
+                       if (drm_detect_hdmi_monitor((struct edid *)connector->edid_blob_ptr))
+                               args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
+                       args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+                       if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+                               if (dig->lvds_misc & (1 << 0))
+                                       args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
+                               if (dig->lvds_misc & (1 << 1))
+                                       args.v1.ucMisc |= (1 << 1);
+                       } else {
+                               if (dig_connector->linkb)
+                                       args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
+                               if (radeon_encoder->pixel_clock > 165000)
+                                       args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
+                               /*if (pScrn->rgbBits == 8) */
+                               args.v1.ucMisc |= (1 << 1);
+                       }
+                       break;
+               case 2:
+               case 3:
+                       args.v2.ucMisc = 0;
+                       args.v2.ucAction = action;
+                       if (crev == 3) {
+                               if (dig->coherent_mode)
+                                       args.v2.ucMisc |= PANEL_ENCODER_MISC_COHERENT;
+                       }
+                       if (drm_detect_hdmi_monitor((struct edid *)connector->edid_blob_ptr))
+                               args.v2.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
+                       args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+                       args.v2.ucTruncate = 0;
+                       args.v2.ucSpatial = 0;
+                       args.v2.ucTemporal = 0;
+                       args.v2.ucFRC = 0;
+                       if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+                               if (dig->lvds_misc & (1 << 0))
+                                       args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
+                               if (dig->lvds_misc & (1 << 5)) {
+                                       args.v2.ucSpatial = PANEL_ENCODER_SPATIAL_DITHER_EN;
+                                       if (dig->lvds_misc & (1 << 1))
+                                               args.v2.ucSpatial |= PANEL_ENCODER_SPATIAL_DITHER_DEPTH;
+                               }
+                               if (dig->lvds_misc & (1 << 6)) {
+                                       args.v2.ucTemporal = PANEL_ENCODER_TEMPORAL_DITHER_EN;
+                                       if (dig->lvds_misc & (1 << 1))
+                                               args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_DITHER_DEPTH;
+                                       if (((dig->lvds_misc >> 2) & 0x3) == 2)
+                                               args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4;
+                               }
+                       } else {
+                               if (dig_connector->linkb)
+                                       args.v2.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
+                               if (radeon_encoder->pixel_clock > 165000)
+                                       args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
+                       }
+                       break;
+               default:
+                       DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+                       break;
+               }
+               break;
+       default:
+               DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+               break;
+       }
+
+       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
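+/*
+ * Pick the AtomBIOS encoder mode (DVI/HDMI/LVDS/CRT/TV) from the connector
+ * type and, where the connector can carry both, from the monitor's EDID.
+ */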
+int
+atombios_get_encoder_mode(struct drm_encoder *encoder)
+{
+       struct drm_connector *connector;
+       struct radeon_connector *radeon_connector;
+
+       connector = radeon_get_connector_for_encoder(encoder);
+       if (!connector)
+               return 0;
+
+       radeon_connector = to_radeon_connector(connector);
+
+       switch (connector->connector_type) {
+       case DRM_MODE_CONNECTOR_DVII:
+               if (drm_detect_hdmi_monitor((struct edid *)connector->edid_blob_ptr))
+                       return ATOM_ENCODER_MODE_HDMI;
+               else if (radeon_connector->use_digital)
+                       return ATOM_ENCODER_MODE_DVI;
+               else
+                       return ATOM_ENCODER_MODE_CRT;
+               break;
+       case DRM_MODE_CONNECTOR_DVID:
+       case DRM_MODE_CONNECTOR_HDMIA:
+       case DRM_MODE_CONNECTOR_HDMIB:
+       default:
+               if (drm_detect_hdmi_monitor((struct edid *)connector->edid_blob_ptr))
+                       return ATOM_ENCODER_MODE_HDMI;
+               else
+                       return ATOM_ENCODER_MODE_DVI;
+               break;
+       case DRM_MODE_CONNECTOR_LVDS:
+               return ATOM_ENCODER_MODE_LVDS;
+               break;
+       case DRM_MODE_CONNECTOR_DisplayPort:
+               /*if (radeon_output->MonType == MT_DP)
+                 return ATOM_ENCODER_MODE_DP;
+                 else*/
+               if (drm_detect_hdmi_monitor((struct edid *)connector->edid_blob_ptr))
+                       return ATOM_ENCODER_MODE_HDMI;
+               else
+                       return ATOM_ENCODER_MODE_DVI;
+               break;
+       case DRM_MODE_CONNECTOR_DVIA:
+       case DRM_MODE_CONNECTOR_VGA:
+               return ATOM_ENCODER_MODE_CRT;
+               break;
+       case DRM_MODE_CONNECTOR_SVIDEO:
+       case DRM_MODE_CONNECTOR_Composite:
+       case DRM_MODE_CONNECTOR_9PinDIN:
+               /* fix me */
+               return ATOM_ENCODER_MODE_TV;
+               /*return ATOM_ENCODER_MODE_CV;*/
+               break;
+       }
+}
+
+static void
+atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
+{
+       struct drm_device *dev = encoder->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+       DIG_ENCODER_CONTROL_PS_ALLOCATION args;
+       int index = 0, num = 0;
+       uint8_t frev, crev;
+       struct radeon_encoder_atom_dig *dig;
+       struct drm_connector *connector;
+       struct radeon_connector *radeon_connector;
+       struct radeon_connector_atom_dig *dig_connector;
+
+       connector = radeon_get_connector_for_encoder(encoder);
+       if (!connector)
+               return;
+
+       radeon_connector = to_radeon_connector(connector);
+
+       if (!radeon_connector->con_priv)
+               return;
+
+       dig_connector = radeon_connector->con_priv;
+
+       if (!radeon_encoder->enc_priv)
+               return;
+
+       dig = radeon_encoder->enc_priv;
+
+       memset(&args, 0, sizeof(args));
+
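+       /* DCE 3.2 has two DIG blocks; use the table for the block assigned to
+        * this encoder.  Earlier parts hardwire UNIPHY to DIG1 and LVTMA to DIG2.
+        */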
+       if (ASIC_IS_DCE32(rdev)) {
+               if (dig->dig_block)
+                       index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
+               else
+                       index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
+               num = dig->dig_block + 1;
+       } else {
+               switch (radeon_encoder->encoder_id) {
+               case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+                       index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
+                       num = 1;
+                       break;
+               case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+                       index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
+                       num = 2;
+                       break;
+               }
+       }
+
+       atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
+
+       args.ucAction = action;
+       args.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+
+       if (ASIC_IS_DCE32(rdev)) {
+               switch (radeon_encoder->encoder_id) {
+               case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+                       args.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
+                       break;
+               case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+                       args.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER2;
+                       break;
+               case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+                       args.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3;
+                       break;
+               }
+       } else {
+               switch (radeon_encoder->encoder_id) {
+               case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+                       args.ucConfig = ATOM_ENCODER_CONFIG_TRANSMITTER1;
+                       break;
+               case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+                       args.ucConfig = ATOM_ENCODER_CONFIG_TRANSMITTER2;
+                       break;
+               }
+       }
+
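+       /* a single link tops out at 165 MHz; above that drive both links
+        * (8 lanes), otherwise use 4 lanes on link A or B
+        */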
+       if (radeon_encoder->pixel_clock > 165000) {
+               args.ucConfig |= ATOM_ENCODER_CONFIG_LINKA_B;
+               args.ucLaneNum = 8;
+       } else {
+               if (dig_connector->linkb)
+                       args.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
+               else
+                       args.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
+               args.ucLaneNum = 4;
+       }
+
+       args.ucEncoderMode = atombios_get_encoder_mode(encoder);
+
+       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+union dig_transmitter_control {
+       DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1;
+       DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2;
+};
+
+static void
+atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
+{
+       struct drm_device *dev = encoder->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+       union dig_transmitter_control args;
+       int index = 0, num = 0;
+       uint8_t frev, crev;
+       struct radeon_encoder_atom_dig *dig;
+       struct drm_connector *connector;
+       struct radeon_connector *radeon_connector;
+       struct radeon_connector_atom_dig *dig_connector;
+
+       connector = radeon_get_connector_for_encoder(encoder);
+       if (!connector)
+               return;
+
+       radeon_connector = to_radeon_connector(connector);
+
+       if (!radeon_encoder->enc_priv)
+               return;
+
+       dig = radeon_encoder->enc_priv;
+
+       if (!radeon_connector->con_priv)
+               return;
+
+       dig_connector = radeon_connector->con_priv;
+
+       memset(&args, 0, sizeof(args));
+
+       if (ASIC_IS_DCE32(rdev))
+               index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
+       else {
+               switch (radeon_encoder->encoder_id) {
+               case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+                       index = GetIndexIntoMasterTable(COMMAND, DIG1TransmitterControl);
+                       break;
+               case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+                       index = GetIndexIntoMasterTable(COMMAND, DIG2TransmitterControl);
+                       break;
+               }
+       }
+
+       atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
+
+       args.v1.ucAction = action;
+
+       if (ASIC_IS_DCE32(rdev)) {
+               if (radeon_encoder->pixel_clock > 165000) {
+                       args.v2.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock * 10 * 2) / 100);
+                       args.v2.acConfig.fDualLinkConnector = 1;
+               } else {
+                       args.v2.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock * 10 * 4) / 100);
+               }
+               if (dig->dig_block)
+                       args.v2.acConfig.ucEncoderSel = 1;
+
+               switch (radeon_encoder->encoder_id) {
+               case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+                       args.v2.acConfig.ucTransmitterSel = 0;
+                       num = 0;
+                       break;
+               case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+                       args.v2.acConfig.ucTransmitterSel = 1;
+                       num = 1;
+                       break;
+               case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+                       args.v2.acConfig.ucTransmitterSel = 2;
+                       num = 2;
+                       break;
+               }
+
+               if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+                       if (dig->coherent_mode)
+                               args.v2.acConfig.fCoherentMode = 1;
+               }
+       } else {
+               args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL;
+               args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock) / 10);
+
+               switch (radeon_encoder->encoder_id) {
+               case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+                       args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
+                       if (rdev->flags & RADEON_IS_IGP) {
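+                               /* IGP boards describe which PCIE lanes feed the
+                                * transmitter in igp_lane_info; translate that
+                                * bitmask into the lane-range config bits
+                                */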
+                               if (radeon_encoder->pixel_clock > 165000) {
+                                       args.v1.ucConfig |= (ATOM_TRANSMITTER_CONFIG_8LANE_LINK |
+                                                            ATOM_TRANSMITTER_CONFIG_LINKA_B);
+                                       if (dig_connector->igp_lane_info & 0x3)
+                                               args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7;
+                                       else if (dig_connector->igp_lane_info & 0xc)
+                                               args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15;
+                               } else {
+                                       args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA;
+                                       if (dig_connector->igp_lane_info & 0x1)
+                                               args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
+                                       else if (dig_connector->igp_lane_info & 0x2)
+                                               args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7;
+                                       else if (dig_connector->igp_lane_info & 0x4)
+                                               args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11;
+                                       else if (dig_connector->igp_lane_info & 0x8)
+                                               args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15;
+                               }
+                       } else {
+                               if (radeon_encoder->pixel_clock > 165000)
+                                       args.v1.ucConfig |= (ATOM_TRANSMITTER_CONFIG_8LANE_LINK |
+                                                            ATOM_TRANSMITTER_CONFIG_LINKA_B |
+                                                            ATOM_TRANSMITTER_CONFIG_LANE_0_7);
+                               else {
+                                       if (dig_connector->linkb)
+                                               args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB | ATOM_TRANSMITTER_CONFIG_LANE_0_3;
+                                       else
+                                               args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA | ATOM_TRANSMITTER_CONFIG_LANE_0_3;
+                               }
+                       }
+                       break;
+               case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+                       args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
+                       if (radeon_encoder->pixel_clock > 165000)
+                               args.v1.ucConfig |= (ATOM_TRANSMITTER_CONFIG_8LANE_LINK |
+                                                    ATOM_TRANSMITTER_CONFIG_LINKA_B |
+                                                    ATOM_TRANSMITTER_CONFIG_LANE_0_7);
+                       else {
+                               if (dig_connector->linkb)
+                                       args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB | ATOM_TRANSMITTER_CONFIG_LANE_0_3;
+                               else
+                                       args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA | ATOM_TRANSMITTER_CONFIG_LANE_0_3;
+                       }
+                       break;
+               }
+
+               if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+                       if (dig->coherent_mode)
+                               args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
+               }
+       }
+
+       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static void atom_rv515_force_tv_scaler(struct radeon_device *rdev)
+{
+
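+       /* hard-coded TV scaler setup for RV515-class chips: 0x6578/0x657C act
+        * as an index/data pair loading the scaler filter coefficient table
+        */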
+       WREG32(0x659C, 0x0);
+       WREG32(0x6594, 0x705);
+       WREG32(0x65A4, 0x10001);
+       WREG32(0x65D8, 0x0);
+       WREG32(0x65B0, 0x0);
+       WREG32(0x65C0, 0x0);
+       WREG32(0x65D4, 0x0);
+       WREG32(0x6578, 0x0);
+       WREG32(0x657C, 0x841880A8);
+       WREG32(0x6578, 0x1);
+       WREG32(0x657C, 0x84208680);
+       WREG32(0x6578, 0x2);
+       WREG32(0x657C, 0xBFF880B0);
+       WREG32(0x6578, 0x100);
+       WREG32(0x657C, 0x83D88088);
+       WREG32(0x6578, 0x101);
+       WREG32(0x657C, 0x84608680);
+       WREG32(0x6578, 0x102);
+       WREG32(0x657C, 0xBFF080D0);
+       WREG32(0x6578, 0x200);
+       WREG32(0x657C, 0x83988068);
+       WREG32(0x6578, 0x201);
+       WREG32(0x657C, 0x84A08680);
+       WREG32(0x6578, 0x202);
+       WREG32(0x657C, 0xBFF080F8);
+       WREG32(0x6578, 0x300);
+       WREG32(0x657C, 0x83588058);
+       WREG32(0x6578, 0x301);
+       WREG32(0x657C, 0x84E08660);
+       WREG32(0x6578, 0x302);
+       WREG32(0x657C, 0xBFF88120);
+       WREG32(0x6578, 0x400);
+       WREG32(0x657C, 0x83188040);
+       WREG32(0x6578, 0x401);
+       WREG32(0x657C, 0x85008660);
+       WREG32(0x6578, 0x402);
+       WREG32(0x657C, 0xBFF88150);
+       WREG32(0x6578, 0x500);
+       WREG32(0x657C, 0x82D88030);
+       WREG32(0x6578, 0x501);
+       WREG32(0x657C, 0x85408640);
+       WREG32(0x6578, 0x502);
+       WREG32(0x657C, 0xBFF88180);
+       WREG32(0x6578, 0x600);
+       WREG32(0x657C, 0x82A08018);
+       WREG32(0x6578, 0x601);
+       WREG32(0x657C, 0x85808620);
+       WREG32(0x6578, 0x602);
+       WREG32(0x657C, 0xBFF081B8);
+       WREG32(0x6578, 0x700);
+       WREG32(0x657C, 0x82608010);
+       WREG32(0x6578, 0x701);
+       WREG32(0x657C, 0x85A08600);
+       WREG32(0x6578, 0x702);
+       WREG32(0x657C, 0x800081F0);
+       WREG32(0x6578, 0x800);
+       WREG32(0x657C, 0x8228BFF8);
+       WREG32(0x6578, 0x801);
+       WREG32(0x657C, 0x85E085E0);
+       WREG32(0x6578, 0x802);
+       WREG32(0x657C, 0xBFF88228);
+       WREG32(0x6578, 0x10000);
+       WREG32(0x657C, 0x82A8BF00);
+       WREG32(0x6578, 0x10001);
+       WREG32(0x657C, 0x82A08CC0);
+       WREG32(0x6578, 0x10002);
+       WREG32(0x657C, 0x8008BEF8);
+       WREG32(0x6578, 0x10100);
+       WREG32(0x657C, 0x81F0BF28);
+       WREG32(0x6578, 0x10101);
+       WREG32(0x657C, 0x83608CA0);
+       WREG32(0x6578, 0x10102);
+       WREG32(0x657C, 0x8018BED0);
+       WREG32(0x6578, 0x10200);
+       WREG32(0x657C, 0x8148BF38);
+       WREG32(0x6578, 0x10201);
+       WREG32(0x657C, 0x84408C80);
+       WREG32(0x6578, 0x10202);
+       WREG32(0x657C, 0x8008BEB8);
+       WREG32(0x6578, 0x10300);
+       WREG32(0x657C, 0x80B0BF78);
+       WREG32(0x6578, 0x10301);
+       WREG32(0x657C, 0x85008C20);
+       WREG32(0x6578, 0x10302);
+       WREG32(0x657C, 0x8020BEA0);
+       WREG32(0x6578, 0x10400);
+       WREG32(0x657C, 0x8028BF90);
+       WREG32(0x6578, 0x10401);
+       WREG32(0x657C, 0x85E08BC0);
+       WREG32(0x6578, 0x10402);
+       WREG32(0x657C, 0x8018BE90);
+       WREG32(0x6578, 0x10500);
+       WREG32(0x657C, 0xBFB8BFB0);
+       WREG32(0x6578, 0x10501);
+       WREG32(0x657C, 0x86C08B40);
+       WREG32(0x6578, 0x10502);
+       WREG32(0x657C, 0x8010BE90);
+       WREG32(0x6578, 0x10600);
+       WREG32(0x657C, 0xBF58BFC8);
+       WREG32(0x6578, 0x10601);
+       WREG32(0x657C, 0x87A08AA0);
+       WREG32(0x6578, 0x10602);
+       WREG32(0x657C, 0x8010BE98);
+       WREG32(0x6578, 0x10700);
+       WREG32(0x657C, 0xBF10BFF0);
+       WREG32(0x6578, 0x10701);
+       WREG32(0x657C, 0x886089E0);
+       WREG32(0x6578, 0x10702);
+       WREG32(0x657C, 0x8018BEB0);
+       WREG32(0x6578, 0x10800);
+       WREG32(0x657C, 0xBED8BFE8);
+       WREG32(0x6578, 0x10801);
+       WREG32(0x657C, 0x89408940);
+       WREG32(0x6578, 0x10802);
+       WREG32(0x657C, 0xBFE8BED8);
+       WREG32(0x6578, 0x20000);
+       WREG32(0x657C, 0x80008000);
+       WREG32(0x6578, 0x20001);
+       WREG32(0x657C, 0x90008000);
+       WREG32(0x6578, 0x20002);
+       WREG32(0x657C, 0x80008000);
+       WREG32(0x6578, 0x20003);
+       WREG32(0x657C, 0x80008000);
+       WREG32(0x6578, 0x20100);
+       WREG32(0x657C, 0x80108000);
+       WREG32(0x6578, 0x20101);
+       WREG32(0x657C, 0x8FE0BF70);
+       WREG32(0x6578, 0x20102);
+       WREG32(0x657C, 0xBFE880C0);
+       WREG32(0x6578, 0x20103);
+       WREG32(0x657C, 0x80008000);
+       WREG32(0x6578, 0x20200);
+       WREG32(0x657C, 0x8018BFF8);
+       WREG32(0x6578, 0x20201);
+       WREG32(0x657C, 0x8F80BF08);
+       WREG32(0x6578, 0x20202);
+       WREG32(0x657C, 0xBFD081A0);
+       WREG32(0x6578, 0x20203);
+       WREG32(0x657C, 0xBFF88000);
+       WREG32(0x6578, 0x20300);
+       WREG32(0x657C, 0x80188000);
+       WREG32(0x6578, 0x20301);
+       WREG32(0x657C, 0x8EE0BEC0);
+       WREG32(0x6578, 0x20302);
+       WREG32(0x657C, 0xBFB082A0);
+       WREG32(0x6578, 0x20303);
+       WREG32(0x657C, 0x80008000);
+       WREG32(0x6578, 0x20400);
+       WREG32(0x657C, 0x80188000);
+       WREG32(0x6578, 0x20401);
+       WREG32(0x657C, 0x8E00BEA0);
+       WREG32(0x6578, 0x20402);
+       WREG32(0x657C, 0xBF8883C0);
+       WREG32(0x6578, 0x20403);
+       WREG32(0x657C, 0x80008000);
+       WREG32(0x6578, 0x20500);
+       WREG32(0x657C, 0x80188000);
+       WREG32(0x6578, 0x20501);
+       WREG32(0x657C, 0x8D00BE90);
+       WREG32(0x6578, 0x20502);
+       WREG32(0x657C, 0xBF588500);
+       WREG32(0x6578, 0x20503);
+       WREG32(0x657C, 0x80008008);
+       WREG32(0x6578, 0x20600);
+       WREG32(0x657C, 0x80188000);
+       WREG32(0x6578, 0x20601);
+       WREG32(0x657C, 0x8BC0BE98);
+       WREG32(0x6578, 0x20602);
+       WREG32(0x657C, 0xBF308660);
+       WREG32(0x6578, 0x20603);
+       WREG32(0x657C, 0x80008008);
+       WREG32(0x6578, 0x20700);
+       WREG32(0x657C, 0x80108000);
+       WREG32(0x6578, 0x20701);
+       WREG32(0x657C, 0x8A80BEB0);
+       WREG32(0x6578, 0x20702);
+       WREG32(0x657C, 0xBF0087C0);
+       WREG32(0x6578, 0x20703);
+       WREG32(0x657C, 0x80008008);
+       WREG32(0x6578, 0x20800);
+       WREG32(0x657C, 0x80108000);
+       WREG32(0x6578, 0x20801);
+       WREG32(0x657C, 0x8920BED0);
+       WREG32(0x6578, 0x20802);
+       WREG32(0x657C, 0xBED08920);
+       WREG32(0x6578, 0x20803);
+       WREG32(0x657C, 0x80008010);
+       WREG32(0x6578, 0x30000);
+       WREG32(0x657C, 0x90008000);
+       WREG32(0x6578, 0x30001);
+       WREG32(0x657C, 0x80008000);
+       WREG32(0x6578, 0x30100);
+       WREG32(0x657C, 0x8FE0BF90);
+       WREG32(0x6578, 0x30101);
+       WREG32(0x657C, 0xBFF880A0);
+       WREG32(0x6578, 0x30200);
+       WREG32(0x657C, 0x8F60BF40);
+       WREG32(0x6578, 0x30201);
+       WREG32(0x657C, 0xBFE88180);
+       WREG32(0x6578, 0x30300);
+       WREG32(0x657C, 0x8EC0BF00);
+       WREG32(0x6578, 0x30301);
+       WREG32(0x657C, 0xBFC88280);
+       WREG32(0x6578, 0x30400);
+       WREG32(0x657C, 0x8DE0BEE0);
+       WREG32(0x6578, 0x30401);
+       WREG32(0x657C, 0xBFA083A0);
+       WREG32(0x6578, 0x30500);
+       WREG32(0x657C, 0x8CE0BED0);
+       WREG32(0x6578, 0x30501);
+       WREG32(0x657C, 0xBF7884E0);
+       WREG32(0x6578, 0x30600);
+       WREG32(0x657C, 0x8BA0BED8);
+       WREG32(0x6578, 0x30601);
+       WREG32(0x657C, 0xBF508640);
+       WREG32(0x6578, 0x30700);
+       WREG32(0x657C, 0x8A60BEE8);
+       WREG32(0x6578, 0x30701);
+       WREG32(0x657C, 0xBF2087A0);
+       WREG32(0x6578, 0x30800);
+       WREG32(0x657C, 0x8900BF00);
+       WREG32(0x6578, 0x30801);
+       WREG32(0x657C, 0xBF008900);
+}
+
+static void
+atombios_yuv_setup(struct drm_encoder *encoder, bool enable)
+{
+       struct drm_device *dev = encoder->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+       struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+       ENABLE_YUV_PS_ALLOCATION args;
+       int index = GetIndexIntoMasterTable(COMMAND, EnableYUV);
+       uint32_t temp, reg;
+
+       memset(&args, 0, sizeof(args));
+
+       if (rdev->family >= CHIP_R600)
+               reg = R600_BIOS_3_SCRATCH;
+       else
+               reg = RADEON_BIOS_3_SCRATCH;
+
+       /* XXX: fix up scratch reg handling */
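+       /* temporarily mark the active TV/CV device in the BIOS scratch reg so
+        * the EnableYUV table picks the right output, then restore it below
+        */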
+       temp = RREG32(reg);
+       if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT))
+               WREG32(reg, (ATOM_S3_TV1_ACTIVE |
+                            (radeon_crtc->crtc_id << 18)));
+       else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT))
+               WREG32(reg, (ATOM_S3_CV_ACTIVE | (radeon_crtc->crtc_id << 24)));
+       else
+               WREG32(reg, 0);
+
+       if (enable)
+               args.ucEnable = ATOM_ENABLE;
+       args.ucCRTC = radeon_crtc->crtc_id;
+
+       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+       WREG32(reg, temp);
+}
+
+static void
+atombios_overscan_setup(struct drm_encoder *encoder,
+                       struct drm_display_mode *mode,
+                       struct drm_display_mode *adjusted_mode)
+{
+       struct drm_device *dev = encoder->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+       struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+       SET_CRTC_OVERSCAN_PS_ALLOCATION args;
+       int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_OverScan);
+
+       memset(&args, 0, sizeof(args));
+
+       args.usOverscanRight = 0;
+       args.usOverscanLeft = 0;
+       args.usOverscanBottom = 0;
+       args.usOverscanTop = 0;
+       args.ucCRTC = radeon_crtc->crtc_id;
+
+       if (radeon_encoder->flags & RADEON_USE_RMX) {
+               if (radeon_encoder->rmx_type == RMX_FULL) {
+                       args.usOverscanRight = 0;
+                       args.usOverscanLeft = 0;
+                       args.usOverscanBottom = 0;
+                       args.usOverscanTop = 0;
+               } else if (radeon_encoder->rmx_type == RMX_CENTER) {
+                       args.usOverscanTop = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2;
+                       args.usOverscanBottom = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2;
+                       args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2;
+                       args.usOverscanRight = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2;
+               } else if (radeon_encoder->rmx_type == RMX_ASPECT) {
+                       int a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay;
+                       int a2 = adjusted_mode->crtc_vdisplay * mode->crtc_hdisplay;
+
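+                       /* compare aspect ratios by cross-multiplying: pad
+                        * left/right when the mode is narrower than the
+                        * adjusted mode, top/bottom when it is shorter
+                        */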
+                       if (a1 > a2) {
+                               args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2;
+                               args.usOverscanRight = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2;
+                       } else if (a2 > a1) {
+                               args.usOverscanTop = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2;
+                               args.usOverscanBottom = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2;
+                       }
+               }
+       }
+
+       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static void
+atombios_scaler_setup(struct drm_encoder *encoder)
+{
+       struct drm_device *dev = encoder->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+       struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+       ENABLE_SCALER_PS_ALLOCATION args;
+       int index = GetIndexIntoMasterTable(COMMAND, EnableScaler);
+       /* fixme - fill in enc_priv for atom dac */
+       enum radeon_tv_std tv_std = TV_STD_NTSC;
+
+       if (!ASIC_IS_AVIVO(rdev) && radeon_crtc->crtc_id)
+               return;
+
+       memset(&args, 0, sizeof(args));
+
+       args.ucScaler = radeon_crtc->crtc_id;
+
+       if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT)) {
+               switch (tv_std) {
+               case TV_STD_NTSC:
+               default:
+                       args.ucTVStandard = ATOM_TV_NTSC;
+                       break;
+               case TV_STD_PAL:
+                       args.ucTVStandard = ATOM_TV_PAL;
+                       break;
+               case TV_STD_PAL_M:
+                       args.ucTVStandard = ATOM_TV_PALM;
+                       break;
+               case TV_STD_PAL_60:
+                       args.ucTVStandard = ATOM_TV_PAL60;
+                       break;
+               case TV_STD_NTSC_J:
+                       args.ucTVStandard = ATOM_TV_NTSCJ;
+                       break;
+               case TV_STD_SCART_PAL:
+                       args.ucTVStandard = ATOM_TV_PAL; /* ??? */
+                       break;
+               case TV_STD_SECAM:
+                       args.ucTVStandard = ATOM_TV_SECAM;
+                       break;
+               case TV_STD_PAL_CN:
+                       args.ucTVStandard = ATOM_TV_PALCN;
+                       break;
+               }
+               args.ucEnable = SCALER_ENABLE_MULTITAP_MODE;
+       } else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT)) {
+               args.ucTVStandard = ATOM_TV_CV;
+               args.ucEnable = SCALER_ENABLE_MULTITAP_MODE;
+       } else if (radeon_encoder->flags & RADEON_USE_RMX) {
+               if (radeon_encoder->rmx_type == RMX_FULL)
+                       args.ucEnable = ATOM_SCALER_EXPANSION;
+               else if (radeon_encoder->rmx_type == RMX_CENTER)
+                       args.ucEnable = ATOM_SCALER_CENTER;
+               else if (radeon_encoder->rmx_type == RMX_ASPECT)
+                       args.ucEnable = ATOM_SCALER_EXPANSION;
+       } else {
+               if (ASIC_IS_AVIVO(rdev))
+                       args.ucEnable = ATOM_SCALER_DISABLE;
+               else
+                       args.ucEnable = ATOM_SCALER_CENTER;
+       }
+
+       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+       if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT)
+           && rdev->family >= CHIP_RV515 && rdev->family <= CHIP_RV570) {
+               atom_rv515_force_tv_scaler(rdev);
+       }
+}
+
+static void
+radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+       struct drm_device *dev = encoder->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+       DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args;
+       int index = 0;
+       bool is_dig = false;
+
+       memset(&args, 0, sizeof(args));
+
+       switch (radeon_encoder->encoder_id) {
+       case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+       case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+               index = GetIndexIntoMasterTable(COMMAND, TMDSAOutputControl);
+               break;
+       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+       case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+               is_dig = true;
+               break;
+       case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+       case ENCODER_OBJECT_ID_INTERNAL_DDI:
+       case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+               index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
+               break;
+       case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+               index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
+               break;
+       case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+               if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+                       index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
+               else
+                       index = GetIndexIntoMasterTable(COMMAND, LVTMAOutputControl);
+               break;
+       case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+       case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+               if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT))
+                       index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
+               else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT))
+                       index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
+               else
+                       index = GetIndexIntoMasterTable(COMMAND, DAC1OutputControl);
+               break;
+       case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+       case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+               if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT))
+                       index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
+               else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT))
+                       index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
+               else
+                       index = GetIndexIntoMasterTable(COMMAND, DAC2OutputControl);
+               break;
+       }
+
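+       /* DIG encoders are powered via the transmitter control table; all
+        * other encoders use the per-device output control table picked above
+        */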
+       if (is_dig) {
+               switch (mode) {
+               case DRM_MODE_DPMS_ON:
+                       atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE);
+                       break;
+               case DRM_MODE_DPMS_STANDBY:
+               case DRM_MODE_DPMS_SUSPEND:
+               case DRM_MODE_DPMS_OFF:
+                       atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE);
+                       break;
+               }
+       } else {
+               switch (mode) {
+               case DRM_MODE_DPMS_ON:
+                       args.ucAction = ATOM_ENABLE;
+                       break;
+               case DRM_MODE_DPMS_STANDBY:
+               case DRM_MODE_DPMS_SUSPEND:
+               case DRM_MODE_DPMS_OFF:
+                       args.ucAction = ATOM_DISABLE;
+                       break;
+               }
+               atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+       }
+       radeon_atombios_encoder_dpms_scratch_regs(encoder, mode == DRM_MODE_DPMS_ON);
+}
+
+union crtc_source_param {
+       SELECT_CRTC_SOURCE_PS_ALLOCATION v1;
+       SELECT_CRTC_SOURCE_PARAMETERS_V2 v2;
+};
+
+static void
+atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
+{
+       struct drm_device *dev = encoder->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+       struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+       union crtc_source_param args;
+       int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source);
+       uint8_t frev, crev;
+
+       memset(&args, 0, sizeof(args));
+
+       atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
+
+       switch (frev) {
+       case 1:
+               switch (crev) {
+               case 1:
+               default:
+                       if (ASIC_IS_AVIVO(rdev))
+                               args.v1.ucCRTC = radeon_crtc->crtc_id;
+                       else {
+                               if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) {
+                                       args.v1.ucCRTC = radeon_crtc->crtc_id;
+                               } else {
+                                       args.v1.ucCRTC = radeon_crtc->crtc_id << 2;
+                               }
+                       }
+                       switch (radeon_encoder->encoder_id) {
+                       case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+                       case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+                               args.v1.ucDevice = ATOM_DEVICE_DFP1_INDEX;
+                               break;
+                       case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+                       case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+                               if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT)
+                                       args.v1.ucDevice = ATOM_DEVICE_LCD1_INDEX;
+                               else
+                                       args.v1.ucDevice = ATOM_DEVICE_DFP3_INDEX;
+                               break;
+                       case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+                       case ENCODER_OBJECT_ID_INTERNAL_DDI:
+                       case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+                               args.v1.ucDevice = ATOM_DEVICE_DFP2_INDEX;
+                               break;
+                       case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+                       case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+                               if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT))
+                                       args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX;
+                               else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT))
+                                       args.v1.ucDevice = ATOM_DEVICE_CV_INDEX;
+                               else
+                                       args.v1.ucDevice = ATOM_DEVICE_CRT1_INDEX;
+                               break;
+                       case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+                       case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+                               if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT))
+                                       args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX;
+                               else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT))
+                                       args.v1.ucDevice = ATOM_DEVICE_CV_INDEX;
+                               else
+                                       args.v1.ucDevice = ATOM_DEVICE_CRT2_INDEX;
+                               break;
+                       }
+                       break;
+               case 2:
+                       args.v2.ucCRTC = radeon_crtc->crtc_id;
+                       args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
+                       switch (radeon_encoder->encoder_id) {
+                       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+                       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+                       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+                               if (ASIC_IS_DCE32(rdev)) {
+                                       if (radeon_crtc->crtc_id)
+                                               args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
+                                       else
+                                               args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
+                               } else
+                                       args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
+                               break;
+                       case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+                               args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID;
+                               break;
+                       case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+                               args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
+                               break;
+                       case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+                               if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT))
+                                       args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
+                               else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT))
+                                       args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
+                               else
+                                       args.v2.ucEncoderID = ASIC_INT_DAC1_ENCODER_ID;
+                               break;
+                       case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+                               if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT))
+                                       args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
+                               else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT))
+                                       args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
+                               else
+                                       args.v2.ucEncoderID = ASIC_INT_DAC2_ENCODER_ID;
+                               break;
+                       }
+                       break;
+               }
+               break;
+       default:
+               DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
+               break;
+       }
+
+       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static void
+atombios_apply_encoder_quirks(struct drm_encoder *encoder,
+                             struct drm_display_mode *mode)
+{
+       struct drm_device *dev = encoder->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+       struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+
+       /* Funky macbooks */
+       if ((dev->pdev->device == 0x71C5) &&
+           (dev->pdev->subsystem_vendor == 0x106b) &&
+           (dev->pdev->subsystem_device == 0x0080)) {
+               if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) {
+                       uint32_t lvtma_bit_depth_control = RREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL);
+
+                       lvtma_bit_depth_control &= ~AVIVO_LVTMA_BIT_DEPTH_CONTROL_TRUNCATE_EN;
+                       lvtma_bit_depth_control &= ~AVIVO_LVTMA_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN;
+
+                       WREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL, lvtma_bit_depth_control);
+               }
+       }
+
+       /* set scaler clears this on some chips */
+       if (ASIC_IS_AVIVO(rdev) && (mode->flags & DRM_MODE_FLAG_INTERLACE))
+               WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, AVIVO_D1MODE_INTERLEAVE_EN);
+}
+
+static void
+radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
+                            struct drm_display_mode *mode,
+                            struct drm_display_mode *adjusted_mode)
+{
+       struct drm_device *dev = encoder->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+       struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+
+       if (radeon_encoder->enc_priv) {
+               struct radeon_encoder_atom_dig *dig;
+
+               dig = radeon_encoder->enc_priv;
+               dig->dig_block = radeon_crtc->crtc_id;
+       }
+       radeon_encoder->pixel_clock = adjusted_mode->clock;
+
+       radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+       atombios_overscan_setup(encoder, mode, adjusted_mode);
+       atombios_scaler_setup(encoder);
+       atombios_set_encoder_crtc_source(encoder);
+
+       if (ASIC_IS_AVIVO(rdev)) {
+               if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT))
+                       atombios_yuv_setup(encoder, true);
+               else
+                       atombios_yuv_setup(encoder, false);
+       }
+
+       switch (radeon_encoder->encoder_id) {
+       case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+       case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+       case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+       case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+               atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_ENABLE);
+               break;
+       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+       case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+               /* disable the encoder and transmitter */
+               atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE);
+               atombios_dig_encoder_setup(encoder, ATOM_DISABLE);
+
+               /* setup and enable the encoder and transmitter */
+               atombios_dig_encoder_setup(encoder, ATOM_ENABLE);
+               atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP);
+               atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE);
+               break;
+       case ENCODER_OBJECT_ID_INTERNAL_DDI:
+               atombios_ddia_setup(encoder, ATOM_ENABLE);
+               break;
+       case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+       case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+               atombios_external_tmds_setup(encoder, ATOM_ENABLE);
+               break;
+       case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+       case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+       case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+       case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+               atombios_dac_setup(encoder, ATOM_ENABLE);
+               if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
+                       atombios_tv_setup(encoder, ATOM_ENABLE);
+               break;
+       }
+       atombios_apply_encoder_quirks(encoder, adjusted_mode);
+}
+
+static bool
+atombios_dac_load_detect(struct drm_encoder *encoder)
+{
+       struct drm_device *dev = encoder->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+
+       if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT |
+                                      ATOM_DEVICE_CV_SUPPORT |
+                                      ATOM_DEVICE_CRT_SUPPORT)) {
+               DAC_LOAD_DETECTION_PS_ALLOCATION args;
+               int index = GetIndexIntoMasterTable(COMMAND, DAC_LoadDetection);
+               uint8_t frev, crev;
+
+               memset(&args, 0, sizeof(args));
+
+               atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
+
+               args.sDacload.ucMisc = 0;
+
+               if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) ||
+                   (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1))
+                       args.sDacload.ucDacType = ATOM_DAC_A;
+               else
+                       args.sDacload.ucDacType = ATOM_DAC_B;
+
+               if (radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT)
+                       args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT1_SUPPORT);
+               else if (radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT)
+                       args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT2_SUPPORT);
+               else if (radeon_encoder->devices & ATOM_DEVICE_CV_SUPPORT) {
+                       args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CV_SUPPORT);
+                       if (crev >= 3)
+                               args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb;
+               } else if (radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) {
+                       args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_TV1_SUPPORT);
+                       if (crev >= 3)
+                               args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb;
+               }
+
+               atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+               return true;
+       }
+       return false;
+}
+
+static enum drm_connector_status
+radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
+{
+       struct drm_device *dev = encoder->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+       uint32_t bios_0_scratch;
+
+       if (!atombios_dac_load_detect(encoder)) {
+               DRM_DEBUG("detect returned false\n");
+               return connector_status_unknown;
+       }
+
+       if (rdev->family >= CHIP_R600)
+               bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH);
+       else
+               bios_0_scratch = RREG32(RADEON_BIOS_0_SCRATCH);
+
+       DRM_DEBUG("Bios 0 scratch %x\n", bios_0_scratch);
+       if (radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) {
+               if (bios_0_scratch & ATOM_S0_CRT1_MASK)
+                       return connector_status_connected;
+       } else if (radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) {
+               if (bios_0_scratch & ATOM_S0_CRT2_MASK)
+                       return connector_status_connected;
+       } else if (radeon_encoder->devices & ATOM_DEVICE_CV_SUPPORT) {
+               if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A))
+                       return connector_status_connected;
+       } else if (radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) {
+               if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A))
+                       return connector_status_connected; /* CTV */
+               else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A))
+                       return connector_status_connected; /* STV */
+       }
+       return connector_status_disconnected;
+}
+
+static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
+{
+       radeon_atom_output_lock(encoder, true);
+       radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
+{
+       radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+       radeon_atom_output_lock(encoder, false);
+}
+
+static const struct drm_encoder_helper_funcs radeon_atom_dig_helper_funcs = {
+       .dpms = radeon_atom_encoder_dpms,
+       .mode_fixup = radeon_atom_mode_fixup,
+       .prepare = radeon_atom_encoder_prepare,
+       .mode_set = radeon_atom_encoder_mode_set,
+       .commit = radeon_atom_encoder_commit,
+       /* no detect for TMDS/LVDS yet */
+};
+
+static const struct drm_encoder_helper_funcs radeon_atom_dac_helper_funcs = {
+       .dpms = radeon_atom_encoder_dpms,
+       .mode_fixup = radeon_atom_mode_fixup,
+       .prepare = radeon_atom_encoder_prepare,
+       .mode_set = radeon_atom_encoder_mode_set,
+       .commit = radeon_atom_encoder_commit,
+       .detect = radeon_atom_dac_detect,
+};
+
+void radeon_enc_destroy(struct drm_encoder *encoder)
+{
+       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+       kfree(radeon_encoder->enc_priv);
+       drm_encoder_cleanup(encoder);
+       kfree(radeon_encoder);
+}
+
+static const struct drm_encoder_funcs radeon_atom_enc_funcs = {
+       .destroy = radeon_enc_destroy,
+};
+
+struct radeon_encoder_atom_dig *
+radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
+{
+       struct radeon_encoder_atom_dig *dig = kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL);
+
+       if (!dig)
+               return NULL;
+
+       /* coherent mode by default */
+       dig->coherent_mode = true;
+
+       return dig;
+}
+
+void
+radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device)
+{
+       struct drm_encoder *encoder;
+       struct radeon_encoder *radeon_encoder;
+
+       /* see if we already added it */
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+               radeon_encoder = to_radeon_encoder(encoder);
+               if (radeon_encoder->encoder_id == encoder_id) {
+                       radeon_encoder->devices |= supported_device;
+                       return;
+               }
+       }
+
+       /* add a new one */
+       radeon_encoder = kzalloc(sizeof(struct radeon_encoder), GFP_KERNEL);
+       if (!radeon_encoder)
+               return;
+
+       encoder = &radeon_encoder->base;
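+       /* any ATOM encoder can be routed to either CRTC; cloning is not
+        * advertised yet
+        */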
+       encoder->possible_crtcs = 0x3;
+       encoder->possible_clones = 0;
+
+       radeon_encoder->enc_priv = NULL;
+
+       radeon_encoder->encoder_id = encoder_id;
+       radeon_encoder->devices = supported_device;
+
+       switch (radeon_encoder->encoder_id) {
+       case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+       case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+       case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+       case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+               if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+                       radeon_encoder->rmx_type = RMX_FULL;
+                       drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
+                       radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
+               } else {
+                       drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
+                       radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
+               }
+               drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
+               break;
+       case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+               drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
+               drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs);
+               break;
+       case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+       case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+       case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+               drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TVDAC);
+               drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs);
+               break;
+       case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+       case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+       case ENCODER_OBJECT_ID_INTERNAL_DDI:
+       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+       case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+               drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
+               radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
+               drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
+               break;
+       }
+}
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
new file mode 100644 (file)
index 0000000..fa86d39
--- /dev/null
@@ -0,0 +1,825 @@
+/*
+ * Copyright © 2007 David Airlie
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     David Airlie
+ */
+    /*
+     *  Modularization
+     */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+#include "radeon_drm.h"
+#include "radeon.h"
+
+struct radeon_fb_device {
+       struct radeon_device            *rdev;
+       struct drm_display_mode         *mode;
+       struct radeon_framebuffer       *rfb;
+       int                             crtc_count;
+       /* crtc currently bound to this */
+       uint32_t                        crtc_ids[2];
+};
+
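+/*
+ * Update the colour map on every CRTC bound to this fbdev emulation; true
+ * 8-bit palettes go through the CRTC gamma table, higher depths fill the
+ * pseudo palette.
+ */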
+static int radeonfb_setcolreg(unsigned regno,
+                             unsigned red,
+                             unsigned green,
+                             unsigned blue,
+                             unsigned transp,
+                             struct fb_info *info)
+{
+       struct radeon_fb_device *rfbdev = info->par;
+       struct drm_device *dev = rfbdev->rdev->ddev;
+       struct drm_crtc *crtc;
+       int i;
+
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+               struct drm_mode_set *modeset = &radeon_crtc->mode_set;
+               struct drm_framebuffer *fb = modeset->fb;
+
+               for (i = 0; i < rfbdev->crtc_count; i++) {
+                       if (crtc->base.id == rfbdev->crtc_ids[i]) {
+                               break;
+                       }
+               }
+               if (i == rfbdev->crtc_count) {
+                       continue;
+               }
+               if (regno > 255) {
+                       return 1;
+               }
+               if (fb->depth == 8) {
+                       radeon_crtc_fb_gamma_set(crtc, red, green, blue, regno);
+                       return 0;
+               }
+
+               if (regno < 16) {
+                       switch (fb->depth) {
+                       case 15:
+                               fb->pseudo_palette[regno] = ((red & 0xf800) >> 1) |
+                                       ((green & 0xf800) >>  6) |
+                                       ((blue & 0xf800) >> 11);
+                               break;
+                       case 16:
+                               fb->pseudo_palette[regno] = (red & 0xf800) |
+                                       ((green & 0xfc00) >>  5) |
+                                       ((blue  & 0xf800) >> 11);
+                               break;
+                       case 24:
+                       case 32:
+                               fb->pseudo_palette[regno] = ((red & 0xff00) << 8) |
+                                       (green & 0xff00) |
+                                       ((blue  & 0xff00) >> 8);
+                               break;
+                       }
+               }
+       }
+       return 0;
+}
+
+static int radeonfb_check_var(struct fb_var_screeninfo *var,
+                             struct fb_info *info)
+{
+       struct radeon_fb_device *rfbdev = info->par;
+       struct radeon_framebuffer *rfb = rfbdev->rfb;
+       struct drm_framebuffer *fb = &rfb->base;
+       int depth;
+
+       if (var->pixclock == -1 || !var->pixclock) {
+               return -EINVAL;
+       }
+       /* Need to resize the fb object !!! */
+       if (var->xres > fb->width || var->yres > fb->height) {
+               DRM_ERROR("Requested width/height is greater than current fb "
+                          "object %dx%d > %dx%d\n", var->xres, var->yres,
+                          fb->width, fb->height);
+               DRM_ERROR("Need resizing code.\n");
+               return -EINVAL;
+       }
+
+       switch (var->bits_per_pixel) {
+       case 16:
+               depth = (var->green.length == 6) ? 16 : 15;
+               break;
+       case 32:
+               depth = (var->transp.length > 0) ? 32 : 24;
+               break;
+       default:
+               depth = var->bits_per_pixel;
+               break;
+       }
+
+       switch (depth) {
+       case 8:
+               var->red.offset = 0;
+               var->green.offset = 0;
+               var->blue.offset = 0;
+               var->red.length = 8;
+               var->green.length = 8;
+               var->blue.length = 8;
+               var->transp.length = 0;
+               var->transp.offset = 0;
+               break;
+       case 15:
+               var->red.offset = 10;
+               var->green.offset = 5;
+               var->blue.offset = 0;
+               var->red.length = 5;
+               var->green.length = 5;
+               var->blue.length = 5;
+               var->transp.length = 1;
+               var->transp.offset = 15;
+               break;
+       case 16:
+               var->red.offset = 11;
+               var->green.offset = 5;
+               var->blue.offset = 0;
+               var->red.length = 5;
+               var->green.length = 6;
+               var->blue.length = 5;
+               var->transp.length = 0;
+               var->transp.offset = 0;
+               break;
+       case 24:
+               var->red.offset = 16;
+               var->green.offset = 8;
+               var->blue.offset = 0;
+               var->red.length = 8;
+               var->green.length = 8;
+               var->blue.length = 8;
+               var->transp.length = 0;
+               var->transp.offset = 0;
+               break;
+       case 32:
+               var->red.offset = 16;
+               var->green.offset = 8;
+               var->blue.offset = 0;
+               var->red.length = 8;
+               var->green.length = 8;
+               var->blue.length = 8;
+               var->transp.length = 8;
+               var->transp.offset = 24;
+               break;
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
+
+/* this will let fbcon do the mode init */
+static int radeonfb_set_par(struct fb_info *info)
+{
+       struct radeon_fb_device *rfbdev = info->par;
+       struct drm_device *dev = rfbdev->rdev->ddev;
+       struct fb_var_screeninfo *var = &info->var;
+       struct drm_crtc *crtc;
+       int ret;
+       int i;
+
+       if (var->pixclock != -1) {
+               DRM_ERROR("PIXEL CLOCK SET\n");
+               return -EINVAL;
+       }
+
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
+               for (i = 0; i < rfbdev->crtc_count; i++) {
+                       if (crtc->base.id == rfbdev->crtc_ids[i]) {
+                               break;
+                       }
+               }
+               if (i == rfbdev->crtc_count) {
+                       continue;
+               }
+               if (crtc->fb == radeon_crtc->mode_set.fb) {
+                       mutex_lock(&dev->mode_config.mutex);
+                       ret = crtc->funcs->set_config(&radeon_crtc->mode_set);
+                       mutex_unlock(&dev->mode_config.mutex);
+                       if (ret) {
+                               return ret;
+                       }
+               }
+       }
+       return 0;
+}
+
+static int radeonfb_pan_display(struct fb_var_screeninfo *var,
+                               struct fb_info *info)
+{
+       struct radeon_fb_device *rfbdev = info->par;
+       struct drm_device *dev = rfbdev->rdev->ddev;
+       struct drm_mode_set *modeset;
+       struct drm_crtc *crtc;
+       struct radeon_crtc *radeon_crtc;
+       int ret = 0;
+       int i;
+
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               for (i = 0; i < rfbdev->crtc_count; i++) {
+                       if (crtc->base.id == rfbdev->crtc_ids[i]) {
+                               break;
+                       }
+               }
+
+               if (i == rfbdev->crtc_count) {
+                       continue;
+               }
+
+               radeon_crtc = to_radeon_crtc(crtc);
+               modeset = &radeon_crtc->mode_set;
+
+               modeset->x = var->xoffset;
+               modeset->y = var->yoffset;
+
+               if (modeset->num_connectors) {
+                       mutex_lock(&dev->mode_config.mutex);
+                       ret = crtc->funcs->set_config(modeset);
+                       mutex_unlock(&dev->mode_config.mutex);
+                       if (!ret) {
+                               info->var.xoffset = var->xoffset;
+                               info->var.yoffset = var->yoffset;
+                       }
+               }
+       }
+       return ret;
+}
+
+static void radeonfb_on(struct fb_info *info)
+{
+       struct radeon_fb_device *rfbdev = info->par;
+       struct drm_device *dev = rfbdev->rdev->ddev;
+       struct drm_crtc *crtc;
+       struct drm_encoder *encoder;
+       int i;
+
+       /*
+        * For each CRTC in this fb, turn the CRTC back on, then
+        * turn on every encoder associated with it.
+        */
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+
+               for (i = 0; i < rfbdev->crtc_count; i++) {
+                       if (crtc->base.id == rfbdev->crtc_ids[i]) {
+                               break;
+                       }
+               }
+
+               mutex_lock(&dev->mode_config.mutex);
+               crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+               mutex_unlock(&dev->mode_config.mutex);
+
+               /* Found a CRTC on this fb, now find encoders */
+               list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+                       if (encoder->crtc == crtc) {
+                               struct drm_encoder_helper_funcs *encoder_funcs;
+
+                               encoder_funcs = encoder->helper_private;
+                               mutex_lock(&dev->mode_config.mutex);
+                               encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
+                               mutex_unlock(&dev->mode_config.mutex);
+                       }
+               }
+       }
+}
+
+static void radeonfb_off(struct fb_info *info, int dpms_mode)
+{
+       struct radeon_fb_device *rfbdev = info->par;
+       struct drm_device *dev = rfbdev->rdev->ddev;
+       struct drm_crtc *crtc;
+       struct drm_encoder *encoder;
+       int i;
+
+       /*
+        * For each CRTC in this fb, find all associated encoders
+        * and turn them off, then turn off the CRTC.
+        */
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+
+               for (i = 0; i < rfbdev->crtc_count; i++) {
+                       if (crtc->base.id == rfbdev->crtc_ids[i]) {
+                               break;
+                       }
+               }
+
+               /* Found a CRTC on this fb, now find encoders */
+               list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+                       if (encoder->crtc == crtc) {
+                               struct drm_encoder_helper_funcs *encoder_funcs;
+
+                               encoder_funcs = encoder->helper_private;
+                               mutex_lock(&dev->mode_config.mutex);
+                               encoder_funcs->dpms(encoder, dpms_mode);
+                               mutex_unlock(&dev->mode_config.mutex);
+                       }
+               }
+               if (dpms_mode == DRM_MODE_DPMS_OFF) {
+                       mutex_lock(&dev->mode_config.mutex);
+                       crtc_funcs->dpms(crtc, dpms_mode);
+                       mutex_unlock(&dev->mode_config.mutex);
+               }
+       }
+}
+
+int radeonfb_blank(int blank, struct fb_info *info)
+{
+       switch (blank) {
+       case FB_BLANK_UNBLANK:
+               radeonfb_on(info);
+               break;
+       case FB_BLANK_NORMAL:
+               radeonfb_off(info, DRM_MODE_DPMS_STANDBY);
+               break;
+       case FB_BLANK_HSYNC_SUSPEND:
+               radeonfb_off(info, DRM_MODE_DPMS_STANDBY);
+               break;
+       case FB_BLANK_VSYNC_SUSPEND:
+               radeonfb_off(info, DRM_MODE_DPMS_SUSPEND);
+               break;
+       case FB_BLANK_POWERDOWN:
+               radeonfb_off(info, DRM_MODE_DPMS_OFF);
+               break;
+       }
+       return 0;
+}
+
+static struct fb_ops radeonfb_ops = {
+       .owner = THIS_MODULE,
+       .fb_check_var = radeonfb_check_var,
+       .fb_set_par = radeonfb_set_par,
+       .fb_setcolreg = radeonfb_setcolreg,
+       .fb_fillrect = cfb_fillrect,
+       .fb_copyarea = cfb_copyarea,
+       .fb_imageblit = cfb_imageblit,
+       .fb_pan_display = radeonfb_pan_display,
+       .fb_blank = radeonfb_blank,
+};
+
+/**
+ * Currently it is assumed that the old framebuffer is reused.
+ *
+ * LOCKING
+ * Caller should hold the mode config lock.
+ *
+ */
+int radeonfb_resize(struct drm_device *dev, struct drm_crtc *crtc)
+{
+       struct fb_info *info;
+       struct drm_framebuffer *fb;
+       struct drm_display_mode *mode = crtc->desired_mode;
+
+       fb = crtc->fb;
+       if (fb == NULL) {
+               return 1;
+       }
+       info = fb->fbdev;
+       if (info == NULL) {
+               return 1;
+       }
+       if (mode == NULL) {
+               return 1;
+       }
+       info->var.xres = mode->hdisplay;
+       info->var.right_margin = mode->hsync_start - mode->hdisplay;
+       info->var.hsync_len = mode->hsync_end - mode->hsync_start;
+       info->var.left_margin = mode->htotal - mode->hsync_end;
+       info->var.yres = mode->vdisplay;
+       info->var.lower_margin = mode->vsync_start - mode->vdisplay;
+       info->var.vsync_len = mode->vsync_end - mode->vsync_start;
+       info->var.upper_margin = mode->vtotal - mode->vsync_end;
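+       /*
+        * The staged multiplies/divides below keep the intermediate values
+        * within 32 bits; numerically the result behaves like a constant
+        * divided by (htotal * vtotal * vrefresh), i.e. it shrinks as the
+        * mode's pixel rate grows.
+        */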
+       info->var.pixclock = 10000000 / mode->htotal * 1000 / mode->vtotal * 100;
+       /* avoid overflow */
+       info->var.pixclock = info->var.pixclock * 1000 / mode->vrefresh;
+
+       return 0;
+}
+EXPORT_SYMBOL(radeonfb_resize);
+
+static struct drm_mode_set panic_mode;
+
+int radeonfb_panic(struct notifier_block *n, unsigned long unused,
+                 void *panic_str)
+{
+       DRM_ERROR("panic occurred, switching back to text console\n");
+       drm_crtc_helper_set_config(&panic_mode);
+       return 0;
+}
+EXPORT_SYMBOL(radeonfb_panic);
+
+static struct notifier_block paniced = {
+       .notifier_call = radeonfb_panic,
+};
+
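+/*
+ * Pitch alignment example: at 32 bpp the mask below is 63 pixels on AVIVO
+ * parts and 15 on older ones, so a 1366 pixel wide mode is padded to
+ * (1366 + 63) & ~63 = 1408 pixels on AVIVO, or 1376 pixels otherwise.
+ * The caller still multiplies the result by the bytes per pixel.
+ */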
+static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp)
+{
+       int aligned = width;
+       int align_large = (ASIC_IS_AVIVO(rdev));
+       int pitch_mask = 0;
+
+       switch (bpp / 8) {
+       case 1:
+               pitch_mask = align_large ? 255 : 127;
+               break;
+       case 2:
+               pitch_mask = align_large ? 127 : 31;
+               break;
+       case 3:
+       case 4:
+               pitch_mask = align_large ? 63 : 15;
+               break;
+       }
+
+       aligned += pitch_mask;
+       aligned &= ~pitch_mask;
+       return aligned;
+}
+
+int radeonfb_create(struct radeon_device *rdev,
+                   uint32_t fb_width, uint32_t fb_height,
+                   uint32_t surface_width, uint32_t surface_height,
+                   struct radeon_framebuffer **rfb_p)
+{
+       struct fb_info *info;
+       struct radeon_fb_device *rfbdev;
+       struct drm_framebuffer *fb;
+       struct radeon_framebuffer *rfb;
+       struct drm_mode_fb_cmd mode_cmd;
+       struct drm_gem_object *gobj = NULL;
+       struct radeon_object *robj = NULL;
+       struct device *device = &rdev->pdev->dev;
+       int size, aligned_size, ret;
+       void *fbptr = NULL;
+
+       mode_cmd.width = surface_width;
+       mode_cmd.height = surface_height;
+       mode_cmd.bpp = 32;
+       /* need to align pitch with crtc limits */
+       mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp) * ((mode_cmd.bpp + 1) / 8);
+       mode_cmd.depth = 24;
+
+       size = mode_cmd.pitch * mode_cmd.height;
+       aligned_size = ALIGN(size, PAGE_SIZE);
+
+       ret = radeon_gem_object_create(rdev, aligned_size, 0,
+                                      RADEON_GEM_DOMAIN_VRAM,
+                                      false, ttm_bo_type_kernel,
+                                      false, &gobj);
+       if (ret) {
+               printk(KERN_ERR "failed to allocate framebuffer\n");
+               ret = -ENOMEM;
+               goto out;
+       }
+       robj = gobj->driver_private;
+
+       mutex_lock(&rdev->ddev->struct_mutex);
+       fb = radeon_framebuffer_create(rdev->ddev, &mode_cmd, gobj);
+       if (fb == NULL) {
+               DRM_ERROR("failed to allocate fb.\n");
+               ret = -ENOMEM;
+               goto out_unref;
+       }
+
+       list_add(&fb->filp_head, &rdev->ddev->mode_config.fb_kernel_list);
+
+       rfb = to_radeon_framebuffer(fb);
+       *rfb_p = rfb;
+       rdev->fbdev_rfb = rfb;
+
+       info = framebuffer_alloc(sizeof(struct radeon_fb_device), device);
+       if (info == NULL) {
+               ret = -ENOMEM;
+               goto out_unref;
+       }
+       rfbdev = info->par;
+
+       ret = radeon_object_kmap(robj, &fbptr);
+       if (ret) {
+               goto out_unref;
+       }
+
+       strcpy(info->fix.id, "radeondrmfb");
+       info->fix.type = FB_TYPE_PACKED_PIXELS;
+       info->fix.visual = FB_VISUAL_TRUECOLOR;
+       info->fix.type_aux = 0;
+       info->fix.xpanstep = 1; /* doing it in hw */
+       info->fix.ypanstep = 1; /* doing it in hw */
+       info->fix.ywrapstep = 0;
+       info->fix.accel = FB_ACCEL_I830;
+       info->flags = FBINFO_DEFAULT;
+       info->fbops = &radeonfb_ops;
+       info->fix.line_length = fb->pitch;
+       info->screen_base = fbptr;
+       info->fix.smem_start = (unsigned long)fbptr;
+       info->fix.smem_len = size;
+       info->screen_size = size;
+       info->pseudo_palette = fb->pseudo_palette;
+       info->var.xres_virtual = fb->width;
+       info->var.yres_virtual = fb->height;
+       info->var.bits_per_pixel = fb->bits_per_pixel;
+       info->var.xoffset = 0;
+       info->var.yoffset = 0;
+       info->var.activate = FB_ACTIVATE_NOW;
+       info->var.height = -1;
+       info->var.width = -1;
+       info->var.xres = fb_width;
+       info->var.yres = fb_height;
+       info->fix.mmio_start = pci_resource_start(rdev->pdev, 2);
+       info->fix.mmio_len = pci_resource_len(rdev->pdev, 2);
+       info->pixmap.size = 64*1024;
+       info->pixmap.buf_align = 8;
+       info->pixmap.access_align = 32;
+       info->pixmap.flags = FB_PIXMAP_SYSTEM;
+       info->pixmap.scan_align = 1;
+       if (info->screen_base == NULL) {
+               ret = -ENOSPC;
+               goto out_unref;
+       }
+       DRM_INFO("fb mappable at 0x%lX\n",  info->fix.smem_start);
+       DRM_INFO("vram aperture at 0x%lX\n",  (unsigned long)rdev->mc.aper_base);
+       DRM_INFO("size %lu\n", (unsigned long)size);
+       DRM_INFO("fb depth is %d\n", fb->depth);
+       DRM_INFO("   pitch is %d\n", fb->pitch);
+
+       switch (fb->depth) {
+       case 8:
+               info->var.red.offset = 0;
+               info->var.green.offset = 0;
+               info->var.blue.offset = 0;
+               info->var.red.length = 8; /* 8bit DAC */
+               info->var.green.length = 8;
+               info->var.blue.length = 8;
+               info->var.transp.offset = 0;
+               info->var.transp.length = 0;
+               break;
+       case 15:
+               info->var.red.offset = 10;
+               info->var.green.offset = 5;
+               info->var.blue.offset = 0;
+               info->var.red.length = 5;
+               info->var.green.length = 5;
+               info->var.blue.length = 5;
+               info->var.transp.offset = 15;
+               info->var.transp.length = 1;
+               break;
+       case 16:
+               info->var.red.offset = 11;
+               info->var.green.offset = 5;
+               info->var.blue.offset = 0;
+               info->var.red.length = 5;
+               info->var.green.length = 6;
+               info->var.blue.length = 5;
+               info->var.transp.offset = 0;
+               break;
+       case 24:
+               info->var.red.offset = 16;
+               info->var.green.offset = 8;
+               info->var.blue.offset = 0;
+               info->var.red.length = 8;
+               info->var.green.length = 8;
+               info->var.blue.length = 8;
+               info->var.transp.offset = 0;
+               info->var.transp.length = 0;
+               break;
+       case 32:
+               info->var.red.offset = 16;
+               info->var.green.offset = 8;
+               info->var.blue.offset = 0;
+               info->var.red.length = 8;
+               info->var.green.length = 8;
+               info->var.blue.length = 8;
+               info->var.transp.offset = 24;
+               info->var.transp.length = 8;
+               break;
+       default:
+               break;
+       }
+
+       fb->fbdev = info;
+       rfbdev->rfb = rfb;
+       rfbdev->rdev = rdev;
+
+       mutex_unlock(&rdev->ddev->struct_mutex);
+       return 0;
+
+out_unref:
+       if (robj) {
+               radeon_object_kunmap(robj);
+       }
+       if (fb && ret) {
+               list_del(&fb->filp_head);
+               drm_gem_object_unreference(gobj);
+               drm_framebuffer_cleanup(fb);
+               kfree(fb);
+       }
+       drm_gem_object_unreference(gobj);
+       mutex_unlock(&rdev->ddev->struct_mutex);
+out:
+       return ret;
+}
+
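+/*
+ * The single-fb probe below sizes one console framebuffer for all heads:
+ * the visible area is clamped to the smallest desired mode among the active
+ * CRTCs (so the console fits on every display) while the surface itself is
+ * allocated at the largest, and each CRTC's mode_set is then pointed at
+ * that framebuffer.
+ */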
+static int radeonfb_single_fb_probe(struct radeon_device *rdev)
+{
+       struct drm_crtc *crtc;
+       struct drm_connector *connector;
+       unsigned int fb_width = (unsigned)-1, fb_height = (unsigned)-1;
+       unsigned int surface_width = 0, surface_height = 0;
+       int new_fb = 0;
+       int crtc_count = 0;
+       int ret, i, conn_count = 0;
+       struct radeon_framebuffer *rfb;
+       struct fb_info *info;
+       struct radeon_fb_device *rfbdev;
+       struct drm_mode_set *modeset = NULL;
+
+       /* first up get a count of crtcs now in use and new min/maxes width/heights */
+       list_for_each_entry(crtc, &rdev->ddev->mode_config.crtc_list, head) {
+               if (drm_helper_crtc_in_use(crtc)) {
+                       if (crtc->desired_mode) {
+                               if (crtc->desired_mode->hdisplay < fb_width)
+                                       fb_width = crtc->desired_mode->hdisplay;
+
+                               if (crtc->desired_mode->vdisplay < fb_height)
+                                       fb_height = crtc->desired_mode->vdisplay;
+
+                               if (crtc->desired_mode->hdisplay > surface_width)
+                                       surface_width = crtc->desired_mode->hdisplay;
+
+                               if (crtc->desired_mode->vdisplay > surface_height)
+                                       surface_height = crtc->desired_mode->vdisplay;
+                       }
+                       crtc_count++;
+               }
+       }
+
+       if (crtc_count == 0 || fb_width == -1 || fb_height == -1) {
+               /* hmm everyone went away - assume VGA cable just fell out
+                  and will come back later. */
+               return 0;
+       }
+
+       /* do we have an fb already? */
+       if (list_empty(&rdev->ddev->mode_config.fb_kernel_list)) {
+               /* create an fb if we don't have one */
+               ret = radeonfb_create(rdev, fb_width, fb_height, surface_width, surface_height, &rfb);
+               if (ret) {
+                       return -EINVAL;
+               }
+               new_fb = 1;
+       } else {
+               struct drm_framebuffer *fb;
+               fb = list_first_entry(&rdev->ddev->mode_config.fb_kernel_list, struct drm_framebuffer, filp_head);
+               rfb = to_radeon_framebuffer(fb);
+
+               /* if someone hotplugs something bigger than we have already allocated, we are pwned.
+                  As really we can't resize an fbdev that is in the wild currently due to fbdev
+                  not really being designed for the lower layers moving stuff around under it.
+                  - so in the grand style of things - punt. */
+               if ((fb->width < surface_width) || (fb->height < surface_height)) {
+                       DRM_ERROR("Framebuffer not large enough to scale console onto.\n");
+                       return -EINVAL;
+               }
+       }
+
+       info = rfb->base.fbdev;
+       rdev->fbdev_info = info;
+       rfbdev = info->par;
+
+       crtc_count = 0;
+       /* okay we need to setup new connector sets in the crtcs */
+       list_for_each_entry(crtc, &rdev->ddev->mode_config.crtc_list, head) {
+               struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+               modeset = &radeon_crtc->mode_set;
+               modeset->fb = &rfb->base;
+               conn_count = 0;
+               list_for_each_entry(connector, &rdev->ddev->mode_config.connector_list, head) {
+                       if (connector->encoder)
+                               if (connector->encoder->crtc == modeset->crtc) {
+                                       modeset->connectors[conn_count] = connector;
+                                       conn_count++;
+                                       if (conn_count > RADEONFB_CONN_LIMIT)
+                                               BUG();
+                               }
+               }
+
+               for (i = conn_count; i < RADEONFB_CONN_LIMIT; i++)
+                       modeset->connectors[i] = NULL;
+
+
+               rfbdev->crtc_ids[crtc_count++] = crtc->base.id;
+
+               modeset->num_connectors = conn_count;
+               if (modeset->crtc->desired_mode) {
+                       if (modeset->mode) {
+                               drm_mode_destroy(rdev->ddev, modeset->mode);
+                       }
+                       modeset->mode = drm_mode_duplicate(rdev->ddev,
+                                                          modeset->crtc->desired_mode);
+               }
+       }
+       rfbdev->crtc_count = crtc_count;
+
+       if (new_fb) {
+               info->var.pixclock = -1;
+               if (register_framebuffer(info) < 0)
+                       return -EINVAL;
+       } else {
+               radeonfb_set_par(info);
+       }
+       printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
+              info->fix.id);
+
+       /* Switch back to kernel console on panic */
+       panic_mode = *modeset;
+       atomic_notifier_chain_register(&panic_notifier_list, &paniced);
+       printk(KERN_INFO "registered panic notifier\n");
+
+       return 0;
+}
+
+int radeonfb_probe(struct drm_device *dev)
+{
+       int ret;
+
+       /* something has changed in the lower levels of hell - deal with it
+          here */
+
+       /* two modes : a) 1 fb to rule all crtcs.
+                      b) one fb per crtc.
+          two actions 1) new connected device
+                      2) device removed.
+          case a/1 : if the fb surface isn't big enough - resize the surface fb.
+                     if the fb size isn't big enough - resize fb into surface.
+                     if everything big enough configure the new crtc/etc.
+          case a/2 : undo the configuration
+                     possibly resize down the fb to fit the new configuration.
+           case b/1 : see if it is on a new crtc - setup a new fb and add it.
+          case b/2 : teardown the new fb.
+       */
+       ret = radeonfb_single_fb_probe(dev->dev_private);
+       return ret;
+}
+EXPORT_SYMBOL(radeonfb_probe);
+
+int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
+{
+       struct fb_info *info;
+       struct radeon_framebuffer *rfb = to_radeon_framebuffer(fb);
+       struct radeon_object *robj;
+
+       if (!fb) {
+               return -EINVAL;
+       }
+       info = fb->fbdev;
+       if (info) {
+               robj = rfb->obj->driver_private;
+               unregister_framebuffer(info);
+               radeon_object_kunmap(robj);
+               framebuffer_release(info);
+       }
+
+       printk(KERN_INFO "unregistered panic notifier\n");
+       atomic_notifier_chain_unregister(&panic_notifier_list, &paniced);
+       memset(&panic_mode, 0, sizeof(struct drm_mode_set));
+       return 0;
+}
+EXPORT_SYMBOL(radeonfb_remove);
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
new file mode 100644 (file)
index 0000000..96afbf5
--- /dev/null
@@ -0,0 +1,387 @@
+/*
+ * Copyright 2009 Jerome Glisse.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ *    Jerome Glisse <glisse@freedesktop.org>
+ *    Dave Airlie
+ */
+#include <linux/seq_file.h>
+#include <asm/atomic.h>
+#include <linux/wait.h>
+#include <linux/list.h>
+#include <linux/kref.h>
+#include "drmP.h"
+#include "drm.h"
+#include "radeon_reg.h"
+#include "radeon.h"
+
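+/*
+ * Rough outline of the fence flow implemented below: emitting a fence
+ * assigns it the next value of fence_drv.seq and, when the CP is running,
+ * asks the ring to write that value to fence_drv.scratch_reg once the
+ * preceding work has completed.  radeon_fence_poll_locked() then compares
+ * the scratch register against the emitted list and marks the matching
+ * fence and every older one as signaled.  Waiters sleep on fence_drv.queue
+ * and are woken from radeon_fence_process().
+ */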
+int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
+{
+       unsigned long irq_flags;
+
+       write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
+       if (fence->emited) {
+               write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+               return 0;
+       }
+       fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
+       if (!rdev->cp.ready) {
+               /* FIXME: CP is not running; assume everything is done right
+                * away
+                */
+               WREG32(rdev->fence_drv.scratch_reg, fence->seq);
+       } else {
+               radeon_fence_ring_emit(rdev, fence);
+       }
+       fence->emited = true;
+       fence->timeout = jiffies + ((2000 * HZ) / 1000);
+       list_del(&fence->list);
+       list_add_tail(&fence->list, &rdev->fence_drv.emited);
+       write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+       return 0;
+}
+
+static bool radeon_fence_poll_locked(struct radeon_device *rdev)
+{
+       struct radeon_fence *fence;
+       struct list_head *i, *n;
+       uint32_t seq;
+       bool wake = false;
+
+       if (rdev == NULL) {
+               return true;
+       }
+       if (rdev->shutdown) {
+               return true;
+       }
+       seq = RREG32(rdev->fence_drv.scratch_reg);
+       rdev->fence_drv.last_seq = seq;
+       n = NULL;
+       list_for_each(i, &rdev->fence_drv.emited) {
+               fence = list_entry(i, struct radeon_fence, list);
+               if (fence->seq == seq) {
+                       n = i;
+                       break;
+               }
+       }
+       /* all fences previous to this one are considered signaled */
+       if (n) {
+               i = n;
+               do {
+                       n = i->prev;
+                       list_del(i);
+                       list_add_tail(i, &rdev->fence_drv.signaled);
+                       fence = list_entry(i, struct radeon_fence, list);
+                       fence->signaled = true;
+                       i = n;
+               } while (i != &rdev->fence_drv.emited);
+               wake = true;
+       }
+       return wake;
+}
+
+static void radeon_fence_destroy(struct kref *kref)
+{
+       unsigned long irq_flags;
+       struct radeon_fence *fence;
+
+       fence = container_of(kref, struct radeon_fence, kref);
+       write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
+       list_del(&fence->list);
+       fence->emited = false;
+       write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
+       kfree(fence);
+}
+
+int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence)
+{
+       unsigned long irq_flags;
+
+       *fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
+       if ((*fence) == NULL) {
+               return -ENOMEM;
+       }
+       kref_init(&((*fence)->kref));
+       (*fence)->rdev = rdev;
+       (*fence)->emited = false;
+       (*fence)->signaled = false;
+       (*fence)->seq = 0;
+       INIT_LIST_HEAD(&(*fence)->list);
+
+       write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
+       list_add_tail(&(*fence)->list, &rdev->fence_drv.created);
+       write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+       return 0;
+}
+
+
+bool radeon_fence_signaled(struct radeon_fence *fence)
+{
+       unsigned long irq_flags;
+       bool signaled = false;
+
+       if (fence == NULL) {
+               return true;
+       }
+       if (fence->rdev->gpu_lockup) {
+               return true;
+       }
+       write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
+       signaled = fence->signaled;
+       /* if we are shutting down, report all fences as signaled */
+       if (fence->rdev->shutdown) {
+               signaled = true;
+       }
+       if (!fence->emited) {
+               WARN(1, "Querying an unemitted fence: %p!\n", fence);
+               signaled = true;
+       }
+       if (!signaled) {
+               radeon_fence_poll_locked(fence->rdev);
+               signaled = fence->signaled;
+       }
+       write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
+       return signaled;
+}
+
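+/*
+ * Waiting (below) is done in short HZ/100 slices against the fence's roughly
+ * two second timeout set at emit time.  If the deadline passes and the wait
+ * has run more than 500ms beyond it, the code assumes a lockup, resets the
+ * GPU and force-writes the sequence number so the waiter can finish.
+ */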
+int radeon_fence_wait(struct radeon_fence *fence, bool interruptible)
+{
+       struct radeon_device *rdev;
+       unsigned long cur_jiffies;
+       unsigned long timeout;
+       bool expired = false;
+       int r;
+
+
+       if (fence == NULL) {
+               WARN(1, "Querying an invalid fence: %p!\n", fence);
+               return 0;
+       }
+       rdev = fence->rdev;
+       if (radeon_fence_signaled(fence)) {
+               return 0;
+       }
+retry:
+       cur_jiffies = jiffies;
+       timeout = HZ / 100;
+       if (time_after(fence->timeout, cur_jiffies)) {
+               timeout = fence->timeout - cur_jiffies;
+       }
+       if (interruptible) {
+               r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
+                               radeon_fence_signaled(fence), timeout);
+               if (unlikely(r == -ERESTARTSYS)) {
+                       return -ERESTART;
+               }
+       } else {
+               r = wait_event_timeout(rdev->fence_drv.queue,
+                        radeon_fence_signaled(fence), timeout);
+       }
+       if (unlikely(!radeon_fence_signaled(fence))) {
+               if (unlikely(r == 0)) {
+                       expired = true;
+               }
+               if (unlikely(expired)) {
+                       timeout = 1;
+                       if (time_after(cur_jiffies, fence->timeout)) {
+                               timeout = cur_jiffies - fence->timeout;
+                       }
+                       timeout = jiffies_to_msecs(timeout);
+                       if (timeout > 500) {
+                               DRM_ERROR("fence(%p:0x%08X) %lums timeout "
+                                         "going to reset GPU\n",
+                                         fence, fence->seq, timeout);
+                               radeon_gpu_reset(rdev);
+                               WREG32(rdev->fence_drv.scratch_reg, fence->seq);
+                       }
+               }
+               goto retry;
+       }
+       if (unlikely(expired)) {
+               rdev->fence_drv.count_timeout++;
+               cur_jiffies = jiffies;
+               timeout = 1;
+               if (time_after(cur_jiffies, fence->timeout)) {
+                       timeout = cur_jiffies - fence->timeout;
+               }
+               timeout = jiffies_to_msecs(timeout);
+               DRM_ERROR("fence(%p:0x%08X) %lums timeout\n",
+                         fence, fence->seq, timeout);
+               DRM_ERROR("last signaled fence(0x%08X)\n",
+                         rdev->fence_drv.last_seq);
+       }
+       return 0;
+}
+
+int radeon_fence_wait_next(struct radeon_device *rdev)
+{
+       unsigned long irq_flags;
+       struct radeon_fence *fence;
+       int r;
+
+       if (rdev->gpu_lockup) {
+               return 0;
+       }
+       write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
+       if (list_empty(&rdev->fence_drv.emited)) {
+               write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+               return 0;
+       }
+       fence = list_entry(rdev->fence_drv.emited.next,
+                          struct radeon_fence, list);
+       radeon_fence_ref(fence);
+       write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+       r = radeon_fence_wait(fence, false);
+       radeon_fence_unref(&fence);
+       return r;
+}
+
+int radeon_fence_wait_last(struct radeon_device *rdev)
+{
+       unsigned long irq_flags;
+       struct radeon_fence *fence;
+       int r;
+
+       if (rdev->gpu_lockup) {
+               return 0;
+       }
+       write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
+       if (list_empty(&rdev->fence_drv.emited)) {
+               write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+               return 0;
+       }
+       fence = list_entry(rdev->fence_drv.emited.prev,
+                          struct radeon_fence, list);
+       radeon_fence_ref(fence);
+       write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+       r = radeon_fence_wait(fence, false);
+       radeon_fence_unref(&fence);
+       return r;
+}
+
+struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
+{
+       kref_get(&fence->kref);
+       return fence;
+}
+
+void radeon_fence_unref(struct radeon_fence **fence)
+{
+       struct radeon_fence *tmp = *fence;
+
+       *fence = NULL;
+       if (tmp) {
+               kref_put(&tmp->kref, &radeon_fence_destroy);
+       }
+}
+
+void radeon_fence_process(struct radeon_device *rdev)
+{
+       unsigned long irq_flags;
+       bool wake;
+
+       write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
+       wake = radeon_fence_poll_locked(rdev);
+       write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+       if (wake) {
+               wake_up_all(&rdev->fence_drv.queue);
+       }
+}
+
+int radeon_fence_driver_init(struct radeon_device *rdev)
+{
+       unsigned long irq_flags;
+       int r;
+
+       write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
+       r = radeon_scratch_get(rdev, &rdev->fence_drv.scratch_reg);
+       if (r) {
+               DRM_ERROR("Fence failed to get a scratch register.");
+               write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+               return r;
+       }
+       WREG32(rdev->fence_drv.scratch_reg, 0);
+       atomic_set(&rdev->fence_drv.seq, 0);
+       INIT_LIST_HEAD(&rdev->fence_drv.created);
+       INIT_LIST_HEAD(&rdev->fence_drv.emited);
+       INIT_LIST_HEAD(&rdev->fence_drv.signaled);
+       rdev->fence_drv.count_timeout = 0;
+       init_waitqueue_head(&rdev->fence_drv.queue);
+       write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+       if (radeon_debugfs_fence_init(rdev)) {
+               DRM_ERROR("Failed to register debugfs file for fence !\n");
+       }
+       return 0;
+}
+
+void radeon_fence_driver_fini(struct radeon_device *rdev)
+{
+       unsigned long irq_flags;
+
+       wake_up_all(&rdev->fence_drv.queue);
+       write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
+       radeon_scratch_free(rdev, rdev->fence_drv.scratch_reg);
+       write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+       DRM_INFO("radeon: fence finalized\n");
+}
+
+
+/*
+ * Fence debugfs
+ */
+#if defined(CONFIG_DEBUG_FS)
+static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *)m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_fence *fence;
+
+       seq_printf(m, "Last signaled fence 0x%08X\n",
+                  RREG32(rdev->fence_drv.scratch_reg));
+       if (!list_empty(&rdev->fence_drv.emited)) {
+                  fence = list_entry(rdev->fence_drv.emited.prev,
+                                     struct radeon_fence, list);
+                  seq_printf(m, "Last emitted fence %p with 0x%08X\n",
+                             fence,  fence->seq);
+       }
+       return 0;
+}
+
+static struct drm_info_list radeon_debugfs_fence_list[] = {
+       {"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
+};
+#endif
+
+int radeon_debugfs_fence_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+       return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 1);
+#else
+       return 0;
+#endif
+}
diff --git a/drivers/gpu/drm/radeon/radeon_fixed.h b/drivers/gpu/drm/radeon/radeon_fixed.h
new file mode 100644 (file)
index 0000000..90187d1
--- /dev/null
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ */
+#ifndef RADEON_FIXED_H
+#define RADEON_FIXED_H
+
+typedef union rfixed {
+       u32 full;
+} fixed20_12;
+
+
+#define rfixed_const(A) (u32)(((A) << 12))/*  + ((B + 0.000122)*4096)) */
+#define rfixed_const_half(A) (u32)(((A) << 12) + 2048)
+#define rfixed_const_666(A) (u32)(((A) << 12) + 2731)
+#define rfixed_const_8(A) (u32)(((A) << 12) + 3277)
+#define rfixed_mul(A, B) ((u64)((u64)(A).full * (B).full + 2048) >> 12)
+#define fixed_init(A) { .full = rfixed_const((A)) }
+#define fixed_init_half(A) { .full = rfixed_const_half((A)) }
+#define rfixed_trunc(A) ((A).full >> 12)
+
+static inline u32 rfixed_div(fixed20_12 A, fixed20_12 B)
+{
+       u64 tmp = ((u64)A.full << 13);
+
+       do_div(tmp, B.full);
+       tmp += 1;
+       tmp /= 2;
+       return lower_32_bits(tmp);
+}
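+
+/*
+ * Worked example of the 20.12 format used above: 1.5 is 1.5 * 4096 = 6144
+ * and 2.0 is 8192.  rfixed_mul(1.5, 2.0) computes (6144 * 8192 + 2048) >> 12
+ * = 12288, i.e. 3.0, with the +2048 rounding the dropped bits.  rfixed_div()
+ * shifts the dividend by 13 instead of 12 and then does (+1) / 2 so the
+ * quotient is rounded to the nearest representable value as well.
+ */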
+#endif
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
new file mode 100644 (file)
index 0000000..d343a15
--- /dev/null
@@ -0,0 +1,233 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include "drmP.h"
+#include "radeon_drm.h"
+#include "radeon.h"
+#include "radeon_reg.h"
+
+/*
+ * Common GART table functions.
+ */
+int radeon_gart_table_ram_alloc(struct radeon_device *rdev)
+{
+       void *ptr;
+
+       ptr = pci_alloc_consistent(rdev->pdev, rdev->gart.table_size,
+                                  &rdev->gart.table_addr);
+       if (ptr == NULL) {
+               return -ENOMEM;
+       }
+#ifdef CONFIG_X86
+       if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
+           rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
+               set_memory_uc((unsigned long)ptr,
+                             rdev->gart.table_size >> PAGE_SHIFT);
+       }
+#endif
+       rdev->gart.table.ram.ptr = ptr;
+       memset((void *)rdev->gart.table.ram.ptr, 0, rdev->gart.table_size);
+       return 0;
+}
+
+void radeon_gart_table_ram_free(struct radeon_device *rdev)
+{
+       if (rdev->gart.table.ram.ptr == NULL) {
+               return;
+       }
+#ifdef CONFIG_X86
+       if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
+           rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
+               set_memory_wb((unsigned long)rdev->gart.table.ram.ptr,
+                             rdev->gart.table_size >> PAGE_SHIFT);
+       }
+#endif
+       pci_free_consistent(rdev->pdev, rdev->gart.table_size,
+                           (void *)rdev->gart.table.ram.ptr,
+                           rdev->gart.table_addr);
+       rdev->gart.table.ram.ptr = NULL;
+       rdev->gart.table_addr = 0;
+}
+
+int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
+{
+       uint64_t gpu_addr;
+       int r;
+
+       if (rdev->gart.table.vram.robj == NULL) {
+               r = radeon_object_create(rdev, NULL,
+                                        rdev->gart.table_size,
+                                        true,
+                                        RADEON_GEM_DOMAIN_VRAM,
+                                        false, &rdev->gart.table.vram.robj);
+               if (r) {
+                       return r;
+               }
+       }
+       r = radeon_object_pin(rdev->gart.table.vram.robj,
+                             RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
+       if (r) {
+               radeon_object_unref(&rdev->gart.table.vram.robj);
+               return r;
+       }
+       r = radeon_object_kmap(rdev->gart.table.vram.robj,
+                              (void **)&rdev->gart.table.vram.ptr);
+       if (r) {
+               radeon_object_unpin(rdev->gart.table.vram.robj);
+               radeon_object_unref(&rdev->gart.table.vram.robj);
+               DRM_ERROR("radeon: failed to map gart vram table.\n");
+               return r;
+       }
+       rdev->gart.table_addr = gpu_addr;
+       return 0;
+}
+
+void radeon_gart_table_vram_free(struct radeon_device *rdev)
+{
+       if (rdev->gart.table.vram.robj == NULL) {
+               return;
+       }
+       radeon_object_kunmap(rdev->gart.table.vram.robj);
+       radeon_object_unpin(rdev->gart.table.vram.robj);
+       radeon_object_unref(&rdev->gart.table.vram.robj);
+}
+
+
+
+
+/*
+ * Common gart functions.
+ */
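+/*
+ * GART entries always describe 4096 byte GPU pages.  In the helpers below
+ * 't' indexes GPU pages while 'p' indexes CPU pages, so with a 4 KiB
+ * PAGE_SIZE they advance together, and with e.g. a 64 KiB PAGE_SIZE every
+ * CPU page is fanned out over PAGE_SIZE / 4096 = 16 consecutive GART
+ * entries by the inner loops.
+ */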
+void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
+                       int pages)
+{
+       unsigned t;
+       unsigned p;
+       int i, j;
+
+       if (!rdev->gart.ready) {
+               WARN(1, "trying to unbind memory from an uninitialized GART!\n");
+               return;
+       }
+       t = offset / 4096;
+       p = t / (PAGE_SIZE / 4096);
+       for (i = 0; i < pages; i++, p++) {
+               if (rdev->gart.pages[p]) {
+                       pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
+                                      PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+                       rdev->gart.pages[p] = NULL;
+                       rdev->gart.pages_addr[p] = 0;
+                       for (j = 0; j < (PAGE_SIZE / 4096); j++, t++) {
+                               radeon_gart_set_page(rdev, t, 0);
+                       }
+               }
+       }
+       mb();
+       radeon_gart_tlb_flush(rdev);
+}
+
+int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
+                    int pages, struct page **pagelist)
+{
+       unsigned t;
+       unsigned p;
+       uint64_t page_base;
+       int i, j;
+
+       if (!rdev->gart.ready) {
+               DRM_ERROR("trying to bind memory to an uninitialized GART!\n");
+               return -EINVAL;
+       }
+       t = offset / 4096;
+       p = t / (PAGE_SIZE / 4096);
+
+       for (i = 0; i < pages; i++, p++) {
+               /* we need to support large memory configurations */
+               /* assume that unbind has already been called on the range */
+               rdev->gart.pages_addr[p] = pci_map_page(rdev->pdev, pagelist[i],
+                                                       0, PAGE_SIZE,
+                                                       PCI_DMA_BIDIRECTIONAL);
+               if (pci_dma_mapping_error(rdev->pdev, rdev->gart.pages_addr[p])) {
+                       /* FIXME: failed to map page (return -ENOMEM?) */
+                       radeon_gart_unbind(rdev, offset, pages);
+                       return -ENOMEM;
+               }
+               rdev->gart.pages[p] = pagelist[i];
+               page_base = (uint32_t)rdev->gart.pages_addr[p];
+               for (j = 0; j < (PAGE_SIZE / 4096); j++, t++) {
+                       radeon_gart_set_page(rdev, t, page_base);
+                       page_base += 4096;
+               }
+       }
+       mb();
+       radeon_gart_tlb_flush(rdev);
+       return 0;
+}
+
+int radeon_gart_init(struct radeon_device *rdev)
+{
+       if (rdev->gart.pages) {
+               return 0;
+       }
+       /* We need PAGE_SIZE >= 4096 */
+       if (PAGE_SIZE < 4096) {
+               DRM_ERROR("Page size is smaller than GPU page size!\n");
+               return -EINVAL;
+       }
+       /* Compute table size */
+       rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE;
+       rdev->gart.num_gpu_pages = rdev->mc.gtt_size / 4096;
+       DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
+                rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages);
+       /* Allocate pages table */
+       rdev->gart.pages = kzalloc(sizeof(void *) * rdev->gart.num_cpu_pages,
+                                  GFP_KERNEL);
+       if (rdev->gart.pages == NULL) {
+               radeon_gart_fini(rdev);
+               return -ENOMEM;
+       }
+       rdev->gart.pages_addr = kzalloc(sizeof(dma_addr_t) *
+                                       rdev->gart.num_cpu_pages, GFP_KERNEL);
+       if (rdev->gart.pages_addr == NULL) {
+               radeon_gart_fini(rdev);
+               return -ENOMEM;
+       }
+       return 0;
+}
+
+void radeon_gart_fini(struct radeon_device *rdev)
+{
+       if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) {
+               /* unbind pages */
+               radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
+       }
+       rdev->gart.ready = false;
+       kfree(rdev->gart.pages);
+       kfree(rdev->gart.pages_addr);
+       rdev->gart.pages = NULL;
+       rdev->gart.pages_addr = NULL;
+}
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
new file mode 100644 (file)
index 0000000..eb51603
--- /dev/null
@@ -0,0 +1,287 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include "drmP.h"
+#include "drm.h"
+#include "radeon_drm.h"
+#include "radeon.h"
+
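+/*
+ * In rough terms, every GEM object created here is backed by a radeon_object
+ * (a TTM buffer) stored in gobj->driver_private.  The ioctls below look the
+ * GEM object up by its per-file handle and then operate on that backing
+ * buffer: mapping it, changing its domain, or waiting for it to go idle.
+ */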
+int radeon_gem_object_init(struct drm_gem_object *obj)
+{
+       /* we do nothing here */
+       return 0;
+}
+
+void radeon_gem_object_free(struct drm_gem_object *gobj)
+{
+       struct radeon_object *robj = gobj->driver_private;
+
+       gobj->driver_private = NULL;
+       if (robj) {
+               radeon_object_unref(&robj);
+       }
+}
+
+int radeon_gem_object_create(struct radeon_device *rdev, int size,
+                            int alignment, int initial_domain,
+                            bool discardable, bool kernel,
+                            bool interruptible,
+                            struct drm_gem_object **obj)
+{
+       struct drm_gem_object *gobj;
+       struct radeon_object *robj;
+       int r;
+
+       *obj = NULL;
+       gobj = drm_gem_object_alloc(rdev->ddev, size);
+       if (!gobj) {
+               return -ENOMEM;
+       }
+       /* At least align on page size */
+       if (alignment < PAGE_SIZE) {
+               alignment = PAGE_SIZE;
+       }
+       r = radeon_object_create(rdev, gobj, size, kernel, initial_domain,
+                                interruptible, &robj);
+       if (r) {
+               DRM_ERROR("Failed to allocate GEM object (%d, %d, %u)\n",
+                         size, initial_domain, alignment);
+               mutex_lock(&rdev->ddev->struct_mutex);
+               drm_gem_object_unreference(gobj);
+               mutex_unlock(&rdev->ddev->struct_mutex);
+               return r;
+       }
+       gobj->driver_private = robj;
+       *obj = gobj;
+       return 0;
+}
+
+int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
+                         uint64_t *gpu_addr)
+{
+       struct radeon_object *robj = obj->driver_private;
+       uint32_t flags;
+
+       switch (pin_domain) {
+       case RADEON_GEM_DOMAIN_VRAM:
+               flags = TTM_PL_FLAG_VRAM;
+               break;
+       case RADEON_GEM_DOMAIN_GTT:
+               flags = TTM_PL_FLAG_TT;
+               break;
+       default:
+               flags = TTM_PL_FLAG_SYSTEM;
+               break;
+       }
+       return radeon_object_pin(robj, flags, gpu_addr);
+}
+
+void radeon_gem_object_unpin(struct drm_gem_object *obj)
+{
+       struct radeon_object *robj = obj->driver_private;
+       radeon_object_unpin(robj);
+}
+
+int radeon_gem_set_domain(struct drm_gem_object *gobj,
+                         uint32_t rdomain, uint32_t wdomain)
+{
+       struct radeon_object *robj;
+       uint32_t domain;
+       int r;
+
+       /* FIXME: reimplement */
+       robj = gobj->driver_private;
+       /* work out where to validate the buffer to */
+       domain = wdomain;
+       if (!domain) {
+               domain = rdomain;
+       }
+       if (!domain) {
+               /* Do nothing */
+               printk(KERN_WARNING "Set domain without a domain!\n");
+               return 0;
+       }
+       if (domain == RADEON_GEM_DOMAIN_CPU) {
+               /* Asking for CPU access; wait for the object to go idle */
+               r = radeon_object_wait(robj);
+               if (r) {
+                       printk(KERN_ERR "Failed to wait for object!\n");
+                       return r;
+               }
+       }
+       return 0;
+}
+
+int radeon_gem_init(struct radeon_device *rdev)
+{
+       INIT_LIST_HEAD(&rdev->gem.objects);
+       return 0;
+}
+
+void radeon_gem_fini(struct radeon_device *rdev)
+{
+       radeon_object_force_delete(rdev);
+}
+
+
+/*
+ * GEM ioctls.
+ */
+int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
+                         struct drm_file *filp)
+{
+       struct radeon_device *rdev = dev->dev_private;
+       struct drm_radeon_gem_info *args = data;
+
+       args->vram_size = rdev->mc.vram_size;
+       /* FIXME: report something that makes sense; for now just knock a fixed 4 MB off the total */
+       args->vram_visible = rdev->mc.vram_size - (4 * 1024 * 1024);
+       args->gart_size = rdev->mc.gtt_size;
+       return 0;
+}
+
+int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
+                          struct drm_file *filp)
+{
+       /* TODO: implement */
+       DRM_ERROR("unimplemented %s\n", __func__);
+       return -ENOSYS;
+}
+
+int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+                           struct drm_file *filp)
+{
+       /* TODO: implement */
+       DRM_ERROR("unimplemented %s\n", __func__);
+       return -ENOSYS;
+}
+
+int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
+                           struct drm_file *filp)
+{
+       struct radeon_device *rdev = dev->dev_private;
+       struct drm_radeon_gem_create *args = data;
+       struct drm_gem_object *gobj;
+       uint32_t handle;
+       int r;
+
+       /* create a gem object to contain this object in */
+       args->size = roundup(args->size, PAGE_SIZE);
+       r = radeon_gem_object_create(rdev, args->size, args->alignment,
+                                    args->initial_domain, false,
+                                    false, true, &gobj);
+       if (r) {
+               return r;
+       }
+       r = drm_gem_handle_create(filp, gobj, &handle);
+       if (r) {
+               mutex_lock(&dev->struct_mutex);
+               drm_gem_object_unreference(gobj);
+               mutex_unlock(&dev->struct_mutex);
+               return r;
+       }
+       mutex_lock(&dev->struct_mutex);
+       drm_gem_object_handle_unreference(gobj);
+       mutex_unlock(&dev->struct_mutex);
+       args->handle = handle;
+       return 0;
+}
+
+int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
+                               struct drm_file *filp)
+{
+       /* transition the BO to a domain -
+        * just validate the BO into a certain domain */
+       struct drm_radeon_gem_set_domain *args = data;
+       struct drm_gem_object *gobj;
+       struct radeon_object *robj;
+       int r;
+
+       /* for now if someone requests domain CPU -
+        * just make sure the buffer is finished with */
+
+       /* just do a BO wait for now */
+       gobj = drm_gem_object_lookup(dev, filp, args->handle);
+       if (gobj == NULL) {
+               return -EINVAL;
+       }
+       robj = gobj->driver_private;
+
+       r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
+
+       mutex_lock(&dev->struct_mutex);
+       drm_gem_object_unreference(gobj);
+       mutex_unlock(&dev->struct_mutex);
+       return r;
+}
+
+int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
+                         struct drm_file *filp)
+{
+       struct drm_radeon_gem_mmap *args = data;
+       struct drm_gem_object *gobj;
+       struct radeon_object *robj;
+       int r;
+
+       gobj = drm_gem_object_lookup(dev, filp, args->handle);
+       if (gobj == NULL) {
+               return -EINVAL;
+       }
+       robj = gobj->driver_private;
+       r = radeon_object_mmap(robj, &args->addr_ptr);
+       mutex_lock(&dev->struct_mutex);
+       drm_gem_object_unreference(gobj);
+       mutex_unlock(&dev->struct_mutex);
+       return r;
+}
+
+int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
+                         struct drm_file *filp)
+{
+       /* FIXME: implement */
+       return 0;
+}
+
+int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
+                             struct drm_file *filp)
+{
+       struct drm_radeon_gem_wait_idle *args = data;
+       struct drm_gem_object *gobj;
+       struct radeon_object *robj;
+       int r;
+
+       gobj = drm_gem_object_lookup(dev, filp, args->handle);
+       if (gobj == NULL) {
+               return -EINVAL;
+       }
+       robj = gobj->driver_private;
+       r = radeon_object_wait(robj);
+       mutex_lock(&dev->struct_mutex);
+       drm_gem_object_unreference(gobj);
+       mutex_unlock(&dev->struct_mutex);
+       return r;
+}
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
new file mode 100644 (file)
index 0000000..71465ed
--- /dev/null
@@ -0,0 +1,209 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ */
+#include "drmP.h"
+#include "radeon_drm.h"
+#include "radeon.h"
+
+/**
+ * radeon_ddc_probe - check whether a DDC-capable device is present
+ *
+ * Probes the connector's DDC bus by reading a byte from the standard
+ * EDID EEPROM address (0x50); returns true if the device responds.
+ */
+bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
+{
+       u8 out_buf[] = { 0x0, 0x0};
+       u8 buf[2];
+       int ret;
+       struct i2c_msg msgs[] = {
+               {
+                       .addr = 0x50,
+                       .flags = 0,
+                       .len = 1,
+                       .buf = out_buf,
+               },
+               {
+                       .addr = 0x50,
+                       .flags = I2C_M_RD,
+                       .len = 1,
+                       .buf = buf,
+               }
+       };
+
+       ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2);
+       if (ret == 2)
+               return true;
+
+       return false;
+}
+
+
+void radeon_i2c_do_lock(struct radeon_connector *radeon_connector, int lock_state)
+{
+       struct radeon_device *rdev = radeon_connector->base.dev->dev_private;
+       uint32_t temp;
+       struct radeon_i2c_bus_rec *rec = &radeon_connector->ddc_bus->rec;
+
+       /* RV410 appears to have a bug where the hw i2c in reset
+        * holds the i2c port in a bad state - switch hw i2c away before
+        * doing DDC - do this for all r200s/r300s/r400s for safety's sake
+        */
+       if ((rdev->family >= CHIP_R200) && !ASIC_IS_AVIVO(rdev)) {
+               if (rec->a_clk_reg == RADEON_GPIO_MONID) {
+                       WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
+                                               R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1)));
+               } else {
+                       WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
+                                               R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3)));
+               }
+       }
+       if (lock_state) {
+               temp = RREG32(rec->a_clk_reg);
+               temp &= ~(rec->a_clk_mask);
+               WREG32(rec->a_clk_reg, temp);
+
+               temp = RREG32(rec->a_data_reg);
+               temp &= ~(rec->a_data_mask);
+               WREG32(rec->a_data_reg, temp);
+       }
+
+       temp = RREG32(rec->mask_clk_reg);
+       if (lock_state)
+               temp |= rec->mask_clk_mask;
+       else
+               temp &= ~rec->mask_clk_mask;
+       WREG32(rec->mask_clk_reg, temp);
+       temp = RREG32(rec->mask_clk_reg);
+
+       temp = RREG32(rec->mask_data_reg);
+       if (lock_state)
+               temp |= rec->mask_data_mask;
+       else
+               temp &= ~rec->mask_data_mask;
+       WREG32(rec->mask_data_reg, temp);
+       temp = RREG32(rec->mask_data_reg);
+}
+
+static int get_clock(void *i2c_priv)
+{
+       struct radeon_i2c_chan *i2c = i2c_priv;
+       struct radeon_device *rdev = i2c->dev->dev_private;
+       struct radeon_i2c_bus_rec *rec = &i2c->rec;
+       uint32_t val;
+
+       val = RREG32(rec->get_clk_reg);
+       val &= rec->get_clk_mask;
+
+       return (val != 0);
+}
+
+
+static int get_data(void *i2c_priv)
+{
+       struct radeon_i2c_chan *i2c = i2c_priv;
+       struct radeon_device *rdev = i2c->dev->dev_private;
+       struct radeon_i2c_bus_rec *rec = &i2c->rec;
+       uint32_t val;
+
+       val = RREG32(rec->get_data_reg);
+       val &= rec->get_data_mask;
+       return (val != 0);
+}
+
+static void set_clock(void *i2c_priv, int clock)
+{
+       struct radeon_i2c_chan *i2c = i2c_priv;
+       struct radeon_device *rdev = i2c->dev->dev_private;
+       struct radeon_i2c_bus_rec *rec = &i2c->rec;
+       uint32_t val;
+
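+       /* open-drain: a "1" releases the line (mask bit cleared), a "0" drives it low */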
+       val = RREG32(rec->put_clk_reg) & (uint32_t)~(rec->put_clk_mask);
+       val |= clock ? 0 : rec->put_clk_mask;
+       WREG32(rec->put_clk_reg, val);
+}
+
+static void set_data(void *i2c_priv, int data)
+{
+       struct radeon_i2c_chan *i2c = i2c_priv;
+       struct radeon_device *rdev = i2c->dev->dev_private;
+       struct radeon_i2c_bus_rec *rec = &i2c->rec;
+       uint32_t val;
+
+       val = RREG32(rec->put_data_reg) & (uint32_t)~(rec->put_data_mask);
+       val |= data ? 0 : rec->put_data_mask;
+       WREG32(rec->put_data_reg, val);
+}
+
+struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
+               struct radeon_i2c_bus_rec *rec,
+               const char *name)
+{
+       struct radeon_i2c_chan *i2c;
+       int ret;
+
+       i2c = drm_calloc(1, sizeof(struct radeon_i2c_chan), DRM_MEM_DRIVER);
+       if (i2c == NULL)
+               return NULL;
+
+       i2c->adapter.owner = THIS_MODULE;
+       i2c->adapter.algo_data = &i2c->algo;
+       i2c->dev = dev;
+       i2c->algo.setsda = set_data;
+       i2c->algo.setscl = set_clock;
+       i2c->algo.getsda = get_data;
+       i2c->algo.getscl = get_clock;
+       i2c->algo.udelay = 20;
+       /* VESA says 2.2 ms is enough; 1 jiffy doesn't always seem to
+        * cover this, 2 jiffies is a lot more reliable */
+       i2c->algo.timeout = 2;
+       i2c->algo.data = i2c;
+       i2c->rec = *rec;
+       i2c_set_adapdata(&i2c->adapter, i2c);
+
+       ret = i2c_bit_add_bus(&i2c->adapter);
+       if (ret) {
+               DRM_INFO("Failed to register i2c %s\n", name);
+               goto out_free;
+       }
+
+       return i2c;
+out_free:
+       drm_free(i2c, sizeof(struct radeon_i2c_chan), DRM_MEM_DRIVER);
+       return NULL;
+
+}
+
+void radeon_i2c_destroy(struct radeon_i2c_chan *i2c)
+{
+       if (!i2c)
+               return;
+
+       i2c_del_adapter(&i2c->adapter);
+       drm_free(i2c, sizeof(struct radeon_i2c_chan), DRM_MEM_DRIVER);
+}
+
+struct drm_encoder *radeon_best_encoder(struct drm_connector *connector)
+{
+       return NULL;
+}
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
new file mode 100644 (file)
index 0000000..491d569
--- /dev/null
@@ -0,0 +1,158 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include "drmP.h"
+#include "radeon_drm.h"
+#include "radeon_reg.h"
+#include "radeon_microcode.h"
+#include "radeon.h"
+#include "atom.h"
+
+static inline uint32_t r100_irq_ack(struct radeon_device *rdev)
+{
+       uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS);
+       uint32_t irq_mask = RADEON_SW_INT_TEST;
+
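+       /* writing the latched status bits back to the register acknowledges them */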
+       if (irqs) {
+               WREG32(RADEON_GEN_INT_STATUS, irqs);
+       }
+       return irqs & irq_mask;
+}
+
+int r100_irq_set(struct radeon_device *rdev)
+{
+       uint32_t tmp = 0;
+
+       if (rdev->irq.sw_int) {
+               tmp |= RADEON_SW_INT_ENABLE;
+       }
+       /* TODO: go through the CRTCs and enable the vblank interrupts as needed */
+       WREG32(RADEON_GEN_INT_CNTL, tmp);
+       return 0;
+}
+
+int r100_irq_process(struct radeon_device *rdev)
+{
+       uint32_t status;
+
+       status = r100_irq_ack(rdev);
+       if (!status) {
+               return IRQ_NONE;
+       }
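+       /* keep acknowledging until no interrupt sources remain pending */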
+       while (status) {
+               /* SW interrupt */
+               if (status & RADEON_SW_INT_TEST) {
+                       radeon_fence_process(rdev);
+               }
+               status = r100_irq_ack(rdev);
+       }
+       return IRQ_HANDLED;
+}
+
+int rs600_irq_set(struct radeon_device *rdev)
+{
+       uint32_t tmp = 0;
+
+       if (rdev->irq.sw_int) {
+               tmp |= RADEON_SW_INT_ENABLE;
+       }
+       WREG32(RADEON_GEN_INT_CNTL, tmp);
+       /* TODO: go through the CRTCs and enable the vblank interrupts as needed */
+       WREG32(R500_DxMODE_INT_MASK, 0);
+       return 0;
+}
+
+irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS)
+{
+       struct drm_device *dev = (struct drm_device *) arg;
+       struct radeon_device *rdev = dev->dev_private;
+
+       return radeon_irq_process(rdev);
+}
+
+void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
+{
+       struct radeon_device *rdev = dev->dev_private;
+       unsigned i;
+
+       /* Disable *all* interrupts */
+       rdev->irq.sw_int = false;
+       for (i = 0; i < 2; i++) {
+               rdev->irq.crtc_vblank_int[i] = false;
+       }
+       radeon_irq_set(rdev);
+       /* Clear bits */
+       radeon_irq_process(rdev);
+}
+
+int radeon_driver_irq_postinstall_kms(struct drm_device *dev)
+{
+       struct radeon_device *rdev = dev->dev_private;
+
+       dev->max_vblank_count = 0x001fffff;
+       rdev->irq.sw_int = true;
+       radeon_irq_set(rdev);
+       return 0;
+}
+
+void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
+{
+       struct radeon_device *rdev = dev->dev_private;
+       unsigned i;
+
+       if (rdev == NULL) {
+               return;
+       }
+       /* Disable *all* interrupts */
+       rdev->irq.sw_int = false;
+       for (i = 0; i < 2; i++) {
+               rdev->irq.crtc_vblank_int[i] = false;
+       }
+       radeon_irq_set(rdev);
+}
+
+int radeon_irq_kms_init(struct radeon_device *rdev)
+{
+       int r = 0;
+
+       r = drm_vblank_init(rdev->ddev, 2);
+       if (r) {
+               return r;
+       }
+       drm_irq_install(rdev->ddev);
+       rdev->irq.installed = true;
+       DRM_INFO("radeon: irq initialized.\n");
+       return 0;
+}
+
+void radeon_irq_kms_fini(struct radeon_device *rdev)
+{
+       if (rdev->irq.installed) {
+               rdev->irq.installed = false;
+               drm_irq_uninstall(rdev->ddev);
+       }
+}
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
new file mode 100644 (file)
index 0000000..b0ce44b
--- /dev/null
@@ -0,0 +1,295 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include "drmP.h"
+#include "drm_sarea.h"
+#include "radeon.h"
+#include "radeon_drm.h"
+
+
+/*
+ * Driver load/unload
+ */
+int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
+{
+       struct radeon_device *rdev;
+       int r;
+
+       rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
+       if (rdev == NULL) {
+               return -ENOMEM;
+       }
+       dev->dev_private = (void *)rdev;
+
+       /* update BUS flag */
+       if (drm_device_is_agp(dev)) {
+               flags |= RADEON_IS_AGP;
+       } else if (drm_device_is_pcie(dev)) {
+               flags |= RADEON_IS_PCIE;
+       } else {
+               flags |= RADEON_IS_PCI;
+       }
+
+       r = radeon_device_init(rdev, dev, dev->pdev, flags);
+       if (r) {
+               DRM_ERROR("Failed to initialize radeon, disabling IOCTL\n");
+               radeon_device_fini(rdev);
+               kfree(rdev);
+               dev->dev_private = NULL;
+               return r;
+       }
+       return 0;
+}
+
+int radeon_driver_unload_kms(struct drm_device *dev)
+{
+       struct radeon_device *rdev = dev->dev_private;
+
+       radeon_device_fini(rdev);
+       kfree(rdev);
+       dev->dev_private = NULL;
+       return 0;
+}
+
+
+/*
+ * Userspace information query ioctl
+ */
+int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+{
+       struct radeon_device *rdev = dev->dev_private;
+       struct drm_radeon_info *info;
+       uint32_t *value_ptr;
+       uint32_t value;
+
+       info = data;
+       value_ptr = (uint32_t *)((unsigned long)info->value);
+       switch (info->request) {
+       case RADEON_INFO_DEVICE_ID:
+               value = dev->pci_device;
+               break;
+       case RADEON_INFO_NUM_GB_PIPES:
+               value = rdev->num_gb_pipes;
+               break;
+       default:
+               DRM_DEBUG("Invalid request %d\n", info->request);
+               return -EINVAL;
+       }
+       if (DRM_COPY_TO_USER(value_ptr, &value, sizeof(uint32_t))) {
+               DRM_ERROR("copy_to_user\n");
+               return -EFAULT;
+       }
+       return 0;
+}
+
+
+/*
+ * Outdated hooks from the old DRM model where Xorg was in charge (all no-ops now).
+ */
+int radeon_driver_firstopen_kms(struct drm_device *dev)
+{
+       return 0;
+}
+
+
+void radeon_driver_lastclose_kms(struct drm_device *dev)
+{
+}
+
+int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
+{
+       return 0;
+}
+
+void radeon_driver_postclose_kms(struct drm_device *dev,
+                                struct drm_file *file_priv)
+{
+}
+
+void radeon_driver_preclose_kms(struct drm_device *dev,
+                               struct drm_file *file_priv)
+{
+}
+
+
+/*
+ * VBlank related functions.
+ */
+u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc)
+{
+       /* FIXME: implement */
+       return 0;
+}
+
+int radeon_enable_vblank_kms(struct drm_device *dev, int crtc)
+{
+       /* FIXME: implement */
+       return 0;
+}
+
+void radeon_disable_vblank_kms(struct drm_device *dev, int crtc)
+{
+       /* FIXME: implement */
+}
+
+
+/*
+ * For multiple master (like multiple X).
+ */
+struct drm_radeon_master_private {
+       drm_local_map_t *sarea;
+       drm_radeon_sarea_t *sarea_priv;
+};
+
+int radeon_master_create_kms(struct drm_device *dev, struct drm_master *master)
+{
+       struct drm_radeon_master_private *master_priv;
+       unsigned long sareapage;
+       int ret;
+
+       master_priv = drm_calloc(1, sizeof(*master_priv), DRM_MEM_DRIVER);
+       if (master_priv == NULL) {
+               return -ENOMEM;
+       }
+       /* prebuild the SAREA */
+       sareapage = max_t(unsigned long, SAREA_MAX, PAGE_SIZE);
+       ret = drm_addmap(dev, 0, sareapage, _DRM_SHM,
+                        _DRM_CONTAINS_LOCK|_DRM_DRIVER,
+                        &master_priv->sarea);
+       if (ret) {
+               DRM_ERROR("SAREA setup failed\n");
+               drm_free(master_priv, sizeof(*master_priv), DRM_MEM_DRIVER);
+               return ret;
+       }
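+       /* driver-private part of the SAREA lives right after the generic drm_sarea header */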
+       master_priv->sarea_priv = master_priv->sarea->handle + sizeof(struct drm_sarea);
+       master_priv->sarea_priv->pfCurrentPage = 0;
+       master->driver_priv = master_priv;
+       return 0;
+}
+
+void radeon_master_destroy_kms(struct drm_device *dev,
+                              struct drm_master *master)
+{
+       struct drm_radeon_master_private *master_priv = master->driver_priv;
+
+       if (master_priv == NULL) {
+               return;
+       }
+       if (master_priv->sarea) {
+               drm_rmmap_locked(dev, master_priv->sarea);
+       }
+       drm_free(master_priv, sizeof(*master_priv), DRM_MEM_DRIVER);
+       master->driver_priv = NULL;
+}
+
+
+/*
+ * IOCTL.
+ */
+int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv)
+{
+       /* Not valid in KMS. */
+       return -EINVAL;
+}
+
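+/* Stub generator: each legacy (pre-KMS) ioctl below gets a handler that just
+ * logs the call and returns -EINVAL when kernel modesetting is in charge.
+ */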
+#define KMS_INVALID_IOCTL(name)                                                \
+int name(struct drm_device *dev, void *data, struct drm_file *file_priv)\
+{                                                                      \
+       DRM_ERROR("invalid ioctl with kms %s\n", __func__);             \
+       return -EINVAL;                                                 \
+}
+
+/*
+ * All these ioctls are invalid in the KMS world.
+ */
+KMS_INVALID_IOCTL(radeon_cp_init_kms)
+KMS_INVALID_IOCTL(radeon_cp_start_kms)
+KMS_INVALID_IOCTL(radeon_cp_stop_kms)
+KMS_INVALID_IOCTL(radeon_cp_reset_kms)
+KMS_INVALID_IOCTL(radeon_cp_idle_kms)
+KMS_INVALID_IOCTL(radeon_cp_resume_kms)
+KMS_INVALID_IOCTL(radeon_engine_reset_kms)
+KMS_INVALID_IOCTL(radeon_fullscreen_kms)
+KMS_INVALID_IOCTL(radeon_cp_swap_kms)
+KMS_INVALID_IOCTL(radeon_cp_clear_kms)
+KMS_INVALID_IOCTL(radeon_cp_vertex_kms)
+KMS_INVALID_IOCTL(radeon_cp_indices_kms)
+KMS_INVALID_IOCTL(radeon_cp_texture_kms)
+KMS_INVALID_IOCTL(radeon_cp_stipple_kms)
+KMS_INVALID_IOCTL(radeon_cp_indirect_kms)
+KMS_INVALID_IOCTL(radeon_cp_vertex2_kms)
+KMS_INVALID_IOCTL(radeon_cp_cmdbuf_kms)
+KMS_INVALID_IOCTL(radeon_cp_getparam_kms)
+KMS_INVALID_IOCTL(radeon_cp_flip_kms)
+KMS_INVALID_IOCTL(radeon_mem_alloc_kms)
+KMS_INVALID_IOCTL(radeon_mem_free_kms)
+KMS_INVALID_IOCTL(radeon_mem_init_heap_kms)
+KMS_INVALID_IOCTL(radeon_irq_emit_kms)
+KMS_INVALID_IOCTL(radeon_irq_wait_kms)
+KMS_INVALID_IOCTL(radeon_cp_setparam_kms)
+KMS_INVALID_IOCTL(radeon_surface_alloc_kms)
+KMS_INVALID_IOCTL(radeon_surface_free_kms)
+
+
+struct drm_ioctl_desc radeon_ioctls_kms[] = {
+       DRM_IOCTL_DEF(DRM_RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_RADEON_CP_RESET, radeon_cp_reset_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_RADEON_CP_IDLE, radeon_cp_idle_kms, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_RADEON_CP_RESUME, radeon_cp_resume_kms, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_RADEON_RESET, radeon_engine_reset_kms, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_RADEON_FULLSCREEN, radeon_fullscreen_kms, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_RADEON_SWAP, radeon_cp_swap_kms, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_RADEON_CLEAR, radeon_cp_clear_kms, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_RADEON_VERTEX, radeon_cp_vertex_kms, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_RADEON_INDICES, radeon_cp_indices_kms, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_RADEON_TEXTURE, radeon_cp_texture_kms, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_RADEON_STIPPLE, radeon_cp_stipple_kms, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_RADEON_INDIRECT, radeon_cp_indirect_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_RADEON_VERTEX2, radeon_cp_vertex2_kms, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_RADEON_CMDBUF, radeon_cp_cmdbuf_kms, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_RADEON_GETPARAM, radeon_cp_getparam_kms, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_RADEON_FLIP, radeon_cp_flip_kms, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_RADEON_ALLOC, radeon_mem_alloc_kms, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_RADEON_FREE, radeon_mem_free_kms, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_RADEON_INIT_HEAP, radeon_mem_init_heap_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_RADEON_IRQ_EMIT, radeon_irq_emit_kms, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_RADEON_IRQ_WAIT, radeon_irq_wait_kms, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_RADEON_SETPARAM, radeon_cp_setparam_kms, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH),
+       /* KMS */
+       DRM_IOCTL_DEF(DRM_RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_RADEON_CS, radeon_cs_ioctl, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_RADEON_INFO, radeon_info_ioctl, DRM_AUTH),
+};
+int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
new file mode 100644 (file)
index 0000000..8086ecf
--- /dev/null
@@ -0,0 +1,1276 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ */
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/radeon_drm.h>
+#include "radeon_fixed.h"
+#include "radeon.h"
+
+void radeon_restore_common_regs(struct drm_device *dev)
+{
+       /* don't need this yet */
+}
+
+static void radeon_pll_wait_for_read_update_complete(struct drm_device *dev)
+{
+       struct radeon_device *rdev = dev->dev_private;
+       int i = 0;
+
+       /* FIXME: Certain revisions of R300 can't recover here.  We're not sure
+          of the cause yet, but this workaround masks the problem for now.
+          Other chips will usually pass on the very first test, so the
+          workaround shouldn't have any effect on them. */
+       for (i = 0;
+            (i < 10000 &&
+             RREG32_PLL(RADEON_PPLL_REF_DIV) & RADEON_PPLL_ATOMIC_UPDATE_R);
+            i++);
+}
+
+static void radeon_pll_write_update(struct drm_device *dev)
+{
+       struct radeon_device *rdev = dev->dev_private;
+
+       while (RREG32_PLL(RADEON_PPLL_REF_DIV) & RADEON_PPLL_ATOMIC_UPDATE_R);
+
+       WREG32_PLL_P(RADEON_PPLL_REF_DIV,
+                          RADEON_PPLL_ATOMIC_UPDATE_W,
+                          ~(RADEON_PPLL_ATOMIC_UPDATE_W));
+}
+
+static void radeon_pll2_wait_for_read_update_complete(struct drm_device *dev)
+{
+       struct radeon_device *rdev = dev->dev_private;
+       int i = 0;
+
+
+       /* FIXME: Certain revisions of R300 can't recover here.  We're not sure
+          of the cause yet, but this workaround masks the problem for now.
+          Other chips will usually pass on the very first test, so the
+          workaround shouldn't have any effect on them. */
+       for (i = 0;
+            (i < 10000 &&
+             RREG32_PLL(RADEON_P2PLL_REF_DIV) & RADEON_P2PLL_ATOMIC_UPDATE_R);
+            i++);
+}
+
+static void radeon_pll2_write_update(struct drm_device *dev)
+{
+       struct radeon_device *rdev = dev->dev_private;
+
+       while (RREG32_PLL(RADEON_P2PLL_REF_DIV) & RADEON_P2PLL_ATOMIC_UPDATE_R);
+
+       WREG32_PLL_P(RADEON_P2PLL_REF_DIV,
+                          RADEON_P2PLL_ATOMIC_UPDATE_W,
+                          ~(RADEON_P2PLL_ATOMIC_UPDATE_W));
+}
+
+static uint8_t radeon_compute_pll_gain(uint16_t ref_freq, uint16_t ref_div,
+                                      uint16_t fb_div)
+{
+       unsigned int vcoFreq;
+
+       if (!ref_div)
+               return 1;
+
+       /* VCO frequency = reference frequency * feedback divider / reference divider */
+       vcoFreq = ((unsigned)ref_freq * fb_div) / ref_div;
+
+       /*
+        * This is horribly crude: the VCO frequency range is divided into
+        * 3 parts, each part having a fixed PLL gain value.
+        */
+       if (vcoFreq >= 30000)
+               /*
+                * [300..max] MHz : 7
+                */
+               return 7;
+       else if (vcoFreq >= 18000)
+               /*
+                * [180..300) MHz : 4
+                */
+               return 4;
+       else
+               /*
+                * [0..180) MHz : 1
+                */
+               return 1;
+}
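+       /* translate the GEM placement domain into the matching TTM placement flag */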
+
+void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+       struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       uint32_t mask;
+
+       if (radeon_crtc->crtc_id)
+               mask = (RADEON_CRTC2_EN |
+                       RADEON_CRTC2_DISP_DIS |
+                       RADEON_CRTC2_VSYNC_DIS |
+                       RADEON_CRTC2_HSYNC_DIS |
+                       RADEON_CRTC2_DISP_REQ_EN_B);
+       else
+               mask = (RADEON_CRTC_DISPLAY_DIS |
+                       RADEON_CRTC_VSYNC_DIS |
+                       RADEON_CRTC_HSYNC_DIS);
+
+       switch (mode) {
+       case DRM_MODE_DPMS_ON:
+               if (radeon_crtc->crtc_id)
+                       WREG32_P(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_EN, ~mask);
+               else {
+                       WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_EN, ~(RADEON_CRTC_EN |
+                                                                        RADEON_CRTC_DISP_REQ_EN_B));
+                       WREG32_P(RADEON_CRTC_EXT_CNTL, 0, ~mask);
+               }
+               break;
+       case DRM_MODE_DPMS_STANDBY:
+       case DRM_MODE_DPMS_SUSPEND:
+       case DRM_MODE_DPMS_OFF:
+               if (radeon_crtc->crtc_id)
+                       WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~mask);
+               else {
+                       WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_DISP_REQ_EN_B, ~(RADEON_CRTC_EN |
+                                                                                   RADEON_CRTC_DISP_REQ_EN_B));
+                       WREG32_P(RADEON_CRTC_EXT_CNTL, mask, ~mask);
+               }
+               break;
+       }
+
+       if (mode != DRM_MODE_DPMS_OFF) {
+               radeon_crtc_load_lut(crtc);
+       }
+}
+
+/* properly set crtc bpp when using atombios */
+void radeon_legacy_atom_set_surface(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+       int format;
+       uint32_t crtc_gen_cntl;
+       uint32_t disp_merge_cntl;
+       uint32_t crtc_pitch;
+
+       switch (crtc->fb->bits_per_pixel) {
+       case 15:      /*  555 */
+               format = 3;
+               break;
+       case 16:      /*  565 */
+               format = 4;
+               break;
+       case 24:      /*  RGB */
+               format = 5;
+               break;
+       case 32:      /* xRGB */
+               format = 6;
+               break;
+       default:
+               return;
+       }
+
+       crtc_pitch  = ((((crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8)) * crtc->fb->bits_per_pixel) +
+                       ((crtc->fb->bits_per_pixel * 8) - 1)) /
+                      (crtc->fb->bits_per_pixel * 8));
+       crtc_pitch |= crtc_pitch << 16;
+
+       WREG32(RADEON_CRTC_PITCH + radeon_crtc->crtc_offset, crtc_pitch);
+
+       switch (radeon_crtc->crtc_id) {
+       case 0:
+               disp_merge_cntl = RREG32(RADEON_DISP_MERGE_CNTL);
+               disp_merge_cntl &= ~RADEON_DISP_RGB_OFFSET_EN;
+               WREG32(RADEON_DISP_MERGE_CNTL, disp_merge_cntl);
+
+               crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL) & 0xfffff0ff;
+               crtc_gen_cntl |= (format << 8);
+               crtc_gen_cntl |= RADEON_CRTC_EXT_DISP_EN;
+               WREG32(RADEON_CRTC_GEN_CNTL, crtc_gen_cntl);
+               break;
+       case 1:
+               disp_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL);
+               disp_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN;
+               WREG32(RADEON_DISP2_MERGE_CNTL, disp_merge_cntl);
+
+               crtc_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL) & 0xfffff0ff;
+               crtc_gen_cntl |= (format << 8);
+               WREG32(RADEON_CRTC2_GEN_CNTL, crtc_gen_cntl);
+               WREG32(RADEON_FP_H2_SYNC_STRT_WID,   RREG32(RADEON_CRTC2_H_SYNC_STRT_WID));
+               WREG32(RADEON_FP_V2_SYNC_STRT_WID,   RREG32(RADEON_CRTC2_V_SYNC_STRT_WID));
+               break;
+       }
+}
+
+int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+                        struct drm_framebuffer *old_fb)
+{
+       struct drm_device *dev = crtc->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+       struct radeon_framebuffer *radeon_fb;
+       struct drm_gem_object *obj;
+       uint64_t base;
+       uint32_t crtc_offset, crtc_offset_cntl, crtc_tile_x0_y0 = 0;
+       uint32_t crtc_pitch, pitch_pixels;
+
+       DRM_DEBUG("\n");
+
+       radeon_fb = to_radeon_framebuffer(crtc->fb);
+
+       obj = radeon_fb->obj;
+       if (radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &base)) {
+               return -EINVAL;
+       }
+       crtc_offset = (u32)base;
+       crtc_offset_cntl = 0;
+
+       pitch_pixels = crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8);
+       crtc_pitch  = (((pitch_pixels * crtc->fb->bits_per_pixel) +
+                       ((crtc->fb->bits_per_pixel * 8) - 1)) /
+                      (crtc->fb->bits_per_pixel * 8));
+       crtc_pitch |= crtc_pitch << 16;
+
+       /* TODO tiling */
+       if (0) {
+               if (ASIC_IS_R300(rdev))
+                       crtc_offset_cntl |= (R300_CRTC_X_Y_MODE_EN |
+                                            R300_CRTC_MICRO_TILE_BUFFER_DIS |
+                                            R300_CRTC_MACRO_TILE_EN);
+               else
+                       crtc_offset_cntl |= RADEON_CRTC_TILE_EN;
+       } else {
+               if (ASIC_IS_R300(rdev))
+                       crtc_offset_cntl &= ~(R300_CRTC_X_Y_MODE_EN |
+                                             R300_CRTC_MICRO_TILE_BUFFER_DIS |
+                                             R300_CRTC_MACRO_TILE_EN);
+               else
+                       crtc_offset_cntl &= ~RADEON_CRTC_TILE_EN;
+       }
+
+
+       /* TODO more tiling */
+       if (0) {
+               if (ASIC_IS_R300(rdev)) {
+                       crtc_tile_x0_y0 = x | (y << 16);
+                       base &= ~0x7ff;
+               } else {
+                       int byteshift = crtc->fb->bits_per_pixel >> 4;
+                       int tile_addr = (((y >> 3) * crtc->fb->width + x) >> (8 - byteshift)) << 11;
+                       base += tile_addr + ((x << byteshift) % 256) + ((y % 8) << 8);
+                       crtc_offset_cntl |= (y % 16);
+               }
+       } else {
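+               /* linear (non-tiled) layout: compute the byte offset of pixel (x, y) */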
+               int offset = y * pitch_pixels + x;
+               switch (crtc->fb->bits_per_pixel) {
+               case 15:
+               case 16:
+                       offset *= 2;
+                       break;
+               case 24:
+                       offset *= 3;
+                       break;
+               case 32:
+                       offset *= 4;
+                       break;
+               default:
+                       /* unsupported colour depth: release the pin and report failure */
+                       radeon_gem_object_unpin(obj);
+                       return -EINVAL;
+               }
+               base += offset;
+       }
+
+       base &= ~7;
+
+       /* TODO: update the SAREA */
+
+       crtc_offset = (u32)base;
+
+       WREG32(RADEON_DISPLAY_BASE_ADDR + radeon_crtc->crtc_offset, rdev->mc.vram_location);
+
+       if (ASIC_IS_R300(rdev)) {
+               if (radeon_crtc->crtc_id)
+                       WREG32(R300_CRTC2_TILE_X0_Y0, crtc_tile_x0_y0);
+               else
+                       WREG32(R300_CRTC_TILE_X0_Y0, crtc_tile_x0_y0);
+       }
+       WREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset, crtc_offset_cntl);
+       WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, crtc_offset);
+       WREG32(RADEON_CRTC_PITCH + radeon_crtc->crtc_offset, crtc_pitch);
+
+       if (old_fb && old_fb != crtc->fb) {
+               radeon_fb = to_radeon_framebuffer(old_fb);
+               radeon_gem_object_unpin(radeon_fb->obj);
+       }
+       return 0;
+}
+
+static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mode *mode)
+{
+       struct drm_device *dev = crtc->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+       int format;
+       int hsync_start;
+       int hsync_wid;
+       int vsync_wid;
+       uint32_t crtc_h_total_disp;
+       uint32_t crtc_h_sync_strt_wid;
+       uint32_t crtc_v_total_disp;
+       uint32_t crtc_v_sync_strt_wid;
+
+       DRM_DEBUG("\n");
+
+       switch (crtc->fb->bits_per_pixel) {
+       case 15:      /*  555 */
+               format = 3;
+               break;
+       case 16:      /*  565 */
+               format = 4;
+               break;
+       case 24:      /*  RGB */
+               format = 5;
+               break;
+       case 32:      /* xRGB */
+               format = 6;
+               break;
+       default:
+               return false;
+       }
+
+       crtc_h_total_disp = ((((mode->crtc_htotal / 8) - 1) & 0x3ff)
+                            | ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16));
+
+       hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8;
+       if (!hsync_wid)
+               hsync_wid = 1;
+       hsync_start = mode->crtc_hsync_start - 8;
+
+       crtc_h_sync_strt_wid = ((hsync_start & 0x1fff)
+                               | ((hsync_wid & 0x3f) << 16)
+                               | ((mode->flags & DRM_MODE_FLAG_NHSYNC)
+                                  ? RADEON_CRTC_H_SYNC_POL
+                                  : 0));
+
+       /* This works for double scan mode. */
+       crtc_v_total_disp = (((mode->crtc_vtotal - 1) & 0xffff)
+                            | ((mode->crtc_vdisplay - 1) << 16));
+
+       vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start;
+       if (!vsync_wid)
+               vsync_wid = 1;
+
+       crtc_v_sync_strt_wid = (((mode->crtc_vsync_start - 1) & 0xfff)
+                               | ((vsync_wid & 0x1f) << 16)
+                               | ((mode->flags & DRM_MODE_FLAG_NVSYNC)
+                                  ? RADEON_CRTC_V_SYNC_POL
+                                  : 0));
+
+       /* TODO -> Dell Server */
+       if (0) {
+               uint32_t disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
+               uint32_t tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
+               uint32_t dac2_cntl = RREG32(RADEON_DAC_CNTL2);
+               uint32_t crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
+
+               dac2_cntl &= ~RADEON_DAC2_DAC_CLK_SEL;
+               dac2_cntl |= RADEON_DAC2_DAC2_CLK_SEL;
+
+               /* For CRT on DAC2, don't turn it on if the BIOS didn't
+                  enable it, even if it's detected.
+               */
+               disp_hw_debug |= RADEON_CRT2_DISP1_SEL;
+               tv_dac_cntl &= ~((1<<2) | (3<<8) | (7<<24) | (0xff<<16));
+               tv_dac_cntl |= (0x03 | (2<<8) | (0x58<<16));
+
+               WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
+               WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
+               WREG32(RADEON_DAC_CNTL2, dac2_cntl);
+               WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
+       }
+
+       if (radeon_crtc->crtc_id) {
+               uint32_t crtc2_gen_cntl;
+               uint32_t disp2_merge_cntl;
+
+               /* check to see if TV DAC is enabled for another crtc and keep it enabled */
+               if (RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_CRT2_ON)
+                       crtc2_gen_cntl = RADEON_CRTC2_CRT2_ON;
+               else
+                       crtc2_gen_cntl = 0;
+
+               crtc2_gen_cntl |= ((format << 8)
+                                  | RADEON_CRTC2_VSYNC_DIS
+                                  | RADEON_CRTC2_HSYNC_DIS
+                                  | RADEON_CRTC2_DISP_DIS
+                                  | RADEON_CRTC2_DISP_REQ_EN_B
+                                  | ((mode->flags & DRM_MODE_FLAG_DBLSCAN)
+                                     ? RADEON_CRTC2_DBL_SCAN_EN
+                                     : 0)
+                                  | ((mode->flags & DRM_MODE_FLAG_CSYNC)
+                                     ? RADEON_CRTC2_CSYNC_EN
+                                     : 0)
+                                  | ((mode->flags & DRM_MODE_FLAG_INTERLACE)
+                                     ? RADEON_CRTC2_INTERLACE_EN
+                                     : 0));
+
+               disp2_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL);
+               disp2_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN;
+
+               WREG32(RADEON_DISP2_MERGE_CNTL, disp2_merge_cntl);
+               WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
+       } else {
+               uint32_t crtc_gen_cntl;
+               uint32_t crtc_ext_cntl;
+               uint32_t disp_merge_cntl;
+
+               crtc_gen_cntl = (RADEON_CRTC_EXT_DISP_EN
+                                | (format << 8)
+                                | RADEON_CRTC_DISP_REQ_EN_B
+                                | ((mode->flags & DRM_MODE_FLAG_DBLSCAN)
+                                   ? RADEON_CRTC_DBL_SCAN_EN
+                                   : 0)
+                                | ((mode->flags & DRM_MODE_FLAG_CSYNC)
+                                   ? RADEON_CRTC_CSYNC_EN
+                                   : 0)
+                                | ((mode->flags & DRM_MODE_FLAG_INTERLACE)
+                                   ? RADEON_CRTC_INTERLACE_EN
+                                   : 0));
+
+               crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
+               crtc_ext_cntl |= (RADEON_XCRT_CNT_EN |
+                                 RADEON_CRTC_VSYNC_DIS |
+                                 RADEON_CRTC_HSYNC_DIS |
+                                 RADEON_CRTC_DISPLAY_DIS);
+
+               disp_merge_cntl = RREG32(RADEON_DISP_MERGE_CNTL);
+               disp_merge_cntl &= ~RADEON_DISP_RGB_OFFSET_EN;
+
+               WREG32(RADEON_DISP_MERGE_CNTL, disp_merge_cntl);
+               WREG32(RADEON_CRTC_GEN_CNTL, crtc_gen_cntl);
+               WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
+       }
+
+       WREG32(RADEON_CRTC_H_TOTAL_DISP + radeon_crtc->crtc_offset, crtc_h_total_disp);
+       WREG32(RADEON_CRTC_H_SYNC_STRT_WID + radeon_crtc->crtc_offset, crtc_h_sync_strt_wid);
+       WREG32(RADEON_CRTC_V_TOTAL_DISP + radeon_crtc->crtc_offset, crtc_v_total_disp);
+       WREG32(RADEON_CRTC_V_SYNC_STRT_WID + radeon_crtc->crtc_offset, crtc_v_sync_strt_wid);
+
+       return true;
+}
+
+static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
+{
+       struct drm_device *dev = crtc->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+       struct drm_encoder *encoder;
+       uint32_t feedback_div = 0;
+       uint32_t frac_fb_div = 0;
+       uint32_t reference_div = 0;
+       uint32_t post_divider = 0;
+       uint32_t freq = 0;
+       uint8_t pll_gain;
+       int pll_flags = RADEON_PLL_LEGACY;
+       bool use_bios_divs = false;
+       /* PLL registers */
+       uint32_t pll_ref_div = 0;
+       uint32_t pll_fb_post_div = 0;
+       uint32_t htotal_cntl = 0;
+
+       struct radeon_pll *pll;
+
+       struct {
+               int divider;
+               int bitvalue;
+       } *post_div, post_divs[]   = {
+               /* From RAGE 128 VR/RAGE 128 GL Register
+                * Reference Manual (Technical Reference
+                * Manual P/N RRG-G04100-C Rev. 0.04), page
+                * 3-17 (PLL_DIV_[3:0]).
+                */
+               {  1, 0 },              /* VCLK_SRC                 */
+               {  2, 1 },              /* VCLK_SRC/2               */
+               {  4, 2 },              /* VCLK_SRC/4               */
+               {  8, 3 },              /* VCLK_SRC/8               */
+               {  3, 4 },              /* VCLK_SRC/3               */
+               { 16, 5 },              /* VCLK_SRC/16              */
+               {  6, 6 },              /* VCLK_SRC/6               */
+               { 12, 7 },              /* VCLK_SRC/12              */
+               {  0, 0 }
+       };
+
+       if (radeon_crtc->crtc_id)
+               pll = &rdev->clock.p2pll;
+       else
+               pll = &rdev->clock.p1pll;
+
+       if (mode->clock > 200000) /* range limits??? */
+               pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
+       else
+               pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
+
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+               if (encoder->crtc == crtc) {
+                       if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
+                               pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
+                       if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) {
+                               struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+                               struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv;
+                               if (lvds) {
+                                       if (lvds->use_bios_dividers) {
+                                               pll_ref_div = lvds->panel_ref_divider;
+                                               pll_fb_post_div   = (lvds->panel_fb_divider |
+                                                                    (lvds->panel_post_divider << 16));
+                                               htotal_cntl  = 0;
+                                               use_bios_divs = true;
+                                       }
+                               }
+                               pll_flags |= RADEON_PLL_USE_REF_DIV;
+                       }
+               }
+       }
+
+       DRM_DEBUG("\n");
+
+       if (!use_bios_divs) {
+               radeon_compute_pll(pll, mode->clock,
+                                  &freq, &feedback_div, &frac_fb_div,
+                                  &reference_div, &post_divider,
+                                  pll_flags);
+
+               for (post_div = &post_divs[0]; post_div->divider; ++post_div) {
+                       if (post_div->divider == post_divider)
+                               break;
+               }
+
+               if (!post_div->divider)
+                       post_div = &post_divs[0];
+
+               DRM_DEBUG("dc=%u, fd=%d, rd=%d, pd=%d\n",
+                         (unsigned)freq,
+                         feedback_div,
+                         reference_div,
+                         post_divider);
+
+               pll_ref_div   = reference_div;
+#if defined(__powerpc__) && (0) /* TODO */
+               /* apparently programming this otherwise causes a hang??? */
+               if (info->MacModel == RADEON_MAC_IBOOK)
+                       pll_fb_post_div = 0x000600ad;
+               else
+#endif
+                       pll_fb_post_div     = (feedback_div | (post_div->bitvalue << 16));
+
+               htotal_cntl    = mode->htotal & 0x7;
+
+       }
+
+       pll_gain = radeon_compute_pll_gain(pll->reference_freq,
+                                          pll_ref_div & 0x3ff,
+                                          pll_fb_post_div & 0x7ff);
+
+       if (radeon_crtc->crtc_id) {
+               uint32_t pixclks_cntl = ((RREG32_PLL(RADEON_PIXCLKS_CNTL) &
+                                         ~(RADEON_PIX2CLK_SRC_SEL_MASK)) |
+                                        RADEON_PIX2CLK_SRC_SEL_P2PLLCLK);
+
+               WREG32_PLL_P(RADEON_PIXCLKS_CNTL,
+                            RADEON_PIX2CLK_SRC_SEL_CPUCLK,
+                            ~(RADEON_PIX2CLK_SRC_SEL_MASK));
+
+               WREG32_PLL_P(RADEON_P2PLL_CNTL,
+                            RADEON_P2PLL_RESET
+                            | RADEON_P2PLL_ATOMIC_UPDATE_EN
+                            | ((uint32_t)pll_gain << RADEON_P2PLL_PVG_SHIFT),
+                            ~(RADEON_P2PLL_RESET
+                              | RADEON_P2PLL_ATOMIC_UPDATE_EN
+                              | RADEON_P2PLL_PVG_MASK));
+
+               WREG32_PLL_P(RADEON_P2PLL_REF_DIV,
+                            pll_ref_div,
+                            ~RADEON_P2PLL_REF_DIV_MASK);
+
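+               /* the feedback and post dividers share P2PLL_DIV_0, so each field gets its own masked write */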
+               WREG32_PLL_P(RADEON_P2PLL_DIV_0,
+                            pll_fb_post_div,
+                            ~RADEON_P2PLL_FB0_DIV_MASK);
+
+               WREG32_PLL_P(RADEON_P2PLL_DIV_0,
+                            pll_fb_post_div,
+                            ~RADEON_P2PLL_POST0_DIV_MASK);
+
+               radeon_pll2_write_update(dev);
+               radeon_pll2_wait_for_read_update_complete(dev);
+
+               WREG32_PLL(RADEON_HTOTAL2_CNTL, htotal_cntl);
+
+               WREG32_PLL_P(RADEON_P2PLL_CNTL,
+                            0,
+                            ~(RADEON_P2PLL_RESET
+                              | RADEON_P2PLL_SLEEP
+                              | RADEON_P2PLL_ATOMIC_UPDATE_EN));
+
+               DRM_DEBUG("Wrote2: 0x%08x 0x%08x 0x%08x (0x%08x)\n",
+                         (unsigned)pll_ref_div,
+                         (unsigned)pll_fb_post_div,
+                         (unsigned)htotal_cntl,
+                         RREG32_PLL(RADEON_P2PLL_CNTL));
+               DRM_DEBUG("Wrote2: rd=%u, fd=%u, pd=%u\n",
+                         (unsigned)pll_ref_div & RADEON_P2PLL_REF_DIV_MASK,
+                         (unsigned)pll_fb_post_div & RADEON_P2PLL_FB0_DIV_MASK,
+                         (unsigned)((pll_fb_post_div &
+                                     RADEON_P2PLL_POST0_DIV_MASK) >> 16));
+
+               mdelay(50); /* Let the clock lock */
+
+               WREG32_PLL_P(RADEON_PIXCLKS_CNTL,
+                            RADEON_PIX2CLK_SRC_SEL_P2PLLCLK,
+                            ~(RADEON_PIX2CLK_SRC_SEL_MASK));
+
+               WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
+       } else {
+               if (rdev->flags & RADEON_IS_MOBILITY) {
+                       /* A temporary workaround for the occasional blanking on certain laptop panels.
+                          This appears to be related to the PLL divider registers (failing to lock?).
+                          It occurs even when all dividers are the same as their old settings.
+                          In that case we really don't need to fiddle with the PLL registers at all,
+                          and skipping the update avoids the blanking problem on some panels.
+                       */
+                       if ((pll_ref_div == (RREG32_PLL(RADEON_PPLL_REF_DIV) & RADEON_PPLL_REF_DIV_MASK)) &&
+                           (pll_fb_post_div == (RREG32_PLL(RADEON_PPLL_DIV_3) &
+                                                (RADEON_PPLL_POST3_DIV_MASK | RADEON_PPLL_FB3_DIV_MASK)))) {
+                               WREG32_P(RADEON_CLOCK_CNTL_INDEX,
+                                        RADEON_PLL_DIV_SEL,
+                                        ~(RADEON_PLL_DIV_SEL));
+                               r100_pll_errata_after_index(rdev);
+                               return;
+                       }
+               }
+
+               WREG32_PLL_P(RADEON_VCLK_ECP_CNTL,
+                            RADEON_VCLK_SRC_SEL_CPUCLK,
+                            ~(RADEON_VCLK_SRC_SEL_MASK));
+               WREG32_PLL_P(RADEON_PPLL_CNTL,
+                            RADEON_PPLL_RESET
+                            | RADEON_PPLL_ATOMIC_UPDATE_EN
+                            | RADEON_PPLL_VGA_ATOMIC_UPDATE_EN
+                            | ((uint32_t)pll_gain << RADEON_PPLL_PVG_SHIFT),
+                            ~(RADEON_PPLL_RESET
+                              | RADEON_PPLL_ATOMIC_UPDATE_EN
+                              | RADEON_PPLL_VGA_ATOMIC_UPDATE_EN
+                              | RADEON_PPLL_PVG_MASK));
+
+               WREG32_P(RADEON_CLOCK_CNTL_INDEX,
+                        RADEON_PLL_DIV_SEL,
+                        ~(RADEON_PLL_DIV_SEL));
+               r100_pll_errata_after_index(rdev);
+
+               if (ASIC_IS_R300(rdev) ||
+                   (rdev->family == CHIP_RS300) ||
+                   (rdev->family == CHIP_RS400) ||
+                   (rdev->family == CHIP_RS480)) {
+                       if (pll_ref_div & R300_PPLL_REF_DIV_ACC_MASK) {
+                               /* When restoring console mode, use saved PPLL_REF_DIV
+                                * setting.
+                                */
+                               WREG32_PLL_P(RADEON_PPLL_REF_DIV,
+                                            pll_ref_div,
+                                            0);
+                       } else {
+                               /* R300 uses ref_div_acc field as real ref divider */
+                               WREG32_PLL_P(RADEON_PPLL_REF_DIV,
+                                            (pll_ref_div << R300_PPLL_REF_DIV_ACC_SHIFT),
+                                            ~R300_PPLL_REF_DIV_ACC_MASK);
+                       }
+               } else
+                       WREG32_PLL_P(RADEON_PPLL_REF_DIV,
+                                    pll_ref_div,
+                                    ~RADEON_PPLL_REF_DIV_MASK);
+
+               WREG32_PLL_P(RADEON_PPLL_DIV_3,
+                            pll_fb_post_div,
+                            ~RADEON_PPLL_FB3_DIV_MASK);
+
+               WREG32_PLL_P(RADEON_PPLL_DIV_3,
+                            pll_fb_post_div,
+                            ~RADEON_PPLL_POST3_DIV_MASK);
+
+               radeon_pll_write_update(dev);
+               radeon_pll_wait_for_read_update_complete(dev);
+
+               WREG32_PLL(RADEON_HTOTAL_CNTL, htotal_cntl);
+
+               WREG32_PLL_P(RADEON_PPLL_CNTL,
+                            0,
+                            ~(RADEON_PPLL_RESET
+                              | RADEON_PPLL_SLEEP
+                              | RADEON_PPLL_ATOMIC_UPDATE_EN
+                              | RADEON_PPLL_VGA_ATOMIC_UPDATE_EN));
+
+               DRM_DEBUG("Wrote: 0x%08x 0x%08x 0x%08x (0x%08x)\n",
+                         pll_ref_div,
+                         pll_fb_post_div,
+                         (unsigned)htotal_cntl,
+                         RREG32_PLL(RADEON_PPLL_CNTL));
+               DRM_DEBUG("Wrote: rd=%d, fd=%d, pd=%d\n",
+                         pll_ref_div & RADEON_PPLL_REF_DIV_MASK,
+                         pll_fb_post_div & RADEON_PPLL_FB3_DIV_MASK,
+                         (pll_fb_post_div & RADEON_PPLL_POST3_DIV_MASK) >> 16);
+
+               mdelay(50); /* Let the clock lock */
+
+               WREG32_PLL_P(RADEON_VCLK_ECP_CNTL,
+                            RADEON_VCLK_SRC_SEL_PPLLCLK,
+                            ~(RADEON_VCLK_SRC_SEL_MASK));
+
+       }
+}
+
+static bool radeon_crtc_mode_fixup(struct drm_crtc *crtc,
+                                  struct drm_display_mode *mode,
+                                  struct drm_display_mode *adjusted_mode)
+{
+       return true;
+}
+
+static int radeon_crtc_mode_set(struct drm_crtc *crtc,
+                                struct drm_display_mode *mode,
+                                struct drm_display_mode *adjusted_mode,
+                                int x, int y, struct drm_framebuffer *old_fb)
+{
+
+       DRM_DEBUG("\n");
+
+       /* TODO TV */
+
+       radeon_crtc_set_base(crtc, x, y, old_fb);
+       radeon_set_crtc_timing(crtc, adjusted_mode);
+       radeon_set_pll(crtc, adjusted_mode);
+       radeon_init_disp_bandwidth(crtc->dev);
+
+       return 0;
+}
+
+static void radeon_crtc_prepare(struct drm_crtc *crtc)
+{
+       radeon_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void radeon_crtc_commit(struct drm_crtc *crtc)
+{
+       radeon_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+static const struct drm_crtc_helper_funcs legacy_helper_funcs = {
+       .dpms = radeon_crtc_dpms,
+       .mode_fixup = radeon_crtc_mode_fixup,
+       .mode_set = radeon_crtc_mode_set,
+       .mode_set_base = radeon_crtc_set_base,
+       .prepare = radeon_crtc_prepare,
+       .commit = radeon_crtc_commit,
+};
+
+
+void radeon_legacy_init_crtc(struct drm_device *dev,
+                              struct radeon_crtc *radeon_crtc)
+{
+       if (radeon_crtc->crtc_id == 1)
+               radeon_crtc->crtc_offset = RADEON_CRTC2_H_TOTAL_DISP - RADEON_CRTC_H_TOTAL_DISP;
+       drm_crtc_helper_add(&radeon_crtc->base, &legacy_helper_funcs);
+}
+
+void radeon_init_disp_bw_legacy(struct drm_device *dev,
+                               struct drm_display_mode *mode1,
+                               uint32_t pixel_bytes1,
+                               struct drm_display_mode *mode2,
+                               uint32_t pixel_bytes2)
+{
+       struct radeon_device *rdev = dev->dev_private;
+       fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff;
+       fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff;
+       fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff;
+       uint32_t temp, data, mem_trcd, mem_trp, mem_tras;
+       fixed20_12 memtcas_ff[8] = {
+               fixed_init(1),
+               fixed_init(2),
+               fixed_init(3),
+               fixed_init(0),
+               fixed_init_half(1),
+               fixed_init_half(2),
+               fixed_init(0),
+       };
+       fixed20_12 memtcas_rs480_ff[8] = {
+               fixed_init(0),
+               fixed_init(1),
+               fixed_init(2),
+               fixed_init(3),
+               fixed_init(0),
+               fixed_init_half(1),
+               fixed_init_half(2),
+               fixed_init_half(3),
+       };
+       fixed20_12 memtcas2_ff[8] = {
+               fixed_init(0),
+               fixed_init(1),
+               fixed_init(2),
+               fixed_init(3),
+               fixed_init(4),
+               fixed_init(5),
+               fixed_init(6),
+               fixed_init(7),
+       };
+       fixed20_12 memtrbs[8] = {
+               fixed_init(1),
+               fixed_init_half(1),
+               fixed_init(2),
+               fixed_init_half(2),
+               fixed_init(3),
+               fixed_init_half(3),
+               fixed_init(4),
+               fixed_init_half(4)
+       };
+       fixed20_12 memtrbs_r4xx[8] = {
+               fixed_init(4),
+               fixed_init(5),
+               fixed_init(6),
+               fixed_init(7),
+               fixed_init(8),
+               fixed_init(9),
+               fixed_init(10),
+               fixed_init(11)
+       };
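+       /*
+        * The tables above are 20.12 fixed-point values: fixed_init(N)
+        * encodes N whole clocks and fixed_init_half(N) appears to encode
+        * N + 0.5 clocks.  They are indexed by the CAS/RAS-related fields
+        * decoded from the memory controller registers further down.
+        */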
+       fixed20_12 min_mem_eff;
+       fixed20_12 mc_latency_sclk, mc_latency_mclk, k1;
+       fixed20_12 cur_latency_mclk, cur_latency_sclk;
+       fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate,
+               disp_drain_rate2, read_return_rate;
+       fixed20_12 time_disp1_drop_priority;
+       int c;
+       int cur_size = 16;       /* in octawords */
+       int critical_point = 0, critical_point2;
+/*     uint32_t read_return_rate, time_disp1_drop_priority; */
+       int stop_req, max_stop_req;
+
+       min_mem_eff.full = rfixed_const_8(0);
+       /* get modes */
+       if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) {
+               uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER);
+               mc_init_misc_lat_timer &= ~(R300_MC_DISP1R_INIT_LAT_MASK << R300_MC_DISP1R_INIT_LAT_SHIFT);
+               mc_init_misc_lat_timer &= ~(R300_MC_DISP0R_INIT_LAT_MASK << R300_MC_DISP0R_INIT_LAT_SHIFT);
+               /* check crtc enables */
+               if (mode2)
+                       mc_init_misc_lat_timer |= (1 << R300_MC_DISP1R_INIT_LAT_SHIFT);
+               if (mode1)
+                       mc_init_misc_lat_timer |= (1 << R300_MC_DISP0R_INIT_LAT_SHIFT);
+               WREG32(R300_MC_INIT_MISC_LAT_TIMER, mc_init_misc_lat_timer);
+       }
+
+       /*
+        * determine if there is enough bandwidth for the current mode
+        */
+       mclk_ff.full = rfixed_const(rdev->clock.default_mclk);
+       temp_ff.full = rfixed_const(100);
+       mclk_ff.full = rfixed_div(mclk_ff, temp_ff);
+       sclk_ff.full = rfixed_const(rdev->clock.default_sclk);
+       sclk_ff.full = rfixed_div(sclk_ff, temp_ff);
+
+       temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
+       temp_ff.full = rfixed_const(temp);
+       mem_bw.full = rfixed_mul(mclk_ff, temp_ff);
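+       /*
+        * mem_bw = (default_mclk / 100) * bytes moved per memory clock
+        * (bus width / 8, doubled for DDR).  It is derated by min_mem_eff
+        * before being compared against the summed display bandwidth below.
+        */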
+
+       pix_clk.full = 0;
+       pix_clk2.full = 0;
+       peak_disp_bw.full = 0;
+       if (mode1) {
+               temp_ff.full = rfixed_const(1000);
+               pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */
+               pix_clk.full = rfixed_div(pix_clk, temp_ff);
+               temp_ff.full = rfixed_const(pixel_bytes1);
+               peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff);
+       }
+       if (mode2) {
+               temp_ff.full = rfixed_const(1000);
+               pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */
+               pix_clk2.full = rfixed_div(pix_clk2, temp_ff);
+               temp_ff.full = rfixed_const(pixel_bytes2);
+               peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff);
+       }
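+       /*
+        * Each enabled CRTC contributes (pixel clock in MHz) * (bytes per
+        * pixel) to peak_disp_bw; mode->clock is in kHz, hence the divide
+        * by 1000.  For example, a 108 MHz mode at 32 bpp adds 432 in
+        * these units.
+        */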
+
+       mem_bw.full = rfixed_mul(mem_bw, min_mem_eff);
+       if (peak_disp_bw.full >= mem_bw.full) {
+               DRM_ERROR("You may not have enough display bandwidth for the current mode\n"
+                         "If you have a flickering problem, try lowering the resolution, refresh rate, or color depth\n");
+       }
+
+       /* Get values from the MEM_TIMING_CNTL register...converting its contents. */
+       temp = RREG32(RADEON_MEM_TIMING_CNTL);
+       if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) { /* RV100, M6, IGPs */
+               mem_trcd = ((temp >> 2) & 0x3) + 1;
+               mem_trp  = ((temp & 0x3)) + 1;
+               mem_tras = ((temp & 0x70) >> 4) + 1;
+       } else if (rdev->family == CHIP_R300 ||
+                  rdev->family == CHIP_R350) { /* r300, r350 */
+               mem_trcd = (temp & 0x7) + 1;
+               mem_trp = ((temp >> 8) & 0x7) + 1;
+               mem_tras = ((temp >> 11) & 0xf) + 4;
+       } else if (rdev->family == CHIP_RV350 ||
+                  rdev->family <= CHIP_RV380) {
+               /* rv3x0 */
+               mem_trcd = (temp & 0x7) + 3;
+               mem_trp = ((temp >> 8) & 0x7) + 3;
+               mem_tras = ((temp >> 11) & 0xf) + 6;
+       } else if (rdev->family == CHIP_R420 ||
+                  rdev->family == CHIP_R423 ||
+                  rdev->family == CHIP_RV410) {
+               /* r4xx */
+               mem_trcd = (temp & 0xf) + 3;
+               if (mem_trcd > 15)
+                       mem_trcd = 15;
+               mem_trp = ((temp >> 8) & 0xf) + 3;
+               if (mem_trp > 15)
+                       mem_trp = 15;
+               mem_tras = ((temp >> 12) & 0x1f) + 6;
+               if (mem_tras > 31)
+                       mem_tras = 31;
+       } else { /* RV200, R200 */
+               mem_trcd = (temp & 0x7) + 1;
+               mem_trp = ((temp >> 8) & 0x7) + 1;
+               mem_tras = ((temp >> 12) & 0xf) + 4;
+       }
+       /* convert to FF */
+       trcd_ff.full = rfixed_const(mem_trcd);
+       trp_ff.full = rfixed_const(mem_trp);
+       tras_ff.full = rfixed_const(mem_tras);
+
+       /* Get values from the MEM_SDRAM_MODE_REG register...converting its contents. */
+       temp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
+       data = (temp & (7 << 20)) >> 20;
+       if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) {
+               if (rdev->family == CHIP_RS480) /* don't think rs400 */
+                       tcas_ff = memtcas_rs480_ff[data];
+               else
+                       tcas_ff = memtcas_ff[data];
+       } else
+               tcas_ff = memtcas2_ff[data];
+
+       if (rdev->family == CHIP_RS400 ||
+           rdev->family == CHIP_RS480) {
+               /* extra CAS latency stored in bits 23-25, 0-4 clocks */
+               data = (temp >> 23) & 0x7;
+               if (data < 5)
+                       tcas_ff.full += rfixed_const(data);
+       }
+
+       if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
+               /* on the R300, Tcas is included in Trbs.
+                */
+               temp = RREG32(RADEON_MEM_CNTL);
+               data = (R300_MEM_NUM_CHANNELS_MASK & temp);
+               if (data == 1) {
+                       if (R300_MEM_USE_CD_CH_ONLY & temp) {
+                               temp = RREG32(R300_MC_IND_INDEX);
+                               temp &= ~R300_MC_IND_ADDR_MASK;
+                               temp |= R300_MC_READ_CNTL_CD_mcind;
+                               WREG32(R300_MC_IND_INDEX, temp);
+                               temp = RREG32(R300_MC_IND_DATA);
+                               data = (R300_MEM_RBS_POSITION_C_MASK & temp);
+                       } else {
+                               temp = RREG32(R300_MC_READ_CNTL_AB);
+                               data = (R300_MEM_RBS_POSITION_A_MASK & temp);
+                       }
+               } else {
+                       temp = RREG32(R300_MC_READ_CNTL_AB);
+                       data = (R300_MEM_RBS_POSITION_A_MASK & temp);
+               }
+               if (rdev->family == CHIP_RV410 ||
+                   rdev->family == CHIP_R420 ||
+                   rdev->family == CHIP_R423)
+                       trbs_ff = memtrbs_r4xx[data];
+               else
+                       trbs_ff = memtrbs[data];
+               tcas_ff.full += trbs_ff.full;
+       }
+
+       sclk_eff_ff.full = sclk_ff.full;
+
+       if (rdev->flags & RADEON_IS_AGP) {
+               fixed20_12 agpmode_ff;
+               agpmode_ff.full = rfixed_const(radeon_agpmode);
+               temp_ff.full = rfixed_const_666(16);
+               sclk_eff_ff.full -= rfixed_mul(agpmode_ff, temp_ff);
+       }
+       /* TODO PCIE lanes may affect this - agpmode == 16?? */
+
+       if (ASIC_IS_R300(rdev)) {
+               sclk_delay_ff.full = rfixed_const(250);
+       } else {
+               if ((rdev->family == CHIP_RV100) ||
+                   rdev->flags & RADEON_IS_IGP) {
+                       if (rdev->mc.vram_is_ddr)
+                               sclk_delay_ff.full = rfixed_const(41);
+                       else
+                               sclk_delay_ff.full = rfixed_const(33);
+               } else {
+                       if (rdev->mc.vram_width == 128)
+                               sclk_delay_ff.full = rfixed_const(57);
+                       else
+                               sclk_delay_ff.full = rfixed_const(41);
+               }
+       }
+
+       mc_latency_sclk.full = rfixed_div(sclk_delay_ff, sclk_eff_ff);
+
+       if (rdev->mc.vram_is_ddr) {
+               if (rdev->mc.vram_width == 32) {
+                       k1.full = rfixed_const(40);
+                       c  = 3;
+               } else {
+                       k1.full = rfixed_const(20);
+                       c  = 1;
+               }
+       } else {
+               k1.full = rfixed_const(40);
+               c  = 3;
+       }
+
+       temp_ff.full = rfixed_const(2);
+       mc_latency_mclk.full = rfixed_mul(trcd_ff, temp_ff);
+       temp_ff.full = rfixed_const(c);
+       mc_latency_mclk.full += rfixed_mul(tcas_ff, temp_ff);
+       temp_ff.full = rfixed_const(4);
+       mc_latency_mclk.full += rfixed_mul(tras_ff, temp_ff);
+       mc_latency_mclk.full += rfixed_mul(trp_ff, temp_ff);
+       mc_latency_mclk.full += k1.full;
+
+       mc_latency_mclk.full = rfixed_div(mc_latency_mclk, mclk_ff);
+       mc_latency_mclk.full += rfixed_div(temp_ff, sclk_eff_ff);
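+       /*
+        * At this point:
+        *   mc_latency_mclk = (2*Trcd + c*Tcas + 4*Tras + 4*Trp + k1) / mclk
+        *                     + 4 / sclk_eff
+        * with c and k1 chosen above from the memory type and bus width.
+        */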
+
+       /*
+         HW cursor time assuming worst case of full size colour cursor.
+       */
+       temp_ff.full = rfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))));
+       temp_ff.full += trcd_ff.full;
+       if (temp_ff.full < tras_ff.full)
+               temp_ff.full = tras_ff.full;
+       cur_latency_mclk.full = rfixed_div(temp_ff, mclk_ff);
+
+       temp_ff.full = rfixed_const(cur_size);
+       cur_latency_sclk.full = rfixed_div(temp_ff, sclk_eff_ff);
+       /*
+         Find the total latency for the display data.
+       */
+       disp_latency_overhead.full = rfixed_const(80);
+       disp_latency_overhead.full = rfixed_div(disp_latency_overhead, sclk_ff);
+       mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full;
+       mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full;
+
+       if (mc_latency_mclk.full > mc_latency_sclk.full)
+               disp_latency.full = mc_latency_mclk.full;
+       else
+               disp_latency.full = mc_latency_sclk.full;
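+       /*
+        * Total display latency is the worse of the memory-clock and
+        * system-clock paths, each already including the 80/sclk overhead
+        * and the worst-case cursor fetch latency.
+        */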
+
+       /* setup Max GRPH_STOP_REQ default value */
+       if (ASIC_IS_RV100(rdev))
+               max_stop_req = 0x5c;
+       else
+               max_stop_req = 0x7c;
+
+       if (mode1) {
+               /*  CRTC1
+                   Set GRPH_BUFFER_CNTL register using h/w defined optimal values.
+                   GRPH_STOP_REQ <= MIN[ 0x7C, (CRTC_H_DISP + 1) * (bit depth) / 0x10 ]
+               */
+               stop_req = mode1->hdisplay * pixel_bytes1 / 16;
+
+               if (stop_req > max_stop_req)
+                       stop_req = max_stop_req;
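+               /*
+                * Example: a 1280 pixel wide mode at 4 bytes per pixel
+                * gives stop_req = 1280 * 4 / 16 = 320, which is then
+                * clamped to max_stop_req (0x7c here on most chips).
+                */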
+
+               /*
+                 Find the drain rate of the display buffer.
+               */
+               temp_ff.full = rfixed_const((16/pixel_bytes1));
+               disp_drain_rate.full = rfixed_div(pix_clk, temp_ff);
+
+               /*
+                 Find the critical point of the display buffer.
+               */
+               crit_point_ff.full = rfixed_mul(disp_drain_rate, disp_latency);
+               crit_point_ff.full += rfixed_const_half(0);
+
+               critical_point = rfixed_trunc(crit_point_ff);
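+               /*
+                * critical_point is the drain rate multiplied by the total
+                * latency, rounded to the nearest integer: roughly how far
+                * the display FIFO may drain before its refill requests
+                * must be treated as critical.
+                */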
+
+               if (rdev->disp_priority == 2) {
+                       critical_point = 0;
+               }
+
+               /*
+                 The critical point should never be above max_stop_req-4.  Setting
+                 GRPH_CRITICAL_CNTL = 0 will thus force high priority all the time.
+               */
+               if (max_stop_req - critical_point < 4)
+                       critical_point = 0;
+
+               if (critical_point == 0 && mode2 && rdev->family == CHIP_R300) {
+                       /* some R300 cards have a problem with this set to 0 when CRTC2 is enabled. */
+                       critical_point = 0x10;
+               }
+
+               temp = RREG32(RADEON_GRPH_BUFFER_CNTL);
+               temp &= ~(RADEON_GRPH_STOP_REQ_MASK);
+               temp |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
+               temp &= ~(RADEON_GRPH_START_REQ_MASK);
+               if ((rdev->family == CHIP_R350) &&
+                   (stop_req > 0x15)) {
+                       stop_req -= 0x10;
+               }
+               temp |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
+               temp |= RADEON_GRPH_BUFFER_SIZE;
+               temp &= ~(RADEON_GRPH_CRITICAL_CNTL   |
+                         RADEON_GRPH_CRITICAL_AT_SOF |
+                         RADEON_GRPH_STOP_CNTL);
+               /*
+                 Write the result into the register.
+               */
+               WREG32(RADEON_GRPH_BUFFER_CNTL, ((temp & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
+                                                      (critical_point << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
+
+#if 0
+               if ((rdev->family == CHIP_RS400) ||
+                   (rdev->family == CHIP_RS480)) {
+                       /* attempt to program RS400 disp regs correctly ??? */
+                       temp = RREG32(RS400_DISP1_REG_CNTL);
+                       temp &= ~(RS400_DISP1_START_REQ_LEVEL_MASK |
+                                 RS400_DISP1_STOP_REQ_LEVEL_MASK);
+                       WREG32(RS400_DISP1_REQ_CNTL1, (temp |
+                                                      (critical_point << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
+                                                      (critical_point << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
+                       temp = RREG32(RS400_DMIF_MEM_CNTL1);
+                       temp &= ~(RS400_DISP1_CRITICAL_POINT_START_MASK |
+                                 RS400_DISP1_CRITICAL_POINT_STOP_MASK);
+                       WREG32(RS400_DMIF_MEM_CNTL1, (temp |
+                                                     (critical_point << RS400_DISP1_CRITICAL_POINT_START_SHIFT) |
+                                                     (critical_point << RS400_DISP1_CRITICAL_POINT_STOP_SHIFT)));
+               }
+#endif
+
+               DRM_DEBUG("GRPH_BUFFER_CNTL from to %x\n",
+                         /*      (unsigned int)info->SavedReg->grph_buffer_cntl, */
+                         (unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL));
+       }
+
+       if (mode2) {
+               u32 grph2_cntl;
+               stop_req = mode2->hdisplay * pixel_bytes2 / 16;
+
+               if (stop_req > max_stop_req)
+                       stop_req = max_stop_req;
+
+               /*
+                 Find the drain rate of the display buffer.
+               */
+               temp_ff.full = rfixed_const((16/pixel_bytes2));
+               disp_drain_rate2.full = rfixed_div(pix_clk2, temp_ff);
+
+               grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL);
+               grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK);
+               grph2_cntl |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
+               grph2_cntl &= ~(RADEON_GRPH_START_REQ_MASK);
+               if ((rdev->family == CHIP_R350) &&
+                   (stop_req > 0x15)) {
+                       stop_req -= 0x10;
+               }
+               grph2_cntl |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
+               grph2_cntl |= RADEON_GRPH_BUFFER_SIZE;
+               grph2_cntl &= ~(RADEON_GRPH_CRITICAL_CNTL   |
+                         RADEON_GRPH_CRITICAL_AT_SOF |
+                         RADEON_GRPH_STOP_CNTL);
+
+               if ((rdev->family == CHIP_RS100) ||
+                   (rdev->family == CHIP_RS200))
+                       critical_point2 = 0;
+               else {
+                       temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128;
+                       temp_ff.full = rfixed_const(temp);
+                       temp_ff.full = rfixed_mul(mclk_ff, temp_ff);
+                       if (sclk_ff.full < temp_ff.full)
+                               temp_ff.full = sclk_ff.full;
+
+                       read_return_rate.full = temp_ff.full;
+
+                       if (mode1) {
+                               temp_ff.full = read_return_rate.full - disp_drain_rate.full;
+                               time_disp1_drop_priority.full = rfixed_div(crit_point_ff, temp_ff);
+                       } else {
+                               time_disp1_drop_priority.full = 0;
+                       }
+                       crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full;
+                       crit_point_ff.full = rfixed_mul(crit_point_ff, disp_drain_rate2);
+                       crit_point_ff.full += rfixed_const_half(0);
+
+                       critical_point2 = rfixed_trunc(crit_point_ff);
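+                       /*
+                        * critical_point2 ~= (2 * disp_latency +
+                        * time_disp1_drop_priority) * disp_drain_rate2,
+                        * rounded to the nearest integer; read_return_rate
+                        * is the lower of the memory-clock and system-clock
+                        * derived rates computed above.
+                        */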
+
+                       if (rdev->disp_priority == 2) {
+                               critical_point2 = 0;
+                       }
+
+                       if (max_stop_req - critical_point2 < 4)
+                               critical_point2 = 0;
+
+               }
+
+               if (critical_point2 == 0 && rdev->family == CHIP_R300) {
+                       /* some R300 cards have a problem with this set to 0 */
+                       critical_point2 = 0x10;
+               }
+
+               WREG32(RADEON_GRPH2_BUFFER_CNTL, ((grph2_cntl & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
+                                                 (critical_point2 << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
+
+               if ((rdev->family == CHIP_RS400) ||
+                   (rdev->family == CHIP_RS480)) {
+#if 0
+                       /* attempt to program RS400 disp2 regs correctly ??? */
+                       temp = RREG32(RS400_DISP2_REQ_CNTL1);
+                       temp &= ~(RS400_DISP2_START_REQ_LEVEL_MASK |
+                                 RS400_DISP2_STOP_REQ_LEVEL_MASK);
+                       WREG32(RS400_DISP2_REQ_CNTL1, (temp |
+                                                      (critical_point2 << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
+                                                      (critical_point2 << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
+                       temp = RREG32(RS400_DISP2_REQ_CNTL2);
+                       temp &= ~(RS400_DISP2_CRITICAL_POINT_START_MASK |
+                                 RS400_DISP2_CRITICAL_POINT_STOP_MASK);
+                       WREG32(RS400_DISP2_REQ_CNTL2, (temp |
+                                                      (critical_point2 << RS400_DISP2_CRITICAL_POINT_START_SHIFT) |
+                                                      (critical_point2 << RS400_DISP2_CRITICAL_POINT_STOP_SHIFT)));
+#endif
+                       WREG32(RS400_DISP2_REQ_CNTL1, 0x105DC1CC);
+                       WREG32(RS400_DISP2_REQ_CNTL2, 0x2749D000);
+                       WREG32(RS400_DMIF_MEM_CNTL1,  0x29CA71DC);
+                       WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC);
+               }
+
+               DRM_DEBUG("GRPH2_BUFFER_CNTL from to %x\n",
+                         (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL));
+       }
+}
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
new file mode 100644 (file)
index 0000000..2c2f42d
--- /dev/null
@@ -0,0 +1,1288 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ */
+#include "drmP.h"
+#include "drm_crtc_helper.h"
+#include "radeon_drm.h"
+#include "radeon.h"
+#include "atom.h"
+
+
+static void radeon_legacy_rmx_mode_set(struct drm_encoder *encoder,
+                                      struct drm_display_mode *mode,
+                                      struct drm_display_mode *adjusted_mode)
+{
+       struct drm_device *dev = encoder->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+       int    xres = mode->hdisplay;
+       int    yres = mode->vdisplay;
+       bool   hscale = true, vscale = true;
+       int    hsync_wid;
+       int    vsync_wid;
+       int    hsync_start;
+       uint32_t scale, inc;
+       uint32_t fp_horz_stretch, fp_vert_stretch, crtc_more_cntl, fp_horz_vert_active;
+       uint32_t fp_h_sync_strt_wid, fp_v_sync_strt_wid, fp_crtc_h_total_disp, fp_crtc_v_total_disp;
+       struct radeon_native_mode *native_mode = &radeon_encoder->native_mode;
+
+       DRM_DEBUG("\n");
+
+       fp_vert_stretch = RREG32(RADEON_FP_VERT_STRETCH) &
+               (RADEON_VERT_STRETCH_RESERVED |
+                RADEON_VERT_AUTO_RATIO_INC);
+       fp_horz_stretch = RREG32(RADEON_FP_HORZ_STRETCH) &
+               (RADEON_HORZ_FP_LOOP_STRETCH |
+                RADEON_HORZ_AUTO_RATIO_INC);
+
+       crtc_more_cntl = 0;
+       if ((rdev->family == CHIP_RS100) ||
+           (rdev->family == CHIP_RS200)) {
+               /* This works around an ASIC bug for RMX: some versions
+                  of the BIOS don't have this register initialized correctly. */
+               crtc_more_cntl |= RADEON_CRTC_H_CUTOFF_ACTIVE_EN;
+       }
+
+
+       fp_crtc_h_total_disp = ((((mode->crtc_htotal / 8) - 1) & 0x3ff)
+                               | ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16));
+
+       hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8;
+       if (!hsync_wid)
+               hsync_wid = 1;
+       hsync_start = mode->crtc_hsync_start - 8;
+
+       fp_h_sync_strt_wid = ((hsync_start & 0x1fff)
+                             | ((hsync_wid & 0x3f) << 16)
+                             | ((mode->flags & DRM_MODE_FLAG_NHSYNC)
+                                ? RADEON_CRTC_H_SYNC_POL
+                                : 0));
+
+       fp_crtc_v_total_disp = (((mode->crtc_vtotal - 1) & 0xffff)
+                               | ((mode->crtc_vdisplay - 1) << 16));
+
+       vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start;
+       if (!vsync_wid)
+               vsync_wid = 1;
+
+       fp_v_sync_strt_wid = (((mode->crtc_vsync_start - 1) & 0xfff)
+                             | ((vsync_wid & 0x1f) << 16)
+                             | ((mode->flags & DRM_MODE_FLAG_NVSYNC)
+                                ? RADEON_CRTC_V_SYNC_POL
+                                : 0));
+
+       fp_horz_vert_active = 0;
+
+       if (native_mode->panel_xres == 0 ||
+           native_mode->panel_yres == 0) {
+               hscale = false;
+               vscale = false;
+       } else {
+               if (xres > native_mode->panel_xres)
+                       xres = native_mode->panel_xres;
+               if (yres > native_mode->panel_yres)
+                       yres = native_mode->panel_yres;
+
+               if (xres == native_mode->panel_xres)
+                       hscale = false;
+               if (yres == native_mode->panel_yres)
+                       vscale = false;
+       }
+
+       if (radeon_encoder->flags & RADEON_USE_RMX) {
+               if (radeon_encoder->rmx_type != RMX_CENTER) {
+                       if (!hscale)
+                               fp_horz_stretch |= ((xres/8-1) << 16);
+                       else {
+                               inc = (fp_horz_stretch & RADEON_HORZ_AUTO_RATIO_INC) ? 1 : 0;
+                               scale = ((xres + inc) * RADEON_HORZ_STRETCH_RATIO_MAX)
+                                       / native_mode->panel_xres + 1;
+                               fp_horz_stretch |= (((scale) & RADEON_HORZ_STRETCH_RATIO_MASK) |
+                                                   RADEON_HORZ_STRETCH_BLEND |
+                                                   RADEON_HORZ_STRETCH_ENABLE |
+                                                   ((native_mode->panel_xres/8-1) << 16));
+                       }
+
+                       if (!vscale)
+                               fp_vert_stretch |= ((yres-1) << 12);
+                       else {
+                               inc = (fp_vert_stretch & RADEON_VERT_AUTO_RATIO_INC) ? 1 : 0;
+                               scale = ((yres + inc) * RADEON_VERT_STRETCH_RATIO_MAX)
+                                       / native_mode->panel_yres + 1;
+                               fp_vert_stretch |= (((scale) & RADEON_VERT_STRETCH_RATIO_MASK) |
+                                                   RADEON_VERT_STRETCH_ENABLE |
+                                                   RADEON_VERT_STRETCH_BLEND |
+                                                   ((native_mode->panel_yres-1) << 12));
+                       }
+               } else if (radeon_encoder->rmx_type == RMX_CENTER) {
+                       int    blank_width;
+
+                       fp_horz_stretch |= ((xres/8-1) << 16);
+                       fp_vert_stretch |= ((yres-1) << 12);
+
+                       crtc_more_cntl |= (RADEON_CRTC_AUTO_HORZ_CENTER_EN |
+                                          RADEON_CRTC_AUTO_VERT_CENTER_EN);
+
+                       blank_width = (mode->crtc_hblank_end - mode->crtc_hblank_start) / 8;
+                       if (blank_width > 110)
+                               blank_width = 110;
+
+                       fp_crtc_h_total_disp = (((blank_width) & 0x3ff)
+                                               | ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16));
+
+                       hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8;
+                       if (!hsync_wid)
+                               hsync_wid = 1;
+
+                       fp_h_sync_strt_wid = ((((mode->crtc_hsync_start - mode->crtc_hblank_start) / 8) & 0x1fff)
+                                             | ((hsync_wid & 0x3f) << 16)
+                                             | ((mode->flags & DRM_MODE_FLAG_NHSYNC)
+                                                ? RADEON_CRTC_H_SYNC_POL
+                                                : 0));
+
+                       fp_crtc_v_total_disp = (((mode->crtc_vblank_end - mode->crtc_vblank_start) & 0xffff)
+                                               | ((mode->crtc_vdisplay - 1) << 16));
+
+                       vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start;
+                       if (!vsync_wid)
+                               vsync_wid = 1;
+
+                       fp_v_sync_strt_wid = ((((mode->crtc_vsync_start - mode->crtc_vblank_start) & 0xfff)
+                                              | ((vsync_wid & 0x1f) << 16)
+                                              | ((mode->flags & DRM_MODE_FLAG_NVSYNC)
+                                                 ? RADEON_CRTC_V_SYNC_POL
+                                                 : 0)));
+
+                       fp_horz_vert_active = (((native_mode->panel_yres) & 0xfff) |
+                                              (((native_mode->panel_xres / 8) & 0x1ff) << 16));
+               }
+       } else {
+               fp_horz_stretch |= ((xres/8-1) << 16);
+               fp_vert_stretch |= ((yres-1) << 12);
+       }
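+       /*
+        * In the stretched (non-centered) RMX case above the ratio is
+        * scale ~= RATIO_MAX * xres / panel_xres, i.e. the source to panel
+        * size ratio in RATIO_MAX units with blending enabled; the centered
+        * case keeps a 1:1 ratio and lets the CRTC auto-center the active
+        * area instead.
+        */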
+
+       WREG32(RADEON_FP_HORZ_STRETCH,      fp_horz_stretch);
+       WREG32(RADEON_FP_VERT_STRETCH,      fp_vert_stretch);
+       WREG32(RADEON_CRTC_MORE_CNTL,       crtc_more_cntl);
+       WREG32(RADEON_FP_HORZ_VERT_ACTIVE,  fp_horz_vert_active);
+       WREG32(RADEON_FP_H_SYNC_STRT_WID,   fp_h_sync_strt_wid);
+       WREG32(RADEON_FP_V_SYNC_STRT_WID,   fp_v_sync_strt_wid);
+       WREG32(RADEON_FP_CRTC_H_TOTAL_DISP, fp_crtc_h_total_disp);
+       WREG32(RADEON_FP_CRTC_V_TOTAL_DISP, fp_crtc_v_total_disp);
+
+}
+
+static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
+{
+       struct drm_device *dev = encoder->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+       uint32_t lvds_gen_cntl, lvds_pll_cntl, pixclks_cntl, disp_pwr_man;
+       int panel_pwr_delay = 2000;
+       DRM_DEBUG("\n");
+
+       if (radeon_encoder->enc_priv) {
+               if (rdev->is_atom_bios) {
+                       struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv;
+                       panel_pwr_delay = lvds->panel_pwr_delay;
+               } else {
+                       struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv;
+                       panel_pwr_delay = lvds->panel_pwr_delay;
+               }
+       }
+
+       switch (mode) {
+       case DRM_MODE_DPMS_ON:
+               disp_pwr_man = RREG32(RADEON_DISP_PWR_MAN);
+               disp_pwr_man |= RADEON_AUTO_PWRUP_EN;
+               WREG32(RADEON_DISP_PWR_MAN, disp_pwr_man);
+               lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL);
+               lvds_pll_cntl |= RADEON_LVDS_PLL_EN;
+               WREG32(RADEON_LVDS_PLL_CNTL, lvds_pll_cntl);
+               udelay(1000);
+
+               lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL);
+               lvds_pll_cntl &= ~RADEON_LVDS_PLL_RESET;
+               WREG32(RADEON_LVDS_PLL_CNTL, lvds_pll_cntl);
+
+               lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
+               lvds_gen_cntl |= (RADEON_LVDS_ON | RADEON_LVDS_EN | RADEON_LVDS_DIGON | RADEON_LVDS_BLON);
+               lvds_gen_cntl &= ~(RADEON_LVDS_DISPLAY_DIS);
+               udelay(panel_pwr_delay * 1000);
+               WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
+               break;
+       case DRM_MODE_DPMS_STANDBY:
+       case DRM_MODE_DPMS_SUSPEND:
+       case DRM_MODE_DPMS_OFF:
+               pixclks_cntl = RREG32_PLL(RADEON_PIXCLKS_CNTL);
+               WREG32_PLL_P(RADEON_PIXCLKS_CNTL, 0, ~RADEON_PIXCLK_LVDS_ALWAYS_ONb);
+               lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
+               lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS;
+               lvds_gen_cntl &= ~(RADEON_LVDS_ON | RADEON_LVDS_BLON | RADEON_LVDS_EN | RADEON_LVDS_DIGON);
+               udelay(panel_pwr_delay * 1000);
+               WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
+               WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
+               break;
+       }
+
+       if (rdev->is_atom_bios)
+               radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+       else
+               radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+}
+
+static void radeon_legacy_lvds_prepare(struct drm_encoder *encoder)
+{
+       struct radeon_device *rdev = encoder->dev->dev_private;
+
+       if (rdev->is_atom_bios)
+               radeon_atom_output_lock(encoder, true);
+       else
+               radeon_combios_output_lock(encoder, true);
+       radeon_legacy_lvds_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void radeon_legacy_lvds_commit(struct drm_encoder *encoder)
+{
+       struct radeon_device *rdev = encoder->dev->dev_private;
+
+       radeon_legacy_lvds_dpms(encoder, DRM_MODE_DPMS_ON);
+       if (rdev->is_atom_bios)
+               radeon_atom_output_lock(encoder, false);
+       else
+               radeon_combios_output_lock(encoder, false);
+}
+
+static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder,
+                                       struct drm_display_mode *mode,
+                                       struct drm_display_mode *adjusted_mode)
+{
+       struct drm_device *dev = encoder->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+       uint32_t lvds_pll_cntl, lvds_gen_cntl, lvds_ss_gen_cntl;
+
+       DRM_DEBUG("\n");
+
+       if (radeon_crtc->crtc_id == 0)
+               radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode);
+
+       lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL);
+       lvds_pll_cntl &= ~RADEON_LVDS_PLL_EN;
+
+       lvds_ss_gen_cntl = RREG32(RADEON_LVDS_SS_GEN_CNTL);
+       if (!rdev->is_atom_bios) {
+               struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv;
+               if (lvds) {
+                       DRM_DEBUG("bios LVDS_GEN_CNTL: 0x%x\n", lvds->lvds_gen_cntl);
+                       lvds_gen_cntl = lvds->lvds_gen_cntl;
+                       lvds_ss_gen_cntl &= ~((0xf << RADEON_LVDS_PWRSEQ_DELAY1_SHIFT) |
+                                             (0xf << RADEON_LVDS_PWRSEQ_DELAY2_SHIFT));
+                       lvds_ss_gen_cntl |= ((lvds->panel_digon_delay << RADEON_LVDS_PWRSEQ_DELAY1_SHIFT) |
+                                            (lvds->panel_blon_delay << RADEON_LVDS_PWRSEQ_DELAY2_SHIFT));
+               } else
+                       lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
+       } else
+               lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
+       lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS;
+       lvds_gen_cntl &= ~(RADEON_LVDS_ON |
+                          RADEON_LVDS_BLON |
+                          RADEON_LVDS_EN |
+                          RADEON_LVDS_RST_FM);
+
+       if (ASIC_IS_R300(rdev))
+               lvds_pll_cntl &= ~(R300_LVDS_SRC_SEL_MASK);
+
+       if (radeon_crtc->crtc_id == 0) {
+               if (ASIC_IS_R300(rdev)) {
+                       if (radeon_encoder->flags & RADEON_USE_RMX)
+                               lvds_pll_cntl |= R300_LVDS_SRC_SEL_RMX;
+               } else
+                       lvds_gen_cntl &= ~RADEON_LVDS_SEL_CRTC2;
+       } else {
+               if (ASIC_IS_R300(rdev))
+                       lvds_pll_cntl |= R300_LVDS_SRC_SEL_CRTC2;
+               else
+                       lvds_gen_cntl |= RADEON_LVDS_SEL_CRTC2;
+       }
+
+       WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
+       WREG32(RADEON_LVDS_PLL_CNTL, lvds_pll_cntl);
+       WREG32(RADEON_LVDS_SS_GEN_CNTL, lvds_ss_gen_cntl);
+
+       if (rdev->family == CHIP_RV410)
+               WREG32(RADEON_CLOCK_CNTL_INDEX, 0);
+
+       if (rdev->is_atom_bios)
+               radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+       else
+               radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+}
+
+static bool radeon_legacy_lvds_mode_fixup(struct drm_encoder *encoder,
+                                         struct drm_display_mode *mode,
+                                         struct drm_display_mode *adjusted_mode)
+{
+       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+
+       drm_mode_set_crtcinfo(adjusted_mode, 0);
+
+       radeon_encoder->flags &= ~RADEON_USE_RMX;
+
+       if (radeon_encoder->rmx_type != RMX_OFF)
+               radeon_rmx_mode_fixup(encoder, mode, adjusted_mode);
+
+       return true;
+}
+
+static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = {
+       .dpms = radeon_legacy_lvds_dpms,
+       .mode_fixup = radeon_legacy_lvds_mode_fixup,
+       .prepare = radeon_legacy_lvds_prepare,
+       .mode_set = radeon_legacy_lvds_mode_set,
+       .commit = radeon_legacy_lvds_commit,
+};
+
+
+static const struct drm_encoder_funcs radeon_legacy_lvds_enc_funcs = {
+       .destroy = radeon_enc_destroy,
+};
+
+static bool radeon_legacy_primary_dac_mode_fixup(struct drm_encoder *encoder,
+                                                struct drm_display_mode *mode,
+                                                struct drm_display_mode *adjusted_mode)
+{
+
+       drm_mode_set_crtcinfo(adjusted_mode, 0);
+
+       return true;
+}
+
+static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode)
+{
+       struct drm_device *dev = encoder->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       uint32_t crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
+       uint32_t dac_cntl = RREG32(RADEON_DAC_CNTL);
+       uint32_t dac_macro_cntl = RREG32(RADEON_DAC_MACRO_CNTL);
+
+       DRM_DEBUG("\n");
+
+       switch (mode) {
+       case DRM_MODE_DPMS_ON:
+               crtc_ext_cntl |= RADEON_CRTC_CRT_ON;
+               dac_cntl &= ~RADEON_DAC_PDWN;
+               dac_macro_cntl &= ~(RADEON_DAC_PDWN_R |
+                                   RADEON_DAC_PDWN_G |
+                                   RADEON_DAC_PDWN_B);
+               break;
+       case DRM_MODE_DPMS_STANDBY:
+       case DRM_MODE_DPMS_SUSPEND:
+       case DRM_MODE_DPMS_OFF:
+               crtc_ext_cntl &= ~RADEON_CRTC_CRT_ON;
+               dac_cntl |= RADEON_DAC_PDWN;
+               dac_macro_cntl |= (RADEON_DAC_PDWN_R |
+                                  RADEON_DAC_PDWN_G |
+                                  RADEON_DAC_PDWN_B);
+               break;
+       }
+
+       WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
+       WREG32(RADEON_DAC_CNTL, dac_cntl);
+       WREG32(RADEON_DAC_MACRO_CNTL, dac_macro_cntl);
+
+       if (rdev->is_atom_bios)
+               radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+       else
+               radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+}
+
+static void radeon_legacy_primary_dac_prepare(struct drm_encoder *encoder)
+{
+       struct radeon_device *rdev = encoder->dev->dev_private;
+
+       if (rdev->is_atom_bios)
+               radeon_atom_output_lock(encoder, true);
+       else
+               radeon_combios_output_lock(encoder, true);
+       radeon_legacy_primary_dac_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void radeon_legacy_primary_dac_commit(struct drm_encoder *encoder)
+{
+       struct radeon_device *rdev = encoder->dev->dev_private;
+
+       radeon_legacy_primary_dac_dpms(encoder, DRM_MODE_DPMS_ON);
+
+       if (rdev->is_atom_bios)
+               radeon_atom_output_lock(encoder, false);
+       else
+               radeon_combios_output_lock(encoder, false);
+}
+
+static void radeon_legacy_primary_dac_mode_set(struct drm_encoder *encoder,
+                                              struct drm_display_mode *mode,
+                                              struct drm_display_mode *adjusted_mode)
+{
+       struct drm_device *dev = encoder->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+       uint32_t disp_output_cntl, dac_cntl, dac2_cntl, dac_macro_cntl;
+
+       DRM_DEBUG("\n");
+
+       if (radeon_crtc->crtc_id == 0)
+               radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode);
+
+       if (radeon_crtc->crtc_id == 0) {
+               if (rdev->family == CHIP_R200 || ASIC_IS_R300(rdev)) {
+                       disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL) &
+                               ~(RADEON_DISP_DAC_SOURCE_MASK);
+                       WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl);
+               } else {
+                       dac2_cntl = RREG32(RADEON_DAC_CNTL2)  & ~(RADEON_DAC2_DAC_CLK_SEL);
+                       WREG32(RADEON_DAC_CNTL2, dac2_cntl);
+               }
+       } else {
+               if (rdev->family == CHIP_R200 || ASIC_IS_R300(rdev)) {
+                       disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL) &
+                               ~(RADEON_DISP_DAC_SOURCE_MASK);
+                       disp_output_cntl |= RADEON_DISP_DAC_SOURCE_CRTC2;
+                       WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl);
+               } else {
+                       dac2_cntl = RREG32(RADEON_DAC_CNTL2) | RADEON_DAC2_DAC_CLK_SEL;
+                       WREG32(RADEON_DAC_CNTL2, dac2_cntl);
+               }
+       }
+
+       dac_cntl = (RADEON_DAC_MASK_ALL |
+                   RADEON_DAC_VGA_ADR_EN |
+                   /* TODO 6-bits */
+                   RADEON_DAC_8BIT_EN);
+
+       WREG32_P(RADEON_DAC_CNTL,
+                      dac_cntl,
+                      RADEON_DAC_RANGE_CNTL |
+                      RADEON_DAC_BLANKING);
+
+       if (radeon_encoder->enc_priv) {
+               struct radeon_encoder_primary_dac *p_dac = (struct radeon_encoder_primary_dac *)radeon_encoder->enc_priv;
+               dac_macro_cntl = p_dac->ps2_pdac_adj;
+       } else
+               dac_macro_cntl = RREG32(RADEON_DAC_MACRO_CNTL);
+       dac_macro_cntl |= RADEON_DAC_PDWN_R | RADEON_DAC_PDWN_G | RADEON_DAC_PDWN_B;
+       WREG32(RADEON_DAC_MACRO_CNTL, dac_macro_cntl);
+
+       if (rdev->is_atom_bios)
+               radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+       else
+               radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+}
+
+static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_encoder *encoder,
+                                                                 struct drm_connector *connector)
+{
+       struct drm_device *dev = encoder->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       uint32_t vclk_ecp_cntl, crtc_ext_cntl;
+       uint32_t dac_ext_cntl, dac_cntl, dac_macro_cntl, tmp;
+       enum drm_connector_status found = connector_status_disconnected;
+       bool color = true;
+
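+       /*
+        * DAC load detection: force the CRT output on with a known DAC
+        * data value, enable the comparator, wait, and read DAC_CMP_OUTPUT
+        * to see whether a monitor load appears to be present.  All touched
+        * registers are restored afterwards.
+        */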
+       /* save the regs we need */
+       vclk_ecp_cntl = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
+       crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
+       dac_ext_cntl = RREG32(RADEON_DAC_EXT_CNTL);
+       dac_cntl = RREG32(RADEON_DAC_CNTL);
+       dac_macro_cntl = RREG32(RADEON_DAC_MACRO_CNTL);
+
+       tmp = vclk_ecp_cntl &
+               ~(RADEON_PIXCLK_ALWAYS_ONb | RADEON_PIXCLK_DAC_ALWAYS_ONb);
+       WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
+
+       tmp = crtc_ext_cntl | RADEON_CRTC_CRT_ON;
+       WREG32(RADEON_CRTC_EXT_CNTL, tmp);
+
+       tmp = RADEON_DAC_FORCE_BLANK_OFF_EN |
+               RADEON_DAC_FORCE_DATA_EN;
+
+       if (color)
+               tmp |= RADEON_DAC_FORCE_DATA_SEL_RGB;
+       else
+               tmp |= RADEON_DAC_FORCE_DATA_SEL_G;
+
+       if (ASIC_IS_R300(rdev))
+               tmp |= (0x1b6 << RADEON_DAC_FORCE_DATA_SHIFT);
+       else
+               tmp |= (0x180 << RADEON_DAC_FORCE_DATA_SHIFT);
+
+       WREG32(RADEON_DAC_EXT_CNTL, tmp);
+
+       tmp = dac_cntl & ~(RADEON_DAC_RANGE_CNTL_MASK | RADEON_DAC_PDWN);
+       tmp |= RADEON_DAC_RANGE_CNTL_PS2 | RADEON_DAC_CMP_EN;
+       WREG32(RADEON_DAC_CNTL, tmp);
+
+       tmp &= ~(RADEON_DAC_PDWN_R |
+                RADEON_DAC_PDWN_G |
+                RADEON_DAC_PDWN_B);
+
+       WREG32(RADEON_DAC_MACRO_CNTL, tmp);
+
+       udelay(2000);
+
+       if (RREG32(RADEON_DAC_CNTL) & RADEON_DAC_CMP_OUTPUT)
+               found = connector_status_connected;
+
+       /* restore the regs we used */
+       WREG32(RADEON_DAC_CNTL, dac_cntl);
+       WREG32(RADEON_DAC_MACRO_CNTL, dac_macro_cntl);
+       WREG32(RADEON_DAC_EXT_CNTL, dac_ext_cntl);
+       WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
+       WREG32_PLL(RADEON_VCLK_ECP_CNTL, vclk_ecp_cntl);
+
+       return found;
+}
+
+static const struct drm_encoder_helper_funcs radeon_legacy_primary_dac_helper_funcs = {
+       .dpms = radeon_legacy_primary_dac_dpms,
+       .mode_fixup = radeon_legacy_primary_dac_mode_fixup,
+       .prepare = radeon_legacy_primary_dac_prepare,
+       .mode_set = radeon_legacy_primary_dac_mode_set,
+       .commit = radeon_legacy_primary_dac_commit,
+       .detect = radeon_legacy_primary_dac_detect,
+};
+
+
+static const struct drm_encoder_funcs radeon_legacy_primary_dac_enc_funcs = {
+       .destroy = radeon_enc_destroy,
+};
+
+static bool radeon_legacy_tmds_int_mode_fixup(struct drm_encoder *encoder,
+                                             struct drm_display_mode *mode,
+                                             struct drm_display_mode *adjusted_mode)
+{
+
+       drm_mode_set_crtcinfo(adjusted_mode, 0);
+
+       return true;
+}
+
+static void radeon_legacy_tmds_int_dpms(struct drm_encoder *encoder, int mode)
+{
+       struct drm_device *dev = encoder->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       uint32_t fp_gen_cntl = RREG32(RADEON_FP_GEN_CNTL);
+       DRM_DEBUG("\n");
+
+       switch (mode) {
+       case DRM_MODE_DPMS_ON:
+               fp_gen_cntl |= (RADEON_FP_FPON | RADEON_FP_TMDS_EN);
+               break;
+       case DRM_MODE_DPMS_STANDBY:
+       case DRM_MODE_DPMS_SUSPEND:
+       case DRM_MODE_DPMS_OFF:
+               fp_gen_cntl &= ~(RADEON_FP_FPON | RADEON_FP_TMDS_EN);
+               break;
+       }
+
+       WREG32(RADEON_FP_GEN_CNTL, fp_gen_cntl);
+
+       if (rdev->is_atom_bios)
+               radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+       else
+               radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+}
+
+static void radeon_legacy_tmds_int_prepare(struct drm_encoder *encoder)
+{
+       struct radeon_device *rdev = encoder->dev->dev_private;
+
+       if (rdev->is_atom_bios)
+               radeon_atom_output_lock(encoder, true);
+       else
+               radeon_combios_output_lock(encoder, true);
+       radeon_legacy_tmds_int_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void radeon_legacy_tmds_int_commit(struct drm_encoder *encoder)
+{
+       struct radeon_device *rdev = encoder->dev->dev_private;
+
+       radeon_legacy_tmds_int_dpms(encoder, DRM_MODE_DPMS_ON);
+
+       if (rdev->is_atom_bios)
+               radeon_atom_output_lock(encoder, false);
+       else
+               radeon_combios_output_lock(encoder, false);
+}
+
+static void radeon_legacy_tmds_int_mode_set(struct drm_encoder *encoder,
+                                           struct drm_display_mode *mode,
+                                           struct drm_display_mode *adjusted_mode)
+{
+       struct drm_device *dev = encoder->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+       uint32_t tmp, tmds_pll_cntl, tmds_transmitter_cntl, fp_gen_cntl;
+       int i;
+
+       DRM_DEBUG("\n");
+
+       if (radeon_crtc->crtc_id == 0)
+               radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode);
+
+       tmp = tmds_pll_cntl = RREG32(RADEON_TMDS_PLL_CNTL);
+       tmp &= 0xfffff;
+       if (rdev->family == CHIP_RV280) {
+               /* bit 22 of TMDS_PLL_CNTL is read-back inverted */
+               tmp ^= (1 << 22);
+               tmds_pll_cntl ^= (1 << 22);
+       }
+
+       if (radeon_encoder->enc_priv) {
+               struct radeon_encoder_int_tmds *tmds = (struct radeon_encoder_int_tmds *)radeon_encoder->enc_priv;
+
+               for (i = 0; i < 4; i++) {
+                       if (tmds->tmds_pll[i].freq == 0)
+                               break;
+                       if ((uint32_t)(mode->clock / 10) < tmds->tmds_pll[i].freq) {
+                               tmp = tmds->tmds_pll[i].value;
+                               break;
+                       }
+               }
+       }
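+       /*
+        * The BIOS-provided TMDS PLL table is scanned for the first entry
+        * whose frequency bound exceeds the pixel clock (mode->clock / 10,
+        * i.e. in the 10 kHz units the table appears to use); a zero
+        * frequency terminates the table.
+        */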
+
+       if (ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV280)) {
+               if (tmp & 0xfff00000)
+                       tmds_pll_cntl = tmp;
+               else {
+                       tmds_pll_cntl &= 0xfff00000;
+                       tmds_pll_cntl |= tmp;
+               }
+       } else
+               tmds_pll_cntl = tmp;
+
+       tmds_transmitter_cntl = RREG32(RADEON_TMDS_TRANSMITTER_CNTL) &
+               ~(RADEON_TMDS_TRANSMITTER_PLLRST);
+
+       if (rdev->family == CHIP_R200 ||
+           rdev->family == CHIP_R100 ||
+           ASIC_IS_R300(rdev))
+               tmds_transmitter_cntl &= ~(RADEON_TMDS_TRANSMITTER_PLLEN);
+       else /* RV chips got this bit reversed */
+               tmds_transmitter_cntl |= RADEON_TMDS_TRANSMITTER_PLLEN;
+
+       fp_gen_cntl = (RREG32(RADEON_FP_GEN_CNTL) |
+                      (RADEON_FP_CRTC_DONT_SHADOW_VPAR |
+                       RADEON_FP_CRTC_DONT_SHADOW_HEND));
+
+       fp_gen_cntl &= ~(RADEON_FP_FPON | RADEON_FP_TMDS_EN);
+
+       if (1) /* FIXME rgbBits == 8 */
+               fp_gen_cntl |= RADEON_FP_PANEL_FORMAT;  /* 24 bit format */
+       else
+               fp_gen_cntl &= ~RADEON_FP_PANEL_FORMAT; /* 18 bit format */
+
+       if (radeon_crtc->crtc_id == 0) {
+               if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) {
+                       fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK;
+                       if (radeon_encoder->flags & RADEON_USE_RMX)
+                               fp_gen_cntl |= R200_FP_SOURCE_SEL_RMX;
+                       else
+                               fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC1;
+               } else
+                       fp_gen_cntl |= RADEON_FP_SEL_CRTC1;
+       } else {
+               if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) {
+                       fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK;
+                       fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC2;
+               } else
+                       fp_gen_cntl |= RADEON_FP_SEL_CRTC2;
+       }
+
+       WREG32(RADEON_TMDS_PLL_CNTL, tmds_pll_cntl);
+       WREG32(RADEON_TMDS_TRANSMITTER_CNTL, tmds_transmitter_cntl);
+       WREG32(RADEON_FP_GEN_CNTL, fp_gen_cntl);
+
+       if (rdev->is_atom_bios)
+               radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+       else
+               radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+}
+
+static const struct drm_encoder_helper_funcs radeon_legacy_tmds_int_helper_funcs = {
+       .dpms = radeon_legacy_tmds_int_dpms,
+       .mode_fixup = radeon_legacy_tmds_int_mode_fixup,
+       .prepare = radeon_legacy_tmds_int_prepare,
+       .mode_set = radeon_legacy_tmds_int_mode_set,
+       .commit = radeon_legacy_tmds_int_commit,
+};
+
+
+static const struct drm_encoder_funcs radeon_legacy_tmds_int_enc_funcs = {
+       .destroy = radeon_enc_destroy,
+};
+
+static bool radeon_legacy_tmds_ext_mode_fixup(struct drm_encoder *encoder,
+                                             struct drm_display_mode *mode,
+                                             struct drm_display_mode *adjusted_mode)
+{
+
+       drm_mode_set_crtcinfo(adjusted_mode, 0);
+
+       return true;
+}
+
+static void radeon_legacy_tmds_ext_dpms(struct drm_encoder *encoder, int mode)
+{
+       struct drm_device *dev = encoder->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       uint32_t fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
+       DRM_DEBUG("\n");
+
+       switch (mode) {
+       case DRM_MODE_DPMS_ON:
+               fp2_gen_cntl &= ~RADEON_FP2_BLANK_EN;
+               fp2_gen_cntl |= (RADEON_FP2_ON | RADEON_FP2_DVO_EN);
+               break;
+       case DRM_MODE_DPMS_STANDBY:
+       case DRM_MODE_DPMS_SUSPEND:
+       case DRM_MODE_DPMS_OFF:
+               fp2_gen_cntl |= RADEON_FP2_BLANK_EN;
+               fp2_gen_cntl &= ~(RADEON_FP2_ON | RADEON_FP2_DVO_EN);
+               break;
+       }
+
+       WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
+
+       if (rdev->is_atom_bios)
+               radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+       else
+               radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+}
+
+static void radeon_legacy_tmds_ext_prepare(struct drm_encoder *encoder)
+{
+       struct radeon_device *rdev = encoder->dev->dev_private;
+
+       if (rdev->is_atom_bios)
+               radeon_atom_output_lock(encoder, true);
+       else
+               radeon_combios_output_lock(encoder, true);
+       radeon_legacy_tmds_ext_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void radeon_legacy_tmds_ext_commit(struct drm_encoder *encoder)
+{
+       struct radeon_device *rdev = encoder->dev->dev_private;
+       radeon_legacy_tmds_ext_dpms(encoder, DRM_MODE_DPMS_ON);
+
+       if (rdev->is_atom_bios)
+               radeon_atom_output_lock(encoder, false);
+       else
+               radeon_combios_output_lock(encoder, false);
+}
+
+static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder,
+                                           struct drm_display_mode *mode,
+                                           struct drm_display_mode *adjusted_mode)
+{
+       struct drm_device *dev = encoder->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+       uint32_t fp2_gen_cntl;
+
+       DRM_DEBUG("\n");
+
+       if (radeon_crtc->crtc_id == 0)
+               radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode);
+
+       if (rdev->is_atom_bios) {
+               radeon_encoder->pixel_clock = adjusted_mode->clock;
+               atombios_external_tmds_setup(encoder, ATOM_ENABLE);
+               fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
+       } else {
+               fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
+
+               if (1) /*  FIXME rgbBits == 8 */
+                       fp2_gen_cntl |= RADEON_FP2_PANEL_FORMAT; /* 24 bit format, */
+               else
+                       fp2_gen_cntl &= ~RADEON_FP2_PANEL_FORMAT;/* 18 bit format, */
+
+               fp2_gen_cntl &= ~(RADEON_FP2_ON |
+                                 RADEON_FP2_DVO_EN |
+                                 RADEON_FP2_DVO_RATE_SEL_SDR);
+
+               /* XXX: these are oem specific */
+               if (ASIC_IS_R300(rdev)) {
+                       if ((dev->pdev->device == 0x4850) &&
+                           (dev->pdev->subsystem_vendor == 0x1028) &&
+                           (dev->pdev->subsystem_device == 0x2001)) /* Dell Inspiron 8600 */
+                               fp2_gen_cntl |= R300_FP2_DVO_CLOCK_MODE_SINGLE;
+                       else
+                               fp2_gen_cntl |= RADEON_FP2_PAD_FLOP_EN | R300_FP2_DVO_CLOCK_MODE_SINGLE;
+
+                       /*if (mode->clock > 165000)
+                         fp2_gen_cntl |= R300_FP2_DVO_DUAL_CHANNEL_EN;*/
+               }
+       }
+
+       if (radeon_crtc->crtc_id == 0) {
+               if ((rdev->family == CHIP_R200) || ASIC_IS_R300(rdev)) {
+                       fp2_gen_cntl &= ~R200_FP2_SOURCE_SEL_MASK;
+                       if (radeon_encoder->flags & RADEON_USE_RMX)
+                               fp2_gen_cntl |= R200_FP2_SOURCE_SEL_RMX;
+                       else
+                               fp2_gen_cntl |= R200_FP2_SOURCE_SEL_CRTC1;
+               } else
+                       fp2_gen_cntl &= ~RADEON_FP2_SRC_SEL_CRTC2;
+       } else {
+               if ((rdev->family == CHIP_R200) || ASIC_IS_R300(rdev)) {
+                       fp2_gen_cntl &= ~R200_FP2_SOURCE_SEL_MASK;
+                       fp2_gen_cntl |= R200_FP2_SOURCE_SEL_CRTC2;
+               } else
+                       fp2_gen_cntl |= RADEON_FP2_SRC_SEL_CRTC2;
+       }
+
+       WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
+
+       if (rdev->is_atom_bios)
+               radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+       else
+               radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+}
+
+static const struct drm_encoder_helper_funcs radeon_legacy_tmds_ext_helper_funcs = {
+       .dpms = radeon_legacy_tmds_ext_dpms,
+       .mode_fixup = radeon_legacy_tmds_ext_mode_fixup,
+       .prepare = radeon_legacy_tmds_ext_prepare,
+       .mode_set = radeon_legacy_tmds_ext_mode_set,
+       .commit = radeon_legacy_tmds_ext_commit,
+};
+
+
+static const struct drm_encoder_funcs radeon_legacy_tmds_ext_enc_funcs = {
+       .destroy = radeon_enc_destroy,
+};
+
+static bool radeon_legacy_tv_dac_mode_fixup(struct drm_encoder *encoder,
+                                           struct drm_display_mode *mode,
+                                           struct drm_display_mode *adjusted_mode)
+{
+       drm_mode_set_crtcinfo(adjusted_mode, 0);
+
+       return true;
+}
+
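+/*
+ * TV DAC encoder.  On R200 the TV DAC output is routed through the FP2
+ * path, so DPMS toggles FP2_GEN_CNTL; on other chips CRT2 and the TV DAC
+ * power-down/bandgap-sleep bits are toggled directly.
+ */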
+static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode)
+{
+       struct drm_device *dev = encoder->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       uint32_t fp2_gen_cntl = 0, crtc2_gen_cntl = 0, tv_dac_cntl = 0;
+       /* uint32_t tv_master_cntl = 0; */
+
+       DRM_DEBUG("\n");
+
+       if (rdev->family == CHIP_R200)
+               fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
+       else {
+               crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
+               /*  FIXME TV */
+               /* tv_master_cntl = RREG32(RADEON_TV_MASTER_CNTL); */
+               tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
+       }
+
+       switch (mode) {
+       case DRM_MODE_DPMS_ON:
+               if (rdev->family == CHIP_R200) {
+                       fp2_gen_cntl |= (RADEON_FP2_ON | RADEON_FP2_DVO_EN);
+               } else {
+                       crtc2_gen_cntl |= RADEON_CRTC2_CRT2_ON;
+                       /* tv_master_cntl |= RADEON_TV_ON; */
+                       if (rdev->family == CHIP_R420 ||
+                                       rdev->family == CHIP_R423 ||
+                                       rdev->family == CHIP_RV410)
+                               tv_dac_cntl &= ~(R420_TV_DAC_RDACPD |
+                                               R420_TV_DAC_GDACPD |
+                                               R420_TV_DAC_BDACPD |
+                                               RADEON_TV_DAC_BGSLEEP);
+                       else
+                               tv_dac_cntl &= ~(RADEON_TV_DAC_RDACPD |
+                                               RADEON_TV_DAC_GDACPD |
+                                               RADEON_TV_DAC_BDACPD |
+                                               RADEON_TV_DAC_BGSLEEP);
+               }
+               break;
+       case DRM_MODE_DPMS_STANDBY:
+       case DRM_MODE_DPMS_SUSPEND:
+       case DRM_MODE_DPMS_OFF:
+               if (rdev->family == CHIP_R200)
+                       fp2_gen_cntl &= ~(RADEON_FP2_ON | RADEON_FP2_DVO_EN);
+               else {
+                       crtc2_gen_cntl &= ~RADEON_CRTC2_CRT2_ON;
+                       /* tv_master_cntl &= ~RADEON_TV_ON; */
+                       if (rdev->family == CHIP_R420 ||
+                                       rdev->family == CHIP_R423 ||
+                                       rdev->family == CHIP_RV410)
+                               tv_dac_cntl |= (R420_TV_DAC_RDACPD |
+                                               R420_TV_DAC_GDACPD |
+                                               R420_TV_DAC_BDACPD |
+                                               RADEON_TV_DAC_BGSLEEP);
+                       else
+                               tv_dac_cntl |= (RADEON_TV_DAC_RDACPD |
+                                               RADEON_TV_DAC_GDACPD |
+                                               RADEON_TV_DAC_BDACPD |
+                                               RADEON_TV_DAC_BGSLEEP);
+               }
+               break;
+       }
+
+       if (rdev->family == CHIP_R200) {
+               WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
+       } else {
+               WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
+               /* WREG32(RADEON_TV_MASTER_CNTL, tv_master_cntl); */
+               WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
+       }
+
+       if (rdev->is_atom_bios)
+               radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+       else
+               radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+}
+
+static void radeon_legacy_tv_dac_prepare(struct drm_encoder *encoder)
+{
+       struct radeon_device *rdev = encoder->dev->dev_private;
+
+       if (rdev->is_atom_bios)
+               radeon_atom_output_lock(encoder, true);
+       else
+               radeon_combios_output_lock(encoder, true);
+       radeon_legacy_tv_dac_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void radeon_legacy_tv_dac_commit(struct drm_encoder *encoder)
+{
+       struct radeon_device *rdev = encoder->dev->dev_private;
+
+       radeon_legacy_tv_dac_dpms(encoder, DRM_MODE_DPMS_ON);
+
+       if (rdev->is_atom_bios)
+               radeon_atom_output_lock(encoder, false);
+       else
+               radeon_combios_output_lock(encoder, false);
+}
+
+static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder,
+               struct drm_display_mode *mode,
+               struct drm_display_mode *adjusted_mode)
+{
+       struct drm_device *dev = encoder->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+       uint32_t tv_dac_cntl, gpiopad_a = 0, dac2_cntl, disp_output_cntl = 0;
+       uint32_t disp_hw_debug = 0, fp2_gen_cntl = 0;
+
+       DRM_DEBUG("\n");
+
+       if (radeon_crtc->crtc_id == 0)
+               radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode);
+
+       if (rdev->family != CHIP_R200) {
+               tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
+               if (rdev->family == CHIP_R420 ||
+                               rdev->family == CHIP_R423 ||
+                               rdev->family == CHIP_RV410) {
+                       tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK |
+                                       RADEON_TV_DAC_BGADJ_MASK |
+                                       R420_TV_DAC_DACADJ_MASK |
+                                       R420_TV_DAC_RDACPD |
+                                       R420_TV_DAC_GDACPD |
+                                       R420_TV_DAC_BDACPD |
+                                       R420_TV_DAC_TVENABLE);
+               } else {
+                       tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK |
+                                       RADEON_TV_DAC_BGADJ_MASK |
+                                       RADEON_TV_DAC_DACADJ_MASK |
+                                       RADEON_TV_DAC_RDACPD |
+                                       RADEON_TV_DAC_GDACPD |
+                                       RADEON_TV_DAC_BDACPD);
+               }
+
+               /*  FIXME TV */
+               if (radeon_encoder->enc_priv) {
+                       struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv;
+                       tv_dac_cntl |= (RADEON_TV_DAC_NBLANK |
+                                       RADEON_TV_DAC_NHOLD |
+                                       RADEON_TV_DAC_STD_PS2 |
+                                       tv_dac->ps2_tvdac_adj);
+               } else
+                       tv_dac_cntl |= (RADEON_TV_DAC_NBLANK |
+                                       RADEON_TV_DAC_NHOLD |
+                                       RADEON_TV_DAC_STD_PS2);
+
+               WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
+       }
+
+       if (ASIC_IS_R300(rdev)) {
+               gpiopad_a = RREG32(RADEON_GPIOPAD_A) | 1;
+               disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL);
+       } else if (rdev->family == CHIP_R200)
+               fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
+       else
+               disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
+
+       dac2_cntl = RREG32(RADEON_DAC_CNTL2) | RADEON_DAC2_DAC2_CLK_SEL;
+
+       if (radeon_crtc->crtc_id == 0) {
+               if (ASIC_IS_R300(rdev)) {
+                       disp_output_cntl &= ~RADEON_DISP_TVDAC_SOURCE_MASK;
+                       disp_output_cntl |= RADEON_DISP_TVDAC_SOURCE_CRTC;
+               } else if (rdev->family == CHIP_R200) {
+                       fp2_gen_cntl &= ~(R200_FP2_SOURCE_SEL_MASK |
+                                         RADEON_FP2_DVO_RATE_SEL_SDR);
+               } else
+                       disp_hw_debug |= RADEON_CRT2_DISP1_SEL;
+       } else {
+               if (ASIC_IS_R300(rdev)) {
+                       disp_output_cntl &= ~RADEON_DISP_TVDAC_SOURCE_MASK;
+                       disp_output_cntl |= RADEON_DISP_TVDAC_SOURCE_CRTC2;
+               } else if (rdev->family == CHIP_R200) {
+                       fp2_gen_cntl &= ~(R200_FP2_SOURCE_SEL_MASK |
+                                         RADEON_FP2_DVO_RATE_SEL_SDR);
+                       fp2_gen_cntl |= R200_FP2_SOURCE_SEL_CRTC2;
+               } else
+                       disp_hw_debug &= ~RADEON_CRT2_DISP1_SEL;
+       }
+
+       WREG32(RADEON_DAC_CNTL2, dac2_cntl);
+
+       if (ASIC_IS_R300(rdev)) {
+               WREG32_P(RADEON_GPIOPAD_A, gpiopad_a, ~1);
+               WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl);
+       } else if (rdev->family == CHIP_R200)
+               WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
+       else
+               WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
+
+       if (rdev->is_atom_bios)
+               radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+       else
+               radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+
+}
+
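+/*
+ * TV DAC load detection: temporarily route CRT2 to the TV DAC, force a
+ * known output level, then sample the comparator bit in DAC_CNTL2.  All
+ * registers touched here are saved up front and restored before returning.
+ */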
+static enum drm_connector_status radeon_legacy_tv_dac_detect(struct drm_encoder *encoder,
+                                                            struct drm_connector *connector)
+{
+       struct drm_device *dev = encoder->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       uint32_t crtc2_gen_cntl, tv_dac_cntl, dac_cntl2, dac_ext_cntl;
+       uint32_t disp_hw_debug, disp_output_cntl, gpiopad_a, pixclks_cntl, tmp;
+       enum drm_connector_status found = connector_status_disconnected;
+       bool color = true;
+
+       /*  FIXME tv */
+
+       /* save the regs we need */
+       pixclks_cntl = RREG32_PLL(RADEON_PIXCLKS_CNTL);
+       gpiopad_a = ASIC_IS_R300(rdev) ? RREG32(RADEON_GPIOPAD_A) : 0;
+       disp_output_cntl = ASIC_IS_R300(rdev) ? RREG32(RADEON_DISP_OUTPUT_CNTL) : 0;
+       disp_hw_debug = ASIC_IS_R300(rdev) ? 0 : RREG32(RADEON_DISP_HW_DEBUG);
+       crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
+       tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
+       dac_ext_cntl = RREG32(RADEON_DAC_EXT_CNTL);
+       dac_cntl2 = RREG32(RADEON_DAC_CNTL2);
+
+       tmp = pixclks_cntl & ~(RADEON_PIX2CLK_ALWAYS_ONb
+                              | RADEON_PIX2CLK_DAC_ALWAYS_ONb);
+       WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
+
+       if (ASIC_IS_R300(rdev))
+               WREG32_P(RADEON_GPIOPAD_A, 1, ~1);
+
+       tmp = crtc2_gen_cntl & ~RADEON_CRTC2_PIX_WIDTH_MASK;
+       tmp |= RADEON_CRTC2_CRT2_ON |
+               (2 << RADEON_CRTC2_PIX_WIDTH_SHIFT);
+
+       WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
+
+       if (ASIC_IS_R300(rdev)) {
+               tmp = disp_output_cntl & ~RADEON_DISP_TVDAC_SOURCE_MASK;
+               tmp |= RADEON_DISP_TVDAC_SOURCE_CRTC2;
+               WREG32(RADEON_DISP_OUTPUT_CNTL, tmp);
+       } else {
+               tmp = disp_hw_debug & ~RADEON_CRT2_DISP1_SEL;
+               WREG32(RADEON_DISP_HW_DEBUG, tmp);
+       }
+
+       tmp = RADEON_TV_DAC_NBLANK |
+               RADEON_TV_DAC_NHOLD |
+               RADEON_TV_MONITOR_DETECT_EN |
+               RADEON_TV_DAC_STD_PS2;
+
+       WREG32(RADEON_TV_DAC_CNTL, tmp);
+
+       tmp = RADEON_DAC2_FORCE_BLANK_OFF_EN |
+               RADEON_DAC2_FORCE_DATA_EN;
+
+       if (color)
+               tmp |= RADEON_DAC_FORCE_DATA_SEL_RGB;
+       else
+               tmp |= RADEON_DAC_FORCE_DATA_SEL_G;
+
+       if (ASIC_IS_R300(rdev))
+               tmp |= (0x1b6 << RADEON_DAC_FORCE_DATA_SHIFT);
+       else
+               tmp |= (0x180 << RADEON_DAC_FORCE_DATA_SHIFT);
+
+       WREG32(RADEON_DAC_EXT_CNTL, tmp);
+
+       tmp = dac_cntl2 | RADEON_DAC2_DAC2_CLK_SEL | RADEON_DAC2_CMP_EN;
+       WREG32(RADEON_DAC_CNTL2, tmp);
+
+       mdelay(10);
+
+       if (ASIC_IS_R300(rdev)) {
+               if (RREG32(RADEON_DAC_CNTL2) & RADEON_DAC2_CMP_OUT_B)
+                       found = connector_status_connected;
+       } else {
+               if (RREG32(RADEON_DAC_CNTL2) & RADEON_DAC2_CMP_OUTPUT)
+                       found = connector_status_connected;
+       }
+
+       /* restore regs we used */
+       WREG32(RADEON_DAC_CNTL2, dac_cntl2);
+       WREG32(RADEON_DAC_EXT_CNTL, dac_ext_cntl);
+       WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
+       WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
+
+       if (ASIC_IS_R300(rdev)) {
+               WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl);
+               WREG32_P(RADEON_GPIOPAD_A, gpiopad_a, ~1);
+       } else {
+               WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
+       }
+       WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
+
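+       /* TV-out support is not implemented yet (see the FIXME above), so
+        * ignore the comparator result and always report disconnected. */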
+       /* return found; */
+       return connector_status_disconnected;
+
+}
+
+static const struct drm_encoder_helper_funcs radeon_legacy_tv_dac_helper_funcs = {
+       .dpms = radeon_legacy_tv_dac_dpms,
+       .mode_fixup = radeon_legacy_tv_dac_mode_fixup,
+       .prepare = radeon_legacy_tv_dac_prepare,
+       .mode_set = radeon_legacy_tv_dac_mode_set,
+       .commit = radeon_legacy_tv_dac_commit,
+       .detect = radeon_legacy_tv_dac_detect,
+};
+
+
+static const struct drm_encoder_funcs radeon_legacy_tv_dac_enc_funcs = {
+       .destroy = radeon_enc_destroy,
+};
+
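+/*
+ * Create a legacy encoder for the given BIOS encoder id.  If an encoder
+ * with this id already exists only its supported-device mask is extended;
+ * otherwise a new encoder is allocated and hooked up to the matching
+ * funcs/helper tables and BIOS-provided private data.
+ */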
+void
+radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device)
+{
+       struct radeon_device *rdev = dev->dev_private;
+       struct drm_encoder *encoder;
+       struct radeon_encoder *radeon_encoder;
+
+       /* see if we already added it */
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+               radeon_encoder = to_radeon_encoder(encoder);
+               if (radeon_encoder->encoder_id == encoder_id) {
+                       radeon_encoder->devices |= supported_device;
+                       return;
+               }
+       }
+
+       /* add a new one */
+       radeon_encoder = kzalloc(sizeof(struct radeon_encoder), GFP_KERNEL);
+       if (!radeon_encoder)
+               return;
+
+       encoder = &radeon_encoder->base;
+       encoder->possible_crtcs = 0x3;
+       encoder->possible_clones = 0;
+
+       radeon_encoder->enc_priv = NULL;
+
+       radeon_encoder->encoder_id = encoder_id;
+       radeon_encoder->devices = supported_device;
+
+       switch (radeon_encoder->encoder_id) {
+       case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+               drm_encoder_init(dev, encoder, &radeon_legacy_lvds_enc_funcs, DRM_MODE_ENCODER_LVDS);
+               drm_encoder_helper_add(encoder, &radeon_legacy_lvds_helper_funcs);
+               if (rdev->is_atom_bios)
+                       radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
+               else
+                       radeon_encoder->enc_priv = radeon_combios_get_lvds_info(radeon_encoder);
+               radeon_encoder->rmx_type = RMX_FULL;
+               break;
+       case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+               drm_encoder_init(dev, encoder, &radeon_legacy_tmds_int_enc_funcs, DRM_MODE_ENCODER_TMDS);
+               drm_encoder_helper_add(encoder, &radeon_legacy_tmds_int_helper_funcs);
+               if (rdev->is_atom_bios)
+                       radeon_encoder->enc_priv = radeon_atombios_get_tmds_info(radeon_encoder);
+               else
+                       radeon_encoder->enc_priv = radeon_combios_get_tmds_info(radeon_encoder);
+               break;
+       case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+               drm_encoder_init(dev, encoder, &radeon_legacy_primary_dac_enc_funcs, DRM_MODE_ENCODER_DAC);
+               drm_encoder_helper_add(encoder, &radeon_legacy_primary_dac_helper_funcs);
+               if (rdev->is_atom_bios)
+                       radeon_encoder->enc_priv = radeon_atombios_get_primary_dac_info(radeon_encoder);
+               else
+                       radeon_encoder->enc_priv = radeon_combios_get_primary_dac_info(radeon_encoder);
+               break;
+       case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+               drm_encoder_init(dev, encoder, &radeon_legacy_tv_dac_enc_funcs, DRM_MODE_ENCODER_TVDAC);
+               drm_encoder_helper_add(encoder, &radeon_legacy_tv_dac_helper_funcs);
+               if (rdev->is_atom_bios)
+                       radeon_encoder->enc_priv = radeon_atombios_get_tv_dac_info(radeon_encoder);
+               else
+                       radeon_encoder->enc_priv = radeon_combios_get_tv_dac_info(radeon_encoder);
+               break;
+       case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+               drm_encoder_init(dev, encoder, &radeon_legacy_tmds_ext_enc_funcs, DRM_MODE_ENCODER_TMDS);
+               drm_encoder_helper_add(encoder, &radeon_legacy_tmds_ext_helper_funcs);
+               if (!rdev->is_atom_bios)
+                       radeon_combios_get_ext_tmds_info(radeon_encoder);
+               break;
+       }
+}
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
new file mode 100644 (file)
index 0000000..9173b68
--- /dev/null
@@ -0,0 +1,398 @@
+/*
+ * Copyright 2000 ATI Technologies Inc., Markham, Ontario, and
+ *                VA Linux Systems Inc., Fremont, California.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Original Authors:
+ *   Kevin E. Martin, Rickard E. Faith, Alan Hourihane
+ *
+ * Kernel port Author: Dave Airlie
+ */
+
+#ifndef RADEON_MODE_H
+#define RADEON_MODE_H
+
+#include <drm_crtc.h>
+#include <drm_mode.h>
+#include <drm_edid.h>
+#include <linux/i2c.h>
+#include <linux/i2c-id.h>
+#include <linux/i2c-algo-bit.h>
+
+#define to_radeon_crtc(x) container_of(x, struct radeon_crtc, base)
+#define to_radeon_connector(x) container_of(x, struct radeon_connector, base)
+#define to_radeon_encoder(x) container_of(x, struct radeon_encoder, base)
+#define to_radeon_framebuffer(x) container_of(x, struct radeon_framebuffer, base)
+
+enum radeon_connector_type {
+       CONNECTOR_NONE,
+       CONNECTOR_VGA,
+       CONNECTOR_DVI_I,
+       CONNECTOR_DVI_D,
+       CONNECTOR_DVI_A,
+       CONNECTOR_STV,
+       CONNECTOR_CTV,
+       CONNECTOR_LVDS,
+       CONNECTOR_DIGITAL,
+       CONNECTOR_SCART,
+       CONNECTOR_HDMI_TYPE_A,
+       CONNECTOR_HDMI_TYPE_B,
+       CONNECTOR_0XC,
+       CONNECTOR_0XD,
+       CONNECTOR_DIN,
+       CONNECTOR_DISPLAY_PORT,
+       CONNECTOR_UNSUPPORTED
+};
+
+enum radeon_dvi_type {
+       DVI_AUTO,
+       DVI_DIGITAL,
+       DVI_ANALOG
+};
+
+enum radeon_rmx_type {
+       RMX_OFF,
+       RMX_FULL,
+       RMX_CENTER,
+       RMX_ASPECT
+};
+
+enum radeon_tv_std {
+       TV_STD_NTSC,
+       TV_STD_PAL,
+       TV_STD_PAL_M,
+       TV_STD_PAL_60,
+       TV_STD_NTSC_J,
+       TV_STD_SCART_PAL,
+       TV_STD_SECAM,
+       TV_STD_PAL_CN,
+};
+
+struct radeon_i2c_bus_rec {
+       bool valid;
+       uint32_t mask_clk_reg;
+       uint32_t mask_data_reg;
+       uint32_t a_clk_reg;
+       uint32_t a_data_reg;
+       uint32_t put_clk_reg;
+       uint32_t put_data_reg;
+       uint32_t get_clk_reg;
+       uint32_t get_data_reg;
+       uint32_t mask_clk_mask;
+       uint32_t mask_data_mask;
+       uint32_t put_clk_mask;
+       uint32_t put_data_mask;
+       uint32_t get_clk_mask;
+       uint32_t get_data_mask;
+       uint32_t a_clk_mask;
+       uint32_t a_data_mask;
+};
+
+struct radeon_tmds_pll {
+       uint32_t freq;
+       uint32_t value;
+};
+
+#define RADEON_MAX_BIOS_CONNECTOR 16
+
+#define RADEON_PLL_USE_BIOS_DIVS        (1 << 0)
+#define RADEON_PLL_NO_ODD_POST_DIV      (1 << 1)
+#define RADEON_PLL_USE_REF_DIV          (1 << 2)
+#define RADEON_PLL_LEGACY               (1 << 3)
+#define RADEON_PLL_PREFER_LOW_REF_DIV   (1 << 4)
+#define RADEON_PLL_PREFER_HIGH_REF_DIV  (1 << 5)
+#define RADEON_PLL_PREFER_LOW_FB_DIV    (1 << 6)
+#define RADEON_PLL_PREFER_HIGH_FB_DIV   (1 << 7)
+#define RADEON_PLL_PREFER_LOW_POST_DIV  (1 << 8)
+#define RADEON_PLL_PREFER_HIGH_POST_DIV (1 << 9)
+#define RADEON_PLL_USE_FRAC_FB_DIV      (1 << 10)
+
+struct radeon_pll {
+       uint16_t reference_freq;
+       uint16_t reference_div;
+       uint32_t pll_in_min;
+       uint32_t pll_in_max;
+       uint32_t pll_out_min;
+       uint32_t pll_out_max;
+       uint16_t xclk;
+
+       uint32_t min_ref_div;
+       uint32_t max_ref_div;
+       uint32_t min_post_div;
+       uint32_t max_post_div;
+       uint32_t min_feedback_div;
+       uint32_t max_feedback_div;
+       uint32_t min_frac_feedback_div;
+       uint32_t max_frac_feedback_div;
+       uint32_t best_vco;
+};
+
+struct radeon_i2c_chan {
+       struct drm_device *dev;
+       struct i2c_adapter adapter;
+       struct i2c_algo_bit_data algo;
+       struct radeon_i2c_bus_rec rec;
+};
+
+/* mostly for macs, but really any system without connector tables */
+enum radeon_connector_table {
+       CT_NONE,
+       CT_GENERIC,
+       CT_IBOOK,
+       CT_POWERBOOK_EXTERNAL,
+       CT_POWERBOOK_INTERNAL,
+       CT_POWERBOOK_VGA,
+       CT_MINI_EXTERNAL,
+       CT_MINI_INTERNAL,
+       CT_IMAC_G5_ISIGHT,
+       CT_EMAC,
+};
+
+struct radeon_mode_info {
+       struct atom_context *atom_context;
+       enum radeon_connector_table connector_table;
+       bool mode_config_initialized;
+};
+
+struct radeon_crtc {
+       struct drm_crtc base;
+       int crtc_id;
+       u16 lut_r[256], lut_g[256], lut_b[256];
+       bool enabled;
+       bool can_tile;
+       uint32_t crtc_offset;
+       struct radeon_framebuffer *fbdev_fb;
+       struct drm_mode_set mode_set;
+       struct drm_gem_object *cursor_bo;
+       uint64_t cursor_addr;
+       int cursor_width;
+       int cursor_height;
+};
+
+#define RADEON_USE_RMX 1
+
+struct radeon_native_mode {
+       /* preferred mode */
+       uint32_t panel_xres, panel_yres;
+       uint32_t hoverplus, hsync_width;
+       uint32_t hblank;
+       uint32_t voverplus, vsync_width;
+       uint32_t vblank;
+       uint32_t dotclock;
+       uint32_t flags;
+};
+
+struct radeon_encoder_primary_dac {
+       /* legacy primary dac */
+       uint32_t ps2_pdac_adj;
+};
+
+struct radeon_encoder_lvds {
+       /* legacy lvds */
+       uint16_t panel_vcc_delay;
+       uint8_t  panel_pwr_delay;
+       uint8_t  panel_digon_delay;
+       uint8_t  panel_blon_delay;
+       uint16_t panel_ref_divider;
+       uint8_t  panel_post_divider;
+       uint16_t panel_fb_divider;
+       bool     use_bios_dividers;
+       uint32_t lvds_gen_cntl;
+       /* panel mode */
+       struct radeon_native_mode native_mode;
+};
+
+struct radeon_encoder_tv_dac {
+       /* legacy tv dac */
+       uint32_t ps2_tvdac_adj;
+       uint32_t ntsc_tvdac_adj;
+       uint32_t pal_tvdac_adj;
+
+       enum radeon_tv_std tv_std;
+};
+
+struct radeon_encoder_int_tmds {
+       /* legacy int tmds */
+       struct radeon_tmds_pll tmds_pll[4];
+};
+
+struct radeon_encoder_atom_dig {
+       /* atom dig */
+       bool coherent_mode;
+       int dig_block;
+       /* atom lvds */
+       uint32_t lvds_misc;
+       uint16_t panel_pwr_delay;
+       /* panel mode */
+       struct radeon_native_mode native_mode;
+};
+
+struct radeon_encoder {
+       struct drm_encoder base;
+       uint32_t encoder_id;
+       uint32_t devices;
+       uint32_t flags;
+       uint32_t pixel_clock;
+       enum radeon_rmx_type rmx_type;
+       struct radeon_native_mode native_mode;
+       void *enc_priv;
+};
+
+struct radeon_connector_atom_dig {
+       uint32_t igp_lane_info;
+       bool linkb;
+};
+
+struct radeon_connector {
+       struct drm_connector base;
+       uint32_t connector_id;
+       uint32_t devices;
+       struct radeon_i2c_chan *ddc_bus;
+       int use_digital;
+       void *con_priv;
+};
+
+struct radeon_framebuffer {
+       struct drm_framebuffer base;
+       struct drm_gem_object *obj;
+};
+
+extern struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
+                                                struct radeon_i2c_bus_rec *rec,
+                                                const char *name);
+extern void radeon_i2c_destroy(struct radeon_i2c_chan *i2c);
+extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector);
+extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
+
+extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector);
+
+extern void radeon_compute_pll(struct radeon_pll *pll,
+                              uint64_t freq,
+                              uint32_t *dot_clock_p,
+                              uint32_t *fb_div_p,
+                              uint32_t *frac_fb_div_p,
+                              uint32_t *ref_div_p,
+                              uint32_t *post_div_p,
+                              int flags);
+
+struct drm_encoder *radeon_encoder_legacy_lvds_add(struct drm_device *dev, int bios_index);
+struct drm_encoder *radeon_encoder_legacy_primary_dac_add(struct drm_device *dev, int bios_index, int with_tv);
+struct drm_encoder *radeon_encoder_legacy_tv_dac_add(struct drm_device *dev, int bios_index, int with_tv);
+struct drm_encoder *radeon_encoder_legacy_tmds_int_add(struct drm_device *dev, int bios_index);
+struct drm_encoder *radeon_encoder_legacy_tmds_ext_add(struct drm_device *dev, int bios_index);
+extern void atombios_external_tmds_setup(struct drm_encoder *encoder, int action);
+extern int atombios_get_encoder_mode(struct drm_encoder *encoder);
+
+extern void radeon_crtc_load_lut(struct drm_crtc *crtc);
+extern int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+                                  struct drm_framebuffer *old_fb);
+extern int atombios_crtc_mode_set(struct drm_crtc *crtc,
+                                  struct drm_display_mode *mode,
+                                  struct drm_display_mode *adjusted_mode,
+                                  int x, int y,
+                                  struct drm_framebuffer *old_fb);
+extern void atombios_crtc_dpms(struct drm_crtc *crtc, int mode);
+
+extern int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+                                struct drm_framebuffer *old_fb);
+extern void radeon_legacy_atom_set_surface(struct drm_crtc *crtc);
+
+extern int radeon_crtc_cursor_set(struct drm_crtc *crtc,
+                                 struct drm_file *file_priv,
+                                 uint32_t handle,
+                                 uint32_t width,
+                                 uint32_t height);
+extern int radeon_crtc_cursor_move(struct drm_crtc *crtc,
+                                  int x, int y);
+
+extern bool radeon_atom_get_clock_info(struct drm_device *dev);
+extern bool radeon_combios_get_clock_info(struct drm_device *dev);
+extern struct radeon_encoder_atom_dig *
+radeon_atombios_get_lvds_info(struct radeon_encoder *encoder);
+extern struct radeon_encoder_int_tmds *
+radeon_atombios_get_tmds_info(struct radeon_encoder *encoder);
+extern struct radeon_encoder_primary_dac *
+radeon_atombios_get_primary_dac_info(struct radeon_encoder *encoder);
+extern struct radeon_encoder_tv_dac *
+radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder);
+extern struct radeon_encoder_lvds *
+radeon_combios_get_lvds_info(struct radeon_encoder *encoder);
+extern struct radeon_encoder_int_tmds *
+radeon_combios_get_tmds_info(struct radeon_encoder *encoder);
+extern void radeon_combios_get_ext_tmds_info(struct radeon_encoder *encoder);
+extern struct radeon_encoder_tv_dac *
+radeon_combios_get_tv_dac_info(struct radeon_encoder *encoder);
+extern struct radeon_encoder_primary_dac *
+radeon_combios_get_primary_dac_info(struct radeon_encoder *encoder);
+extern void radeon_combios_output_lock(struct drm_encoder *encoder, bool lock);
+extern void radeon_combios_initialize_bios_scratch_regs(struct drm_device *dev);
+extern void radeon_atom_output_lock(struct drm_encoder *encoder, bool lock);
+extern void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev);
+extern void
+radeon_atombios_encoder_crtc_scratch_regs(struct drm_encoder *encoder, int crtc);
+extern void
+radeon_atombios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on);
+extern void
+radeon_combios_encoder_crtc_scratch_regs(struct drm_encoder *encoder, int crtc);
+extern void
+radeon_combios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on);
+extern void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+                                    u16 blue, int regno);
+struct drm_framebuffer *radeon_framebuffer_create(struct drm_device *dev,
+                                                 struct drm_mode_fb_cmd *mode_cmd,
+                                                 struct drm_gem_object *obj);
+
+int radeonfb_probe(struct drm_device *dev);
+
+int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
+bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev);
+bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev);
+void radeon_atombios_init_crtc(struct drm_device *dev,
+                              struct radeon_crtc *radeon_crtc);
+void radeon_legacy_init_crtc(struct drm_device *dev,
+                            struct radeon_crtc *radeon_crtc);
+void radeon_i2c_do_lock(struct radeon_connector *radeon_connector, int lock_state);
+
+void radeon_get_clock_info(struct drm_device *dev);
+
+extern bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev);
+extern bool radeon_get_atom_connector_info_from_supported_devices_table(struct drm_device *dev);
+
+void radeon_rmx_mode_fixup(struct drm_encoder *encoder,
+                          struct drm_display_mode *mode,
+                          struct drm_display_mode *adjusted_mode);
+void radeon_enc_destroy(struct drm_encoder *encoder);
+void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj);
+void radeon_combios_asic_init(struct drm_device *dev);
+extern int radeon_static_clocks_init(struct drm_device *dev);
+void radeon_init_disp_bw_legacy(struct drm_device *dev,
+                               struct drm_display_mode *mode1,
+                               uint32_t pixel_bytes1,
+                               struct drm_display_mode *mode2,
+                               uint32_t pixel_bytes2);
+void radeon_init_disp_bw_avivo(struct drm_device *dev,
+                              struct drm_display_mode *mode1,
+                              uint32_t pixel_bytes1,
+                              struct drm_display_mode *mode2,
+                              uint32_t pixel_bytes2);
+void radeon_init_disp_bandwidth(struct drm_device *dev);
+
+#endif
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
new file mode 100644 (file)
index 0000000..983e8df
--- /dev/null
@@ -0,0 +1,511 @@
+/*
+ * Copyright 2009 Jerome Glisse.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ *    Jerome Glisse <glisse@freedesktop.org>
+ *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
+ *    Dave Airlie
+ */
+#include <linux/list.h>
+#include <drm/drmP.h>
+#include "radeon_drm.h"
+#include "radeon.h"
+
+struct radeon_object {
+       struct ttm_buffer_object        tobj;
+       struct list_head                list;
+       struct radeon_device            *rdev;
+       struct drm_gem_object           *gobj;
+       struct ttm_bo_kmap_obj          kmap;
+       unsigned                        pin_count;
+       uint64_t                        gpu_addr;
+       void                            *kptr;
+       bool                            is_iomem;
+};
+
+int radeon_ttm_init(struct radeon_device *rdev);
+void radeon_ttm_fini(struct radeon_device *rdev);
+
+/*
+ * To exclude concurrent BO access we rely on bo_reserve exclusion, as all
+ * functions below call it before touching a buffer object.
+ */
+
+static int radeon_object_reserve(struct radeon_object *robj, bool interruptible)
+{
+       return ttm_bo_reserve(&robj->tobj, interruptible, false, false, 0);
+}
+
+static void radeon_object_unreserve(struct radeon_object *robj)
+{
+       ttm_bo_unreserve(&robj->tobj);
+}
+
+static void radeon_ttm_object_object_destroy(struct ttm_buffer_object *tobj)
+{
+       struct radeon_object *robj;
+
+       robj = container_of(tobj, struct radeon_object, tobj);
+       list_del_init(&robj->list);
+       kfree(robj);
+}
+
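+/*
+ * Derive the GPU-visible address of a BO from its TTM placement: the
+ * node offset in pages shifted up, plus the VRAM or GTT aperture base.
+ * ~0ULL marks a buffer without a valid placement.
+ */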
+static inline void radeon_object_gpu_addr(struct radeon_object *robj)
+{
+       /* Default gpu address */
+       robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
+       if (robj->tobj.mem.mm_node == NULL) {
+               return;
+       }
+       robj->gpu_addr = ((u64)robj->tobj.mem.mm_node->start) << PAGE_SHIFT;
+       switch (robj->tobj.mem.mem_type) {
+       case TTM_PL_VRAM:
+               robj->gpu_addr += (u64)robj->rdev->mc.vram_location;
+               break;
+       case TTM_PL_TT:
+               robj->gpu_addr += (u64)robj->rdev->mc.gtt_location;
+               break;
+       default:
+               DRM_ERROR("Unknown placement %d\n", robj->tobj.mem.mem_type);
+               robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
+               return;
+       }
+}
+
+static inline uint32_t radeon_object_flags_from_domain(uint32_t domain)
+{
+       uint32_t flags = 0;
+       if (domain & RADEON_GEM_DOMAIN_VRAM) {
+               flags |= TTM_PL_FLAG_VRAM;
+       }
+       if (domain & RADEON_GEM_DOMAIN_GTT) {
+               flags |= TTM_PL_FLAG_TT;
+       }
+       if (domain & RADEON_GEM_DOMAIN_CPU) {
+               flags |= TTM_PL_FLAG_SYSTEM;
+       }
+       if (!flags) {
+               flags |= TTM_PL_FLAG_SYSTEM;
+       }
+       return flags;
+}
+
+int radeon_object_create(struct radeon_device *rdev,
+                        struct drm_gem_object *gobj,
+                        unsigned long size,
+                        bool kernel,
+                        uint32_t domain,
+                        bool interruptible,
+                        struct radeon_object **robj_ptr)
+{
+       struct radeon_object *robj;
+       enum ttm_bo_type type;
+       uint32_t flags;
+       int r;
+
+       if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
+               rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
+       }
+       if (kernel) {
+               type = ttm_bo_type_kernel;
+       } else {
+               type = ttm_bo_type_device;
+       }
+       *robj_ptr = NULL;
+       robj = kzalloc(sizeof(struct radeon_object), GFP_KERNEL);
+       if (robj == NULL) {
+               return -ENOMEM;
+       }
+       robj->rdev = rdev;
+       robj->gobj = gobj;
+       INIT_LIST_HEAD(&robj->list);
+
+       flags = radeon_object_flags_from_domain(domain);
+       r = ttm_buffer_object_init(&rdev->mman.bdev, &robj->tobj, size, type, flags,
+                                  0, 0, false, NULL, size,
+                                  &radeon_ttm_object_object_destroy);
+       if (unlikely(r != 0)) {
+               /* TTM has already called radeon_ttm_object_object_destroy() on error */
+               DRM_ERROR("Failed to allocate TTM object (size %lu, flags 0x%08X)\n",
+                         size, flags);
+               return r;
+       }
+       *robj_ptr = robj;
+       if (gobj) {
+               list_add_tail(&robj->list, &rdev->gem.objects);
+       }
+       return 0;
+}
+
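+/*
+ * Map a BO into kernel address space.  The mapping is cached in
+ * robj->kptr, so repeated calls simply return the existing pointer.
+ */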
+int radeon_object_kmap(struct radeon_object *robj, void **ptr)
+{
+       int r;
+
+       spin_lock(&robj->tobj.lock);
+       if (robj->kptr) {
+               if (ptr) {
+                       *ptr = robj->kptr;
+               }
+               spin_unlock(&robj->tobj.lock);
+               return 0;
+       }
+       spin_unlock(&robj->tobj.lock);
+       r = ttm_bo_kmap(&robj->tobj, 0, robj->tobj.num_pages, &robj->kmap);
+       if (r) {
+               return r;
+       }
+       spin_lock(&robj->tobj.lock);
+       robj->kptr = ttm_kmap_obj_virtual(&robj->kmap, &robj->is_iomem);
+       spin_unlock(&robj->tobj.lock);
+       if (ptr) {
+               *ptr = robj->kptr;
+       }
+       return 0;
+}
+
+void radeon_object_kunmap(struct radeon_object *robj)
+{
+       spin_lock(&robj->tobj.lock);
+       if (robj->kptr == NULL) {
+               spin_unlock(&robj->tobj.lock);
+               return;
+       }
+       robj->kptr = NULL;
+       spin_unlock(&robj->tobj.lock);
+       ttm_bo_kunmap(&robj->kmap);
+}
+
+void radeon_object_unref(struct radeon_object **robj)
+{
+       struct ttm_buffer_object *tobj;
+
+       if ((*robj) == NULL) {
+               return;
+       }
+       tobj = &((*robj)->tobj);
+       ttm_bo_unref(&tobj);
+       if (tobj == NULL) {
+               *robj = NULL;
+       }
+}
+
+int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset)
+{
+       *offset = robj->tobj.addr_space_offset;
+       return 0;
+}
+
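+/*
+ * Pin a BO in the requested domain by validating it with
+ * TTM_PL_FLAG_NO_EVICT set.  Pinning is reference counted; only the
+ * first pin actually migrates the buffer.  The fbdev BO is unmapped and
+ * remapped around the move so the console keeps a valid CPU pointer.
+ */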
+int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
+                     uint64_t *gpu_addr)
+{
+       uint32_t flags;
+       uint32_t tmp;
+       void *fbptr;
+       int r;
+
+       flags = radeon_object_flags_from_domain(domain);
+       spin_lock(&robj->tobj.lock);
+       if (robj->pin_count) {
+               robj->pin_count++;
+               if (gpu_addr != NULL) {
+                       *gpu_addr = robj->gpu_addr;
+               }
+               spin_unlock(&robj->tobj.lock);
+               return 0;
+       }
+       spin_unlock(&robj->tobj.lock);
+       r = radeon_object_reserve(robj, false);
+       if (unlikely(r != 0)) {
+               DRM_ERROR("radeon: failed to reserve object for pinning it.\n");
+               return r;
+       }
+       if (robj->rdev->fbdev_robj == robj) {
+               mutex_lock(&robj->rdev->fbdev_info->lock);
+               radeon_object_kunmap(robj);
+       }
+       tmp = robj->tobj.mem.placement;
+       ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM);
+       robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING;
+       r = ttm_buffer_object_validate(&robj->tobj,
+                                      robj->tobj.proposed_placement,
+                                      false, false);
+       radeon_object_gpu_addr(robj);
+       if (gpu_addr != NULL) {
+               *gpu_addr = robj->gpu_addr;
+       }
+       robj->pin_count = 1;
+       if (unlikely(r != 0)) {
+               DRM_ERROR("radeon: failed to pin object.\n");
+       }
+       radeon_object_unreserve(robj);
+       if (robj->rdev->fbdev_robj == robj) {
+               if (!r) {
+                       r = radeon_object_kmap(robj, &fbptr);
+               }
+               if (!r) {
+                       robj->rdev->fbdev_info->screen_base = fbptr;
+                       robj->rdev->fbdev_info->fix.smem_start = (unsigned long)fbptr;
+               }
+               mutex_unlock(&robj->rdev->fbdev_info->lock);
+       }
+       return r;
+}
+
+void radeon_object_unpin(struct radeon_object *robj)
+{
+       uint32_t flags;
+       void *fbptr;
+       int r;
+
+       spin_lock(&robj->tobj.lock);
+       if (!robj->pin_count) {
+               spin_unlock(&robj->tobj.lock);
+               printk(KERN_WARNING "Unpin not necessary for %p!\n", robj);
+               return;
+       }
+       robj->pin_count--;
+       if (robj->pin_count) {
+               spin_unlock(&robj->tobj.lock);
+               return;
+       }
+       spin_unlock(&robj->tobj.lock);
+       r = radeon_object_reserve(robj, false);
+       if (unlikely(r != 0)) {
+               DRM_ERROR("radeon: failed to reserve object for unpinning it.\n");
+               return;
+       }
+       if (robj->rdev->fbdev_robj == robj) {
+               mutex_lock(&robj->rdev->fbdev_info->lock);
+               radeon_object_kunmap(robj);
+       }
+       flags = robj->tobj.mem.placement;
+       robj->tobj.proposed_placement = flags & ~TTM_PL_FLAG_NO_EVICT;
+       r = ttm_buffer_object_validate(&robj->tobj,
+                                      robj->tobj.proposed_placement,
+                                      false, false);
+       if (unlikely(r != 0)) {
+               DRM_ERROR("radeon: failed to unpin buffer.\n");
+       }
+       radeon_object_unreserve(robj);
+       if (robj->rdev->fbdev_robj == robj) {
+               if (!r) {
+                       r = radeon_object_kmap(robj, &fbptr);
+               }
+               if (!r) {
+                       robj->rdev->fbdev_info->screen_base = fbptr;
+                       robj->rdev->fbdev_info->fix.smem_start = (unsigned long)fbptr;
+               }
+               mutex_unlock(&robj->rdev->fbdev_info->lock);
+       }
+}
+
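+/*
+ * Block until any fence attached to the BO has signaled, i.e. until the
+ * GPU has finished using the buffer.
+ */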
+int radeon_object_wait(struct radeon_object *robj)
+{
+       int r = 0;
+
+       /* FIXME: should use block reservation instead */
+       r = radeon_object_reserve(robj, true);
+       if (unlikely(r != 0)) {
+               DRM_ERROR("radeon: failed to reserve object for waiting.\n");
+               return r;
+       }
+       spin_lock(&robj->tobj.lock);
+       if (robj->tobj.sync_obj) {
+               r = ttm_bo_wait(&robj->tobj, true, false, false);
+       }
+       spin_unlock(&robj->tobj.lock);
+       radeon_object_unreserve(robj);
+       return r;
+}
+
+int radeon_object_evict_vram(struct radeon_device *rdev)
+{
+       if (rdev->flags & RADEON_IS_IGP) {
+               /* Useless to evict on IGP chips */
+               return 0;
+       }
+       return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
+}
+
+void radeon_object_force_delete(struct radeon_device *rdev)
+{
+       struct radeon_object *robj, *n;
+       struct drm_gem_object *gobj;
+
+       if (list_empty(&rdev->gem.objects)) {
+               return;
+       }
+       DRM_ERROR("Userspace still has active objects!\n");
+       list_for_each_entry_safe(robj, n, &rdev->gem.objects, list) {
+               mutex_lock(&rdev->ddev->struct_mutex);
+               gobj = robj->gobj;
+               DRM_ERROR("Force free for (%p,%p,%lu,%lu)\n",
+                         gobj, robj, (unsigned long)gobj->size,
+                         *((unsigned long *)&gobj->refcount));
+               list_del_init(&robj->list);
+               radeon_object_unref(&robj);
+               gobj->driver_private = NULL;
+               drm_gem_object_unreference(gobj);
+               mutex_unlock(&rdev->ddev->struct_mutex);
+       }
+}
+
+int radeon_object_init(struct radeon_device *rdev)
+{
+       return radeon_ttm_init(rdev);
+}
+
+void radeon_object_fini(struct radeon_device *rdev)
+{
+       radeon_ttm_fini(rdev);
+}
+
+void radeon_object_list_add_object(struct radeon_object_list *lobj,
+                                  struct list_head *head)
+{
+       if (lobj->wdomain) {
+               list_add(&lobj->list, head);
+       } else {
+               list_add_tail(&lobj->list, head);
+       }
+}
+
+int radeon_object_list_reserve(struct list_head *head)
+{
+       struct radeon_object_list *lobj;
+       struct list_head *i;
+       int r;
+
+       list_for_each(i, head) {
+               lobj = list_entry(i, struct radeon_object_list, list);
+               if (!lobj->robj->pin_count) {
+                       r = radeon_object_reserve(lobj->robj, true);
+                       if (unlikely(r != 0)) {
+                               DRM_ERROR("radeon: failed to reserve object.\n");
+                               return r;
+                       }
+               }
+       }
+       return 0;
+}
+
+void radeon_object_list_unreserve(struct list_head *head)
+{
+       struct radeon_object_list *lobj;
+       struct list_head *i;
+
+       list_for_each(i, head) {
+               lobj = list_entry(i, struct radeon_object_list, list);
+               if (!lobj->robj->pin_count) {
+                       radeon_object_unreserve(lobj->robj);
+               }
+       }
+}
+
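+/*
+ * Reserve and validate every buffer on the list: write buffers are placed
+ * according to their write domain, read buffers may sit in GTT or VRAM.
+ * The new fence (if any) replaces each buffer's old sync object and the
+ * resulting GPU offset is stored back into the list entry.
+ */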
+int radeon_object_list_validate(struct list_head *head, void *fence)
+{
+       struct radeon_object_list *lobj;
+       struct radeon_object *robj;
+       struct radeon_fence *old_fence = NULL;
+       struct list_head *i;
+       uint32_t flags;
+       int r;
+
+       r = radeon_object_list_reserve(head);
+       if (unlikely(r != 0)) {
+               radeon_object_list_unreserve(head);
+               return r;
+       }
+       list_for_each(i, head) {
+               lobj = list_entry(i, struct radeon_object_list, list);
+               robj = lobj->robj;
+               if (lobj->wdomain) {
+                       flags = radeon_object_flags_from_domain(lobj->wdomain);
+                       flags |= TTM_PL_FLAG_TT;
+               } else {
+                       flags = radeon_object_flags_from_domain(lobj->rdomain);
+                       flags |= TTM_PL_FLAG_TT;
+                       flags |= TTM_PL_FLAG_VRAM;
+               }
+               if (!robj->pin_count) {
+                       robj->tobj.proposed_placement = flags | TTM_PL_MASK_CACHING;
+                       r = ttm_buffer_object_validate(&robj->tobj,
+                                                      robj->tobj.proposed_placement,
+                                                      true, false);
+                       if (unlikely(r)) {
+                               radeon_object_list_unreserve(head);
+                               DRM_ERROR("radeon: failed to validate.\n");
+                               return r;
+                       }
+                       radeon_object_gpu_addr(robj);
+               }
+               lobj->gpu_offset = robj->gpu_addr;
+               if (fence) {
+                       old_fence = (struct radeon_fence *)robj->tobj.sync_obj;
+                       robj->tobj.sync_obj = radeon_fence_ref(fence);
+                       robj->tobj.sync_obj_arg = NULL;
+               }
+               if (old_fence) {
+                       radeon_fence_unref(&old_fence);
+               }
+       }
+       return 0;
+}
+
+void radeon_object_list_unvalidate(struct list_head *head)
+{
+       struct radeon_object_list *lobj;
+       struct radeon_fence *old_fence = NULL;
+       struct list_head *i;
+
+       list_for_each(i, head) {
+               lobj = list_entry(i, struct radeon_object_list, list);
+               old_fence = (struct radeon_fence *)lobj->robj->tobj.sync_obj;
+               lobj->robj->tobj.sync_obj = NULL;
+               if (old_fence) {
+                       radeon_fence_unref(&old_fence);
+               }
+       }
+       radeon_object_list_unreserve(head);
+}
+
+void radeon_object_list_clean(struct list_head *head)
+{
+       radeon_object_list_unreserve(head);
+}
+
+int radeon_object_fbdev_mmap(struct radeon_object *robj,
+                            struct vm_area_struct *vma)
+{
+       return ttm_fbdev_mmap(vma, &robj->tobj);
+}
+
+unsigned long radeon_object_size(struct radeon_object *robj)
+{
+       return robj->tobj.num_pages << PAGE_SHIFT;
+}
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
new file mode 100644 (file)
index 0000000..473e477
--- /dev/null
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __RADEON_OBJECT_H__
+#define __RADEON_OBJECT_H__
+
+#include <ttm/ttm_bo_api.h>
+#include <ttm/ttm_bo_driver.h>
+#include <ttm/ttm_placement.h>
+#include <ttm/ttm_module.h>
+
+/*
+ * TTM.
+ */
+struct radeon_mman {
+       struct ttm_global_reference     mem_global_ref;
+       bool                            mem_global_referenced;
+       struct ttm_bo_device            bdev;
+};
+
+#endif
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
new file mode 100644 (file)
index 0000000..6d3d904
--- /dev/null
@@ -0,0 +1,3570 @@
+/*
+ * Copyright 2000 ATI Technologies Inc., Markham, Ontario, and
+ *                VA Linux Systems Inc., Fremont, California.
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation on the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT.  IN NO EVENT SHALL ATI, VA LINUX SYSTEMS AND/OR
+ * THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * Authors:
+ *   Kevin E. Martin <martin@xfree86.org>
+ *   Rickard E. Faith <faith@valinux.com>
+ *   Alan Hourihane <alanh@fairlite.demon.co.uk>
+ *
+ * References:
+ *
+ * !!!! FIXME !!!!
+ *   RAGE 128 VR/ RAGE 128 GL Register Reference Manual (Technical
+ *   Reference Manual P/N RRG-G04100-C Rev. 0.04), ATI Technologies: April
+ *   1999.
+ *
+ * !!!! FIXME !!!!
+ *   RAGE 128 Software Development Manual (Technical Reference Manual P/N
+ *   SDK-G04000 Rev. 0.01), ATI Technologies: June 1999.
+ *
+ */
+
+/* !!!! FIXME !!!!  NOTE: THIS FILE HAS BEEN CONVERTED FROM r128_reg.h
+ * AND CONTAINS REGISTERS AND REGISTER DEFINITIONS THAT ARE NOT CORRECT
+ * ON THE RADEON.  A FULL AUDIT OF THIS CODE IS NEEDED!  */
+#ifndef _RADEON_REG_H_
+#define _RADEON_REG_H_
+
+#include "r300_reg.h"
+#include "r500_reg.h"
+#include "r600_reg.h"
+
+
+#define RADEON_MC_AGP_LOCATION         0x014c
+#define                RADEON_MC_AGP_START_MASK        0x0000FFFF
+#define                RADEON_MC_AGP_START_SHIFT       0
+#define                RADEON_MC_AGP_TOP_MASK          0xFFFF0000
+#define                RADEON_MC_AGP_TOP_SHIFT         16
+#define RADEON_MC_FB_LOCATION          0x0148
+#define                RADEON_MC_FB_START_MASK         0x0000FFFF
+#define                RADEON_MC_FB_START_SHIFT        0
+#define                RADEON_MC_FB_TOP_MASK           0xFFFF0000
+#define                RADEON_MC_FB_TOP_SHIFT          16
+#define RADEON_AGP_BASE_2              0x015c /* r200+ only */
+#define RADEON_AGP_BASE                        0x0170
+
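The MC_FB/MC_AGP location registers above pack a start block and a top (last) block into one 32-bit word; by the usual radeon memory-controller convention (an assumption here, not visible in this hunk) both fields are in 64 KB units. A worked example using only the masks and shifts defined above:

	/* Place a 64 MB VRAM aperture at GPU address 0 (illustrative values,
	 * assuming 64 KB granularity for the start/top fields). */
	u32 fb_start = 0x00000000;
	u32 fb_size  = 64UL * 1024 * 1024;
	u32 mc_fb_location =
		(((fb_start >> 16) << RADEON_MC_FB_START_SHIFT) &
		 RADEON_MC_FB_START_MASK) |
		((((fb_start + fb_size - 1) >> 16) << RADEON_MC_FB_TOP_SHIFT) &
		 RADEON_MC_FB_TOP_MASK);
	/* mc_fb_location == 0x03FF0000: start block 0x0000, top block 0x03ff */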
+#define ATI_DATATYPE_VQ                                0
+#define ATI_DATATYPE_CI4                       1
+#define ATI_DATATYPE_CI8                       2
+#define ATI_DATATYPE_ARGB1555                  3
+#define ATI_DATATYPE_RGB565                    4
+#define ATI_DATATYPE_RGB888                    5
+#define ATI_DATATYPE_ARGB8888                  6
+#define ATI_DATATYPE_RGB332                    7
+#define ATI_DATATYPE_Y8                                8
+#define ATI_DATATYPE_RGB8                      9
+#define ATI_DATATYPE_CI16                      10
+#define ATI_DATATYPE_VYUY_422                  11
+#define ATI_DATATYPE_YVYU_422                  12
+#define ATI_DATATYPE_AYUV_444                  14
+#define ATI_DATATYPE_ARGB4444                  15
+
+                               /* Registers for 2D/Video/Overlay */
+#define RADEON_ADAPTER_ID                   0x0f2c /* PCI */
+#define RADEON_AGP_BASE                     0x0170
+#define RADEON_AGP_CNTL                     0x0174
+#       define RADEON_AGP_APER_SIZE_256MB   (0x00 << 0)
+#       define RADEON_AGP_APER_SIZE_128MB   (0x20 << 0)
+#       define RADEON_AGP_APER_SIZE_64MB    (0x30 << 0)
+#       define RADEON_AGP_APER_SIZE_32MB    (0x38 << 0)
+#       define RADEON_AGP_APER_SIZE_16MB    (0x3c << 0)
+#       define RADEON_AGP_APER_SIZE_8MB     (0x3e << 0)
+#       define RADEON_AGP_APER_SIZE_4MB     (0x3f << 0)
+#       define RADEON_AGP_APER_SIZE_MASK    (0x3f << 0)
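The aperture-size encodings above lend themselves to a small decode helper; the sketch below is purely illustrative and uses only the values defined in this block.

	/* Illustrative decode of the APER_SIZE field of RADEON_AGP_CNTL. */
	static unsigned int example_agp_aper_size_mb(u32 agp_cntl)
	{
		switch (agp_cntl & RADEON_AGP_APER_SIZE_MASK) {
		case RADEON_AGP_APER_SIZE_256MB: return 256;
		case RADEON_AGP_APER_SIZE_128MB: return 128;
		case RADEON_AGP_APER_SIZE_64MB:  return 64;
		case RADEON_AGP_APER_SIZE_32MB:  return 32;
		case RADEON_AGP_APER_SIZE_16MB:  return 16;
		case RADEON_AGP_APER_SIZE_8MB:   return 8;
		case RADEON_AGP_APER_SIZE_4MB:   return 4;
		default:                         return 0;
		}
	}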
+#define RADEON_STATUS_PCI_CONFIG            0x06
+#       define RADEON_CAP_LIST              0x100000
+#define RADEON_CAPABILITIES_PTR_PCI_CONFIG  0x34 /* offset in PCI config*/
+#       define RADEON_CAP_PTR_MASK          0xfc /* mask off reserved bits of CAP_PTR */
+#       define RADEON_CAP_ID_NULL           0x00 /* End of capability list */
+#       define RADEON_CAP_ID_AGP            0x02 /* AGP capability ID */
+#       define RADEON_CAP_ID_EXP            0x10 /* PCI Express */
+#define RADEON_AGP_COMMAND                  0x0f60 /* PCI */
+#define RADEON_AGP_COMMAND_PCI_CONFIG       0x0060 /* offset in PCI config*/
+#       define RADEON_AGP_ENABLE            (1<<8)
+#define RADEON_AGP_PLL_CNTL                 0x000b /* PLL */
+#define RADEON_AGP_STATUS                   0x0f5c /* PCI */
+#       define RADEON_AGP_1X_MODE           0x01
+#       define RADEON_AGP_2X_MODE           0x02
+#       define RADEON_AGP_4X_MODE           0x04
+#       define RADEON_AGP_FW_MODE           0x10
+#       define RADEON_AGP_MODE_MASK         0x17
+#       define RADEON_AGPv3_MODE            0x08
+#       define RADEON_AGPv3_4X_MODE         0x01
+#       define RADEON_AGPv3_8X_MODE         0x02
+#define RADEON_ATTRDR                       0x03c1 /* VGA */
+#define RADEON_ATTRDW                       0x03c0 /* VGA */
+#define RADEON_ATTRX                        0x03c0 /* VGA */
+#define RADEON_AUX_SC_CNTL                  0x1660
+#       define RADEON_AUX1_SC_EN            (1 << 0)
+#       define RADEON_AUX1_SC_MODE_OR       (0 << 1)
+#       define RADEON_AUX1_SC_MODE_NAND     (1 << 1)
+#       define RADEON_AUX2_SC_EN            (1 << 2)
+#       define RADEON_AUX2_SC_MODE_OR       (0 << 3)
+#       define RADEON_AUX2_SC_MODE_NAND     (1 << 3)
+#       define RADEON_AUX3_SC_EN            (1 << 4)
+#       define RADEON_AUX3_SC_MODE_OR       (0 << 5)
+#       define RADEON_AUX3_SC_MODE_NAND     (1 << 5)
+#define RADEON_AUX1_SC_BOTTOM               0x1670
+#define RADEON_AUX1_SC_LEFT                 0x1664
+#define RADEON_AUX1_SC_RIGHT                0x1668
+#define RADEON_AUX1_SC_TOP                  0x166c
+#define RADEON_AUX2_SC_BOTTOM               0x1680
+#define RADEON_AUX2_SC_LEFT                 0x1674
+#define RADEON_AUX2_SC_RIGHT                0x1678
+#define RADEON_AUX2_SC_TOP                  0x167c
+#define RADEON_AUX3_SC_BOTTOM               0x1690
+#define RADEON_AUX3_SC_LEFT                 0x1684
+#define RADEON_AUX3_SC_RIGHT                0x1688
+#define RADEON_AUX3_SC_TOP                  0x168c
+#define RADEON_AUX_WINDOW_HORZ_CNTL         0x02d8
+#define RADEON_AUX_WINDOW_VERT_CNTL         0x02dc
+
+#define RADEON_BASE_CODE                    0x0f0b
+#define RADEON_BIOS_0_SCRATCH               0x0010
+#       define RADEON_FP_PANEL_SCALABLE     (1 << 16)
+#       define RADEON_FP_PANEL_SCALE_EN     (1 << 17)
+#       define RADEON_FP_CHIP_SCALE_EN      (1 << 18)
+#       define RADEON_DRIVER_BRIGHTNESS_EN  (1 << 26)
+#       define RADEON_DISPLAY_ROT_MASK      (3 << 28)
+#       define RADEON_DISPLAY_ROT_00        (0 << 28)
+#       define RADEON_DISPLAY_ROT_90        (1 << 28)
+#       define RADEON_DISPLAY_ROT_180       (2 << 28)
+#       define RADEON_DISPLAY_ROT_270       (3 << 28)
+#define RADEON_BIOS_1_SCRATCH               0x0014
+#define RADEON_BIOS_2_SCRATCH               0x0018
+#define RADEON_BIOS_3_SCRATCH               0x001c
+#define RADEON_BIOS_4_SCRATCH               0x0020
+#       define RADEON_CRT1_ATTACHED_MASK    (3 << 0)
+#       define RADEON_CRT1_ATTACHED_MONO    (1 << 0)
+#       define RADEON_CRT1_ATTACHED_COLOR   (2 << 0)
+#       define RADEON_LCD1_ATTACHED         (1 << 2)
+#       define RADEON_DFP1_ATTACHED         (1 << 3)
+#       define RADEON_TV1_ATTACHED_MASK     (3 << 4)
+#       define RADEON_TV1_ATTACHED_COMP     (1 << 4)
+#       define RADEON_TV1_ATTACHED_SVIDEO   (2 << 4)
+#       define RADEON_CRT2_ATTACHED_MASK    (3 << 8)
+#       define RADEON_CRT2_ATTACHED_MONO    (1 << 8)
+#       define RADEON_CRT2_ATTACHED_COLOR   (2 << 8)
+#       define RADEON_DFP2_ATTACHED         (1 << 11)
+#define RADEON_BIOS_5_SCRATCH               0x0024
+#       define RADEON_LCD1_ON               (1 << 0)
+#       define RADEON_CRT1_ON               (1 << 1)
+#       define RADEON_TV1_ON                (1 << 2)
+#       define RADEON_DFP1_ON               (1 << 3)
+#       define RADEON_CRT2_ON               (1 << 5)
+#       define RADEON_CV1_ON                (1 << 6)
+#       define RADEON_DFP2_ON               (1 << 7)
+#       define RADEON_LCD1_CRTC_MASK        (1 << 8)
+#       define RADEON_LCD1_CRTC_SHIFT       8
+#       define RADEON_CRT1_CRTC_MASK        (1 << 9)
+#       define RADEON_CRT1_CRTC_SHIFT       9
+#       define RADEON_TV1_CRTC_MASK         (1 << 10)
+#       define RADEON_TV1_CRTC_SHIFT        10
+#       define RADEON_DFP1_CRTC_MASK        (1 << 11)
+#       define RADEON_DFP1_CRTC_SHIFT       11
+#       define RADEON_CRT2_CRTC_MASK        (1 << 12)
+#       define RADEON_CRT2_CRTC_SHIFT       12
+#       define RADEON_CV1_CRTC_MASK         (1 << 13)
+#       define RADEON_CV1_CRTC_SHIFT        13
+#       define RADEON_DFP2_CRTC_MASK        (1 << 14)
+#       define RADEON_DFP2_CRTC_SHIFT       14
+#       define RADEON_ACC_REQ_LCD1          (1 << 16)
+#       define RADEON_ACC_REQ_CRT1          (1 << 17)
+#       define RADEON_ACC_REQ_TV1           (1 << 18)
+#       define RADEON_ACC_REQ_DFP1          (1 << 19)
+#       define RADEON_ACC_REQ_CRT2          (1 << 21)
+#       define RADEON_ACC_REQ_TV2           (1 << 22)
+#       define RADEON_ACC_REQ_DFP2          (1 << 23)
+#define RADEON_BIOS_6_SCRATCH               0x0028
+#       define RADEON_ACC_MODE_CHANGE       (1 << 2)
+#       define RADEON_EXT_DESKTOP_MODE      (1 << 3)
+#       define RADEON_LCD_DPMS_ON           (1 << 20)
+#       define RADEON_CRT_DPMS_ON           (1 << 21)
+#       define RADEON_TV_DPMS_ON            (1 << 22)
+#       define RADEON_DFP_DPMS_ON           (1 << 23)
+#       define RADEON_DPMS_MASK             (3 << 24)
+#       define RADEON_DPMS_ON               (0 << 24)
+#       define RADEON_DPMS_STANDBY          (1 << 24)
+#       define RADEON_DPMS_SUSPEND          (2 << 24)
+#       define RADEON_DPMS_OFF              (3 << 24)
+#       define RADEON_SCREEN_BLANKING       (1 << 26)
+#       define RADEON_DRIVER_CRITICAL       (1 << 27)
+#       define RADEON_DISPLAY_SWITCHING_DIS (1 << 30)
+#define RADEON_BIOS_7_SCRATCH               0x002c
+#       define RADEON_SYS_HOTKEY            (1 << 10)
+#       define RADEON_DRV_LOADED            (1 << 12)
+#define RADEON_BIOS_ROM                     0x0f30 /* PCI */
+#define RADEON_BIST                         0x0f0f /* PCI */
+#define RADEON_BRUSH_DATA0                  0x1480
+#define RADEON_BRUSH_DATA1                  0x1484
+#define RADEON_BRUSH_DATA10                 0x14a8
+#define RADEON_BRUSH_DATA11                 0x14ac
+#define RADEON_BRUSH_DATA12                 0x14b0
+#define RADEON_BRUSH_DATA13                 0x14b4
+#define RADEON_BRUSH_DATA14                 0x14b8
+#define RADEON_BRUSH_DATA15                 0x14bc
+#define RADEON_BRUSH_DATA16                 0x14c0
+#define RADEON_BRUSH_DATA17                 0x14c4
+#define RADEON_BRUSH_DATA18                 0x14c8
+#define RADEON_BRUSH_DATA19                 0x14cc
+#define RADEON_BRUSH_DATA2                  0x1488
+#define RADEON_BRUSH_DATA20                 0x14d0
+#define RADEON_BRUSH_DATA21                 0x14d4
+#define RADEON_BRUSH_DATA22                 0x14d8
+#define RADEON_BRUSH_DATA23                 0x14dc
+#define RADEON_BRUSH_DATA24                 0x14e0
+#define RADEON_BRUSH_DATA25                 0x14e4
+#define RADEON_BRUSH_DATA26                 0x14e8
+#define RADEON_BRUSH_DATA27                 0x14ec
+#define RADEON_BRUSH_DATA28                 0x14f0
+#define RADEON_BRUSH_DATA29                 0x14f4
+#define RADEON_BRUSH_DATA3                  0x148c
+#define RADEON_BRUSH_DATA30                 0x14f8
+#define RADEON_BRUSH_DATA31                 0x14fc
+#define RADEON_BRUSH_DATA32                 0x1500
+#define RADEON_BRUSH_DATA33                 0x1504
+#define RADEON_BRUSH_DATA34                 0x1508
+#define RADEON_BRUSH_DATA35                 0x150c
+#define RADEON_BRUSH_DATA36                 0x1510
+#define RADEON_BRUSH_DATA37                 0x1514
+#define RADEON_BRUSH_DATA38                 0x1518
+#define RADEON_BRUSH_DATA39                 0x151c
+#define RADEON_BRUSH_DATA4                  0x1490
+#define RADEON_BRUSH_DATA40                 0x1520
+#define RADEON_BRUSH_DATA41                 0x1524
+#define RADEON_BRUSH_DATA42                 0x1528
+#define RADEON_BRUSH_DATA43                 0x152c
+#define RADEON_BRUSH_DATA44                 0x1530
+#define RADEON_BRUSH_DATA45                 0x1534
+#define RADEON_BRUSH_DATA46                 0x1538
+#define RADEON_BRUSH_DATA47                 0x153c
+#define RADEON_BRUSH_DATA48                 0x1540
+#define RADEON_BRUSH_DATA49                 0x1544
+#define RADEON_BRUSH_DATA5                  0x1494
+#define RADEON_BRUSH_DATA50                 0x1548
+#define RADEON_BRUSH_DATA51                 0x154c
+#define RADEON_BRUSH_DATA52                 0x1550
+#define RADEON_BRUSH_DATA53                 0x1554
+#define RADEON_BRUSH_DATA54                 0x1558
+#define RADEON_BRUSH_DATA55                 0x155c
+#define RADEON_BRUSH_DATA56                 0x1560
+#define RADEON_BRUSH_DATA57                 0x1564
+#define RADEON_BRUSH_DATA58                 0x1568
+#define RADEON_BRUSH_DATA59                 0x156c
+#define RADEON_BRUSH_DATA6                  0x1498
+#define RADEON_BRUSH_DATA60                 0x1570
+#define RADEON_BRUSH_DATA61                 0x1574
+#define RADEON_BRUSH_DATA62                 0x1578
+#define RADEON_BRUSH_DATA63                 0x157c
+#define RADEON_BRUSH_DATA7                  0x149c
+#define RADEON_BRUSH_DATA8                  0x14a0
+#define RADEON_BRUSH_DATA9                  0x14a4
+#define RADEON_BRUSH_SCALE                  0x1470
+#define RADEON_BRUSH_Y_X                    0x1474
+#define RADEON_BUS_CNTL                     0x0030
+#       define RADEON_BUS_MASTER_DIS         (1 << 6)
+#       define RADEON_BUS_BIOS_DIS_ROM       (1 << 12)
+#       define RADEON_BUS_RD_DISCARD_EN      (1 << 24)
+#       define RADEON_BUS_RD_ABORT_EN        (1 << 25)
+#       define RADEON_BUS_MSTR_DISCONNECT_EN (1 << 28)
+#       define RADEON_BUS_WRT_BURST          (1 << 29)
+#       define RADEON_BUS_READ_BURST         (1 << 30)
+#define RADEON_BUS_CNTL1                    0x0034
+#       define RADEON_BUS_WAIT_ON_LOCK_EN    (1 << 4)
+
+/* #define RADEON_PCIE_INDEX                   0x0030 */
+/* #define RADEON_PCIE_DATA                    0x0034 */
+#define RADEON_PCIE_LC_LINK_WIDTH_CNTL             0xa2 /* PCIE */
+#       define RADEON_PCIE_LC_LINK_WIDTH_SHIFT     0
+#       define RADEON_PCIE_LC_LINK_WIDTH_MASK      0x7
+#       define RADEON_PCIE_LC_LINK_WIDTH_X0        0
+#       define RADEON_PCIE_LC_LINK_WIDTH_X1        1
+#       define RADEON_PCIE_LC_LINK_WIDTH_X2        2
+#       define RADEON_PCIE_LC_LINK_WIDTH_X4        3
+#       define RADEON_PCIE_LC_LINK_WIDTH_X8        4
+#       define RADEON_PCIE_LC_LINK_WIDTH_X12       5
+#       define RADEON_PCIE_LC_LINK_WIDTH_X16       6
+#       define RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT  4
+#       define RADEON_PCIE_LC_LINK_WIDTH_RD_MASK   0x70
+#       define RADEON_PCIE_LC_RECONFIG_NOW         (1 << 8)
+#       define RADEON_PCIE_LC_RECONFIG_LATER       (1 << 9)
+#       define RADEON_PCIE_LC_SHORT_RECONFIG_EN    (1 << 10)
+
+#define RADEON_CACHE_CNTL                   0x1724
+#define RADEON_CACHE_LINE                   0x0f0c /* PCI */
+#define RADEON_CAPABILITIES_ID              0x0f50 /* PCI */
+#define RADEON_CAPABILITIES_PTR             0x0f34 /* PCI */
+#define RADEON_CLK_PIN_CNTL                 0x0001 /* PLL */
+#       define RADEON_DONT_USE_XTALIN       (1 << 4)
+#       define RADEON_SCLK_DYN_START_CNTL   (1 << 15)
+#define RADEON_CLOCK_CNTL_DATA              0x000c
+#define RADEON_CLOCK_CNTL_INDEX             0x0008
+#       define RADEON_PLL_WR_EN             (1 << 7)
+#       define RADEON_PLL_DIV_SEL           (3 << 8)
+#       define RADEON_PLL2_DIV_SEL_MASK     (~(3 << 8))
+#define RADEON_CLK_PWRMGT_CNTL              0x0014
+#       define RADEON_ENGIN_DYNCLK_MODE     (1 << 12)
+#       define RADEON_ACTIVE_HILO_LAT_MASK  (3 << 13)
+#       define RADEON_ACTIVE_HILO_LAT_SHIFT 13
+#       define RADEON_DISP_DYN_STOP_LAT_MASK (1 << 12)
+#       define RADEON_MC_BUSY               (1 << 16)
+#       define RADEON_DLL_READY             (1 << 19)
+#       define RADEON_CG_NO1_DEBUG_0        (1 << 24)
+#       define RADEON_CG_NO1_DEBUG_MASK     (0x1f << 24)
+#       define RADEON_DYN_STOP_MODE_MASK    (7 << 21)
+#       define RADEON_TVPLL_PWRMGT_OFF      (1 << 30)
+#       define RADEON_TVCLK_TURNOFF         (1 << 31)
+#define RADEON_PLL_PWRMGT_CNTL              0x0015 /* PLL */
+#       define RADEON_TCL_BYPASS_DISABLE    (1 << 20)
+#define RADEON_CLR_CMP_CLR_3D               0x1a24
+#define RADEON_CLR_CMP_CLR_DST              0x15c8
+#define RADEON_CLR_CMP_CLR_SRC              0x15c4
+#define RADEON_CLR_CMP_CNTL                 0x15c0
+#       define RADEON_SRC_CMP_EQ_COLOR      (4 <<  0)
+#       define RADEON_SRC_CMP_NEQ_COLOR     (5 <<  0)
+#       define RADEON_CLR_CMP_SRC_SOURCE    (1 << 24)
+#define RADEON_CLR_CMP_MASK                 0x15cc
+#       define RADEON_CLR_CMP_MSK           0xffffffff
+#define RADEON_CLR_CMP_MASK_3D              0x1A28
+#define RADEON_COMMAND                      0x0f04 /* PCI */
+#define RADEON_COMPOSITE_SHADOW_ID          0x1a0c
+#define RADEON_CONFIG_APER_0_BASE           0x0100
+#define RADEON_CONFIG_APER_1_BASE           0x0104
+#define RADEON_CONFIG_APER_SIZE             0x0108
+#define RADEON_CONFIG_BONDS                 0x00e8
+#define RADEON_CONFIG_CNTL                  0x00e0
+#       define RADEON_CFG_ATI_REV_A11       (0   << 16)
+#       define RADEON_CFG_ATI_REV_A12       (1   << 16)
+#       define RADEON_CFG_ATI_REV_A13       (2   << 16)
+#       define RADEON_CFG_ATI_REV_ID_MASK   (0xf << 16)
+#define RADEON_CONFIG_MEMSIZE               0x00f8
+#define RADEON_CONFIG_MEMSIZE_EMBEDDED      0x0114
+#define RADEON_CONFIG_REG_1_BASE            0x010c
+#define RADEON_CONFIG_REG_APER_SIZE         0x0110
+#define RADEON_CONFIG_XSTRAP                0x00e4
+#define RADEON_CONSTANT_COLOR_C             0x1d34
+#       define RADEON_CONSTANT_COLOR_MASK   0x00ffffff
+#       define RADEON_CONSTANT_COLOR_ONE    0x00ffffff
+#       define RADEON_CONSTANT_COLOR_ZERO   0x00000000
+#define RADEON_CRC_CMDFIFO_ADDR             0x0740
+#define RADEON_CRC_CMDFIFO_DOUT             0x0744
+#define RADEON_GRPH_BUFFER_CNTL             0x02f0
+#       define RADEON_GRPH_START_REQ_MASK          (0x7f)
+#       define RADEON_GRPH_START_REQ_SHIFT         0
+#       define RADEON_GRPH_STOP_REQ_MASK           (0x7f<<8)
+#       define RADEON_GRPH_STOP_REQ_SHIFT          8
+#       define RADEON_GRPH_CRITICAL_POINT_MASK     (0x7f<<16)
+#       define RADEON_GRPH_CRITICAL_POINT_SHIFT    16
+#       define RADEON_GRPH_CRITICAL_CNTL           (1<<28)
+#       define RADEON_GRPH_BUFFER_SIZE             (1<<29)
+#       define RADEON_GRPH_CRITICAL_AT_SOF         (1<<30)
+#       define RADEON_GRPH_STOP_CNTL               (1<<31)
+#define RADEON_GRPH2_BUFFER_CNTL            0x03f0
+#       define RADEON_GRPH2_START_REQ_MASK         (0x7f)
+#       define RADEON_GRPH2_START_REQ_SHIFT         0
+#       define RADEON_GRPH2_STOP_REQ_MASK          (0x7f<<8)
+#       define RADEON_GRPH2_STOP_REQ_SHIFT         8
+#       define RADEON_GRPH2_CRITICAL_POINT_MASK    (0x7f<<16)
+#       define RADEON_GRPH2_CRITICAL_POINT_SHIFT   16
+#       define RADEON_GRPH2_CRITICAL_CNTL          (1<<28)
+#       define RADEON_GRPH2_BUFFER_SIZE            (1<<29)
+#       define RADEON_GRPH2_CRITICAL_AT_SOF        (1<<30)
+#       define RADEON_GRPH2_STOP_CNTL              (1<<31)
+#define RADEON_CRTC_CRNT_FRAME              0x0214
+#define RADEON_CRTC_EXT_CNTL                0x0054
+#       define RADEON_CRTC_VGA_XOVERSCAN    (1 <<  0)
+#       define RADEON_VGA_ATI_LINEAR        (1 <<  3)
+#       define RADEON_XCRT_CNT_EN           (1 <<  6)
+#       define RADEON_CRTC_HSYNC_DIS        (1 <<  8)
+#       define RADEON_CRTC_VSYNC_DIS        (1 <<  9)
+#       define RADEON_CRTC_DISPLAY_DIS      (1 << 10)
+#       define RADEON_CRTC_SYNC_TRISTAT     (1 << 11)
+#       define RADEON_CRTC_CRT_ON           (1 << 15)
+#define RADEON_CRTC_EXT_CNTL_DPMS_BYTE      0x0055
+#       define RADEON_CRTC_HSYNC_DIS_BYTE   (1 <<  0)
+#       define RADEON_CRTC_VSYNC_DIS_BYTE   (1 <<  1)
+#       define RADEON_CRTC_DISPLAY_DIS_BYTE (1 <<  2)
+#define RADEON_CRTC_GEN_CNTL                0x0050
+#       define RADEON_CRTC_DBL_SCAN_EN      (1 <<  0)
+#       define RADEON_CRTC_INTERLACE_EN     (1 <<  1)
+#       define RADEON_CRTC_CSYNC_EN         (1 <<  4)
+#       define RADEON_CRTC_ICON_EN          (1 << 15)
+#       define RADEON_CRTC_CUR_EN           (1 << 16)
+#       define RADEON_CRTC_CUR_MODE_MASK    (7 << 20)
+#       define RADEON_CRTC_CUR_MODE_SHIFT   20
+#       define RADEON_CRTC_CUR_MODE_MONO    0
+#       define RADEON_CRTC_CUR_MODE_24BPP   2
+#       define RADEON_CRTC_EXT_DISP_EN      (1 << 24)
+#       define RADEON_CRTC_EN               (1 << 25)
+#       define RADEON_CRTC_DISP_REQ_EN_B    (1 << 26)
+#define RADEON_CRTC2_GEN_CNTL               0x03f8
+#       define RADEON_CRTC2_DBL_SCAN_EN     (1 <<  0)
+#       define RADEON_CRTC2_INTERLACE_EN    (1 <<  1)
+#       define RADEON_CRTC2_SYNC_TRISTAT    (1 <<  4)
+#       define RADEON_CRTC2_HSYNC_TRISTAT   (1 <<  5)
+#       define RADEON_CRTC2_VSYNC_TRISTAT   (1 <<  6)
+#       define RADEON_CRTC2_CRT2_ON         (1 <<  7)
+#       define RADEON_CRTC2_PIX_WIDTH_SHIFT 8
+#       define RADEON_CRTC2_PIX_WIDTH_MASK  (0xf << 8)
+#       define RADEON_CRTC2_ICON_EN         (1 << 15)
+#       define RADEON_CRTC2_CUR_EN          (1 << 16)
+#       define RADEON_CRTC2_CUR_MODE_MASK   (7 << 20)
+#       define RADEON_CRTC2_DISP_DIS        (1 << 23)
+#       define RADEON_CRTC2_EN              (1 << 25)
+#       define RADEON_CRTC2_DISP_REQ_EN_B   (1 << 26)
+#       define RADEON_CRTC2_CSYNC_EN        (1 << 27)
+#       define RADEON_CRTC2_HSYNC_DIS       (1 << 28)
+#       define RADEON_CRTC2_VSYNC_DIS       (1 << 29)
+#define RADEON_CRTC_MORE_CNTL               0x27c
+#       define RADEON_CRTC_AUTO_HORZ_CENTER_EN (1<<2)
+#       define RADEON_CRTC_AUTO_VERT_CENTER_EN (1<<3)
+#       define RADEON_CRTC_H_CUTOFF_ACTIVE_EN (1<<4)
+#       define RADEON_CRTC_V_CUTOFF_ACTIVE_EN (1<<5)
+#define RADEON_CRTC_GUI_TRIG_VLINE          0x0218
+#define RADEON_CRTC_H_SYNC_STRT_WID         0x0204
+#       define RADEON_CRTC_H_SYNC_STRT_PIX        (0x07  <<  0)
+#       define RADEON_CRTC_H_SYNC_STRT_CHAR       (0x3ff <<  3)
+#       define RADEON_CRTC_H_SYNC_STRT_CHAR_SHIFT 3
+#       define RADEON_CRTC_H_SYNC_WID             (0x3f  << 16)
+#       define RADEON_CRTC_H_SYNC_WID_SHIFT       16
+#       define RADEON_CRTC_H_SYNC_POL             (1     << 23)
+#define RADEON_CRTC2_H_SYNC_STRT_WID        0x0304
+#       define RADEON_CRTC2_H_SYNC_STRT_PIX        (0x07  <<  0)
+#       define RADEON_CRTC2_H_SYNC_STRT_CHAR       (0x3ff <<  3)
+#       define RADEON_CRTC2_H_SYNC_STRT_CHAR_SHIFT 3
+#       define RADEON_CRTC2_H_SYNC_WID             (0x3f  << 16)
+#       define RADEON_CRTC2_H_SYNC_WID_SHIFT       16
+#       define RADEON_CRTC2_H_SYNC_POL             (1     << 23)
+#define RADEON_CRTC_H_TOTAL_DISP            0x0200
+#       define RADEON_CRTC_H_TOTAL          (0x03ff << 0)
+#       define RADEON_CRTC_H_TOTAL_SHIFT    0
+#       define RADEON_CRTC_H_DISP           (0x01ff << 16)
+#       define RADEON_CRTC_H_DISP_SHIFT     16
+#define RADEON_CRTC2_H_TOTAL_DISP           0x0300
+#       define RADEON_CRTC2_H_TOTAL         (0x03ff << 0)
+#       define RADEON_CRTC2_H_TOTAL_SHIFT   0
+#       define RADEON_CRTC2_H_DISP          (0x01ff << 16)
+#       define RADEON_CRTC2_H_DISP_SHIFT    16
+
+#define RADEON_CRTC_OFFSET_RIGHT           0x0220
+#define RADEON_CRTC_OFFSET                  0x0224
+#      define RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET (1<<30)
+#      define RADEON_CRTC_OFFSET__OFFSET_LOCK     (1<<31)
+
+#define RADEON_CRTC2_OFFSET                 0x0324
+#      define RADEON_CRTC2_OFFSET__GUI_TRIG_OFFSET (1<<30)
+#      define RADEON_CRTC2_OFFSET__OFFSET_LOCK     (1<<31)
+#define RADEON_CRTC_OFFSET_CNTL             0x0228
+#       define RADEON_CRTC_TILE_LINE_SHIFT              0
+#       define RADEON_CRTC_TILE_LINE_RIGHT_SHIFT        4
+#      define R300_CRTC_X_Y_MODE_EN_RIGHT              (1 << 6)
+#      define R300_CRTC_MICRO_TILE_BUFFER_RIGHT_MASK   (3 << 7)
+#      define R300_CRTC_MICRO_TILE_BUFFER_RIGHT_AUTO   (0 << 7)
+#      define R300_CRTC_MICRO_TILE_BUFFER_RIGHT_SINGLE (1 << 7)
+#      define R300_CRTC_MICRO_TILE_BUFFER_RIGHT_DOUBLE (2 << 7)
+#      define R300_CRTC_MICRO_TILE_BUFFER_RIGHT_DIS    (3 << 7)
+#      define R300_CRTC_X_Y_MODE_EN                    (1 << 9)
+#      define R300_CRTC_MICRO_TILE_BUFFER_MASK         (3 << 10)
+#      define R300_CRTC_MICRO_TILE_BUFFER_AUTO         (0 << 10)
+#      define R300_CRTC_MICRO_TILE_BUFFER_SINGLE       (1 << 10)
+#      define R300_CRTC_MICRO_TILE_BUFFER_DOUBLE       (2 << 10)
+#      define R300_CRTC_MICRO_TILE_BUFFER_DIS          (3 << 10)
+#      define R300_CRTC_MICRO_TILE_EN_RIGHT            (1 << 12)
+#      define R300_CRTC_MICRO_TILE_EN                  (1 << 13)
+#      define R300_CRTC_MACRO_TILE_EN_RIGHT            (1 << 14)
+#       define R300_CRTC_MACRO_TILE_EN                  (1 << 15)
+#       define RADEON_CRTC_TILE_EN_RIGHT                (1 << 14)
+#       define RADEON_CRTC_TILE_EN                      (1 << 15)
+#       define RADEON_CRTC_OFFSET_FLIP_CNTL             (1 << 16)
+#       define RADEON_CRTC_STEREO_OFFSET_EN             (1 << 17)
+
+#define R300_CRTC_TILE_X0_Y0               0x0350
+#define R300_CRTC2_TILE_X0_Y0              0x0358
+
+#define RADEON_CRTC2_OFFSET_CNTL            0x0328
+#       define RADEON_CRTC2_OFFSET_FLIP_CNTL (1 << 16)
+#       define RADEON_CRTC2_TILE_EN         (1 << 15)
+#define RADEON_CRTC_PITCH                   0x022c
+#      define RADEON_CRTC_PITCH__SHIFT          0
+#      define RADEON_CRTC_PITCH__RIGHT_SHIFT   16
+
+#define RADEON_CRTC2_PITCH                  0x032c
+#define RADEON_CRTC_STATUS                  0x005c
+#       define RADEON_CRTC_VBLANK_SAVE      (1 <<  1)
+#       define RADEON_CRTC_VBLANK_SAVE_CLEAR  (1 <<  1)
+#define RADEON_CRTC2_STATUS                  0x03fc
+#       define RADEON_CRTC2_VBLANK_SAVE      (1 <<  1)
+#       define RADEON_CRTC2_VBLANK_SAVE_CLEAR  (1 <<  1)
+#define RADEON_CRTC_V_SYNC_STRT_WID         0x020c
+#       define RADEON_CRTC_V_SYNC_STRT        (0x7ff <<  0)
+#       define RADEON_CRTC_V_SYNC_STRT_SHIFT  0
+#       define RADEON_CRTC_V_SYNC_WID         (0x1f  << 16)
+#       define RADEON_CRTC_V_SYNC_WID_SHIFT   16
+#       define RADEON_CRTC_V_SYNC_POL         (1     << 23)
+#define RADEON_CRTC2_V_SYNC_STRT_WID        0x030c
+#       define RADEON_CRTC2_V_SYNC_STRT       (0x7ff <<  0)
+#       define RADEON_CRTC2_V_SYNC_STRT_SHIFT 0
+#       define RADEON_CRTC2_V_SYNC_WID        (0x1f  << 16)
+#       define RADEON_CRTC2_V_SYNC_WID_SHIFT  16
+#       define RADEON_CRTC2_V_SYNC_POL        (1     << 23)
+#define RADEON_CRTC_V_TOTAL_DISP            0x0208
+#       define RADEON_CRTC_V_TOTAL          (0x07ff << 0)
+#       define RADEON_CRTC_V_TOTAL_SHIFT    0
+#       define RADEON_CRTC_V_DISP           (0x07ff << 16)
+#       define RADEON_CRTC_V_DISP_SHIFT     16
+#define RADEON_CRTC2_V_TOTAL_DISP           0x0308
+#       define RADEON_CRTC2_V_TOTAL         (0x07ff << 0)
+#       define RADEON_CRTC2_V_TOTAL_SHIFT   0
+#       define RADEON_CRTC2_V_DISP          (0x07ff << 16)
+#       define RADEON_CRTC2_V_DISP_SHIFT    16
+#define RADEON_CRTC_VLINE_CRNT_VLINE        0x0210
+#       define RADEON_CRTC_CRNT_VLINE_MASK  (0x7ff << 16)
+#define RADEON_CRTC2_CRNT_FRAME             0x0314
+#define RADEON_CRTC2_GUI_TRIG_VLINE         0x0318
+#define RADEON_CRTC2_STATUS                 0x03fc
+#define RADEON_CRTC2_VLINE_CRNT_VLINE       0x0310
+#define RADEON_CRTC8_DATA                   0x03d5 /* VGA, 0x3b5 */
+#define RADEON_CRTC8_IDX                    0x03d4 /* VGA, 0x3b4 */
+#define RADEON_CUR_CLR0                     0x026c
+#define RADEON_CUR_CLR1                     0x0270
+#define RADEON_CUR_HORZ_VERT_OFF            0x0268
+#define RADEON_CUR_HORZ_VERT_POSN           0x0264
+#define RADEON_CUR_OFFSET                   0x0260
+#       define RADEON_CUR_LOCK              (1 << 31)
+#define RADEON_CUR2_CLR0                    0x036c
+#define RADEON_CUR2_CLR1                    0x0370
+#define RADEON_CUR2_HORZ_VERT_OFF           0x0368
+#define RADEON_CUR2_HORZ_VERT_POSN          0x0364
+#define RADEON_CUR2_OFFSET                  0x0360
+#       define RADEON_CUR2_LOCK             (1 << 31)
+
+#define RADEON_DAC_CNTL                     0x0058
+#       define RADEON_DAC_RANGE_CNTL        (3 <<  0)
+#       define RADEON_DAC_RANGE_CNTL_PS2    (2 <<  0)
+#       define RADEON_DAC_RANGE_CNTL_MASK   0x03
+#       define RADEON_DAC_BLANKING          (1 <<  2)
+#       define RADEON_DAC_CMP_EN            (1 <<  3)
+#       define RADEON_DAC_CMP_OUTPUT        (1 <<  7)
+#       define RADEON_DAC_8BIT_EN           (1 <<  8)
+#       define RADEON_DAC_TVO_EN            (1 << 10)
+#       define RADEON_DAC_VGA_ADR_EN        (1 << 13)
+#       define RADEON_DAC_PDWN              (1 << 15)
+#       define RADEON_DAC_MASK_ALL          (0xff << 24)
+#define RADEON_DAC_CNTL2                    0x007c
+#       define RADEON_DAC2_TV_CLK_SEL       (0 <<  1)
+#       define RADEON_DAC2_DAC_CLK_SEL      (1 <<  0)
+#       define RADEON_DAC2_DAC2_CLK_SEL     (1 <<  1)
+#       define RADEON_DAC2_PALETTE_ACC_CTL  (1 <<  5)
+#       define RADEON_DAC2_CMP_EN           (1 <<  7)
+#       define RADEON_DAC2_CMP_OUT_R        (1 <<  8)
+#       define RADEON_DAC2_CMP_OUT_G        (1 <<  9)
+#       define RADEON_DAC2_CMP_OUT_B        (1 << 10)
+#       define RADEON_DAC2_CMP_OUTPUT       (1 << 11)
+#define RADEON_DAC_EXT_CNTL                 0x0280
+#       define RADEON_DAC2_FORCE_BLANK_OFF_EN (1 << 0)
+#       define RADEON_DAC2_FORCE_DATA_EN      (1 << 1)
+#       define RADEON_DAC_FORCE_BLANK_OFF_EN  (1 << 4)
+#       define RADEON_DAC_FORCE_DATA_EN       (1 << 5)
+#       define RADEON_DAC_FORCE_DATA_SEL_MASK (3 << 6)
+#       define RADEON_DAC_FORCE_DATA_SEL_R    (0 << 6)
+#       define RADEON_DAC_FORCE_DATA_SEL_G    (1 << 6)
+#       define RADEON_DAC_FORCE_DATA_SEL_B    (2 << 6)
+#       define RADEON_DAC_FORCE_DATA_SEL_RGB  (3 << 6)
+#       define RADEON_DAC_FORCE_DATA_MASK   0x0003ff00
+#       define RADEON_DAC_FORCE_DATA_SHIFT  8
+#define RADEON_DAC_MACRO_CNTL               0x0d04
+#       define RADEON_DAC_PDWN_R            (1 << 16)
+#       define RADEON_DAC_PDWN_G            (1 << 17)
+#       define RADEON_DAC_PDWN_B            (1 << 18)
+#define RADEON_DISP_PWR_MAN                 0x0d08
+#       define RADEON_DISP_PWR_MAN_D3_CRTC_EN      (1 << 0)
+#       define RADEON_DISP_PWR_MAN_D3_CRTC2_EN     (1 << 4)
+#       define RADEON_DISP_PWR_MAN_DPMS_ON  (0 << 8)
+#       define RADEON_DISP_PWR_MAN_DPMS_STANDBY    (1 << 8)
+#       define RADEON_DISP_PWR_MAN_DPMS_SUSPEND    (2 << 8)
+#       define RADEON_DISP_PWR_MAN_DPMS_OFF (3 << 8)
+#       define RADEON_DISP_D3_RST           (1 << 16)
+#       define RADEON_DISP_D3_REG_RST       (1 << 17)
+#       define RADEON_DISP_D3_GRPH_RST      (1 << 18)
+#       define RADEON_DISP_D3_SUBPIC_RST    (1 << 19)
+#       define RADEON_DISP_D3_OV0_RST       (1 << 20)
+#       define RADEON_DISP_D1D2_GRPH_RST    (1 << 21)
+#       define RADEON_DISP_D1D2_SUBPIC_RST  (1 << 22)
+#       define RADEON_DISP_D1D2_OV0_RST     (1 << 23)
+#       define RADEON_DIG_TMDS_ENABLE_RST   (1 << 24)
+#       define RADEON_TV_ENABLE_RST         (1 << 25)
+#       define RADEON_AUTO_PWRUP_EN         (1 << 26)
+#define RADEON_TV_DAC_CNTL                  0x088c
+#       define RADEON_TV_DAC_NBLANK         (1 << 0)
+#       define RADEON_TV_DAC_NHOLD          (1 << 1)
+#       define RADEON_TV_DAC_PEDESTAL       (1 <<  2)
+#       define RADEON_TV_MONITOR_DETECT_EN  (1 <<  4)
+#       define RADEON_TV_DAC_CMPOUT         (1 <<  5)
+#       define RADEON_TV_DAC_STD_MASK       (3 <<  8)
+#       define RADEON_TV_DAC_STD_PAL        (0 <<  8)
+#       define RADEON_TV_DAC_STD_NTSC       (1 <<  8)
+#       define RADEON_TV_DAC_STD_PS2        (2 <<  8)
+#       define RADEON_TV_DAC_STD_RS343      (3 <<  8)
+#       define RADEON_TV_DAC_BGSLEEP        (1 <<  6)
+#       define RADEON_TV_DAC_BGADJ_MASK     (0xf <<  16)
+#       define RADEON_TV_DAC_BGADJ_SHIFT    16
+#       define RADEON_TV_DAC_DACADJ_MASK    (0xf <<  20)
+#       define RADEON_TV_DAC_DACADJ_SHIFT   20
+#       define RADEON_TV_DAC_RDACPD         (1 <<  24)
+#       define RADEON_TV_DAC_GDACPD         (1 <<  25)
+#       define RADEON_TV_DAC_BDACPD         (1 <<  26)
+#       define RADEON_TV_DAC_RDACDET        (1 << 29)
+#       define RADEON_TV_DAC_GDACDET        (1 << 30)
+#       define RADEON_TV_DAC_BDACDET        (1 << 31)
+#       define R420_TV_DAC_DACADJ_MASK      (0x1f <<  20)
+#       define R420_TV_DAC_RDACPD           (1 <<  25)
+#       define R420_TV_DAC_GDACPD           (1 <<  26)
+#       define R420_TV_DAC_BDACPD           (1 <<  27)
+#       define R420_TV_DAC_TVENABLE         (1 <<  28)
+#define RADEON_DISP_HW_DEBUG                0x0d14
+#       define RADEON_CRT2_DISP1_SEL        (1 <<  5)
+#define RADEON_DISP_OUTPUT_CNTL             0x0d64
+#       define RADEON_DISP_DAC_SOURCE_MASK  0x03
+#       define RADEON_DISP_DAC2_SOURCE_MASK  0x0c
+#       define RADEON_DISP_DAC_SOURCE_CRTC2 0x01
+#       define RADEON_DISP_DAC_SOURCE_RMX   0x02
+#       define RADEON_DISP_DAC_SOURCE_LTU   0x03
+#       define RADEON_DISP_DAC2_SOURCE_CRTC2 0x04
+#       define RADEON_DISP_TVDAC_SOURCE_MASK  (0x03 << 2)
+#       define RADEON_DISP_TVDAC_SOURCE_CRTC  0x0
+#       define RADEON_DISP_TVDAC_SOURCE_CRTC2 (0x01 << 2)
+#       define RADEON_DISP_TVDAC_SOURCE_RMX   (0x02 << 2)
+#       define RADEON_DISP_TVDAC_SOURCE_LTU   (0x03 << 2)
+#       define RADEON_DISP_TRANS_MATRIX_MASK  (0x03 << 4)
+#       define RADEON_DISP_TRANS_MATRIX_ALPHA_MSB (0x00 << 4)
+#       define RADEON_DISP_TRANS_MATRIX_GRAPHICS  (0x01 << 4)
+#       define RADEON_DISP_TRANS_MATRIX_VIDEO     (0x02 << 4)
+#       define RADEON_DISP_TV_SOURCE_CRTC   (1 << 16) /* crtc1 or crtc2 */
+#       define RADEON_DISP_TV_SOURCE_LTU    (0 << 16) /* linear transform unit */
+#define RADEON_DISP_TV_OUT_CNTL             0x0d6c
+#       define RADEON_DISP_TV_PATH_SRC_CRTC2 (1 << 16)
+#       define RADEON_DISP_TV_PATH_SRC_CRTC1 (0 << 16)
+#define RADEON_DAC_CRC_SIG                  0x02cc
+#define RADEON_DAC_DATA                     0x03c9 /* VGA */
+#define RADEON_DAC_MASK                     0x03c6 /* VGA */
+#define RADEON_DAC_R_INDEX                  0x03c7 /* VGA */
+#define RADEON_DAC_W_INDEX                  0x03c8 /* VGA */
+#define RADEON_DDA_CONFIG                   0x02e0
+#define RADEON_DDA_ON_OFF                   0x02e4
+#define RADEON_DEFAULT_OFFSET               0x16e0
+#define RADEON_DEFAULT_PITCH                0x16e4
+#define RADEON_DEFAULT_SC_BOTTOM_RIGHT      0x16e8
+#       define RADEON_DEFAULT_SC_RIGHT_MAX  (0x1fff <<  0)
+#       define RADEON_DEFAULT_SC_BOTTOM_MAX (0x1fff << 16)
+#define RADEON_DESTINATION_3D_CLR_CMP_VAL   0x1820
+#define RADEON_DESTINATION_3D_CLR_CMP_MSK   0x1824
+#define RADEON_DEVICE_ID                    0x0f02 /* PCI */
+#define RADEON_DISP_MISC_CNTL               0x0d00
+#       define RADEON_SOFT_RESET_GRPH_PP    (1 << 0)
+#define RADEON_DISP_MERGE_CNTL           0x0d60
+#       define RADEON_DISP_ALPHA_MODE_MASK  0x03
+#       define RADEON_DISP_ALPHA_MODE_KEY   0
+#       define RADEON_DISP_ALPHA_MODE_PER_PIXEL 1
+#       define RADEON_DISP_ALPHA_MODE_GLOBAL 2
+#       define RADEON_DISP_RGB_OFFSET_EN    (1 << 8)
+#       define RADEON_DISP_GRPH_ALPHA_MASK  (0xff << 16)
+#       define RADEON_DISP_OV0_ALPHA_MASK   (0xff << 24)
+#      define RADEON_DISP_LIN_TRANS_BYPASS (0x01 << 9)
+#define RADEON_DISP2_MERGE_CNTL                    0x0d68
+#       define RADEON_DISP2_RGB_OFFSET_EN   (1 << 8)
+#define RADEON_DISP_LIN_TRANS_GRPH_A        0x0d80
+#define RADEON_DISP_LIN_TRANS_GRPH_B        0x0d84
+#define RADEON_DISP_LIN_TRANS_GRPH_C        0x0d88
+#define RADEON_DISP_LIN_TRANS_GRPH_D        0x0d8c
+#define RADEON_DISP_LIN_TRANS_GRPH_E        0x0d90
+#define RADEON_DISP_LIN_TRANS_GRPH_F        0x0d98
+#define RADEON_DP_BRUSH_BKGD_CLR            0x1478
+#define RADEON_DP_BRUSH_FRGD_CLR            0x147c
+#define RADEON_DP_CNTL                      0x16c0
+#       define RADEON_DST_X_LEFT_TO_RIGHT   (1 <<  0)
+#       define RADEON_DST_Y_TOP_TO_BOTTOM   (1 <<  1)
+#       define RADEON_DP_DST_TILE_LINEAR    (0 <<  3)
+#       define RADEON_DP_DST_TILE_MACRO     (1 <<  3)
+#       define RADEON_DP_DST_TILE_MICRO     (2 <<  3)
+#       define RADEON_DP_DST_TILE_BOTH      (3 <<  3)
+#define RADEON_DP_CNTL_XDIR_YDIR_YMAJOR     0x16d0
+#       define RADEON_DST_Y_MAJOR             (1 <<  2)
+#       define RADEON_DST_Y_DIR_TOP_TO_BOTTOM (1 << 15)
+#       define RADEON_DST_X_DIR_LEFT_TO_RIGHT (1 << 31)
+#define RADEON_DP_DATATYPE                  0x16c4
+#       define RADEON_HOST_BIG_ENDIAN_EN    (1 << 29)
+#define RADEON_DP_GUI_MASTER_CNTL           0x146c
+#       define RADEON_GMC_SRC_PITCH_OFFSET_CNTL   (1    <<  0)
+#       define RADEON_GMC_DST_PITCH_OFFSET_CNTL   (1    <<  1)
+#       define RADEON_GMC_SRC_CLIPPING            (1    <<  2)
+#       define RADEON_GMC_DST_CLIPPING            (1    <<  3)
+#       define RADEON_GMC_BRUSH_DATATYPE_MASK     (0x0f <<  4)
+#       define RADEON_GMC_BRUSH_8X8_MONO_FG_BG    (0    <<  4)
+#       define RADEON_GMC_BRUSH_8X8_MONO_FG_LA    (1    <<  4)
+#       define RADEON_GMC_BRUSH_1X8_MONO_FG_BG    (4    <<  4)
+#       define RADEON_GMC_BRUSH_1X8_MONO_FG_LA    (5    <<  4)
+#       define RADEON_GMC_BRUSH_32x1_MONO_FG_BG   (6    <<  4)
+#       define RADEON_GMC_BRUSH_32x1_MONO_FG_LA   (7    <<  4)
+#       define RADEON_GMC_BRUSH_32x32_MONO_FG_BG  (8    <<  4)
+#       define RADEON_GMC_BRUSH_32x32_MONO_FG_LA  (9    <<  4)
+#       define RADEON_GMC_BRUSH_8x8_COLOR         (10   <<  4)
+#       define RADEON_GMC_BRUSH_1X8_COLOR         (12   <<  4)
+#       define RADEON_GMC_BRUSH_SOLID_COLOR       (13   <<  4)
+#       define RADEON_GMC_BRUSH_NONE              (15   <<  4)
+#       define RADEON_GMC_DST_8BPP_CI             (2    <<  8)
+#       define RADEON_GMC_DST_15BPP               (3    <<  8)
+#       define RADEON_GMC_DST_16BPP               (4    <<  8)
+#       define RADEON_GMC_DST_24BPP               (5    <<  8)
+#       define RADEON_GMC_DST_32BPP               (6    <<  8)
+#       define RADEON_GMC_DST_8BPP_RGB            (7    <<  8)
+#       define RADEON_GMC_DST_Y8                  (8    <<  8)
+#       define RADEON_GMC_DST_RGB8                (9    <<  8)
+#       define RADEON_GMC_DST_VYUY                (11   <<  8)
+#       define RADEON_GMC_DST_YVYU                (12   <<  8)
+#       define RADEON_GMC_DST_AYUV444             (14   <<  8)
+#       define RADEON_GMC_DST_ARGB4444            (15   <<  8)
+#       define RADEON_GMC_DST_DATATYPE_MASK       (0x0f <<  8)
+#       define RADEON_GMC_DST_DATATYPE_SHIFT      8
+#       define RADEON_GMC_SRC_DATATYPE_MASK       (3    << 12)
+#       define RADEON_GMC_SRC_DATATYPE_MONO_FG_BG (0    << 12)
+#       define RADEON_GMC_SRC_DATATYPE_MONO_FG_LA (1    << 12)
+#       define RADEON_GMC_SRC_DATATYPE_COLOR      (3    << 12)
+#       define RADEON_GMC_BYTE_PIX_ORDER          (1    << 14)
+#       define RADEON_GMC_BYTE_MSB_TO_LSB         (0    << 14)
+#       define RADEON_GMC_BYTE_LSB_TO_MSB         (1    << 14)
+#       define RADEON_GMC_CONVERSION_TEMP         (1    << 15)
+#       define RADEON_GMC_CONVERSION_TEMP_6500    (0    << 15)
+#       define RADEON_GMC_CONVERSION_TEMP_9300    (1    << 15)
+#       define RADEON_GMC_ROP3_MASK               (0xff << 16)
+#       define RADEON_DP_SRC_SOURCE_MASK          (7    << 24)
+#       define RADEON_DP_SRC_SOURCE_MEMORY        (2    << 24)
+#       define RADEON_DP_SRC_SOURCE_HOST_DATA     (3    << 24)
+#       define RADEON_GMC_3D_FCN_EN               (1    << 27)
+#       define RADEON_GMC_CLR_CMP_CNTL_DIS        (1    << 28)
+#       define RADEON_GMC_AUX_CLIP_DIS            (1    << 29)
+#       define RADEON_GMC_WR_MSK_DIS              (1    << 30)
+#       define RADEON_GMC_LD_BRUSH_Y_X            (1    << 31)
+#       define RADEON_ROP3_ZERO             0x00000000
+#       define RADEON_ROP3_DSa              0x00880000
+#       define RADEON_ROP3_SDna             0x00440000
+#       define RADEON_ROP3_S                0x00cc0000
+#       define RADEON_ROP3_DSna             0x00220000
+#       define RADEON_ROP3_D                0x00aa0000
+#       define RADEON_ROP3_DSx              0x00660000
+#       define RADEON_ROP3_DSo              0x00ee0000
+#       define RADEON_ROP3_DSon             0x00110000
+#       define RADEON_ROP3_DSxn             0x00990000
+#       define RADEON_ROP3_Dn               0x00550000
+#       define RADEON_ROP3_SDno             0x00dd0000
+#       define RADEON_ROP3_Sn               0x00330000
+#       define RADEON_ROP3_DSno             0x00bb0000
+#       define RADEON_ROP3_DSan             0x00770000
+#       define RADEON_ROP3_ONE              0x00ff0000
+#       define RADEON_ROP3_DPa              0x00a00000
+#       define RADEON_ROP3_PDna             0x00500000
+#       define RADEON_ROP3_P                0x00f00000
+#       define RADEON_ROP3_DPna             0x000a0000
+#       define RADEON_ROP3_D                0x00aa0000
+#       define RADEON_ROP3_DPx              0x005a0000
+#       define RADEON_ROP3_DPo              0x00fa0000
+#       define RADEON_ROP3_DPon             0x00050000
+#       define RADEON_ROP3_PDxn             0x00a50000
+#       define RADEON_ROP3_PDno             0x00f50000
+#       define RADEON_ROP3_Pn               0x000f0000
+#       define RADEON_ROP3_DPno             0x00af0000
+#       define RADEON_ROP3_DPan             0x005f0000
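DP_GUI_MASTER_CNTL gathers the brush type, destination datatype, source datatype and ROP3 code for the 2D engine into a single word. As one plausible composition (illustrative only, not taken from this patch), a 32 bpp solid rectangle fill would combine roughly the following bits:

	/* Illustrative GMC word for a 32 bpp solid fill: pitch/offset taken
	 * from the dedicated control, solid-colour brush, 32 bpp destination,
	 * colour source datatype, ROP3 "P" (pattern/brush copy), with colour
	 * compare, auxiliary clipping and the write mask disabled. */
	u32 gmc = RADEON_GMC_DST_PITCH_OFFSET_CNTL |
		  RADEON_GMC_BRUSH_SOLID_COLOR |
		  RADEON_GMC_DST_32BPP |
		  RADEON_GMC_SRC_DATATYPE_COLOR |
		  RADEON_ROP3_P |
		  RADEON_GMC_CLR_CMP_CNTL_DIS |
		  RADEON_GMC_AUX_CLIP_DIS |
		  RADEON_GMC_WR_MSK_DIS;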
+#define RADEON_DP_GUI_MASTER_CNTL_C         0x1c84
+#define RADEON_DP_MIX                       0x16c8
+#define RADEON_DP_SRC_BKGD_CLR              0x15dc
+#define RADEON_DP_SRC_FRGD_CLR              0x15d8
+#define RADEON_DP_WRITE_MASK                0x16cc
+#define RADEON_DST_BRES_DEC                 0x1630
+#define RADEON_DST_BRES_ERR                 0x1628
+#define RADEON_DST_BRES_INC                 0x162c
+#define RADEON_DST_BRES_LNTH                0x1634
+#define RADEON_DST_BRES_LNTH_SUB            0x1638
+#define RADEON_DST_HEIGHT                   0x1410
+#define RADEON_DST_HEIGHT_WIDTH             0x143c
+#define RADEON_DST_HEIGHT_WIDTH_8           0x158c
+#define RADEON_DST_HEIGHT_WIDTH_BW          0x15b4
+#define RADEON_DST_HEIGHT_Y                 0x15a0
+#define RADEON_DST_LINE_START               0x1600
+#define RADEON_DST_LINE_END                 0x1604
+#define RADEON_DST_LINE_PATCOUNT            0x1608
+#       define RADEON_BRES_CNTL_SHIFT       8
+#define RADEON_DST_OFFSET                   0x1404
+#define RADEON_DST_PITCH                    0x1408
+#define RADEON_DST_PITCH_OFFSET             0x142c
+#define RADEON_DST_PITCH_OFFSET_C           0x1c80
+#       define RADEON_PITCH_SHIFT           21
+#       define RADEON_DST_TILE_LINEAR       (0 << 30)
+#       define RADEON_DST_TILE_MACRO        (1 << 30)
+#       define RADEON_DST_TILE_MICRO        (2 << 30)
+#       define RADEON_DST_TILE_BOTH         (3 << 30)
+#define RADEON_DST_WIDTH                    0x140c
+#define RADEON_DST_WIDTH_HEIGHT             0x1598
+#define RADEON_DST_WIDTH_X                  0x1588
+#define RADEON_DST_WIDTH_X_INCY             0x159c
+#define RADEON_DST_X                        0x141c
+#define RADEON_DST_X_SUB                    0x15a4
+#define RADEON_DST_X_Y                      0x1594
+#define RADEON_DST_Y                        0x1420
+#define RADEON_DST_Y_SUB                    0x15a8
+#define RADEON_DST_Y_X                      0x1438
+
+#define RADEON_FCP_CNTL                     0x0910
+#      define RADEON_FCP0_SRC_PCICLK             0
+#      define RADEON_FCP0_SRC_PCLK               1
+#      define RADEON_FCP0_SRC_PCLKb              2
+#      define RADEON_FCP0_SRC_HREF               3
+#      define RADEON_FCP0_SRC_GND                4
+#      define RADEON_FCP0_SRC_HREFb              5
+#define RADEON_FLUSH_1                      0x1704
+#define RADEON_FLUSH_2                      0x1708
+#define RADEON_FLUSH_3                      0x170c
+#define RADEON_FLUSH_4                      0x1710
+#define RADEON_FLUSH_5                      0x1714
+#define RADEON_FLUSH_6                      0x1718
+#define RADEON_FLUSH_7                      0x171c
+#define RADEON_FOG_3D_TABLE_START           0x1810
+#define RADEON_FOG_3D_TABLE_END             0x1814
+#define RADEON_FOG_3D_TABLE_DENSITY         0x181c
+#define RADEON_FOG_TABLE_INDEX              0x1a14
+#define RADEON_FOG_TABLE_DATA               0x1a18
+#define RADEON_FP_CRTC_H_TOTAL_DISP         0x0250
+#define RADEON_FP_CRTC_V_TOTAL_DISP         0x0254
+#       define RADEON_FP_CRTC_H_TOTAL_MASK      0x000003ff
+#       define RADEON_FP_CRTC_H_DISP_MASK       0x01ff0000
+#       define RADEON_FP_CRTC_V_TOTAL_MASK      0x00000fff
+#       define RADEON_FP_CRTC_V_DISP_MASK       0x0fff0000
+#       define RADEON_FP_H_SYNC_STRT_CHAR_MASK  0x00001ff8
+#       define RADEON_FP_H_SYNC_WID_MASK        0x003f0000
+#       define RADEON_FP_V_SYNC_STRT_MASK       0x00000fff
+#       define RADEON_FP_V_SYNC_WID_MASK        0x001f0000
+#       define RADEON_FP_CRTC_H_TOTAL_SHIFT     0x00000000
+#       define RADEON_FP_CRTC_H_DISP_SHIFT      0x00000010
+#       define RADEON_FP_CRTC_V_TOTAL_SHIFT     0x00000000
+#       define RADEON_FP_CRTC_V_DISP_SHIFT      0x00000010
+#       define RADEON_FP_H_SYNC_STRT_CHAR_SHIFT 0x00000003
+#       define RADEON_FP_H_SYNC_WID_SHIFT       0x00000010
+#       define RADEON_FP_V_SYNC_STRT_SHIFT      0x00000000
+#       define RADEON_FP_V_SYNC_WID_SHIFT       0x00000010
+#define RADEON_FP_GEN_CNTL                  0x0284
+#       define RADEON_FP_FPON                  (1 <<  0)
+#       define RADEON_FP_BLANK_EN              (1 <<  1)
+#       define RADEON_FP_TMDS_EN               (1 <<  2)
+#       define RADEON_FP_PANEL_FORMAT          (1 <<  3)
+#       define RADEON_FP_EN_TMDS               (1 <<  7)
+#       define RADEON_FP_DETECT_SENSE          (1 <<  8)
+#       define R200_FP_SOURCE_SEL_MASK         (3 <<  10)
+#       define R200_FP_SOURCE_SEL_CRTC1        (0 <<  10)
+#       define R200_FP_SOURCE_SEL_CRTC2        (1 <<  10)
+#       define R200_FP_SOURCE_SEL_RMX          (2 <<  10)
+#       define R200_FP_SOURCE_SEL_TRANS        (3 <<  10)
+#       define RADEON_FP_SEL_CRTC1             (0 << 13)
+#       define RADEON_FP_SEL_CRTC2             (1 << 13)
+#       define RADEON_FP_CRTC_DONT_SHADOW_HPAR (1 << 15)
+#       define RADEON_FP_CRTC_DONT_SHADOW_VPAR (1 << 16)
+#       define RADEON_FP_CRTC_DONT_SHADOW_HEND (1 << 17)
+#       define RADEON_FP_CRTC_USE_SHADOW_VEND  (1 << 18)
+#       define RADEON_FP_RMX_HVSYNC_CONTROL_EN (1 << 20)
+#       define RADEON_FP_DFP_SYNC_SEL          (1 << 21)
+#       define RADEON_FP_CRTC_LOCK_8DOT        (1 << 22)
+#       define RADEON_FP_CRT_SYNC_SEL          (1 << 23)
+#       define RADEON_FP_USE_SHADOW_EN         (1 << 24)
+#       define RADEON_FP_CRT_SYNC_ALT          (1 << 26)
+#define RADEON_FP2_GEN_CNTL                 0x0288
+#       define RADEON_FP2_BLANK_EN             (1 <<  1)
+#       define RADEON_FP2_ON                   (1 <<  2)
+#       define RADEON_FP2_PANEL_FORMAT         (1 <<  3)
+#       define RADEON_FP2_DETECT_SENSE         (1 <<  8)
+#       define R200_FP2_SOURCE_SEL_MASK        (3 << 10)
+#       define R200_FP2_SOURCE_SEL_CRTC1       (0 << 10)
+#       define R200_FP2_SOURCE_SEL_CRTC2       (1 << 10)
+#       define R200_FP2_SOURCE_SEL_RMX         (2 << 10)
+#       define R200_FP2_SOURCE_SEL_TRANS_UNIT  (3 << 10)
+#       define RADEON_FP2_SRC_SEL_MASK         (3 << 13)
+#       define RADEON_FP2_SRC_SEL_CRTC2        (1 << 13)
+#       define RADEON_FP2_FP_POL               (1 << 16)
+#       define RADEON_FP2_LP_POL               (1 << 17)
+#       define RADEON_FP2_SCK_POL              (1 << 18)
+#       define RADEON_FP2_LCD_CNTL_MASK        (7 << 19)
+#       define RADEON_FP2_PAD_FLOP_EN          (1 << 22)
+#       define RADEON_FP2_CRC_EN               (1 << 23)
+#       define RADEON_FP2_CRC_READ_EN          (1 << 24)
+#       define RADEON_FP2_DVO_EN               (1 << 25)
+#       define RADEON_FP2_DVO_RATE_SEL_SDR     (1 << 26)
+#       define R200_FP2_DVO_RATE_SEL_SDR       (1 << 27)
+#       define R300_FP2_DVO_CLOCK_MODE_SINGLE  (1 << 28)
+#       define R300_FP2_DVO_DUAL_CHANNEL_EN    (1 << 29)
+#define RADEON_FP_H_SYNC_STRT_WID           0x02c4
+#define RADEON_FP_H2_SYNC_STRT_WID          0x03c4
+#define RADEON_FP_HORZ_STRETCH              0x028c
+#define RADEON_FP_HORZ2_STRETCH             0x038c
+#       define RADEON_HORZ_STRETCH_RATIO_MASK 0xffff
+#       define RADEON_HORZ_STRETCH_RATIO_MAX  4096
+#       define RADEON_HORZ_PANEL_SIZE         (0x1ff   << 16)
+#       define RADEON_HORZ_PANEL_SHIFT        16
+#       define RADEON_HORZ_STRETCH_PIXREP     (0      << 25)
+#       define RADEON_HORZ_STRETCH_BLEND      (1      << 26)
+#       define RADEON_HORZ_STRETCH_ENABLE     (1      << 25)
+#       define RADEON_HORZ_AUTO_RATIO         (1      << 27)
+#       define RADEON_HORZ_FP_LOOP_STRETCH    (0x7    << 28)
+#       define RADEON_HORZ_AUTO_RATIO_INC     (1      << 31)
+#define RADEON_FP_HORZ_VERT_ACTIVE          0x0278
+#define RADEON_FP_V_SYNC_STRT_WID           0x02c8
+#define RADEON_FP_VERT_STRETCH              0x0290
+#define RADEON_FP_V2_SYNC_STRT_WID          0x03c8
+#define RADEON_FP_VERT2_STRETCH             0x0390
+#       define RADEON_VERT_PANEL_SIZE          (0xfff << 12)
+#       define RADEON_VERT_PANEL_SHIFT         12
+#       define RADEON_VERT_STRETCH_RATIO_MASK  0xfff
+#       define RADEON_VERT_STRETCH_RATIO_SHIFT 0
+#       define RADEON_VERT_STRETCH_RATIO_MAX   4096
+#       define RADEON_VERT_STRETCH_ENABLE      (1     << 25)
+#       define RADEON_VERT_STRETCH_LINEREP     (0     << 26)
+#       define RADEON_VERT_STRETCH_BLEND       (1     << 26)
+#       define RADEON_VERT_AUTO_RATIO_EN       (1     << 27)
+#      define RADEON_VERT_AUTO_RATIO_INC      (1     << 31)
+#       define RADEON_VERT_STRETCH_RESERVED    0x71000000
+#define RS400_FP_2ND_GEN_CNTL               0x0384
+#       define RS400_FP_2ND_ON              (1 << 0)
+#       define RS400_FP_2ND_BLANK_EN        (1 << 1)
+#       define RS400_TMDS_2ND_EN            (1 << 2)
+#       define RS400_PANEL_FORMAT_2ND       (1 << 3)
+#       define RS400_FP_2ND_EN_TMDS         (1 << 7)
+#       define RS400_FP_2ND_DETECT_SENSE    (1 << 8)
+#       define RS400_FP_2ND_SOURCE_SEL_MASK        (3 << 10)
+#       define RS400_FP_2ND_SOURCE_SEL_CRTC1       (0 << 10)
+#       define RS400_FP_2ND_SOURCE_SEL_CRTC2       (1 << 10)
+#       define RS400_FP_2ND_SOURCE_SEL_RMX         (2 << 10)
+#       define RS400_FP_2ND_DETECT_EN       (1 << 12)
+#       define RS400_HPD_2ND_SEL            (1 << 13)
+#define RS400_FP2_2_GEN_CNTL                0x0388
+#       define RS400_FP2_2_BLANK_EN         (1 << 1)
+#       define RS400_FP2_2_ON               (1 << 2)
+#       define RS400_FP2_2_PANEL_FORMAT     (1 << 3)
+#       define RS400_FP2_2_DETECT_SENSE     (1 << 8)
+#       define RS400_FP2_2_SOURCE_SEL_MASK        (3 << 10)
+#       define RS400_FP2_2_SOURCE_SEL_CRTC1       (0 << 10)
+#       define RS400_FP2_2_SOURCE_SEL_CRTC2       (1 << 10)
+#       define RS400_FP2_2_SOURCE_SEL_RMX         (2 << 10)
+#       define RS400_FP2_2_DVO2_EN          (1 << 25)
+#define RS400_TMDS2_CNTL                    0x0394
+#define RS400_TMDS2_TRANSMITTER_CNTL        0x03a4
+#       define RS400_TMDS2_PLLEN            (1 << 0)
+#       define RS400_TMDS2_PLLRST           (1 << 1)
+
+#define RADEON_GEN_INT_CNTL                 0x0040
+#      define RADEON_SW_INT_ENABLE             (1 << 25)
+#define RADEON_GEN_INT_STATUS               0x0044
+#       define RADEON_VSYNC_INT_AK          (1 <<  2)
+#       define RADEON_VSYNC_INT             (1 <<  2)
+#       define RADEON_VSYNC2_INT_AK         (1 <<  6)
+#       define RADEON_VSYNC2_INT            (1 <<  6)
+#      define RADEON_SW_INT_FIRE               (1 << 26)
+#      define RADEON_SW_INT_TEST               (1 << 25)
+#      define RADEON_SW_INT_TEST_ACK           (1 << 25)
+#define RADEON_GENENB                       0x03c3 /* VGA */
+#define RADEON_GENFC_RD                     0x03ca /* VGA */
+#define RADEON_GENFC_WT                     0x03da /* VGA, 0x03ba */
+#define RADEON_GENMO_RD                     0x03cc /* VGA */
+#define RADEON_GENMO_WT                     0x03c2 /* VGA */
+#define RADEON_GENS0                        0x03c2 /* VGA */
+#define RADEON_GENS1                        0x03da /* VGA, 0x03ba */
+#define RADEON_GPIO_MONID                   0x0068 /* DDC interface via I2C */ /* DDC3 */
+#define RADEON_GPIO_MONIDB                  0x006c
+#define RADEON_GPIO_CRT2_DDC                0x006c
+#define RADEON_GPIO_DVI_DDC                 0x0064 /* DDC2 */
+#define RADEON_GPIO_VGA_DDC                 0x0060 /* DDC1 */
+#       define RADEON_GPIO_A_0              (1 <<  0)
+#       define RADEON_GPIO_A_1              (1 <<  1)
+#       define RADEON_GPIO_Y_0              (1 <<  8)
+#       define RADEON_GPIO_Y_1              (1 <<  9)
+#       define RADEON_GPIO_Y_SHIFT_0        8
+#       define RADEON_GPIO_Y_SHIFT_1        9
+#       define RADEON_GPIO_EN_0             (1 << 16)
+#       define RADEON_GPIO_EN_1             (1 << 17)
+#       define RADEON_GPIO_MASK_0           (1 << 24) /*??*/
+#       define RADEON_GPIO_MASK_1           (1 << 25) /*??*/
+#define RADEON_GRPH8_DATA                   0x03cf /* VGA */
+#define RADEON_GRPH8_IDX                    0x03ce /* VGA */
+#define RADEON_GUI_SCRATCH_REG0             0x15e0
+#define RADEON_GUI_SCRATCH_REG1             0x15e4
+#define RADEON_GUI_SCRATCH_REG2             0x15e8
+#define RADEON_GUI_SCRATCH_REG3             0x15ec
+#define RADEON_GUI_SCRATCH_REG4             0x15f0
+#define RADEON_GUI_SCRATCH_REG5             0x15f4
+
+#define RADEON_HEADER                       0x0f0e /* PCI */
+#define RADEON_HOST_DATA0                   0x17c0
+#define RADEON_HOST_DATA1                   0x17c4
+#define RADEON_HOST_DATA2                   0x17c8
+#define RADEON_HOST_DATA3                   0x17cc
+#define RADEON_HOST_DATA4                   0x17d0
+#define RADEON_HOST_DATA5                   0x17d4
+#define RADEON_HOST_DATA6                   0x17d8
+#define RADEON_HOST_DATA7                   0x17dc
+#define RADEON_HOST_DATA_LAST               0x17e0
+#define RADEON_HOST_PATH_CNTL               0x0130
+#      define RADEON_HP_LIN_RD_CACHE_DIS   (1 << 24)
+#      define RADEON_HDP_READ_BUFFER_INVALIDATE   (1 << 27)
+#       define RADEON_HDP_SOFT_RESET        (1 << 26)
+#       define RADEON_HDP_APER_CNTL         (1 << 23)
+#define RADEON_HTOTAL_CNTL                  0x0009 /* PLL */
+#       define RADEON_HTOT_CNTL_VGA_EN      (1 << 28)
+#define RADEON_HTOTAL2_CNTL                 0x002e /* PLL */
+
+       /* Multimedia I2C bus */
+#define RADEON_I2C_CNTL_0                  0x0090
+#define RADEON_I2C_DONE (1<<0)
+#define RADEON_I2C_NACK (1<<1)
+#define RADEON_I2C_HALT (1<<2)
+#define RADEON_I2C_SOFT_RST (1<<5)
+#define RADEON_I2C_DRIVE_EN (1<<6)
+#define RADEON_I2C_DRIVE_SEL (1<<7)
+#define RADEON_I2C_START (1<<8)
+#define RADEON_I2C_STOP (1<<9)
+#define RADEON_I2C_RECEIVE (1<<10)
+#define RADEON_I2C_ABORT (1<<11)
+#define RADEON_I2C_GO (1<<12)
+#define RADEON_I2C_CNTL_1                   0x0094
+#define RADEON_I2C_SEL         (1<<16)
+#define RADEON_I2C_EN          (1<<17)
+#define RADEON_I2C_DATA                            0x0098
+
+#define RADEON_DVI_I2C_CNTL_0              0x02e0
+#       define R200_DVI_I2C_PIN_SEL(x)      ((x) << 3)
+#       define R200_SEL_DDC1                0 /* 0x60 - VGA_DDC */
+#       define R200_SEL_DDC2                1 /* 0x64 - DVI_DDC */
+#       define R200_SEL_DDC3                2 /* 0x68 - MONID_DDC */
+#define RADEON_DVI_I2C_CNTL_1               0x02e4 /* ? */
+#define RADEON_DVI_I2C_DATA                0x02e8
+
+#define RADEON_INTERRUPT_LINE               0x0f3c /* PCI */
+#define RADEON_INTERRUPT_PIN                0x0f3d /* PCI */
+#define RADEON_IO_BASE                      0x0f14 /* PCI */
+
+#define RADEON_LATENCY                      0x0f0d /* PCI */
+#define RADEON_LEAD_BRES_DEC                0x1608
+#define RADEON_LEAD_BRES_LNTH               0x161c
+#define RADEON_LEAD_BRES_LNTH_SUB           0x1624
+#define RADEON_LVDS_GEN_CNTL                0x02d0
+#       define RADEON_LVDS_ON               (1   <<  0)
+#       define RADEON_LVDS_DISPLAY_DIS      (1   <<  1)
+#       define RADEON_LVDS_PANEL_TYPE       (1   <<  2)
+#       define RADEON_LVDS_PANEL_FORMAT     (1   <<  3)
+#       define RADEON_LVDS_NO_FM            (0   <<  4)
+#       define RADEON_LVDS_2_GREY           (1   <<  4)
+#       define RADEON_LVDS_4_GREY           (2   <<  4)
+#       define RADEON_LVDS_RST_FM           (1   <<  6)
+#       define RADEON_LVDS_EN               (1   <<  7)
+#       define RADEON_LVDS_BL_MOD_LEVEL_SHIFT 8
+#       define RADEON_LVDS_BL_MOD_LEVEL_MASK (0xff << 8)
+#       define RADEON_LVDS_BL_MOD_EN        (1   << 16)
+#       define RADEON_LVDS_BL_CLK_SEL       (1   << 17)
+#       define RADEON_LVDS_DIGON            (1   << 18)
+#       define RADEON_LVDS_BLON             (1   << 19)
+#       define RADEON_LVDS_FP_POL_LOW       (1   << 20)
+#       define RADEON_LVDS_LP_POL_LOW       (1   << 21)
+#       define RADEON_LVDS_DTM_POL_LOW      (1   << 22)
+#       define RADEON_LVDS_SEL_CRTC2        (1   << 23)
+#       define RADEON_LVDS_FPDI_EN          (1   << 27)
+#       define RADEON_LVDS_HSYNC_DELAY_SHIFT        28
+#define RADEON_LVDS_PLL_CNTL                0x02d4
+#       define RADEON_HSYNC_DELAY_SHIFT     28
+#       define RADEON_HSYNC_DELAY_MASK      (0xf << 28)
+#       define RADEON_LVDS_PLL_EN           (1   << 16)
+#       define RADEON_LVDS_PLL_RESET        (1   << 17)
+#       define R300_LVDS_SRC_SEL_MASK       (3   << 18)
+#       define R300_LVDS_SRC_SEL_CRTC1      (0   << 18)
+#       define R300_LVDS_SRC_SEL_CRTC2      (1   << 18)
+#       define R300_LVDS_SRC_SEL_RMX        (2   << 18)
+#define RADEON_LVDS_SS_GEN_CNTL             0x02ec
+#       define RADEON_LVDS_PWRSEQ_DELAY1_SHIFT     16
+#       define RADEON_LVDS_PWRSEQ_DELAY2_SHIFT     20
+
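A minimal sketch of the usual LVDS power-up write implied by the bits above (digital on, backlight on, output enabled, blanking cleared). The panel_pwr_delay_ms parameter is hypothetical; a real driver takes the sequencing delays from the BIOS panel info rather than hard-coding them.

#include <stdint.h>
#include <unistd.h>                     /* usleep(), standing in for the kernel's delays */

extern uint32_t RREG32(uint32_t reg);   /* hypothetical MMIO helpers, see GPIO sketch */
extern void     WREG32(uint32_t reg, uint32_t val);

static void lvds_panel_on(unsigned panel_pwr_delay_ms)
{
        uint32_t v = RREG32(RADEON_LVDS_GEN_CNTL);
        v |= RADEON_LVDS_ON | RADEON_LVDS_EN | RADEON_LVDS_DIGON | RADEON_LVDS_BLON;
        v &= ~RADEON_LVDS_DISPLAY_DIS;                  /* un-blank the panel */
        usleep(panel_pwr_delay_ms * 1000);              /* let the panel rails settle */
        WREG32(RADEON_LVDS_GEN_CNTL, v);
}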
+#define RADEON_MAX_LATENCY                  0x0f3f /* PCI */
+#define RADEON_DISPLAY_BASE_ADDR            0x23c
+#define RADEON_DISPLAY2_BASE_ADDR           0x33c
+#define RADEON_OV0_BASE_ADDR                0x43c
+#define RADEON_NB_TOM                       0x15c
+#define R300_MC_INIT_MISC_LAT_TIMER         0x180
+#       define R300_MC_DISP0R_INIT_LAT_SHIFT 8
+#       define R300_MC_DISP0R_INIT_LAT_MASK  0xf
+#       define R300_MC_DISP1R_INIT_LAT_SHIFT 12
+#       define R300_MC_DISP1R_INIT_LAT_MASK  0xf
+#define RADEON_MCLK_CNTL                    0x0012 /* PLL */
+#       define RADEON_MCLKA_SRC_SEL_MASK    0x7
+#       define RADEON_FORCEON_MCLKA         (1 << 16)
+#       define RADEON_FORCEON_MCLKB         (1 << 17)
+#       define RADEON_FORCEON_YCLKA         (1 << 18)
+#       define RADEON_FORCEON_YCLKB         (1 << 19)
+#       define RADEON_FORCEON_MC            (1 << 20)
+#       define RADEON_FORCEON_AIC           (1 << 21)
+#       define R300_DISABLE_MC_MCLKA        (1 << 21)
+#       define R300_DISABLE_MC_MCLKB        (1 << 21)
+#define RADEON_MCLK_MISC                    0x001f /* PLL */
+#       define RADEON_MC_MCLK_MAX_DYN_STOP_LAT (1 << 12)
+#       define RADEON_IO_MCLK_MAX_DYN_STOP_LAT (1 << 13)
+#       define RADEON_MC_MCLK_DYN_ENABLE    (1 << 14)
+#       define RADEON_IO_MCLK_DYN_ENABLE    (1 << 15)
+#define RADEON_LCD_GPIO_MASK                0x01a0
+#define RADEON_GPIOPAD_EN                   0x01a0
+#define RADEON_LCD_GPIO_Y_REG               0x01a4
+#define RADEON_MDGPIO_A_REG                 0x01ac
+#define RADEON_MDGPIO_EN_REG                0x01b0
+#define RADEON_MDGPIO_MASK                  0x0198
+#define RADEON_GPIOPAD_MASK                 0x0198
+#define RADEON_GPIOPAD_A                   0x019c
+#define RADEON_MDGPIO_Y_REG                 0x01b4
+#define RADEON_MEM_ADDR_CONFIG              0x0148
+#define RADEON_MEM_BASE                     0x0f10 /* PCI */
+#define RADEON_MEM_CNTL                     0x0140
+#       define RADEON_MEM_NUM_CHANNELS_MASK 0x01
+#       define RADEON_MEM_USE_B_CH_ONLY     (1 <<  1)
+#       define RV100_HALF_MODE              (1 <<  3)
+#       define R300_MEM_NUM_CHANNELS_MASK   0x03
+#       define R300_MEM_USE_CD_CH_ONLY      (1 <<  2)
+#define RADEON_MEM_TIMING_CNTL              0x0144 /* EXT_MEM_CNTL */
+#define RADEON_MEM_INIT_LAT_TIMER           0x0154
+#define RADEON_MEM_INTF_CNTL                0x014c
+#define RADEON_MEM_SDRAM_MODE_REG           0x0158
+#       define RADEON_SDRAM_MODE_MASK       0xffff0000
+#       define RADEON_B3MEM_RESET_MASK      0x6fffffff
+#       define RADEON_MEM_CFG_TYPE_DDR      (1 << 30)
+#define RADEON_MEM_STR_CNTL                 0x0150
+#       define RADEON_MEM_PWRUP_COMPL_A     (1 <<  0)
+#       define RADEON_MEM_PWRUP_COMPL_B     (1 <<  1)
+#       define R300_MEM_PWRUP_COMPL_C       (1 <<  2)
+#       define R300_MEM_PWRUP_COMPL_D       (1 <<  3)
+#       define RADEON_MEM_PWRUP_COMPLETE    0x03
+#       define R300_MEM_PWRUP_COMPLETE      0x0f
+#define RADEON_MC_STATUS                    0x0150
+#       define RADEON_MC_IDLE               (1 << 2)
+#       define R300_MC_IDLE                 (1 << 4)
+#define RADEON_MEM_VGA_RP_SEL               0x003c
+#define RADEON_MEM_VGA_WP_SEL               0x0038
+#define RADEON_MIN_GRANT                    0x0f3e /* PCI */
+#define RADEON_MM_DATA                      0x0004
+#define RADEON_MM_INDEX                     0x0000
+#      define RADEON_MM_APER           (1 << 31)
+#define RADEON_MPLL_CNTL                    0x000e /* PLL */
+#define RADEON_MPP_TB_CONFIG                0x01c0 /* ? */
+#define RADEON_MPP_GP_CONFIG                0x01c8 /* ? */
+#define RADEON_SEPROM_CNTL1                 0x01c0
+#       define RADEON_SCK_PRESCALE_SHIFT    24
+#       define RADEON_SCK_PRESCALE_MASK     (0xff << 24)
+#define R300_MC_IND_INDEX                   0x01f8
+#       define R300_MC_IND_ADDR_MASK        0x3f
+#       define R300_MC_IND_WR_EN            (1 << 8)
+#define R300_MC_IND_DATA                    0x01fc
+#define R300_MC_READ_CNTL_AB                0x017c
+#       define R300_MEM_RBS_POSITION_A_MASK 0x03
+#define R300_MC_READ_CNTL_CD_mcind         0x24
+#       define R300_MEM_RBS_POSITION_C_MASK 0x03
+
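R300_MC_IND_INDEX/R300_MC_IND_DATA above give indirect access to memory-controller registers such as R300_MC_READ_CNTL_CD_mcind (indirect address 0x24). A hedged sketch of the access pattern, without the locking a real driver needs around the shared index register:

#include <stdint.h>

extern uint32_t RREG32(uint32_t reg);   /* hypothetical MMIO helpers, see GPIO sketch */
extern void     WREG32(uint32_t reg, uint32_t val);

static uint32_t r300_mc_ind_read(uint32_t addr)
{
        WREG32(R300_MC_IND_INDEX, addr & R300_MC_IND_ADDR_MASK);
        return RREG32(R300_MC_IND_DATA);
}

static void r300_mc_ind_write(uint32_t addr, uint32_t val)
{
        WREG32(R300_MC_IND_INDEX, (addr & R300_MC_IND_ADDR_MASK) | R300_MC_IND_WR_EN);
        WREG32(R300_MC_IND_DATA, val);
        WREG32(R300_MC_IND_INDEX, 0);   /* drop the write-enable when done */
}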
+#define RADEON_N_VIF_COUNT                  0x0248
+
+#define RADEON_OV0_AUTO_FLIP_CNTL           0x0470
+#       define  RADEON_OV0_AUTO_FLIP_CNTL_SOFT_BUF_NUM        0x00000007
+#       define  RADEON_OV0_AUTO_FLIP_CNTL_SOFT_REPEAT_FIELD   0x00000008
+#       define  RADEON_OV0_AUTO_FLIP_CNTL_SOFT_BUF_ODD        0x00000010
+#       define  RADEON_OV0_AUTO_FLIP_CNTL_IGNORE_REPEAT_FIELD 0x00000020
+#       define  RADEON_OV0_AUTO_FLIP_CNTL_SOFT_EOF_TOGGLE     0x00000040
+#       define  RADEON_OV0_AUTO_FLIP_CNTL_VID_PORT_SELECT     0x00000300
+#       define  RADEON_OV0_AUTO_FLIP_CNTL_P1_FIRST_LINE_EVEN  0x00010000
+#       define  RADEON_OV0_AUTO_FLIP_CNTL_SHIFT_EVEN_DOWN     0x00040000
+#       define  RADEON_OV0_AUTO_FLIP_CNTL_SHIFT_ODD_DOWN      0x00080000
+#       define  RADEON_OV0_AUTO_FLIP_CNTL_FIELD_POL_SOURCE    0x00800000
+
+#define RADEON_OV0_COLOUR_CNTL              0x04E0
+#define RADEON_OV0_DEINTERLACE_PATTERN      0x0474
+#define RADEON_OV0_EXCLUSIVE_HORZ           0x0408
+#       define  RADEON_EXCL_HORZ_START_MASK        0x000000ff
+#       define  RADEON_EXCL_HORZ_END_MASK          0x0000ff00
+#       define  RADEON_EXCL_HORZ_BACK_PORCH_MASK   0x00ff0000
+#       define  RADEON_EXCL_HORZ_EXCLUSIVE_EN      0x80000000
+#define RADEON_OV0_EXCLUSIVE_VERT           0x040C
+#       define  RADEON_EXCL_VERT_START_MASK        0x000003ff
+#       define  RADEON_EXCL_VERT_END_MASK          0x03ff0000
+#define RADEON_OV0_FILTER_CNTL              0x04A0
+#       define RADEON_FILTER_PROGRAMMABLE_COEF            0x0
+#       define RADEON_FILTER_HC_COEF_HORZ_Y               0x1
+#       define RADEON_FILTER_HC_COEF_HORZ_UV              0x2
+#       define RADEON_FILTER_HC_COEF_VERT_Y               0x4
+#       define RADEON_FILTER_HC_COEF_VERT_UV              0x8
+#       define RADEON_FILTER_HARDCODED_COEF               0xf
+#       define RADEON_FILTER_COEF_MASK                    0xf
+
+#define RADEON_OV0_FOUR_TAP_COEF_0          0x04B0
+#define RADEON_OV0_FOUR_TAP_COEF_1          0x04B4
+#define RADEON_OV0_FOUR_TAP_COEF_2          0x04B8
+#define RADEON_OV0_FOUR_TAP_COEF_3          0x04BC
+#define RADEON_OV0_FOUR_TAP_COEF_4          0x04C0
+#define RADEON_OV0_FLAG_CNTL                0x04DC
+#define RADEON_OV0_GAMMA_000_00F            0x0d40
+#define RADEON_OV0_GAMMA_010_01F            0x0d44
+#define RADEON_OV0_GAMMA_020_03F            0x0d48
+#define RADEON_OV0_GAMMA_040_07F            0x0d4c
+#define RADEON_OV0_GAMMA_080_0BF            0x0e00
+#define RADEON_OV0_GAMMA_0C0_0FF            0x0e04
+#define RADEON_OV0_GAMMA_100_13F            0x0e08
+#define RADEON_OV0_GAMMA_140_17F            0x0e0c
+#define RADEON_OV0_GAMMA_180_1BF            0x0e10
+#define RADEON_OV0_GAMMA_1C0_1FF            0x0e14
+#define RADEON_OV0_GAMMA_200_23F            0x0e18
+#define RADEON_OV0_GAMMA_240_27F            0x0e1c
+#define RADEON_OV0_GAMMA_280_2BF            0x0e20
+#define RADEON_OV0_GAMMA_2C0_2FF            0x0e24
+#define RADEON_OV0_GAMMA_300_33F            0x0e28
+#define RADEON_OV0_GAMMA_340_37F            0x0e2c
+#define RADEON_OV0_GAMMA_380_3BF            0x0d50
+#define RADEON_OV0_GAMMA_3C0_3FF            0x0d54
+#define RADEON_OV0_GRAPHICS_KEY_CLR_LOW     0x04EC
+#define RADEON_OV0_GRAPHICS_KEY_CLR_HIGH    0x04F0
+#define RADEON_OV0_H_INC                    0x0480
+#define RADEON_OV0_KEY_CNTL                 0x04F4
+#       define  RADEON_VIDEO_KEY_FN_MASK    0x00000003L
+#       define  RADEON_VIDEO_KEY_FN_FALSE   0x00000000L
+#       define  RADEON_VIDEO_KEY_FN_TRUE    0x00000001L
+#       define  RADEON_VIDEO_KEY_FN_EQ      0x00000002L
+#       define  RADEON_VIDEO_KEY_FN_NE      0x00000003L
+#       define  RADEON_GRAPHIC_KEY_FN_MASK  0x00000030L
+#       define  RADEON_GRAPHIC_KEY_FN_FALSE 0x00000000L
+#       define  RADEON_GRAPHIC_KEY_FN_TRUE  0x00000010L
+#       define  RADEON_GRAPHIC_KEY_FN_EQ    0x00000020L
+#       define  RADEON_GRAPHIC_KEY_FN_NE    0x00000030L
+#       define  RADEON_CMP_MIX_MASK         0x00000100L
+#       define  RADEON_CMP_MIX_OR           0x00000000L
+#       define  RADEON_CMP_MIX_AND          0x00000100L
+#define RADEON_OV0_LIN_TRANS_A              0x0d20
+#define RADEON_OV0_LIN_TRANS_B              0x0d24
+#define RADEON_OV0_LIN_TRANS_C              0x0d28
+#define RADEON_OV0_LIN_TRANS_D              0x0d2c
+#define RADEON_OV0_LIN_TRANS_E              0x0d30
+#define RADEON_OV0_LIN_TRANS_F              0x0d34
+#define RADEON_OV0_P1_BLANK_LINES_AT_TOP    0x0430
+#       define  RADEON_P1_BLNK_LN_AT_TOP_M1_MASK   0x00000fffL
+#       define  RADEON_P1_ACTIVE_LINES_M1          0x0fff0000L
+#define RADEON_OV0_P1_H_ACCUM_INIT          0x0488
+#define RADEON_OV0_P1_V_ACCUM_INIT          0x0428
+#       define  RADEON_OV0_P1_MAX_LN_IN_PER_LN_OUT 0x00000003L
+#       define  RADEON_OV0_P1_V_ACCUM_INIT_MASK    0x01ff8000L
+#define RADEON_OV0_P1_X_START_END           0x0494
+#define RADEON_OV0_P2_X_START_END           0x0498
+#define RADEON_OV0_P23_BLANK_LINES_AT_TOP   0x0434
+#       define  RADEON_P23_BLNK_LN_AT_TOP_M1_MASK  0x000007ffL
+#       define  RADEON_P23_ACTIVE_LINES_M1         0x07ff0000L
+#define RADEON_OV0_P23_H_ACCUM_INIT         0x048C
+#define RADEON_OV0_P23_V_ACCUM_INIT         0x042C
+#define RADEON_OV0_P3_X_START_END           0x049C
+#define RADEON_OV0_REG_LOAD_CNTL            0x0410
+#       define  RADEON_REG_LD_CTL_LOCK                 0x00000001L
+#       define  RADEON_REG_LD_CTL_VBLANK_DURING_LOCK   0x00000002L
+#       define  RADEON_REG_LD_CTL_STALL_GUI_UNTIL_FLIP 0x00000004L
+#       define  RADEON_REG_LD_CTL_LOCK_READBACK        0x00000008L
+#       define  RADEON_REG_LD_CTL_FLIP_READBACK        0x00000010L
+#define RADEON_OV0_SCALE_CNTL               0x0420
+#       define  RADEON_SCALER_HORZ_PICK_NEAREST    0x00000004L
+#       define  RADEON_SCALER_VERT_PICK_NEAREST    0x00000008L
+#       define  RADEON_SCALER_SIGNED_UV            0x00000010L
+#       define  RADEON_SCALER_GAMMA_SEL_MASK       0x00000060L
+#       define  RADEON_SCALER_GAMMA_SEL_BRIGHT     0x00000000L
+#       define  RADEON_SCALER_GAMMA_SEL_G22        0x00000020L
+#       define  RADEON_SCALER_GAMMA_SEL_G18        0x00000040L
+#       define  RADEON_SCALER_GAMMA_SEL_G14        0x00000060L
+#       define  RADEON_SCALER_COMCORE_SHIFT_UP_ONE 0x00000080L
+#       define  RADEON_SCALER_SURFAC_FORMAT        0x00000f00L
+#       define  RADEON_SCALER_SOURCE_15BPP         0x00000300L
+#       define  RADEON_SCALER_SOURCE_16BPP         0x00000400L
+#       define  RADEON_SCALER_SOURCE_32BPP         0x00000600L
+#       define  RADEON_SCALER_SOURCE_YUV9          0x00000900L
+#       define  RADEON_SCALER_SOURCE_YUV12         0x00000A00L
+#       define  RADEON_SCALER_SOURCE_VYUY422       0x00000B00L
+#       define  RADEON_SCALER_SOURCE_YVYU422       0x00000C00L
+#       define  RADEON_SCALER_ADAPTIVE_DEINT       0x00001000L
+#       define  RADEON_SCALER_TEMPORAL_DEINT       0x00002000L
+#       define  RADEON_SCALER_CRTC_SEL             0x00004000L
+#       define  RADEON_SCALER_SMART_SWITCH         0x00008000L
+#       define  RADEON_SCALER_BURST_PER_PLANE      0x007F0000L
+#       define  RADEON_SCALER_DOUBLE_BUFFER        0x01000000L
+#       define  RADEON_SCALER_DIS_LIMIT            0x08000000L
+#       define  RADEON_SCALER_LIN_TRANS_BYPASS     0x10000000L
+#       define  RADEON_SCALER_INT_EMU              0x20000000L
+#       define  RADEON_SCALER_ENABLE               0x40000000L
+#       define  RADEON_SCALER_SOFT_RESET           0x80000000L
+#define RADEON_OV0_STEP_BY                  0x0484
+#define RADEON_OV0_TEST                     0x04F8
+#define RADEON_OV0_V_INC                    0x0424
+#define RADEON_OV0_VID_BUF_PITCH0_VALUE     0x0460
+#define RADEON_OV0_VID_BUF_PITCH1_VALUE     0x0464
+#define RADEON_OV0_VID_BUF0_BASE_ADRS       0x0440
+#       define  RADEON_VIF_BUF0_PITCH_SEL          0x00000001L
+#       define  RADEON_VIF_BUF0_TILE_ADRS          0x00000002L
+#       define  RADEON_VIF_BUF0_BASE_ADRS_MASK     0x03fffff0L
+#       define  RADEON_VIF_BUF0_1ST_LINE_LSBS_MASK 0x48000000L
+#define RADEON_OV0_VID_BUF1_BASE_ADRS       0x0444
+#       define  RADEON_VIF_BUF1_PITCH_SEL          0x00000001L
+#       define  RADEON_VIF_BUF1_TILE_ADRS          0x00000002L
+#       define  RADEON_VIF_BUF1_BASE_ADRS_MASK     0x03fffff0L
+#       define  RADEON_VIF_BUF1_1ST_LINE_LSBS_MASK 0x48000000L
+#define RADEON_OV0_VID_BUF2_BASE_ADRS       0x0448
+#       define  RADEON_VIF_BUF2_PITCH_SEL          0x00000001L
+#       define  RADEON_VIF_BUF2_TILE_ADRS          0x00000002L
+#       define  RADEON_VIF_BUF2_BASE_ADRS_MASK     0x03fffff0L
+#       define  RADEON_VIF_BUF2_1ST_LINE_LSBS_MASK 0x48000000L
+#define RADEON_OV0_VID_BUF3_BASE_ADRS       0x044C
+#define RADEON_OV0_VID_BUF4_BASE_ADRS       0x0450
+#define RADEON_OV0_VID_BUF5_BASE_ADRS       0x0454
+#define RADEON_OV0_VIDEO_KEY_CLR_HIGH       0x04E8
+#define RADEON_OV0_VIDEO_KEY_CLR_LOW        0x04E4
+#define RADEON_OV0_Y_X_START                0x0400
+#define RADEON_OV0_Y_X_END                  0x0404
+#define RADEON_OV1_Y_X_START                0x0600
+#define RADEON_OV1_Y_X_END                  0x0604
+#define RADEON_OVR_CLR                      0x0230
+#define RADEON_OVR_WID_LEFT_RIGHT           0x0234
+#define RADEON_OVR_WID_TOP_BOTTOM           0x0238
+
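Most OV0 overlay registers are double-buffered; updates are bracketed by the lock handshake in RADEON_OV0_REG_LOAD_CNTL so the scaler latches a consistent set of values. A minimal sketch (the timeout count is arbitrary):

#include <stdint.h>

extern uint32_t RREG32(uint32_t reg);   /* hypothetical MMIO helpers, see GPIO sketch */
extern void     WREG32(uint32_t reg, uint32_t val);

static int ov0_begin_update(void)
{
        int i;
        WREG32(RADEON_OV0_REG_LOAD_CNTL, RADEON_REG_LD_CTL_LOCK);
        for (i = 0; i < 10000; i++)
                if (RREG32(RADEON_OV0_REG_LOAD_CNTL) & RADEON_REG_LD_CTL_LOCK_READBACK)
                        return 0;       /* safe to program the double-buffered OV0 registers */
        return -1;                      /* hardware never acknowledged the lock */
}

static void ov0_end_update(void)
{
        WREG32(RADEON_OV0_REG_LOAD_CNTL, 0);    /* release: the new settings take effect together */
}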
+/* first capture unit */
+
+#define RADEON_CAP0_BUF0_OFFSET           0x0920
+#define RADEON_CAP0_BUF1_OFFSET           0x0924
+#define RADEON_CAP0_BUF0_EVEN_OFFSET      0x0928
+#define RADEON_CAP0_BUF1_EVEN_OFFSET      0x092C
+
+#define RADEON_CAP0_BUF_PITCH             0x0930
+#define RADEON_CAP0_V_WINDOW              0x0934
+#define RADEON_CAP0_H_WINDOW              0x0938
+#define RADEON_CAP0_VBI0_OFFSET           0x093C
+#define RADEON_CAP0_VBI1_OFFSET           0x0940
+#define RADEON_CAP0_VBI_V_WINDOW          0x0944
+#define RADEON_CAP0_VBI_H_WINDOW          0x0948
+#define RADEON_CAP0_PORT_MODE_CNTL        0x094C
+#define RADEON_CAP0_TRIG_CNTL             0x0950
+#define RADEON_CAP0_DEBUG                 0x0954
+#define RADEON_CAP0_CONFIG                0x0958
+#       define RADEON_CAP0_CONFIG_CONTINUOS          0x00000001
+#       define RADEON_CAP0_CONFIG_START_FIELD_EVEN   0x00000002
+#       define RADEON_CAP0_CONFIG_START_BUF_GET      0x00000004
+#       define RADEON_CAP0_CONFIG_START_BUF_SET      0x00000008
+#       define RADEON_CAP0_CONFIG_BUF_TYPE_ALT       0x00000010
+#       define RADEON_CAP0_CONFIG_BUF_TYPE_FRAME     0x00000020
+#       define RADEON_CAP0_CONFIG_ONESHOT_MODE_FRAME 0x00000040
+#       define RADEON_CAP0_CONFIG_BUF_MODE_DOUBLE    0x00000080
+#       define RADEON_CAP0_CONFIG_BUF_MODE_TRIPLE    0x00000100
+#       define RADEON_CAP0_CONFIG_MIRROR_EN          0x00000200
+#       define RADEON_CAP0_CONFIG_ONESHOT_MIRROR_EN  0x00000400
+#       define RADEON_CAP0_CONFIG_VIDEO_SIGNED_UV    0x00000800
+#       define RADEON_CAP0_CONFIG_ANC_DECODE_EN      0x00001000
+#       define RADEON_CAP0_CONFIG_VBI_EN             0x00002000
+#       define RADEON_CAP0_CONFIG_SOFT_PULL_DOWN_EN  0x00004000
+#       define RADEON_CAP0_CONFIG_VIP_EXTEND_FLAG_EN 0x00008000
+#       define RADEON_CAP0_CONFIG_FAKE_FIELD_EN      0x00010000
+#       define RADEON_CAP0_CONFIG_ODD_ONE_MORE_LINE  0x00020000
+#       define RADEON_CAP0_CONFIG_EVEN_ONE_MORE_LINE 0x00040000
+#       define RADEON_CAP0_CONFIG_HORZ_DIVIDE_2      0x00080000
+#       define RADEON_CAP0_CONFIG_HORZ_DIVIDE_4      0x00100000
+#       define RADEON_CAP0_CONFIG_VERT_DIVIDE_2      0x00200000
+#       define RADEON_CAP0_CONFIG_VERT_DIVIDE_4      0x00400000
+#       define RADEON_CAP0_CONFIG_FORMAT_BROOKTREE   0x00000000
+#       define RADEON_CAP0_CONFIG_FORMAT_CCIR656     0x00800000
+#       define RADEON_CAP0_CONFIG_FORMAT_ZV          0x01000000
+#       define RADEON_CAP0_CONFIG_FORMAT_VIP         0x01800000
+#       define RADEON_CAP0_CONFIG_FORMAT_TRANSPORT   0x02000000
+#       define RADEON_CAP0_CONFIG_HORZ_DECIMATOR     0x04000000
+#       define RADEON_CAP0_CONFIG_VIDEO_IN_YVYU422   0x00000000
+#       define RADEON_CAP0_CONFIG_VIDEO_IN_VYUY422   0x20000000
+#       define RADEON_CAP0_CONFIG_VBI_DIVIDE_2       0x40000000
+#       define RADEON_CAP0_CONFIG_VBI_DIVIDE_4       0x80000000
+#define RADEON_CAP0_ANC_ODD_OFFSET        0x095C
+#define RADEON_CAP0_ANC_EVEN_OFFSET       0x0960
+#define RADEON_CAP0_ANC_H_WINDOW          0x0964
+#define RADEON_CAP0_VIDEO_SYNC_TEST       0x0968
+#define RADEON_CAP0_ONESHOT_BUF_OFFSET    0x096C
+#define RADEON_CAP0_BUF_STATUS            0x0970
+/* #define RADEON_CAP0_DWNSC_XRATIO       0x0978 */
+/* #define RADEON_CAP0_XSHARPNESS                 0x097C */
+#define RADEON_CAP0_VBI2_OFFSET           0x0980
+#define RADEON_CAP0_VBI3_OFFSET           0x0984
+#define RADEON_CAP0_ANC2_OFFSET           0x0988
+#define RADEON_CAP0_ANC3_OFFSET           0x098C
+#define RADEON_VID_BUFFER_CONTROL         0x0900
+
+/* second capture unit */
+
+#define RADEON_CAP1_BUF0_OFFSET           0x0990
+#define RADEON_CAP1_BUF1_OFFSET           0x0994
+#define RADEON_CAP1_BUF0_EVEN_OFFSET      0x0998
+#define RADEON_CAP1_BUF1_EVEN_OFFSET      0x099C
+
+#define RADEON_CAP1_BUF_PITCH             0x09A0
+#define RADEON_CAP1_V_WINDOW              0x09A4
+#define RADEON_CAP1_H_WINDOW              0x09A8
+#define RADEON_CAP1_VBI_ODD_OFFSET        0x09AC
+#define RADEON_CAP1_VBI_EVEN_OFFSET       0x09B0
+#define RADEON_CAP1_VBI_V_WINDOW          0x09B4
+#define RADEON_CAP1_VBI_H_WINDOW          0x09B8
+#define RADEON_CAP1_PORT_MODE_CNTL        0x09BC
+#define RADEON_CAP1_TRIG_CNTL             0x09C0
+#define RADEON_CAP1_DEBUG                 0x09C4
+#define RADEON_CAP1_CONFIG                0x09C8
+#define RADEON_CAP1_ANC_ODD_OFFSET        0x09CC
+#define RADEON_CAP1_ANC_EVEN_OFFSET       0x09D0
+#define RADEON_CAP1_ANC_H_WINDOW          0x09D4
+#define RADEON_CAP1_VIDEO_SYNC_TEST       0x09D8
+#define RADEON_CAP1_ONESHOT_BUF_OFFSET    0x09DC
+#define RADEON_CAP1_BUF_STATUS            0x09E0
+#define RADEON_CAP1_DWNSC_XRATIO          0x09E8
+#define RADEON_CAP1_XSHARPNESS            0x09EC
+
+/* misc multimedia registers */
+
+#define RADEON_IDCT_RUNS                  0x1F80
+#define RADEON_IDCT_LEVELS                0x1F84
+#define RADEON_IDCT_CONTROL               0x1FBC
+#define RADEON_IDCT_AUTH_CONTROL          0x1F88
+#define RADEON_IDCT_AUTH                  0x1F8C
+
+#define RADEON_P2PLL_CNTL                   0x002a /* P2PLL */
+#       define RADEON_P2PLL_RESET                (1 <<  0)
+#       define RADEON_P2PLL_SLEEP                (1 <<  1)
+#       define RADEON_P2PLL_PVG_MASK             (7 << 11)
+#       define RADEON_P2PLL_PVG_SHIFT            11
+#       define RADEON_P2PLL_ATOMIC_UPDATE_EN     (1 << 16)
+#       define RADEON_P2PLL_VGA_ATOMIC_UPDATE_EN (1 << 17)
+#       define RADEON_P2PLL_ATOMIC_UPDATE_VSYNC  (1 << 18)
+#define RADEON_P2PLL_DIV_0                  0x002c
+#       define RADEON_P2PLL_FB0_DIV_MASK    0x07ff
+#       define RADEON_P2PLL_POST0_DIV_MASK  0x00070000
+#define RADEON_P2PLL_REF_DIV                0x002B /* PLL */
+#       define RADEON_P2PLL_REF_DIV_MASK    0x03ff
+#       define RADEON_P2PLL_ATOMIC_UPDATE_R (1 << 15) /* same as _W */
+#       define RADEON_P2PLL_ATOMIC_UPDATE_W (1 << 15) /* same as _R */
+#       define R300_PPLL_REF_DIV_ACC_MASK   (0x3ff << 18)
+#       define R300_PPLL_REF_DIV_ACC_SHIFT  18
+#define RADEON_PALETTE_DATA                 0x00b4
+#define RADEON_PALETTE_30_DATA              0x00b8
+#define RADEON_PALETTE_INDEX                0x00b0
+#define RADEON_PCI_GART_PAGE                0x017c
+#define RADEON_PIXCLKS_CNTL                 0x002d
+#       define RADEON_PIX2CLK_SRC_SEL_MASK     0x03
+#       define RADEON_PIX2CLK_SRC_SEL_CPUCLK   0x00
+#       define RADEON_PIX2CLK_SRC_SEL_PSCANCLK 0x01
+#       define RADEON_PIX2CLK_SRC_SEL_BYTECLK  0x02
+#       define RADEON_PIX2CLK_SRC_SEL_P2PLLCLK 0x03
+#       define RADEON_PIX2CLK_ALWAYS_ONb       (1<<6)
+#       define RADEON_PIX2CLK_DAC_ALWAYS_ONb   (1<<7)
+#       define RADEON_PIXCLK_TV_SRC_SEL        (1 << 8)
+#       define RADEON_DISP_TVOUT_PIXCLK_TV_ALWAYS_ONb (1 << 9)
+#       define R300_DVOCLK_ALWAYS_ONb          (1 << 10)
+#       define RADEON_PIXCLK_BLEND_ALWAYS_ONb  (1 << 11)
+#       define RADEON_PIXCLK_GV_ALWAYS_ONb     (1 << 12)
+#       define RADEON_PIXCLK_DIG_TMDS_ALWAYS_ONb (1 << 13)
+#       define R300_PIXCLK_DVO_ALWAYS_ONb      (1 << 13)
+#       define RADEON_PIXCLK_LVDS_ALWAYS_ONb   (1 << 14)
+#       define RADEON_PIXCLK_TMDS_ALWAYS_ONb   (1 << 15)
+#       define R300_PIXCLK_TRANS_ALWAYS_ONb    (1 << 16)
+#       define R300_PIXCLK_TVO_ALWAYS_ONb      (1 << 17)
+#       define R300_P2G2CLK_ALWAYS_ONb         (1 << 18)
+#       define R300_P2G2CLK_DAC_ALWAYS_ONb     (1 << 19)
+#       define R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF (1 << 23)
+#define RADEON_PLANE_3D_MASK_C              0x1d44
+#define RADEON_PLL_TEST_CNTL                0x0013 /* PLL */
+#       define RADEON_PLL_MASK_READ_B          (1 << 9)
+#define RADEON_PMI_CAP_ID                   0x0f5c /* PCI */
+#define RADEON_PMI_DATA                     0x0f63 /* PCI */
+#define RADEON_PMI_NXT_CAP_PTR              0x0f5d /* PCI */
+#define RADEON_PMI_PMC_REG                  0x0f5e /* PCI */
+#define RADEON_PMI_PMCSR_REG                0x0f60 /* PCI */
+#define RADEON_PMI_REGISTER                 0x0f5c /* PCI */
+#define RADEON_PPLL_CNTL                    0x0002 /* PLL */
+#       define RADEON_PPLL_RESET                (1 <<  0)
+#       define RADEON_PPLL_SLEEP                (1 <<  1)
+#       define RADEON_PPLL_PVG_MASK             (7 << 11)
+#       define RADEON_PPLL_PVG_SHIFT            11
+#       define RADEON_PPLL_ATOMIC_UPDATE_EN     (1 << 16)
+#       define RADEON_PPLL_VGA_ATOMIC_UPDATE_EN (1 << 17)
+#       define RADEON_PPLL_ATOMIC_UPDATE_VSYNC  (1 << 18)
+#define RADEON_PPLL_DIV_0                   0x0004 /* PLL */
+#define RADEON_PPLL_DIV_1                   0x0005 /* PLL */
+#define RADEON_PPLL_DIV_2                   0x0006 /* PLL */
+#define RADEON_PPLL_DIV_3                   0x0007 /* PLL */
+#       define RADEON_PPLL_FB3_DIV_MASK     0x07ff
+#       define RADEON_PPLL_POST3_DIV_MASK   0x00070000
+#define RADEON_PPLL_REF_DIV                 0x0003 /* PLL */
+#       define RADEON_PPLL_REF_DIV_MASK     0x03ff
+#       define RADEON_PPLL_ATOMIC_UPDATE_R  (1 << 15) /* same as _W */
+#       define RADEON_PPLL_ATOMIC_UPDATE_W  (1 << 15) /* same as _R */
+#define RADEON_PWR_MNGMT_CNTL_STATUS        0x0f60 /* PCI */
+
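Registers tagged /* PLL */ live in an indexed space reached through the clock index/data pair defined earlier in this header; RREG32_PLL/WREG32_PLL below stand in for those accessors and are assumptions. The sketch shows only the atomic-update handshake used when reprogramming the primary pixel PLL dividers, with the divider computation and any timeouts left out:

#include <stdint.h>

extern uint32_t RREG32_PLL(uint32_t reg);       /* hypothetical indexed-PLL accessors */
extern void     WREG32_PLL(uint32_t reg, uint32_t val);

static void ppll_commit_dividers(uint32_t ref_div, uint32_t fb_div, uint32_t post_div)
{
        /* wait until the hardware has consumed any previous update */
        while (RREG32_PLL(RADEON_PPLL_REF_DIV) & RADEON_PPLL_ATOMIC_UPDATE_R)
                ;
        WREG32_PLL(RADEON_PPLL_REF_DIV,
                   (RREG32_PLL(RADEON_PPLL_REF_DIV) & ~RADEON_PPLL_REF_DIV_MASK) | ref_div);
        WREG32_PLL(RADEON_PPLL_DIV_3,
                   (fb_div & RADEON_PPLL_FB3_DIV_MASK) |
                   ((post_div << 16) & RADEON_PPLL_POST3_DIV_MASK));
        /* setting the write bit latches the new dividers in one go */
        WREG32_PLL(RADEON_PPLL_REF_DIV,
                   RREG32_PLL(RADEON_PPLL_REF_DIV) | RADEON_PPLL_ATOMIC_UPDATE_W);
}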
+#define RADEON_RBBM_GUICNTL                 0x172c
+#       define RADEON_HOST_DATA_SWAP_NONE   (0 << 0)
+#       define RADEON_HOST_DATA_SWAP_16BIT  (1 << 0)
+#       define RADEON_HOST_DATA_SWAP_32BIT  (2 << 0)
+#       define RADEON_HOST_DATA_SWAP_HDW    (3 << 0)
+#define RADEON_RBBM_SOFT_RESET              0x00f0
+#       define RADEON_SOFT_RESET_CP         (1 <<  0)
+#       define RADEON_SOFT_RESET_HI         (1 <<  1)
+#       define RADEON_SOFT_RESET_SE         (1 <<  2)
+#       define RADEON_SOFT_RESET_RE         (1 <<  3)
+#       define RADEON_SOFT_RESET_PP         (1 <<  4)
+#       define RADEON_SOFT_RESET_E2         (1 <<  5)
+#       define RADEON_SOFT_RESET_RB         (1 <<  6)
+#       define RADEON_SOFT_RESET_HDP        (1 <<  7)
+#define RADEON_RBBM_STATUS                  0x0e40
+#       define RADEON_RBBM_FIFOCNT_MASK     0x007f
+#       define RADEON_RBBM_ACTIVE           (1 << 31)
+#define RADEON_RB2D_DSTCACHE_CTLSTAT        0x342c
+#       define RADEON_RB2D_DC_FLUSH         (3 << 0)
+#       define RADEON_RB2D_DC_FREE          (3 << 2)
+#       define RADEON_RB2D_DC_FLUSH_ALL     0xf
+#       define RADEON_RB2D_DC_BUSY          (1 << 31)
+#define RADEON_RB2D_DSTCACHE_MODE           0x3428
+#define RADEON_DSTCACHE_CTLSTAT             0x1714
+
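RADEON_RBBM_STATUS is the usual place to poll for engine idle: the FIFOCNT field reads back free command-FIFO entries and bit 31 reports overall activity. A hedged sketch with arbitrary retry counts:

#include <stdint.h>

extern uint32_t RREG32(uint32_t reg);   /* hypothetical MMIO helpers, see GPIO sketch */

static int gui_wait_fifo(uint32_t entries)
{
        int i;
        for (i = 0; i < 100000; i++)
                if ((RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_FIFOCNT_MASK) >= entries)
                        return 0;
        return -1;
}

static int gui_wait_idle(void)
{
        int i;
        if (gui_wait_fifo(64))          /* wait for the whole FIFO to drain first */
                return -1;
        for (i = 0; i < 100000; i++)
                if (!(RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_ACTIVE))
                        return 0;       /* acceleration engines have gone idle */
        return -1;                      /* wedged: RADEON_RBBM_SOFT_RESET territory */
}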
+#define RADEON_RB3D_ZCACHE_MODE             0x3250
+#define RADEON_RB3D_ZCACHE_CTLSTAT          0x3254
+#       define RADEON_RB3D_ZC_FLUSH_ALL     0x5
+#define RADEON_RB3D_DSTCACHE_MODE           0x3258
+# define RADEON_RB3D_DC_CACHE_ENABLE            (0)
+# define RADEON_RB3D_DC_2D_CACHE_DISABLE        (1)
+# define RADEON_RB3D_DC_3D_CACHE_DISABLE        (2)
+# define RADEON_RB3D_DC_CACHE_DISABLE           (3)
+# define RADEON_RB3D_DC_2D_CACHE_LINESIZE_128   (1 << 2)
+# define RADEON_RB3D_DC_3D_CACHE_LINESIZE_128   (2 << 2)
+# define RADEON_RB3D_DC_2D_CACHE_AUTOFLUSH      (1 << 8)
+# define RADEON_RB3D_DC_3D_CACHE_AUTOFLUSH      (2 << 8)
+# define R200_RB3D_DC_2D_CACHE_AUTOFREE         (1 << 10)
+# define R200_RB3D_DC_3D_CACHE_AUTOFREE         (2 << 10)
+# define RADEON_RB3D_DC_FORCE_RMW               (1 << 16)
+# define RADEON_RB3D_DC_DISABLE_RI_FILL         (1 << 24)
+# define RADEON_RB3D_DC_DISABLE_RI_READ         (1 << 25)
+
+#define RADEON_RB3D_DSTCACHE_CTLSTAT            0x325C
+# define RADEON_RB3D_DC_FLUSH                   (3 << 0)
+# define RADEON_RB3D_DC_FREE                    (3 << 2)
+# define RADEON_RB3D_DC_FLUSH_ALL               0xf
+# define RADEON_RB3D_DC_BUSY                    (1 << 31)
+
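Before a render target is reused or read back, the destination caches are flushed through the CTLSTAT registers and polled until the busy bit clears; the 3D variant is the same pattern on RADEON_RB3D_DSTCACHE_CTLSTAT. A minimal sketch:

#include <stdint.h>

extern uint32_t RREG32(uint32_t reg);   /* hypothetical MMIO helpers, see GPIO sketch */
extern void     WREG32(uint32_t reg, uint32_t val);

static int rb2d_flush_dstcache(void)
{
        int i;
        WREG32(RADEON_RB2D_DSTCACHE_CTLSTAT, RADEON_RB2D_DC_FLUSH_ALL);
        for (i = 0; i < 10000; i++)
                if (!(RREG32(RADEON_RB2D_DSTCACHE_CTLSTAT) & RADEON_RB2D_DC_BUSY))
                        return 0;       /* 2D destination cache flushed and freed */
        return -1;                      /* flush never completed */
}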
+#define RADEON_REG_BASE                     0x0f18 /* PCI */
+#define RADEON_REGPROG_INF                  0x0f09 /* PCI */
+#define RADEON_REVISION_ID                  0x0f08 /* PCI */
+
+#define RADEON_SC_BOTTOM                    0x164c
+#define RADEON_SC_BOTTOM_RIGHT              0x16f0
+#define RADEON_SC_BOTTOM_RIGHT_C            0x1c8c
+#define RADEON_SC_LEFT                      0x1640
+#define RADEON_SC_RIGHT                     0x1644
+#define RADEON_SC_TOP                       0x1648
+#define RADEON_SC_TOP_LEFT                  0x16ec
+#define RADEON_SC_TOP_LEFT_C                0x1c88
+#       define RADEON_SC_SIGN_MASK_LO       0x8000
+#       define RADEON_SC_SIGN_MASK_HI       0x80000000
+#define RADEON_M_SPLL_REF_FB_DIV            0x000a /* PLL */
+#      define RADEON_M_SPLL_REF_DIV_SHIFT  0
+#      define RADEON_M_SPLL_REF_DIV_MASK   0xff
+#      define RADEON_MPLL_FB_DIV_SHIFT     8
+#      define RADEON_MPLL_FB_DIV_MASK      0xff
+#      define RADEON_SPLL_FB_DIV_SHIFT     16
+#      define RADEON_SPLL_FB_DIV_MASK      0xff
+#define RADEON_SPLL_CNTL                    0x000c /* PLL */
+#       define RADEON_SPLL_SLEEP            (1 << 0)
+#       define RADEON_SPLL_RESET            (1 << 1)
+#       define RADEON_SPLL_PCP_MASK         0x7
+#       define RADEON_SPLL_PCP_SHIFT        8
+#       define RADEON_SPLL_PVG_MASK         0x7
+#       define RADEON_SPLL_PVG_SHIFT        11
+#       define RADEON_SPLL_PDC_MASK         0x3
+#       define RADEON_SPLL_PDC_SHIFT        14
+#define RADEON_SCLK_CNTL                    0x000d /* PLL */
+#       define RADEON_SCLK_SRC_SEL_MASK     0x0007
+#       define RADEON_DYN_STOP_LAT_MASK     0x00007ff8
+#       define RADEON_CP_MAX_DYN_STOP_LAT   0x0008
+#       define RADEON_SCLK_FORCEON_MASK     0xffff8000
+#       define RADEON_SCLK_FORCE_DISP2      (1<<15)
+#       define RADEON_SCLK_FORCE_CP         (1<<16)
+#       define RADEON_SCLK_FORCE_HDP        (1<<17)
+#       define RADEON_SCLK_FORCE_DISP1      (1<<18)
+#       define RADEON_SCLK_FORCE_TOP        (1<<19)
+#       define RADEON_SCLK_FORCE_E2         (1<<20)
+#       define RADEON_SCLK_FORCE_SE         (1<<21)
+#       define RADEON_SCLK_FORCE_IDCT       (1<<22)
+#       define RADEON_SCLK_FORCE_VIP        (1<<23)
+#       define RADEON_SCLK_FORCE_RE         (1<<24)
+#       define RADEON_SCLK_FORCE_PB         (1<<25)
+#       define RADEON_SCLK_FORCE_TAM        (1<<26)
+#       define RADEON_SCLK_FORCE_TDM        (1<<27)
+#       define RADEON_SCLK_FORCE_RB         (1<<28)
+#       define RADEON_SCLK_FORCE_TV_SCLK    (1<<29)
+#       define RADEON_SCLK_FORCE_SUBPIC     (1<<30)
+#       define RADEON_SCLK_FORCE_OV0        (1<<31)
+#       define R300_SCLK_FORCE_VAP          (1<<21)
+#       define R300_SCLK_FORCE_SR           (1<<25)
+#       define R300_SCLK_FORCE_PX           (1<<26)
+#       define R300_SCLK_FORCE_TX           (1<<27)
+#       define R300_SCLK_FORCE_US           (1<<28)
+#       define R300_SCLK_FORCE_SU           (1<<30)
+#define R300_SCLK_CNTL2                     0x1e   /* PLL */
+#       define R300_SCLK_TCL_MAX_DYN_STOP_LAT (1<<10)
+#       define R300_SCLK_GA_MAX_DYN_STOP_LAT  (1<<11)
+#       define R300_SCLK_CBA_MAX_DYN_STOP_LAT (1<<12)
+#       define R300_SCLK_FORCE_TCL          (1<<13)
+#       define R300_SCLK_FORCE_CBA          (1<<14)
+#       define R300_SCLK_FORCE_GA           (1<<15)
+#define RADEON_SCLK_MORE_CNTL               0x0035 /* PLL */
+#       define RADEON_SCLK_MORE_MAX_DYN_STOP_LAT 0x0007
+#       define RADEON_SCLK_MORE_FORCEON     0x0700
+#define RADEON_SDRAM_MODE_REG               0x0158
+#define RADEON_SEQ8_DATA                    0x03c5 /* VGA */
+#define RADEON_SEQ8_IDX                     0x03c4 /* VGA */
+#define RADEON_SNAPSHOT_F_COUNT             0x0244
+#define RADEON_SNAPSHOT_VH_COUNTS           0x0240
+#define RADEON_SNAPSHOT_VIF_COUNT           0x024c
+#define RADEON_SRC_OFFSET                   0x15ac
+#define RADEON_SRC_PITCH                    0x15b0
+#define RADEON_SRC_PITCH_OFFSET             0x1428
+#define RADEON_SRC_SC_BOTTOM                0x165c
+#define RADEON_SRC_SC_BOTTOM_RIGHT          0x16f4
+#define RADEON_SRC_SC_RIGHT                 0x1654
+#define RADEON_SRC_X                        0x1414
+#define RADEON_SRC_X_Y                      0x1590
+#define RADEON_SRC_Y                        0x1418
+#define RADEON_SRC_Y_X                      0x1434
+#define RADEON_STATUS                       0x0f06 /* PCI */
+#define RADEON_SUBPIC_CNTL                  0x0540 /* ? */
+#define RADEON_SUB_CLASS                    0x0f0a /* PCI */
+#define RADEON_SURFACE_CNTL                 0x0b00
+#       define RADEON_SURF_TRANSLATION_DIS  (1 << 8)
+#       define RADEON_NONSURF_AP0_SWP_16BPP (1 << 20)
+#       define RADEON_NONSURF_AP0_SWP_32BPP (1 << 21)
+#       define RADEON_NONSURF_AP1_SWP_16BPP (1 << 22)
+#       define RADEON_NONSURF_AP1_SWP_32BPP (1 << 23)
+#define RADEON_SURFACE0_INFO                0x0b0c
+#       define RADEON_SURF_TILE_COLOR_MACRO (0 << 16)
+#       define RADEON_SURF_TILE_COLOR_BOTH  (1 << 16)
+#       define RADEON_SURF_TILE_DEPTH_32BPP (2 << 16)
+#       define RADEON_SURF_TILE_DEPTH_16BPP (3 << 16)
+#       define R200_SURF_TILE_NONE          (0 << 16)
+#       define R200_SURF_TILE_COLOR_MACRO   (1 << 16)
+#       define R200_SURF_TILE_COLOR_MICRO   (2 << 16)
+#       define R200_SURF_TILE_COLOR_BOTH    (3 << 16)
+#       define R200_SURF_TILE_DEPTH_32BPP   (4 << 16)
+#       define R200_SURF_TILE_DEPTH_16BPP   (5 << 16)
+#       define R300_SURF_TILE_NONE          (0 << 16)
+#       define R300_SURF_TILE_COLOR_MACRO   (1 << 16)
+#       define R300_SURF_TILE_DEPTH_32BPP   (2 << 16)
+#       define RADEON_SURF_AP0_SWP_16BPP    (1 << 20)
+#       define RADEON_SURF_AP0_SWP_32BPP    (1 << 21)
+#       define RADEON_SURF_AP1_SWP_16BPP    (1 << 22)
+#       define RADEON_SURF_AP1_SWP_32BPP    (1 << 23)
+#define RADEON_SURFACE0_LOWER_BOUND         0x0b04
+#define RADEON_SURFACE0_UPPER_BOUND         0x0b08
+#define RADEON_SURFACE1_INFO                0x0b1c
+#define RADEON_SURFACE1_LOWER_BOUND         0x0b14
+#define RADEON_SURFACE1_UPPER_BOUND         0x0b18
+#define RADEON_SURFACE2_INFO                0x0b2c
+#define RADEON_SURFACE2_LOWER_BOUND         0x0b24
+#define RADEON_SURFACE2_UPPER_BOUND         0x0b28
+#define RADEON_SURFACE3_INFO                0x0b3c
+#define RADEON_SURFACE3_LOWER_BOUND         0x0b34
+#define RADEON_SURFACE3_UPPER_BOUND         0x0b38
+#define RADEON_SURFACE4_INFO                0x0b4c
+#define RADEON_SURFACE4_LOWER_BOUND         0x0b44
+#define RADEON_SURFACE4_UPPER_BOUND         0x0b48
+#define RADEON_SURFACE5_INFO                0x0b5c
+#define RADEON_SURFACE5_LOWER_BOUND         0x0b54
+#define RADEON_SURFACE5_UPPER_BOUND         0x0b58
+#define RADEON_SURFACE6_INFO                0x0b6c
+#define RADEON_SURFACE6_LOWER_BOUND         0x0b64
+#define RADEON_SURFACE6_UPPER_BOUND         0x0b68
+#define RADEON_SURFACE7_INFO                0x0b7c
+#define RADEON_SURFACE7_LOWER_BOUND         0x0b74
+#define RADEON_SURFACE7_UPPER_BOUND         0x0b78
+#define RADEON_SW_SEMAPHORE                 0x013c
+
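On big-endian hosts the NONSURF_AP0_SWP bits select byte swapping for plain CPU accesses through aperture 0, chosen per framebuffer depth. A hedged sketch of that selection (the surrounding surface-register programming is omitted):

#include <stdint.h>

extern uint32_t RREG32(uint32_t reg);   /* hypothetical MMIO helpers, see GPIO sketch */
extern void     WREG32(uint32_t reg, uint32_t val);

static void surface_cntl_set_swap(int bpp)
{
        uint32_t v = RREG32(RADEON_SURFACE_CNTL);
        v &= ~(RADEON_NONSURF_AP0_SWP_16BPP | RADEON_NONSURF_AP0_SWP_32BPP);
        if (bpp == 16)
                v |= RADEON_NONSURF_AP0_SWP_16BPP;      /* swap 16-bit pixels */
        else if (bpp == 32)
                v |= RADEON_NONSURF_AP0_SWP_32BPP;      /* swap 32-bit pixels */
        WREG32(RADEON_SURFACE_CNTL, v);                 /* 8bpp needs no swapping */
}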
+#define RADEON_TEST_DEBUG_CNTL              0x0120
+#define RADEON_TEST_DEBUG_CNTL__TEST_DEBUG_OUT_EN 0x00000001
+
+#define RADEON_TEST_DEBUG_MUX               0x0124
+#define RADEON_TEST_DEBUG_OUT               0x012c
+#define RADEON_TMDS_PLL_CNTL                0x02a8
+#define RADEON_TMDS_TRANSMITTER_CNTL        0x02a4
+#       define RADEON_TMDS_TRANSMITTER_PLLEN  1
+#       define RADEON_TMDS_TRANSMITTER_PLLRST 2
+#define RADEON_TRAIL_BRES_DEC               0x1614
+#define RADEON_TRAIL_BRES_ERR               0x160c
+#define RADEON_TRAIL_BRES_INC               0x1610
+#define RADEON_TRAIL_X                      0x1618
+#define RADEON_TRAIL_X_SUB                  0x1620
+
+#define RADEON_VCLK_ECP_CNTL                0x0008 /* PLL */
+#       define RADEON_VCLK_SRC_SEL_MASK     0x03
+#       define RADEON_VCLK_SRC_SEL_CPUCLK   0x00
+#       define RADEON_VCLK_SRC_SEL_PSCANCLK 0x01
+#       define RADEON_VCLK_SRC_SEL_BYTECLK  0x02
+#       define RADEON_VCLK_SRC_SEL_PPLLCLK  0x03
+#       define RADEON_PIXCLK_ALWAYS_ONb     (1<<6)
+#       define RADEON_PIXCLK_DAC_ALWAYS_ONb (1<<7)
+#       define R300_DISP_DAC_PIXCLK_DAC_BLANK_OFF (1<<23)
+
+#define RADEON_VENDOR_ID                    0x0f00 /* PCI */
+#define RADEON_VGA_DDA_CONFIG               0x02e8
+#define RADEON_VGA_DDA_ON_OFF               0x02ec
+#define RADEON_VID_BUFFER_CONTROL           0x0900
+#define RADEON_VIDEOMUX_CNTL                0x0190
+
+/* VIP bus */
+#define RADEON_VIPH_CH0_DATA                0x0c00
+#define RADEON_VIPH_CH1_DATA                0x0c04
+#define RADEON_VIPH_CH2_DATA                0x0c08
+#define RADEON_VIPH_CH3_DATA                0x0c0c
+#define RADEON_VIPH_CH0_ADDR                0x0c10
+#define RADEON_VIPH_CH1_ADDR                0x0c14
+#define RADEON_VIPH_CH2_ADDR                0x0c18
+#define RADEON_VIPH_CH3_ADDR                0x0c1c
+#define RADEON_VIPH_CH0_SBCNT               0x0c20
+#define RADEON_VIPH_CH1_SBCNT               0x0c24
+#define RADEON_VIPH_CH2_SBCNT               0x0c28
+#define RADEON_VIPH_CH3_SBCNT               0x0c2c
+#define RADEON_VIPH_CH0_ABCNT               0x0c30
+#define RADEON_VIPH_CH1_ABCNT               0x0c34
+#define RADEON_VIPH_CH2_ABCNT               0x0c38
+#define RADEON_VIPH_CH3_ABCNT               0x0c3c
+#define RADEON_VIPH_CONTROL                 0x0c40
+#       define RADEON_VIP_BUSY 0
+#       define RADEON_VIP_IDLE 1
+#       define RADEON_VIP_RESET 2
+#       define RADEON_VIPH_EN               (1 << 21)
+#define RADEON_VIPH_DV_LAT                  0x0c44
+#define RADEON_VIPH_BM_CHUNK                0x0c48
+#define RADEON_VIPH_DV_INT                  0x0c4c
+#define RADEON_VIPH_TIMEOUT_STAT            0x0c50
+#define RADEON_VIPH_TIMEOUT_STAT__VIPH_REG_STAT 0x00000010
+#define RADEON_VIPH_TIMEOUT_STAT__VIPH_REG_AK   0x00000010
+#define RADEON_VIPH_TIMEOUT_STAT__VIPH_REGR_DIS 0x01000000
+
+#define RADEON_VIPH_REG_DATA                0x0084
+#define RADEON_VIPH_REG_ADDR                0x0080
+
+
+#define RADEON_WAIT_UNTIL                   0x1720
+#       define RADEON_WAIT_CRTC_PFLIP       (1 << 0)
+#       define RADEON_WAIT_RE_CRTC_VLINE    (1 << 1)
+#       define RADEON_WAIT_FE_CRTC_VLINE    (1 << 2)
+#       define RADEON_WAIT_CRTC_VLINE       (1 << 3)
+#       define RADEON_WAIT_DMA_VID_IDLE     (1 << 8)
+#       define RADEON_WAIT_DMA_GUI_IDLE     (1 << 9)
+#       define RADEON_WAIT_CMDFIFO          (1 << 10) /* wait for CMDFIFO_ENTRIES */
+#       define RADEON_WAIT_OV0_FLIP         (1 << 11)
+#       define RADEON_WAIT_AGP_FLUSH        (1 << 13)
+#       define RADEON_WAIT_2D_IDLE          (1 << 14)
+#       define RADEON_WAIT_3D_IDLE          (1 << 15)
+#       define RADEON_WAIT_2D_IDLECLEAN     (1 << 16)
+#       define RADEON_WAIT_3D_IDLECLEAN     (1 << 17)
+#       define RADEON_WAIT_HOST_IDLECLEAN   (1 << 18)
+#       define RADEON_CMDFIFO_ENTRIES_SHIFT 10
+#       define RADEON_CMDFIFO_ENTRIES_MASK  0x7f
+#       define RADEON_WAIT_VAP_IDLE         (1 << 28)
+#       define RADEON_WAIT_BOTH_CRTC_PFLIP  (1 << 30)
+#       define RADEON_ENG_DISPLAY_SELECT_CRTC0    (0 << 31)
+#       define RADEON_ENG_DISPLAY_SELECT_CRTC1    (1 << 31)
+
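The WAIT_UNTIL bits are normally emitted through the command processor to stall the engine until the selected conditions hold. A sketch of composing one such word for a software page flip; how it reaches the hardware (direct write or a type-0 CP packet) is deliberately left out:

#include <stdint.h>

static uint32_t wait_until_for_pageflip(int crtc2)
{
        uint32_t w = RADEON_WAIT_2D_IDLECLEAN |
                     RADEON_WAIT_3D_IDLECLEAN |         /* drawing engines flushed and idle */
                     RADEON_WAIT_HOST_IDLECLEAN;        /* host data path drained as well */
        w |= crtc2 ? RADEON_ENG_DISPLAY_SELECT_CRTC1
                   : RADEON_ENG_DISPLAY_SELECT_CRTC0;   /* pick which CRTC's flip to watch */
        return w | RADEON_WAIT_CRTC_PFLIP;              /* stall until that flip completes */
}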
+#define RADEON_X_MPLL_REF_FB_DIV            0x000a /* PLL */
+#define RADEON_XCLK_CNTL                    0x000d /* PLL */
+#define RADEON_XDLL_CNTL                    0x000c /* PLL */
+#define RADEON_XPLL_CNTL                    0x000b /* PLL */
+
+
+
+                               /* Registers for 3D/TCL */
+#define RADEON_PP_BORDER_COLOR_0            0x1d40
+#define RADEON_PP_BORDER_COLOR_1            0x1d44
+#define RADEON_PP_BORDER_COLOR_2            0x1d48
+#define RADEON_PP_CNTL                      0x1c38
+#       define RADEON_STIPPLE_ENABLE        (1 <<  0)
+#       define RADEON_SCISSOR_ENABLE        (1 <<  1)
+#       define RADEON_PATTERN_ENABLE        (1 <<  2)
+#       define RADEON_SHADOW_ENABLE         (1 <<  3)
+#       define RADEON_TEX_ENABLE_MASK       (0xf << 4)
+#       define RADEON_TEX_0_ENABLE          (1 <<  4)
+#       define RADEON_TEX_1_ENABLE          (1 <<  5)
+#       define RADEON_TEX_2_ENABLE          (1 <<  6)
+#       define RADEON_TEX_3_ENABLE          (1 <<  7)
+#       define RADEON_TEX_BLEND_ENABLE_MASK (0xf << 12)
+#       define RADEON_TEX_BLEND_0_ENABLE    (1 << 12)
+#       define RADEON_TEX_BLEND_1_ENABLE    (1 << 13)
+#       define RADEON_TEX_BLEND_2_ENABLE    (1 << 14)
+#       define RADEON_TEX_BLEND_3_ENABLE    (1 << 15)
+#       define RADEON_PLANAR_YUV_ENABLE     (1 << 20)
+#       define RADEON_SPECULAR_ENABLE       (1 << 21)
+#       define RADEON_FOG_ENABLE            (1 << 22)
+#       define RADEON_ALPHA_TEST_ENABLE     (1 << 23)
+#       define RADEON_ANTI_ALIAS_NONE       (0 << 24)
+#       define RADEON_ANTI_ALIAS_LINE       (1 << 24)
+#       define RADEON_ANTI_ALIAS_POLY       (2 << 24)
+#       define RADEON_ANTI_ALIAS_LINE_POLY  (3 << 24)
+#       define RADEON_BUMP_MAP_ENABLE       (1 << 26)
+#       define RADEON_BUMPED_MAP_T0         (0 << 27)
+#       define RADEON_BUMPED_MAP_T1         (1 << 27)
+#       define RADEON_BUMPED_MAP_T2         (2 << 27)
+#       define RADEON_TEX_3D_ENABLE_0       (1 << 29)
+#       define RADEON_TEX_3D_ENABLE_1       (1 << 30)
+#       define RADEON_MC_ENABLE             (1 << 31)
+#define RADEON_PP_FOG_COLOR                 0x1c18
+#       define RADEON_FOG_COLOR_MASK        0x00ffffff
+#       define RADEON_FOG_VERTEX            (0 << 24)
+#       define RADEON_FOG_TABLE             (1 << 24)
+#       define RADEON_FOG_USE_DEPTH         (0 << 25)
+#       define RADEON_FOG_USE_DIFFUSE_ALPHA (2 << 25)
+#       define RADEON_FOG_USE_SPEC_ALPHA    (3 << 25)
+#define RADEON_PP_LUM_MATRIX                0x1d00
+#define RADEON_PP_MISC                      0x1c14
+#       define RADEON_REF_ALPHA_MASK        0x000000ff
+#       define RADEON_ALPHA_TEST_FAIL       (0 << 8)
+#       define RADEON_ALPHA_TEST_LESS       (1 << 8)
+#       define RADEON_ALPHA_TEST_LEQUAL     (2 << 8)
+#       define RADEON_ALPHA_TEST_EQUAL      (3 << 8)
+#       define RADEON_ALPHA_TEST_GEQUAL     (4 << 8)
+#       define RADEON_ALPHA_TEST_GREATER    (5 << 8)
+#       define RADEON_ALPHA_TEST_NEQUAL     (6 << 8)
+#       define RADEON_ALPHA_TEST_PASS       (7 << 8)
+#       define RADEON_ALPHA_TEST_OP_MASK    (7 << 8)
+#       define RADEON_CHROMA_FUNC_FAIL      (0 << 16)
+#       define RADEON_CHROMA_FUNC_PASS      (1 << 16)
+#       define RADEON_CHROMA_FUNC_NEQUAL    (2 << 16)
+#       define RADEON_CHROMA_FUNC_EQUAL     (3 << 16)
+#       define RADEON_CHROMA_KEY_NEAREST    (0 << 18)
+#       define RADEON_CHROMA_KEY_ZERO       (1 << 18)
+#       define RADEON_SHADOW_ID_AUTO_INC    (1 << 20)
+#       define RADEON_SHADOW_FUNC_EQUAL     (0 << 21)
+#       define RADEON_SHADOW_FUNC_NEQUAL    (1 << 21)
+#       define RADEON_SHADOW_PASS_1         (0 << 22)
+#       define RADEON_SHADOW_PASS_2         (1 << 22)
+#       define RADEON_RIGHT_HAND_CUBE_D3D   (0 << 24)
+#       define RADEON_RIGHT_HAND_CUBE_OGL   (1 << 24)
+#define RADEON_PP_ROT_MATRIX_0              0x1d58
+#define RADEON_PP_ROT_MATRIX_1              0x1d5c
+#define RADEON_PP_TXFILTER_0                0x1c54
+#define RADEON_PP_TXFILTER_1                0x1c6c
+#define RADEON_PP_TXFILTER_2                0x1c84
+#       define RADEON_MAG_FILTER_NEAREST                   (0  <<  0)
+#       define RADEON_MAG_FILTER_LINEAR                    (1  <<  0)
+#       define RADEON_MAG_FILTER_MASK                      (1  <<  0)
+#       define RADEON_MIN_FILTER_NEAREST                   (0  <<  1)
+#       define RADEON_MIN_FILTER_LINEAR                    (1  <<  1)
+#       define RADEON_MIN_FILTER_NEAREST_MIP_NEAREST       (2  <<  1)
+#       define RADEON_MIN_FILTER_NEAREST_MIP_LINEAR        (3  <<  1)
+#       define RADEON_MIN_FILTER_LINEAR_MIP_NEAREST        (6  <<  1)
+#       define RADEON_MIN_FILTER_LINEAR_MIP_LINEAR         (7  <<  1)
+#       define RADEON_MIN_FILTER_ANISO_NEAREST             (8  <<  1)
+#       define RADEON_MIN_FILTER_ANISO_LINEAR              (9  <<  1)
+#       define RADEON_MIN_FILTER_ANISO_NEAREST_MIP_NEAREST (10 <<  1)
+#       define RADEON_MIN_FILTER_ANISO_NEAREST_MIP_LINEAR  (11 <<  1)
+#       define RADEON_MIN_FILTER_MASK                      (15 <<  1)
+#       define RADEON_MAX_ANISO_1_TO_1                     (0  <<  5)
+#       define RADEON_MAX_ANISO_2_TO_1                     (1  <<  5)
+#       define RADEON_MAX_ANISO_4_TO_1                     (2  <<  5)
+#       define RADEON_MAX_ANISO_8_TO_1                     (3  <<  5)
+#       define RADEON_MAX_ANISO_16_TO_1                    (4  <<  5)
+#       define RADEON_MAX_ANISO_MASK                       (7  <<  5)
+#       define RADEON_LOD_BIAS_MASK                        (0xff <<  8)
+#       define RADEON_LOD_BIAS_SHIFT                       8
+#       define RADEON_MAX_MIP_LEVEL_MASK                   (0x0f << 16)
+#       define RADEON_MAX_MIP_LEVEL_SHIFT                  16
+#       define RADEON_YUV_TO_RGB                           (1  << 20)
+#       define RADEON_YUV_TEMPERATURE_COOL                 (0  << 21)
+#       define RADEON_YUV_TEMPERATURE_HOT                  (1  << 21)
+#       define RADEON_YUV_TEMPERATURE_MASK                 (1  << 21)
+#       define RADEON_WRAPEN_S                             (1  << 22)
+#       define RADEON_CLAMP_S_WRAP                         (0  << 23)
+#       define RADEON_CLAMP_S_MIRROR                       (1  << 23)
+#       define RADEON_CLAMP_S_CLAMP_LAST                   (2  << 23)
+#       define RADEON_CLAMP_S_MIRROR_CLAMP_LAST            (3  << 23)
+#       define RADEON_CLAMP_S_CLAMP_BORDER                 (4  << 23)
+#       define RADEON_CLAMP_S_MIRROR_CLAMP_BORDER          (5  << 23)
+#       define RADEON_CLAMP_S_CLAMP_GL                     (6  << 23)
+#       define RADEON_CLAMP_S_MIRROR_CLAMP_GL              (7  << 23)
+#       define RADEON_CLAMP_S_MASK                         (7  << 23)
+#       define RADEON_WRAPEN_T                             (1  << 26)
+#       define RADEON_CLAMP_T_WRAP                         (0  << 27)
+#       define RADEON_CLAMP_T_MIRROR                       (1  << 27)
+#       define RADEON_CLAMP_T_CLAMP_LAST                   (2  << 27)
+#       define RADEON_CLAMP_T_MIRROR_CLAMP_LAST            (3  << 27)
+#       define RADEON_CLAMP_T_CLAMP_BORDER                 (4  << 27)
+#       define RADEON_CLAMP_T_MIRROR_CLAMP_BORDER          (5  << 27)
+#       define RADEON_CLAMP_T_CLAMP_GL                     (6  << 27)
+#       define RADEON_CLAMP_T_MIRROR_CLAMP_GL              (7  << 27)
+#       define RADEON_CLAMP_T_MASK                         (7  << 27)
+#       define RADEON_BORDER_MODE_OGL                      (0  << 31)
+#       define RADEON_BORDER_MODE_D3D                      (1  << 31)
+#define RADEON_PP_TXFORMAT_0                0x1c58
+#define RADEON_PP_TXFORMAT_1                0x1c70
+#define RADEON_PP_TXFORMAT_2                0x1c88
+#       define RADEON_TXFORMAT_I8                 (0  <<  0)
+#       define RADEON_TXFORMAT_AI88               (1  <<  0)
+#       define RADEON_TXFORMAT_RGB332             (2  <<  0)
+#       define RADEON_TXFORMAT_ARGB1555           (3  <<  0)
+#       define RADEON_TXFORMAT_RGB565             (4  <<  0)
+#       define RADEON_TXFORMAT_ARGB4444           (5  <<  0)
+#       define RADEON_TXFORMAT_ARGB8888           (6  <<  0)
+#       define RADEON_TXFORMAT_RGBA8888           (7  <<  0)
+#       define RADEON_TXFORMAT_Y8                 (8  <<  0)
+#       define RADEON_TXFORMAT_VYUY422            (10 <<  0)
+#       define RADEON_TXFORMAT_YVYU422            (11 <<  0)
+#       define RADEON_TXFORMAT_DXT1               (12 <<  0)
+#       define RADEON_TXFORMAT_DXT23              (14 <<  0)
+#       define RADEON_TXFORMAT_DXT45              (15 <<  0)
+#       define RADEON_TXFORMAT_FORMAT_MASK        (31 <<  0)
+#       define RADEON_TXFORMAT_FORMAT_SHIFT       0
+#       define RADEON_TXFORMAT_APPLE_YUV_MODE     (1  <<  5)
+#       define RADEON_TXFORMAT_ALPHA_IN_MAP       (1  <<  6)
+#       define RADEON_TXFORMAT_NON_POWER2         (1  <<  7)
+#       define RADEON_TXFORMAT_WIDTH_MASK         (15 <<  8)
+#       define RADEON_TXFORMAT_WIDTH_SHIFT        8
+#       define RADEON_TXFORMAT_HEIGHT_MASK        (15 << 12)
+#       define RADEON_TXFORMAT_HEIGHT_SHIFT       12
+#       define RADEON_TXFORMAT_F5_WIDTH_MASK      (15 << 16)
+#       define RADEON_TXFORMAT_F5_WIDTH_SHIFT     16
+#       define RADEON_TXFORMAT_F5_HEIGHT_MASK     (15 << 20)
+#       define RADEON_TXFORMAT_F5_HEIGHT_SHIFT    20
+#       define RADEON_TXFORMAT_ST_ROUTE_STQ0      (0  << 24)
+#       define RADEON_TXFORMAT_ST_ROUTE_MASK      (3  << 24)
+#       define RADEON_TXFORMAT_ST_ROUTE_STQ1      (1  << 24)
+#       define RADEON_TXFORMAT_ST_ROUTE_STQ2      (2  << 24)
+#       define RADEON_TXFORMAT_ENDIAN_NO_SWAP     (0  << 26)
+#       define RADEON_TXFORMAT_ENDIAN_16BPP_SWAP  (1  << 26)
+#       define RADEON_TXFORMAT_ENDIAN_32BPP_SWAP  (2  << 26)
+#       define RADEON_TXFORMAT_ENDIAN_HALFDW_SWAP (3  << 26)
+#       define RADEON_TXFORMAT_ALPHA_MASK_ENABLE  (1  << 28)
+#       define RADEON_TXFORMAT_CHROMA_KEY_ENABLE  (1  << 29)
+#       define RADEON_TXFORMAT_CUBIC_MAP_ENABLE   (1  << 30)
+#       define RADEON_TXFORMAT_PERSPECTIVE_ENABLE (1  << 31)
+#define RADEON_PP_CUBIC_FACES_0             0x1d24
+#define RADEON_PP_CUBIC_FACES_1             0x1d28
+#define RADEON_PP_CUBIC_FACES_2             0x1d2c
+#       define RADEON_FACE_WIDTH_1_SHIFT          0
+#       define RADEON_FACE_HEIGHT_1_SHIFT         4
+#       define RADEON_FACE_WIDTH_1_MASK           (0xf << 0)
+#       define RADEON_FACE_HEIGHT_1_MASK          (0xf << 4)
+#       define RADEON_FACE_WIDTH_2_SHIFT          8
+#       define RADEON_FACE_HEIGHT_2_SHIFT         12
+#       define RADEON_FACE_WIDTH_2_MASK           (0xf << 8)
+#       define RADEON_FACE_HEIGHT_2_MASK          (0xf << 12)
+#       define RADEON_FACE_WIDTH_3_SHIFT          16
+#       define RADEON_FACE_HEIGHT_3_SHIFT         20
+#       define RADEON_FACE_WIDTH_3_MASK           (0xf << 16)
+#       define RADEON_FACE_HEIGHT_3_MASK          (0xf << 20)
+#       define RADEON_FACE_WIDTH_4_SHIFT          24
+#       define RADEON_FACE_HEIGHT_4_SHIFT         28
+#       define RADEON_FACE_WIDTH_4_MASK           (0xf << 24)
+#       define RADEON_FACE_HEIGHT_4_MASK          (0xf << 28)
+
+#define RADEON_PP_TXOFFSET_0                0x1c5c
+#define RADEON_PP_TXOFFSET_1                0x1c74
+#define RADEON_PP_TXOFFSET_2                0x1c8c
+#       define RADEON_TXO_ENDIAN_NO_SWAP     (0 << 0)
+#       define RADEON_TXO_ENDIAN_BYTE_SWAP   (1 << 0)
+#       define RADEON_TXO_ENDIAN_WORD_SWAP   (2 << 0)
+#       define RADEON_TXO_ENDIAN_HALFDW_SWAP (3 << 0)
+#       define RADEON_TXO_MACRO_LINEAR       (0 << 2)
+#       define RADEON_TXO_MACRO_TILE         (1 << 2)
+#       define RADEON_TXO_MICRO_LINEAR       (0 << 3)
+#       define RADEON_TXO_MICRO_TILE_X2      (1 << 3)
+#       define RADEON_TXO_MICRO_TILE_OPT     (2 << 3)
+#       define RADEON_TXO_OFFSET_MASK        0xffffffe0
+#       define RADEON_TXO_OFFSET_SHIFT       5
+
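Putting a few of the texture-unit registers together: a sketch of binding a bilinear ARGB8888 texture to unit 0, with width/height given as log2 of the dimensions (how the WIDTH/HEIGHT fields are encoded for power-of-two textures). The gpu_offset parameter is a hypothetical card-relative address, and state is poked with plain register writes instead of the CP stream a real driver would use.

#include <stdint.h>

extern uint32_t RREG32(uint32_t reg);   /* hypothetical MMIO helpers, see GPIO sketch */
extern void     WREG32(uint32_t reg, uint32_t val);

static void bind_tex0_argb8888(unsigned log2_w, unsigned log2_h, uint32_t gpu_offset)
{
        uint32_t filt = RADEON_MAG_FILTER_LINEAR | RADEON_MIN_FILTER_LINEAR |
                        RADEON_CLAMP_S_WRAP | RADEON_CLAMP_T_WRAP;      /* bilinear, repeat */
        uint32_t fmt  = RADEON_TXFORMAT_ARGB8888 | RADEON_TXFORMAT_ALPHA_IN_MAP |
                        (log2_w << RADEON_TXFORMAT_WIDTH_SHIFT) |
                        (log2_h << RADEON_TXFORMAT_HEIGHT_SHIFT) |
                        RADEON_TXFORMAT_ST_ROUTE_STQ0;  /* take coordinates from set 0 */

        WREG32(RADEON_PP_TXFILTER_0, filt);
        WREG32(RADEON_PP_TXFORMAT_0, fmt);
        WREG32(RADEON_PP_TXOFFSET_0, gpu_offset & RADEON_TXO_OFFSET_MASK);  /* 32-byte aligned */
        WREG32(RADEON_PP_CNTL, RREG32(RADEON_PP_CNTL) |
               RADEON_TEX_0_ENABLE | RADEON_TEX_BLEND_0_ENABLE);
}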
+#define RADEON_PP_CUBIC_OFFSET_T0_0         0x1dd0  /* bits [31:5] */
+#define RADEON_PP_CUBIC_OFFSET_T0_1         0x1dd4
+#define RADEON_PP_CUBIC_OFFSET_T0_2         0x1dd8
+#define RADEON_PP_CUBIC_OFFSET_T0_3         0x1ddc
+#define RADEON_PP_CUBIC_OFFSET_T0_4         0x1de0
+#define RADEON_PP_CUBIC_OFFSET_T1_0         0x1e00
+#define RADEON_PP_CUBIC_OFFSET_T1_1         0x1e04
+#define RADEON_PP_CUBIC_OFFSET_T1_2         0x1e08
+#define RADEON_PP_CUBIC_OFFSET_T1_3         0x1e0c
+#define RADEON_PP_CUBIC_OFFSET_T1_4         0x1e10
+#define RADEON_PP_CUBIC_OFFSET_T2_0         0x1e14
+#define RADEON_PP_CUBIC_OFFSET_T2_1         0x1e18
+#define RADEON_PP_CUBIC_OFFSET_T2_2         0x1e1c
+#define RADEON_PP_CUBIC_OFFSET_T2_3         0x1e20
+#define RADEON_PP_CUBIC_OFFSET_T2_4         0x1e24
+
+#define RADEON_PP_TEX_SIZE_0                0x1d04  /* NPOT */
+#define RADEON_PP_TEX_SIZE_1                0x1d0c
+#define RADEON_PP_TEX_SIZE_2                0x1d14
+#       define RADEON_TEX_USIZE_MASK        (0x7ff << 0)
+#       define RADEON_TEX_USIZE_SHIFT       0
+#       define RADEON_TEX_VSIZE_MASK        (0x7ff << 16)
+#       define RADEON_TEX_VSIZE_SHIFT       16
+#       define RADEON_SIGNED_RGB_MASK       (1 << 30)
+#       define RADEON_SIGNED_RGB_SHIFT      30
+#       define RADEON_SIGNED_ALPHA_MASK     (1 << 31)
+#       define RADEON_SIGNED_ALPHA_SHIFT    31
+#define RADEON_PP_TEX_PITCH_0               0x1d08  /* NPOT */
+#define RADEON_PP_TEX_PITCH_1               0x1d10  /* NPOT */
+#define RADEON_PP_TEX_PITCH_2               0x1d18  /* NPOT */
+/* note: bits [13:5] hold the 32-byte-aligned stride of the texture map */
+
+#define RADEON_PP_TXCBLEND_0                0x1c60
+#define RADEON_PP_TXCBLEND_1                0x1c78
+#define RADEON_PP_TXCBLEND_2                0x1c90
+#       define RADEON_COLOR_ARG_A_SHIFT          0
+#       define RADEON_COLOR_ARG_A_MASK           (0x1f << 0)
+#       define RADEON_COLOR_ARG_A_ZERO           (0    << 0)
+#       define RADEON_COLOR_ARG_A_CURRENT_COLOR  (2    << 0)
+#       define RADEON_COLOR_ARG_A_CURRENT_ALPHA  (3    << 0)
+#       define RADEON_COLOR_ARG_A_DIFFUSE_COLOR  (4    << 0)
+#       define RADEON_COLOR_ARG_A_DIFFUSE_ALPHA  (5    << 0)
+#       define RADEON_COLOR_ARG_A_SPECULAR_COLOR (6    << 0)
+#       define RADEON_COLOR_ARG_A_SPECULAR_ALPHA (7    << 0)
+#       define RADEON_COLOR_ARG_A_TFACTOR_COLOR  (8    << 0)
+#       define RADEON_COLOR_ARG_A_TFACTOR_ALPHA  (9    << 0)
+#       define RADEON_COLOR_ARG_A_T0_COLOR       (10   << 0)
+#       define RADEON_COLOR_ARG_A_T0_ALPHA       (11   << 0)
+#       define RADEON_COLOR_ARG_A_T1_COLOR       (12   << 0)
+#       define RADEON_COLOR_ARG_A_T1_ALPHA       (13   << 0)
+#       define RADEON_COLOR_ARG_A_T2_COLOR       (14   << 0)
+#       define RADEON_COLOR_ARG_A_T2_ALPHA       (15   << 0)
+#       define RADEON_COLOR_ARG_A_T3_COLOR       (16   << 0)
+#       define RADEON_COLOR_ARG_A_T3_ALPHA       (17   << 0)
+#       define RADEON_COLOR_ARG_B_SHIFT          5
+#       define RADEON_COLOR_ARG_B_MASK           (0x1f << 5)
+#       define RADEON_COLOR_ARG_B_ZERO           (0    << 5)
+#       define RADEON_COLOR_ARG_B_CURRENT_COLOR  (2    << 5)
+#       define RADEON_COLOR_ARG_B_CURRENT_ALPHA  (3    << 5)
+#       define RADEON_COLOR_ARG_B_DIFFUSE_COLOR  (4    << 5)
+#       define RADEON_COLOR_ARG_B_DIFFUSE_ALPHA  (5    << 5)
+#       define RADEON_COLOR_ARG_B_SPECULAR_COLOR (6    << 5)
+#       define RADEON_COLOR_ARG_B_SPECULAR_ALPHA (7    << 5)
+#       define RADEON_COLOR_ARG_B_TFACTOR_COLOR  (8    << 5)
+#       define RADEON_COLOR_ARG_B_TFACTOR_ALPHA  (9    << 5)
+#       define RADEON_COLOR_ARG_B_T0_COLOR       (10   << 5)
+#       define RADEON_COLOR_ARG_B_T0_ALPHA       (11   << 5)
+#       define RADEON_COLOR_ARG_B_T1_COLOR       (12   << 5)
+#       define RADEON_COLOR_ARG_B_T1_ALPHA       (13   << 5)
+#       define RADEON_COLOR_ARG_B_T2_COLOR       (14   << 5)
+#       define RADEON_COLOR_ARG_B_T2_ALPHA       (15   << 5)
+#       define RADEON_COLOR_ARG_B_T3_COLOR       (16   << 5)
+#       define RADEON_COLOR_ARG_B_T3_ALPHA       (17   << 5)
+#       define RADEON_COLOR_ARG_C_SHIFT          10
+#       define RADEON_COLOR_ARG_C_MASK           (0x1f << 10)
+#       define RADEON_COLOR_ARG_C_ZERO           (0    << 10)
+#       define RADEON_COLOR_ARG_C_CURRENT_COLOR  (2    << 10)
+#       define RADEON_COLOR_ARG_C_CURRENT_ALPHA  (3    << 10)
+#       define RADEON_COLOR_ARG_C_DIFFUSE_COLOR  (4    << 10)
+#       define RADEON_COLOR_ARG_C_DIFFUSE_ALPHA  (5    << 10)
+#       define RADEON_COLOR_ARG_C_SPECULAR_COLOR (6    << 10)
+#       define RADEON_COLOR_ARG_C_SPECULAR_ALPHA (7    << 10)
+#       define RADEON_COLOR_ARG_C_TFACTOR_COLOR  (8    << 10)
+#       define RADEON_COLOR_ARG_C_TFACTOR_ALPHA  (9    << 10)
+#       define RADEON_COLOR_ARG_C_T0_COLOR       (10   << 10)
+#       define RADEON_COLOR_ARG_C_T0_ALPHA       (11   << 10)
+#       define RADEON_COLOR_ARG_C_T1_COLOR       (12   << 10)
+#       define RADEON_COLOR_ARG_C_T1_ALPHA       (13   << 10)
+#       define RADEON_COLOR_ARG_C_T2_COLOR       (14   << 10)
+#       define RADEON_COLOR_ARG_C_T2_ALPHA       (15   << 10)
+#       define RADEON_COLOR_ARG_C_T3_COLOR       (16   << 10)
+#       define RADEON_COLOR_ARG_C_T3_ALPHA       (17   << 10)
+#       define RADEON_COMP_ARG_A                 (1 << 15)
+#       define RADEON_COMP_ARG_A_SHIFT           15
+#       define RADEON_COMP_ARG_B                 (1 << 16)
+#       define RADEON_COMP_ARG_B_SHIFT           16
+#       define RADEON_COMP_ARG_C                 (1 << 17)
+#       define RADEON_COMP_ARG_C_SHIFT           17
+#       define RADEON_BLEND_CTL_MASK             (7 << 18)
+#       define RADEON_BLEND_CTL_ADD              (0 << 18)
+#       define RADEON_BLEND_CTL_SUBTRACT         (1 << 18)
+#       define RADEON_BLEND_CTL_ADDSIGNED        (2 << 18)
+#       define RADEON_BLEND_CTL_BLEND            (3 << 18)
+#       define RADEON_BLEND_CTL_DOT3             (4 << 18)
+#       define RADEON_SCALE_SHIFT                21
+#       define RADEON_SCALE_MASK                 (3 << 21)
+#       define RADEON_SCALE_1X                   (0 << 21)
+#       define RADEON_SCALE_2X                   (1 << 21)
+#       define RADEON_SCALE_4X                   (2 << 21)
+#       define RADEON_CLAMP_TX                   (1 << 23)
+#       define RADEON_T0_EQ_TCUR                 (1 << 24)
+#       define RADEON_T1_EQ_TCUR                 (1 << 25)
+#       define RADEON_T2_EQ_TCUR                 (1 << 26)
+#       define RADEON_T3_EQ_TCUR                 (1 << 27)
+#       define RADEON_COLOR_ARG_MASK             0x1f
+#       define RADEON_COMP_ARG_SHIFT             15
+#define RADEON_PP_TXABLEND_0                0x1c64
+#define RADEON_PP_TXABLEND_1                0x1c7c
+#define RADEON_PP_TXABLEND_2                0x1c94
+#       define RADEON_ALPHA_ARG_A_SHIFT          0
+#       define RADEON_ALPHA_ARG_A_MASK           (0xf << 0)
+#       define RADEON_ALPHA_ARG_A_ZERO           (0   << 0)
+#       define RADEON_ALPHA_ARG_A_CURRENT_ALPHA  (1   << 0)
+#       define RADEON_ALPHA_ARG_A_DIFFUSE_ALPHA  (2   << 0)
+#       define RADEON_ALPHA_ARG_A_SPECULAR_ALPHA (3   << 0)
+#       define RADEON_ALPHA_ARG_A_TFACTOR_ALPHA  (4   << 0)
+#       define RADEON_ALPHA_ARG_A_T0_ALPHA       (5   << 0)
+#       define RADEON_ALPHA_ARG_A_T1_ALPHA       (6   << 0)
+#       define RADEON_ALPHA_ARG_A_T2_ALPHA       (7   << 0)
+#       define RADEON_ALPHA_ARG_A_T3_ALPHA       (8   << 0)
+#       define RADEON_ALPHA_ARG_B_SHIFT          4
+#       define RADEON_ALPHA_ARG_B_MASK           (0xf << 4)
+#       define RADEON_ALPHA_ARG_B_ZERO           (0   << 4)
+#       define RADEON_ALPHA_ARG_B_CURRENT_ALPHA  (1   << 4)
+#       define RADEON_ALPHA_ARG_B_DIFFUSE_ALPHA  (2   << 4)
+#       define RADEON_ALPHA_ARG_B_SPECULAR_ALPHA (3   << 4)
+#       define RADEON_ALPHA_ARG_B_TFACTOR_ALPHA  (4   << 4)
+#       define RADEON_ALPHA_ARG_B_T0_ALPHA       (5   << 4)
+#       define RADEON_ALPHA_ARG_B_T1_ALPHA       (6   << 4)
+#       define RADEON_ALPHA_ARG_B_T2_ALPHA       (7   << 4)
+#       define RADEON_ALPHA_ARG_B_T3_ALPHA       (8   << 4)
+#       define RADEON_ALPHA_ARG_C_SHIFT          8
+#       define RADEON_ALPHA_ARG_C_MASK           (0xf << 8)
+#       define RADEON_ALPHA_ARG_C_ZERO           (0   << 8)
+#       define RADEON_ALPHA_ARG_C_CURRENT_ALPHA  (1   << 8)
+#       define RADEON_ALPHA_ARG_C_DIFFUSE_ALPHA  (2   << 8)
+#       define RADEON_ALPHA_ARG_C_SPECULAR_ALPHA (3   << 8)
+#       define RADEON_ALPHA_ARG_C_TFACTOR_ALPHA  (4   << 8)
+#       define RADEON_ALPHA_ARG_C_T0_ALPHA       (5   << 8)
+#       define RADEON_ALPHA_ARG_C_T1_ALPHA       (6   << 8)
+#       define RADEON_ALPHA_ARG_C_T2_ALPHA       (7   << 8)
+#       define RADEON_ALPHA_ARG_C_T3_ALPHA       (8   << 8)
+#       define RADEON_DOT_ALPHA_DONT_REPLICATE   (1   << 9)
+#       define RADEON_ALPHA_ARG_MASK             0xf
+
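The combiner fields above all follow one shift/mask pattern: each argument is a small selector code placed in its own bit range (5 bits per colour argument, 4 bits per alpha argument), with the combine operation and output scale in the higher bits. Below is a minimal sketch of packing one colour-combiner word, assuming this header and <linux/types.h> (for u32) are included; txcblend_example is an illustrative name, not a function in the driver.

    /* Illustrative only: texture factor as argument A, current colour as
     * argument B, zero as argument C, ADD combine at 1x output scale.
     */
    static u32 txcblend_example(void)
    {
            u32 val = 0;

            val |= RADEON_COLOR_ARG_A_TFACTOR_COLOR;  /* bits 4:0   */
            val |= RADEON_COLOR_ARG_B_CURRENT_COLOR;  /* bits 9:5   */
            val |= RADEON_COLOR_ARG_C_ZERO;           /* bits 14:10 */
            val |= RADEON_BLEND_CTL_ADD;              /* bits 20:18 */
            val |= RADEON_SCALE_1X;                   /* bits 22:21 */

            return val;
    }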
+#define RADEON_PP_TFACTOR_0                 0x1c68
+#define RADEON_PP_TFACTOR_1                 0x1c80
+#define RADEON_PP_TFACTOR_2                 0x1c98
+
+#define RADEON_RB3D_BLENDCNTL               0x1c20
+#       define RADEON_COMB_FCN_MASK                    (3  << 12)
+#       define RADEON_COMB_FCN_ADD_CLAMP               (0  << 12)
+#       define RADEON_COMB_FCN_ADD_NOCLAMP             (1  << 12)
+#       define RADEON_COMB_FCN_SUB_CLAMP               (2  << 12)
+#       define RADEON_COMB_FCN_SUB_NOCLAMP             (3  << 12)
+#       define RADEON_SRC_BLEND_GL_ZERO                (32 << 16)
+#       define RADEON_SRC_BLEND_GL_ONE                 (33 << 16)
+#       define RADEON_SRC_BLEND_GL_SRC_COLOR           (34 << 16)
+#       define RADEON_SRC_BLEND_GL_ONE_MINUS_SRC_COLOR (35 << 16)
+#       define RADEON_SRC_BLEND_GL_DST_COLOR           (36 << 16)
+#       define RADEON_SRC_BLEND_GL_ONE_MINUS_DST_COLOR (37 << 16)
+#       define RADEON_SRC_BLEND_GL_SRC_ALPHA           (38 << 16)
+#       define RADEON_SRC_BLEND_GL_ONE_MINUS_SRC_ALPHA (39 << 16)
+#       define RADEON_SRC_BLEND_GL_DST_ALPHA           (40 << 16)
+#       define RADEON_SRC_BLEND_GL_ONE_MINUS_DST_ALPHA (41 << 16)
+#       define RADEON_SRC_BLEND_GL_SRC_ALPHA_SATURATE  (42 << 16)
+#       define RADEON_SRC_BLEND_MASK                   (63 << 16)
+#       define RADEON_DST_BLEND_GL_ZERO                (32 << 24)
+#       define RADEON_DST_BLEND_GL_ONE                 (33 << 24)
+#       define RADEON_DST_BLEND_GL_SRC_COLOR           (34 << 24)
+#       define RADEON_DST_BLEND_GL_ONE_MINUS_SRC_COLOR (35 << 24)
+#       define RADEON_DST_BLEND_GL_DST_COLOR           (36 << 24)
+#       define RADEON_DST_BLEND_GL_ONE_MINUS_DST_COLOR (37 << 24)
+#       define RADEON_DST_BLEND_GL_SRC_ALPHA           (38 << 24)
+#       define RADEON_DST_BLEND_GL_ONE_MINUS_SRC_ALPHA (39 << 24)
+#       define RADEON_DST_BLEND_GL_DST_ALPHA           (40 << 24)
+#       define RADEON_DST_BLEND_GL_ONE_MINUS_DST_ALPHA (41 << 24)
+#       define RADEON_DST_BLEND_MASK                   (63 << 24)
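RADEON_RB3D_BLENDCNTL packs a combine function in bits 13:12 and OpenGL-style blend-factor codes (numbered from 32) in bits 21:16 for the source and bits 29:24 for the destination. A minimal sketch of the usual source-over alpha blend follows, assuming this header and <linux/types.h> are included; blendcntl_src_over is an illustrative name only.

    /* Illustrative only: add with clamp, source scaled by its alpha,
     * destination scaled by one minus source alpha.
     */
    static u32 blendcntl_src_over(void)
    {
            return RADEON_COMB_FCN_ADD_CLAMP |
                   RADEON_SRC_BLEND_GL_SRC_ALPHA |
                   RADEON_DST_BLEND_GL_ONE_MINUS_SRC_ALPHA;
    }

Blending itself is gated by the RADEON_ALPHA_BLEND_ENABLE bit of RADEON_RB3D_CNTL, defined next.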
+#define RADEON_RB3D_CNTL                    0x1c3c
+#       define RADEON_ALPHA_BLEND_ENABLE       (1  <<  0)
+#       define RADEON_PLANE_MASK_ENABLE        (1  <<  1)
+#       define RADEON_DITHER_ENABLE            (1  <<  2)
+#       define RADEON_ROUND_ENABLE             (1  <<  3)
+#       define RADEON_SCALE_DITHER_ENABLE      (1  <<  4)
+#       define RADEON_DITHER_INIT              (1  <<  5)
+#       define RADEON_ROP_ENABLE               (1  <<  6)
+#       define RADEON_STENCIL_ENABLE           (1  <<  7)
+#       define RADEON_Z_ENABLE                 (1  <<  8)
+#       define RADEON_DEPTH_XZ_OFFEST_ENABLE   (1  <<  9)
+#       define RADEON_RB3D_COLOR_FORMAT_SHIFT  10
+
+#       define RADEON_COLOR_FORMAT_ARGB1555    3
+#       define RADEON_COLOR_FORMAT_RGB565      4
+#       define RADEON_COLOR_FORMAT_ARGB8888    6
+#       define RADEON_COLOR_FORMAT_RGB332      7
+#       define RADEON_COLOR_FORMAT_Y8          8
+#       define RADEON_COLOR_FORMAT_RGB8        9
+#       define RADEON_COLOR_FORMAT_YUV422_VYUY 11
+#       define RADEON_COLOR_FORMAT_YUV422_YVYU 12
+#       define RADEON_COLOR_FORMAT_aYUV444     14
+#       define RADEON_COLOR_FORMAT_ARGB4444    15
+
+#       define RADEON_CLRCMP_FLIP_ENABLE       (1  << 14)
+#define RADEON_RB3D_COLOROFFSET             0x1c40
+#       define RADEON_COLOROFFSET_MASK      0xfffffff0
+#define RADEON_RB3D_COLORPITCH              0x1c48
+#       define RADEON_COLORPITCH_MASK         0x00001ff8
+#       define RADEON_COLOR_TILE_ENABLE       (1 << 16)
+#       define RADEON_COLOR_MICROTILE_ENABLE  (1 << 17)
+#       define RADEON_COLOR_ENDIAN_NO_SWAP    (0 << 18)
+#       define RADEON_COLOR_ENDIAN_WORD_SWAP  (1 << 18)
+#       define RADEON_COLOR_ENDIAN_DWORD_SWAP (2 << 18)
+#define RADEON_RB3D_DEPTHOFFSET             0x1c24
+#define RADEON_RB3D_DEPTHPITCH              0x1c28
+#       define RADEON_DEPTHPITCH_MASK         0x00001ff8
+#       define RADEON_DEPTH_ENDIAN_NO_SWAP    (0 << 18)
+#       define RADEON_DEPTH_ENDIAN_WORD_SWAP  (1 << 18)
+#       define RADEON_DEPTH_ENDIAN_DWORD_SWAP (2 << 18)
+#define RADEON_RB3D_PLANEMASK               0x1d84
+#define RADEON_RB3D_ROPCNTL                 0x1d80
+#       define RADEON_ROP_MASK              (15 << 8)
+#       define RADEON_ROP_CLEAR             (0  << 8)
+#       define RADEON_ROP_NOR               (1  << 8)
+#       define RADEON_ROP_AND_INVERTED      (2  << 8)
+#       define RADEON_ROP_COPY_INVERTED     (3  << 8)
+#       define RADEON_ROP_AND_REVERSE       (4  << 8)
+#       define RADEON_ROP_INVERT            (5  << 8)
+#       define RADEON_ROP_XOR               (6  << 8)
+#       define RADEON_ROP_NAND              (7  << 8)
+#       define RADEON_ROP_AND               (8  << 8)
+#       define RADEON_ROP_EQUIV             (9  << 8)
+#       define RADEON_ROP_NOOP              (10 << 8)
+#       define RADEON_ROP_OR_INVERTED       (11 << 8)
+#       define RADEON_ROP_COPY              (12 << 8)
+#       define RADEON_ROP_OR_REVERSE        (13 << 8)
+#       define RADEON_ROP_OR                (14 << 8)
+#       define RADEON_ROP_SET               (15 << 8)
+#define RADEON_RB3D_STENCILREFMASK          0x1d7c
+#       define RADEON_STENCIL_REF_SHIFT       0
+#       define RADEON_STENCIL_REF_MASK        (0xff << 0)
+#       define RADEON_STENCIL_MASK_SHIFT      16
+#       define RADEON_STENCIL_VALUE_MASK      (0xff << 16)
+#       define RADEON_STENCIL_WRITEMASK_SHIFT 24
+#       define RADEON_STENCIL_WRITE_MASK      (0xff << 24)
+#define RADEON_RB3D_ZSTENCILCNTL            0x1c2c
+#       define RADEON_DEPTH_FORMAT_MASK          (0xf << 0)
+#       define RADEON_DEPTH_FORMAT_16BIT_INT_Z   (0  <<  0)
+#       define RADEON_DEPTH_FORMAT_24BIT_INT_Z   (2  <<  0)
+#       define RADEON_DEPTH_FORMAT_24BIT_FLOAT_Z (3  <<  0)
+#       define RADEON_DEPTH_FORMAT_32BIT_INT_Z   (4  <<  0)
+#       define RADEON_DEPTH_FORMAT_32BIT_FLOAT_Z (5  <<  0)
+#       define RADEON_DEPTH_FORMAT_16BIT_FLOAT_W (7  <<  0)
+#       define RADEON_DEPTH_FORMAT_24BIT_FLOAT_W (9  <<  0)
+#       define RADEON_DEPTH_FORMAT_32BIT_FLOAT_W (11 <<  0)
+#       define RADEON_Z_TEST_NEVER               (0  <<  4)
+#       define RADEON_Z_TEST_LESS                (1  <<  4)
+#       define RADEON_Z_TEST_LEQUAL              (2  <<  4)
+#       define RADEON_Z_TEST_EQUAL               (3  <<  4)
+#       define RADEON_Z_TEST_GEQUAL              (4  <<  4)
+#       define RADEON_Z_TEST_GREATER             (5  <<  4)
+#       define RADEON_Z_TEST_NEQUAL              (6  <<  4)
+#       define RADEON_Z_TEST_ALWAYS              (7  <<  4)
+#       define RADEON_Z_TEST_MASK                (7  <<  4)
+#       define RADEON_STENCIL_TEST_NEVER         (0  << 12)
+#       define RADEON_STENCIL_TEST_LESS          (1  << 12)
+#       define RADEON_STENCIL_TEST_LEQUAL        (2  << 12)
+#       define RADEON_STENCIL_TEST_EQUAL         (3  << 12)
+#       define RADEON_STENCIL_TEST_GEQUAL        (4  << 12)
+#       define RADEON_STENCIL_TEST_GREATER       (5  << 12)
+#       define RADEON_STENCIL_TEST_NEQUAL        (6  << 12)
+#       define RADEON_STENCIL_TEST_ALWAYS        (7  << 12)
+#       define RADEON_STENCIL_TEST_MASK          (0x7 << 12)
+#       define RADEON_STENCIL_FAIL_KEEP          (0  << 16)
+#       define RADEON_STENCIL_FAIL_ZERO          (1  << 16)
+#       define RADEON_STENCIL_FAIL_REPLACE       (2  << 16)
+#       define RADEON_STENCIL_FAIL_INC           (3  << 16)
+#       define RADEON_STENCIL_FAIL_DEC           (4  << 16)
+#       define RADEON_STENCIL_FAIL_INVERT        (5  << 16)
+#       define RADEON_STENCIL_FAIL_MASK          (0x7 << 16)
+#       define RADEON_STENCIL_ZPASS_KEEP         (0  << 20)
+#       define RADEON_STENCIL_ZPASS_ZERO         (1  << 20)
+#       define RADEON_STENCIL_ZPASS_REPLACE      (2  << 20)
+#       define RADEON_STENCIL_ZPASS_INC          (3  << 20)
+#       define RADEON_STENCIL_ZPASS_DEC          (4  << 20)
+#       define RADEON_STENCIL_ZPASS_INVERT       (5  << 20)
+#       define RADEON_STENCIL_ZPASS_MASK         (0x7 << 20)
+#       define RADEON_STENCIL_ZFAIL_KEEP         (0  << 24)
+#       define RADEON_STENCIL_ZFAIL_ZERO         (1  << 24)
+#       define RADEON_STENCIL_ZFAIL_REPLACE      (2  << 24)
+#       define RADEON_STENCIL_ZFAIL_INC          (3  << 24)
+#       define RADEON_STENCIL_ZFAIL_DEC          (4  << 24)
+#       define RADEON_STENCIL_ZFAIL_INVERT       (5  << 24)
+#       define RADEON_STENCIL_ZFAIL_MASK         (0x7 << 24)
+#       define RADEON_Z_COMPRESSION_ENABLE       (1  << 28)
+#       define RADEON_FORCE_Z_DIRTY              (1  << 29)
+#       define RADEON_Z_WRITE_ENABLE             (1  << 30)
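RADEON_RB3D_ZSTENCILCNTL combines the depth-buffer format (bits 3:0), the Z compare function (bits 6:4), three-bit stencil test/fail/zpass/zfail fields starting at bits 12, 16, 20 and 24, and the Z write enable in bit 30. A minimal depth-only sketch, assuming this header and <linux/types.h> are included; the function name is illustrative only.

    /* Illustrative only: 24-bit integer Z, less-or-equal compare, Z writes
     * on; stencil fields left at zero (test NEVER, all ops KEEP).
     */
    static u32 zstencilcntl_depth_only(void)
    {
            return RADEON_DEPTH_FORMAT_24BIT_INT_Z |
                   RADEON_Z_TEST_LEQUAL |
                   RADEON_Z_WRITE_ENABLE;
    }

Depth testing as such is switched on separately via the RADEON_Z_ENABLE bit of RADEON_RB3D_CNTL above.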
+#define RADEON_RE_LINE_PATTERN              0x1cd0
+#       define RADEON_LINE_PATTERN_MASK             0x0000ffff
+#       define RADEON_LINE_REPEAT_COUNT_SHIFT       16
+#       define RADEON_LINE_PATTERN_START_SHIFT      24
+#       define RADEON_LINE_PATTERN_LITTLE_BIT_ORDER (0 << 28)
+#       define RADEON_LINE_PATTERN_BIG_BIT_ORDER    (1 << 28)
+#       define RADEON_LINE_PATTERN_AUTO_RESET       (1 << 29)
+#define RADEON_RE_LINE_STATE                0x1cd4
+#       define RADEON_LINE_CURRENT_PTR_SHIFT   0
+#       define RADEON_LINE_CURRENT_COUNT_SHIFT 8
+#define RADEON_RE_MISC                      0x26c4
+#       define RADEON_STIPPLE_COORD_MASK       0x1f
+#       define RADEON_STIPPLE_X_OFFSET_SHIFT   0
+#       define RADEON_STIPPLE_X_OFFSET_MASK    (0x1f << 0)
+#       define RADEON_STIPPLE_Y_OFFSET_SHIFT   8
+#       define RADEON_STIPPLE_Y_OFFSET_MASK    (0x1f << 8)
+#       define RADEON_STIPPLE_LITTLE_BIT_ORDER (0 << 16)
+#       define RADEON_STIPPLE_BIG_BIT_ORDER    (1 << 16)
+#define RADEON_RE_SOLID_COLOR               0x1c1c
+#define RADEON_RE_TOP_LEFT                  0x26c0
+#       define RADEON_RE_LEFT_SHIFT         0
+#       define RADEON_RE_TOP_SHIFT          16
+#define RADEON_RE_WIDTH_HEIGHT              0x1c44
+#       define RADEON_RE_WIDTH_SHIFT        0
+#       define RADEON_RE_HEIGHT_SHIFT       16
+
+#define RADEON_SE_CNTL                      0x1c4c
+#       define RADEON_FFACE_CULL_CW          (0 <<  0)
+#       define RADEON_FFACE_CULL_CCW         (1 <<  0)
+#       define RADEON_FFACE_CULL_DIR_MASK    (1 <<  0)
+#       define RADEON_BFACE_CULL             (0 <<  1)
+#       define RADEON_BFACE_SOLID            (3 <<  1)
+#       define RADEON_FFACE_CULL             (0 <<  3)
+#       define RADEON_FFACE_SOLID            (3 <<  3)
+#       define RADEON_FFACE_CULL_MASK        (3 <<  3)
+#       define RADEON_BADVTX_CULL_DISABLE    (1 <<  5)
+#       define RADEON_FLAT_SHADE_VTX_0       (0 <<  6)
+#       define RADEON_FLAT_SHADE_VTX_1       (1 <<  6)
+#       define RADEON_FLAT_SHADE_VTX_2       (2 <<  6)
+#       define RADEON_FLAT_SHADE_VTX_LAST    (3 <<  6)
+#       define RADEON_DIFFUSE_SHADE_SOLID    (0 <<  8)
+#       define RADEON_DIFFUSE_SHADE_FLAT     (1 <<  8)
+#       define RADEON_DIFFUSE_SHADE_GOURAUD  (2 <<  8)
+#       define RADEON_DIFFUSE_SHADE_MASK     (3 <<  8)
+#       define RADEON_ALPHA_SHADE_SOLID      (0 << 10)
+#       define RADEON_ALPHA_SHADE_FLAT       (1 << 10)
+#       define RADEON_ALPHA_SHADE_GOURAUD    (2 << 10)
+#       define RADEON_ALPHA_SHADE_MASK       (3 << 10)
+#       define RADEON_SPECULAR_SHADE_SOLID   (0 << 12)
+#       define RADEON_SPECULAR_SHADE_FLAT    (1 << 12)
+#       define RADEON_SPECULAR_SHADE_GOURAUD (2 << 12)
+#       define RADEON_SPECULAR_SHADE_MASK    (3 << 12)
+#       define RADEON_FOG_SHADE_SOLID        (0 << 14)
+#       define RADEON_FOG_SHADE_FLAT         (1 << 14)
+#       define RADEON_FOG_SHADE_GOURAUD      (2 << 14)
+#       define RADEON_FOG_SHADE_MASK         (3 << 14)
+#       define RADEON_ZBIAS_ENABLE_POINT     (1 << 16)
+#       define RADEON_ZBIAS_ENABLE_LINE      (1 << 17)
+#       define RADEON_ZBIAS_ENABLE_TRI       (1 << 18)
+#       define RADEON_WIDELINE_ENABLE        (1 << 20)
+#       define RADEON_VPORT_XY_XFORM_ENABLE  (1 << 24)
+#       define RADEON_VPORT_Z_XFORM_ENABLE   (1 << 25)
+#       define RADEON_VTX_PIX_CENTER_D3D     (0 << 27)
+#       define RADEON_VTX_PIX_CENTER_OGL     (1 << 27)
+#       define RADEON_ROUND_MODE_TRUNC       (0 << 28)
+#       define RADEON_ROUND_MODE_ROUND       (1 << 28)
+#       define RADEON_ROUND_MODE_ROUND_EVEN  (2 << 28)
+#       define RADEON_ROUND_MODE_ROUND_ODD   (3 << 28)
+#       define RADEON_ROUND_PREC_16TH_PIX    (0 << 30)
+#       define RADEON_ROUND_PREC_8TH_PIX     (1 << 30)
+#       define RADEON_ROUND_PREC_4TH_PIX     (2 << 30)
+#       define RADEON_ROUND_PREC_HALF_PIX    (3 << 30)
+#define R200_RE_CNTL                           0x1c50
+#       define R200_STIPPLE_ENABLE             0x1
+#       define R200_SCISSOR_ENABLE             0x2
+#       define R200_PATTERN_ENABLE             0x4
+#       define R200_PERSPECTIVE_ENABLE         0x8
+#       define R200_POINT_SMOOTH               0x20
+#       define R200_VTX_STQ0_D3D               0x00010000
+#       define R200_VTX_STQ1_D3D               0x00040000
+#       define R200_VTX_STQ2_D3D               0x00100000
+#       define R200_VTX_STQ3_D3D               0x00400000
+#       define R200_VTX_STQ4_D3D               0x01000000
+#       define R200_VTX_STQ5_D3D               0x04000000
+#define RADEON_SE_CNTL_STATUS               0x2140
+#       define RADEON_VC_NO_SWAP            (0 << 0)
+#       define RADEON_VC_16BIT_SWAP         (1 << 0)
+#       define RADEON_VC_32BIT_SWAP         (2 << 0)
+#       define RADEON_VC_HALF_DWORD_SWAP    (3 << 0)
+#       define RADEON_TCL_BYPASS            (1 << 8)
+#define RADEON_SE_COORD_FMT                 0x1c50
+#       define RADEON_VTX_XY_PRE_MULT_1_OVER_W0  (1 <<  0)
+#       define RADEON_VTX_Z_PRE_MULT_1_OVER_W0   (1 <<  1)
+#       define RADEON_VTX_ST0_NONPARAMETRIC      (1 <<  8)
+#       define RADEON_VTX_ST1_NONPARAMETRIC      (1 <<  9)
+#       define RADEON_VTX_ST2_NONPARAMETRIC      (1 << 10)
+#       define RADEON_VTX_ST3_NONPARAMETRIC      (1 << 11)
+#       define RADEON_VTX_W0_NORMALIZE           (1 << 12)
+#       define RADEON_VTX_W0_IS_NOT_1_OVER_W0    (1 << 16)
+#       define RADEON_VTX_ST0_PRE_MULT_1_OVER_W0 (1 << 17)
+#       define RADEON_VTX_ST1_PRE_MULT_1_OVER_W0 (1 << 19)
+#       define RADEON_VTX_ST2_PRE_MULT_1_OVER_W0 (1 << 21)
+#       define RADEON_VTX_ST3_PRE_MULT_1_OVER_W0 (1 << 23)
+#       define RADEON_TEX1_W_ROUTING_USE_W0      (0 << 26)
+#       define RADEON_TEX1_W_ROUTING_USE_Q1      (1 << 26)
+#define RADEON_SE_LINE_WIDTH                0x1db8
+#define RADEON_SE_TCL_LIGHT_MODEL_CTL       0x226c
+#       define RADEON_LIGHTING_ENABLE              (1 << 0)
+#       define RADEON_LIGHT_IN_MODELSPACE          (1 << 1)
+#       define RADEON_LOCAL_VIEWER                 (1 << 2)
+#       define RADEON_NORMALIZE_NORMALS            (1 << 3)
+#       define RADEON_RESCALE_NORMALS              (1 << 4)
+#       define RADEON_SPECULAR_LIGHTS              (1 << 5)
+#       define RADEON_DIFFUSE_SPECULAR_COMBINE     (1 << 6)
+#       define RADEON_LIGHT_ALPHA                  (1 << 7)
+#       define RADEON_LOCAL_LIGHT_VEC_GL           (1 << 8)
+#       define RADEON_LIGHT_NO_NORMAL_AMBIENT_ONLY (1 << 9)
+#       define RADEON_LM_SOURCE_STATE_PREMULT      0
+#       define RADEON_LM_SOURCE_STATE_MULT         1
+#       define RADEON_LM_SOURCE_VERTEX_DIFFUSE     2
+#       define RADEON_LM_SOURCE_VERTEX_SPECULAR    3
+#       define RADEON_EMISSIVE_SOURCE_SHIFT        16
+#       define RADEON_AMBIENT_SOURCE_SHIFT         18
+#       define RADEON_DIFFUSE_SOURCE_SHIFT         20
+#       define RADEON_SPECULAR_SOURCE_SHIFT        22
+#define RADEON_SE_TCL_MATERIAL_AMBIENT_RED     0x2220
+#define RADEON_SE_TCL_MATERIAL_AMBIENT_GREEN   0x2224
+#define RADEON_SE_TCL_MATERIAL_AMBIENT_BLUE    0x2228
+#define RADEON_SE_TCL_MATERIAL_AMBIENT_ALPHA   0x222c
+#define RADEON_SE_TCL_MATERIAL_DIFFUSE_RED     0x2230
+#define RADEON_SE_TCL_MATERIAL_DIFFUSE_GREEN   0x2234
+#define RADEON_SE_TCL_MATERIAL_DIFFUSE_BLUE    0x2238
+#define RADEON_SE_TCL_MATERIAL_DIFFUSE_ALPHA   0x223c
+#define RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED   0x2210
+#define RADEON_SE_TCL_MATERIAL_EMMISSIVE_GREEN 0x2214
+#define RADEON_SE_TCL_MATERIAL_EMMISSIVE_BLUE  0x2218
+#define RADEON_SE_TCL_MATERIAL_EMMISSIVE_ALPHA 0x221c
+#define RADEON_SE_TCL_MATERIAL_SPECULAR_RED    0x2240
+#define RADEON_SE_TCL_MATERIAL_SPECULAR_GREEN  0x2244
+#define RADEON_SE_TCL_MATERIAL_SPECULAR_BLUE   0x2248
+#define RADEON_SE_TCL_MATERIAL_SPECULAR_ALPHA  0x224c
+#define RADEON_SE_TCL_MATRIX_SELECT_0       0x225c
+#       define RADEON_MODELVIEW_0_SHIFT        0
+#       define RADEON_MODELVIEW_1_SHIFT        4
+#       define RADEON_MODELVIEW_2_SHIFT        8
+#       define RADEON_MODELVIEW_3_SHIFT        12
+#       define RADEON_IT_MODELVIEW_0_SHIFT     16
+#       define RADEON_IT_MODELVIEW_1_SHIFT     20
+#       define RADEON_IT_MODELVIEW_2_SHIFT     24
+#       define RADEON_IT_MODELVIEW_3_SHIFT     28
+#define RADEON_SE_TCL_MATRIX_SELECT_1       0x2260
+#       define RADEON_MODELPROJECT_0_SHIFT     0
+#       define RADEON_MODELPROJECT_1_SHIFT     4
+#       define RADEON_MODELPROJECT_2_SHIFT     8
+#       define RADEON_MODELPROJECT_3_SHIFT     12
+#       define RADEON_TEXMAT_0_SHIFT           16
+#       define RADEON_TEXMAT_1_SHIFT           20
+#       define RADEON_TEXMAT_2_SHIFT           24
+#       define RADEON_TEXMAT_3_SHIFT           28
+
+
+#define RADEON_SE_TCL_OUTPUT_VTX_FMT        0x2254
+#       define RADEON_TCL_VTX_W0                 (1 <<  0)
+#       define RADEON_TCL_VTX_FP_DIFFUSE         (1 <<  1)
+#       define RADEON_TCL_VTX_FP_ALPHA           (1 <<  2)
+#       define RADEON_TCL_VTX_PK_DIFFUSE         (1 <<  3)
+#       define RADEON_TCL_VTX_FP_SPEC            (1 <<  4)
+#       define RADEON_TCL_VTX_FP_FOG             (1 <<  5)
+#       define RADEON_TCL_VTX_PK_SPEC            (1 <<  6)
+#       define RADEON_TCL_VTX_ST0                (1 <<  7)
+#       define RADEON_TCL_VTX_ST1                (1 <<  8)
+#       define RADEON_TCL_VTX_Q1                 (1 <<  9)
+#       define RADEON_TCL_VTX_ST2                (1 << 10)
+#       define RADEON_TCL_VTX_Q2                 (1 << 11)
+#       define RADEON_TCL_VTX_ST3                (1 << 12)
+#       define RADEON_TCL_VTX_Q3                 (1 << 13)
+#       define RADEON_TCL_VTX_Q0                 (1 << 14)
+#       define RADEON_TCL_VTX_WEIGHT_COUNT_SHIFT 15
+#       define RADEON_TCL_VTX_NORM0              (1 << 18)
+#       define RADEON_TCL_VTX_XY1                (1 << 27)
+#       define RADEON_TCL_VTX_Z1                 (1 << 28)
+#       define RADEON_TCL_VTX_W1                 (1 << 29)
+#       define RADEON_TCL_VTX_NORM1              (1 << 30)
+#       define RADEON_TCL_VTX_Z0                 (1 << 31)
+
+#define RADEON_SE_TCL_OUTPUT_VTX_SEL        0x2258
+#       define RADEON_TCL_COMPUTE_XYZW           (1 << 0)
+#       define RADEON_TCL_COMPUTE_DIFFUSE        (1 << 1)
+#       define RADEON_TCL_COMPUTE_SPECULAR       (1 << 2)
+#       define RADEON_TCL_FORCE_NAN_IF_COLOR_NAN (1 << 3)
+#       define RADEON_TCL_FORCE_INORDER_PROC     (1 << 4)
+#       define RADEON_TCL_TEX_INPUT_TEX_0        0
+#       define RADEON_TCL_TEX_INPUT_TEX_1        1
+#       define RADEON_TCL_TEX_INPUT_TEX_2        2
+#       define RADEON_TCL_TEX_INPUT_TEX_3        3
+#       define RADEON_TCL_TEX_COMPUTED_TEX_0     8
+#       define RADEON_TCL_TEX_COMPUTED_TEX_1     9
+#       define RADEON_TCL_TEX_COMPUTED_TEX_2     10
+#       define RADEON_TCL_TEX_COMPUTED_TEX_3     11
+#       define RADEON_TCL_TEX_0_OUTPUT_SHIFT     16
+#       define RADEON_TCL_TEX_1_OUTPUT_SHIFT     20
+#       define RADEON_TCL_TEX_2_OUTPUT_SHIFT     24
+#       define RADEON_TCL_TEX_3_OUTPUT_SHIFT     28
+
+#define RADEON_SE_TCL_PER_LIGHT_CTL_0       0x2270
+#       define RADEON_LIGHT_0_ENABLE               (1 <<  0)
+#       define RADEON_LIGHT_0_ENABLE_AMBIENT       (1 <<  1)
+#       define RADEON_LIGHT_0_ENABLE_SPECULAR      (1 <<  2)
+#       define RADEON_LIGHT_0_IS_LOCAL             (1 <<  3)
+#       define RADEON_LIGHT_0_IS_SPOT              (1 <<  4)
+#       define RADEON_LIGHT_0_DUAL_CONE            (1 <<  5)
+#       define RADEON_LIGHT_0_ENABLE_RANGE_ATTEN   (1 <<  6)
+#       define RADEON_LIGHT_0_CONSTANT_RANGE_ATTEN (1 <<  7)
+#       define RADEON_LIGHT_0_SHIFT                0
+#       define RADEON_LIGHT_1_ENABLE               (1 << 16)
+#       define RADEON_LIGHT_1_ENABLE_AMBIENT       (1 << 17)
+#       define RADEON_LIGHT_1_ENABLE_SPECULAR      (1 << 18)
+#       define RADEON_LIGHT_1_IS_LOCAL             (1 << 19)
+#       define RADEON_LIGHT_1_IS_SPOT              (1 << 20)
+#       define RADEON_LIGHT_1_DUAL_CONE            (1 << 21)
+#       define RADEON_LIGHT_1_ENABLE_RANGE_ATTEN   (1 << 22)
+#       define RADEON_LIGHT_1_CONSTANT_RANGE_ATTEN (1 << 23)
+#       define RADEON_LIGHT_1_SHIFT                16
+#define RADEON_SE_TCL_PER_LIGHT_CTL_1       0x2274
+#       define RADEON_LIGHT_2_SHIFT            0
+#       define RADEON_LIGHT_3_SHIFT            16
+#define RADEON_SE_TCL_PER_LIGHT_CTL_2       0x2278
+#       define RADEON_LIGHT_4_SHIFT            0
+#       define RADEON_LIGHT_5_SHIFT            16
+#define RADEON_SE_TCL_PER_LIGHT_CTL_3       0x227c
+#       define RADEON_LIGHT_6_SHIFT            0
+#       define RADEON_LIGHT_7_SHIFT            16
+
+#define RADEON_SE_TCL_SHININESS             0x2250
+
+#define RADEON_SE_TCL_TEXTURE_PROC_CTL      0x2268
+#       define RADEON_TEXGEN_TEXMAT_0_ENABLE      (1 << 0)
+#       define RADEON_TEXGEN_TEXMAT_1_ENABLE      (1 << 1)
+#       define RADEON_TEXGEN_TEXMAT_2_ENABLE      (1 << 2)
+#       define RADEON_TEXGEN_TEXMAT_3_ENABLE      (1 << 3)
+#       define RADEON_TEXMAT_0_ENABLE             (1 << 4)
+#       define RADEON_TEXMAT_1_ENABLE             (1 << 5)
+#       define RADEON_TEXMAT_2_ENABLE             (1 << 6)
+#       define RADEON_TEXMAT_3_ENABLE             (1 << 7)
+#       define RADEON_TEXGEN_INPUT_MASK           0xf
+#       define RADEON_TEXGEN_INPUT_TEXCOORD_0     0
+#       define RADEON_TEXGEN_INPUT_TEXCOORD_1     1
+#       define RADEON_TEXGEN_INPUT_TEXCOORD_2     2
+#       define RADEON_TEXGEN_INPUT_TEXCOORD_3     3
+#       define RADEON_TEXGEN_INPUT_OBJ            4
+#       define RADEON_TEXGEN_INPUT_EYE            5
+#       define RADEON_TEXGEN_INPUT_EYE_NORMAL     6
+#       define RADEON_TEXGEN_INPUT_EYE_REFLECT    7
+#       define RADEON_TEXGEN_INPUT_EYE_NORMALIZED 8
+#       define RADEON_TEXGEN_0_INPUT_SHIFT        16
+#       define RADEON_TEXGEN_1_INPUT_SHIFT        20
+#       define RADEON_TEXGEN_2_INPUT_SHIFT        24
+#       define RADEON_TEXGEN_3_INPUT_SHIFT        28
+
+#define RADEON_SE_TCL_UCP_VERT_BLEND_CTL    0x2264
+#       define RADEON_UCP_IN_CLIP_SPACE            (1 <<  0)
+#       define RADEON_UCP_IN_MODEL_SPACE           (1 <<  1)
+#       define RADEON_UCP_ENABLE_0                 (1 <<  2)
+#       define RADEON_UCP_ENABLE_1                 (1 <<  3)
+#       define RADEON_UCP_ENABLE_2                 (1 <<  4)
+#       define RADEON_UCP_ENABLE_3                 (1 <<  5)
+#       define RADEON_UCP_ENABLE_4                 (1 <<  6)
+#       define RADEON_UCP_ENABLE_5                 (1 <<  7)
+#       define RADEON_TCL_FOG_MASK                 (3 <<  8)
+#       define RADEON_TCL_FOG_DISABLE              (0 <<  8)
+#       define RADEON_TCL_FOG_EXP                  (1 <<  8)
+#       define RADEON_TCL_FOG_EXP2                 (2 <<  8)
+#       define RADEON_TCL_FOG_LINEAR               (3 <<  8)
+#       define RADEON_RNG_BASED_FOG                (1 << 10)
+#       define RADEON_LIGHT_TWOSIDE                (1 << 11)
+#       define RADEON_BLEND_OP_COUNT_MASK          (7 << 12)
+#       define RADEON_BLEND_OP_COUNT_SHIFT         12
+#       define RADEON_POSITION_BLEND_OP_ENABLE     (1 << 16)
+#       define RADEON_NORMAL_BLEND_OP_ENABLE       (1 << 17)
+#       define RADEON_VERTEX_BLEND_SRC_0_PRIMARY   (1 << 18)
+#       define RADEON_VERTEX_BLEND_SRC_0_SECONDARY (1 << 18)
+#       define RADEON_VERTEX_BLEND_SRC_1_PRIMARY   (1 << 19)
+#       define RADEON_VERTEX_BLEND_SRC_1_SECONDARY (1 << 19)
+#       define RADEON_VERTEX_BLEND_SRC_2_PRIMARY   (1 << 20)
+#       define RADEON_VERTEX_BLEND_SRC_2_SECONDARY (1 << 20)
+#       define RADEON_VERTEX_BLEND_SRC_3_PRIMARY   (1 << 21)
+#       define RADEON_VERTEX_BLEND_SRC_3_SECONDARY (1 << 21)
+#       define RADEON_VERTEX_BLEND_WGT_MINUS_ONE   (1 << 22)
+#       define RADEON_CULL_FRONT_IS_CW             (0 << 28)
+#       define RADEON_CULL_FRONT_IS_CCW            (1 << 28)
+#       define RADEON_CULL_FRONT                   (1 << 29)
+#       define RADEON_CULL_BACK                    (1 << 30)
+#       define RADEON_FORCE_W_TO_ONE               (1 << 31)
+
+#define RADEON_SE_VPORT_XSCALE              0x1d98
+#define RADEON_SE_VPORT_XOFFSET             0x1d9c
+#define RADEON_SE_VPORT_YSCALE              0x1da0
+#define RADEON_SE_VPORT_YOFFSET             0x1da4
+#define RADEON_SE_VPORT_ZSCALE              0x1da8
+#define RADEON_SE_VPORT_ZOFFSET             0x1dac
+#define RADEON_SE_ZBIAS_FACTOR              0x1db0
+#define RADEON_SE_ZBIAS_CONSTANT            0x1db4
+
+#define RADEON_SE_VTX_FMT                   0x2080
+#       define RADEON_SE_VTX_FMT_XY         0x00000000
+#       define RADEON_SE_VTX_FMT_W0         0x00000001
+#       define RADEON_SE_VTX_FMT_FPCOLOR    0x00000002
+#       define RADEON_SE_VTX_FMT_FPALPHA    0x00000004
+#       define RADEON_SE_VTX_FMT_PKCOLOR    0x00000008
+#       define RADEON_SE_VTX_FMT_FPSPEC     0x00000010
+#       define RADEON_SE_VTX_FMT_FPFOG      0x00000020
+#       define RADEON_SE_VTX_FMT_PKSPEC     0x00000040
+#       define RADEON_SE_VTX_FMT_ST0        0x00000080
+#       define RADEON_SE_VTX_FMT_ST1        0x00000100
+#       define RADEON_SE_VTX_FMT_Q1         0x00000200
+#       define RADEON_SE_VTX_FMT_ST2        0x00000400
+#       define RADEON_SE_VTX_FMT_Q2         0x00000800
+#       define RADEON_SE_VTX_FMT_ST3        0x00001000
+#       define RADEON_SE_VTX_FMT_Q3         0x00002000
+#       define RADEON_SE_VTX_FMT_Q0         0x00004000
+#       define RADEON_SE_VTX_FMT_BLND_WEIGHT_CNT_MASK  0x00038000
+#       define RADEON_SE_VTX_FMT_N0         0x00040000
+#       define RADEON_SE_VTX_FMT_XY1        0x08000000
+#       define RADEON_SE_VTX_FMT_Z1         0x10000000
+#       define RADEON_SE_VTX_FMT_W1         0x20000000
+#       define RADEON_SE_VTX_FMT_N1         0x40000000
+#       define RADEON_SE_VTX_FMT_Z          0x80000000
+
+#define RADEON_SE_VF_CNTL                             0x2084
+#       define RADEON_VF_PRIM_TYPE_POINT_LIST         1
+#       define RADEON_VF_PRIM_TYPE_LINE_LIST          2
+#       define RADEON_VF_PRIM_TYPE_LINE_STRIP         3
+#       define RADEON_VF_PRIM_TYPE_TRIANGLE_LIST      4
+#       define RADEON_VF_PRIM_TYPE_TRIANGLE_FAN       5
+#       define RADEON_VF_PRIM_TYPE_TRIANGLE_STRIP     6
+#       define RADEON_VF_PRIM_TYPE_TRIANGLE_FLAG      7
+#       define RADEON_VF_PRIM_TYPE_RECTANGLE_LIST     8
+#       define RADEON_VF_PRIM_TYPE_POINT_LIST_3       9
+#       define RADEON_VF_PRIM_TYPE_LINE_LIST_3        10
+#       define RADEON_VF_PRIM_TYPE_SPIRIT_LIST        11
+#       define RADEON_VF_PRIM_TYPE_LINE_LOOP          12
+#       define RADEON_VF_PRIM_TYPE_QUAD_LIST          13
+#       define RADEON_VF_PRIM_TYPE_QUAD_STRIP         14
+#       define RADEON_VF_PRIM_TYPE_POLYGON            15
+#       define RADEON_VF_PRIM_WALK_STATE              (0<<4)
+#       define RADEON_VF_PRIM_WALK_INDEX              (1<<4)
+#       define RADEON_VF_PRIM_WALK_LIST               (2<<4)
+#       define RADEON_VF_PRIM_WALK_DATA               (3<<4)
+#       define RADEON_VF_COLOR_ORDER_RGBA             (1<<6)
+#       define RADEON_VF_RADEON_MODE                  (1<<8)
+#       define RADEON_VF_TCL_OUTPUT_CTL_ENA           (1<<9)
+#       define RADEON_VF_PROG_STREAM_ENA              (1<<10)
+#       define RADEON_VF_INDEX_SIZE_SHIFT             11
+#       define RADEON_VF_NUM_VERTICES_SHIFT           16
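RADEON_SE_VF_CNTL describes a vertex-fetch request: the primitive type sits in the low bits, the walk mode in bits 5:4, and the vertex count from bit 16 upward. A minimal sketch of composing such a word for a triangle list, assuming this header and <linux/types.h> are included; the function name is illustrative only.

    /* Illustrative only: triangle-list primitive, DATA walk mode, vertex
     * count placed in bits 31:16.
     */
    static u32 se_vf_cntl_tri_list(u32 num_verts)
    {
            return RADEON_VF_PRIM_TYPE_TRIANGLE_LIST |
                   RADEON_VF_PRIM_WALK_DATA |
                   (num_verts << RADEON_VF_NUM_VERTICES_SHIFT);
    }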
+
+#define RADEON_SE_PORT_DATA0                   0x2000
+
+#define R200_SE_VAP_CNTL                       0x2080
+#       define R200_VAP_TCL_ENABLE             0x00000001
+#       define R200_VAP_SINGLE_BUF_STATE_ENABLE        0x00000010
+#       define R200_VAP_FORCE_W_TO_ONE         0x00010000
+#       define R200_VAP_D3D_TEX_DEFAULT                0x00020000
+#       define R200_VAP_VF_MAX_VTX_NUM__SHIFT  18
+#       define R200_VAP_VF_MAX_VTX_NUM         (9 << 18)
+#       define R200_VAP_DX_CLIP_SPACE_DEF      0x00400000
+#define R200_VF_MAX_VTX_INDX                   0x210c
+#define R200_VF_MIN_VTX_INDX                   0x2110
+#define R200_SE_VTE_CNTL                       0x20b0
+#       define R200_VPORT_X_SCALE_ENA                  0x00000001
+#       define R200_VPORT_X_OFFSET_ENA                 0x00000002
+#       define R200_VPORT_Y_SCALE_ENA                  0x00000004
+#       define R200_VPORT_Y_OFFSET_ENA                 0x00000008
+#       define R200_VPORT_Z_SCALE_ENA                  0x00000010
+#       define R200_VPORT_Z_OFFSET_ENA                 0x00000020
+#       define R200_VTX_XY_FMT                         0x00000100
+#       define R200_VTX_Z_FMT                          0x00000200
+#       define R200_VTX_W0_FMT                         0x00000400
+#       define R200_VTX_W0_NORMALIZE                   0x00000800
+#       define R200_VTX_ST_DENORMALIZED                0x00001000
+#define R200_SE_VAP_CNTL_STATUS                        0x2140
+#       define R200_VC_NO_SWAP                 (0 << 0)
+#       define R200_VC_16BIT_SWAP              (1 << 0)
+#       define R200_VC_32BIT_SWAP              (2 << 0)
+#define R200_PP_TXFILTER_0                     0x2c00
+#define R200_PP_TXFILTER_1                     0x2c20
+#define R200_PP_TXFILTER_2                     0x2c40
+#define R200_PP_TXFILTER_3                     0x2c60
+#define R200_PP_TXFILTER_4                     0x2c80
+#define R200_PP_TXFILTER_5                     0x2ca0
+#       define R200_MAG_FILTER_NEAREST         (0  <<  0)
+#       define R200_MAG_FILTER_LINEAR          (1  <<  0)
+#       define R200_MAG_FILTER_MASK            (1  <<  0)
+#       define R200_MIN_FILTER_NEAREST         (0  <<  1)
+#       define R200_MIN_FILTER_LINEAR          (1  <<  1)
+#       define R200_MIN_FILTER_NEAREST_MIP_NEAREST (2  <<  1)
+#       define R200_MIN_FILTER_NEAREST_MIP_LINEAR (3  <<  1)
+#       define R200_MIN_FILTER_LINEAR_MIP_NEAREST (6  <<  1)
+#       define R200_MIN_FILTER_LINEAR_MIP_LINEAR (7  <<  1)
+#       define R200_MIN_FILTER_ANISO_NEAREST   (8  <<  1)
+#       define R200_MIN_FILTER_ANISO_LINEAR    (9  <<  1)
+#       define R200_MIN_FILTER_ANISO_NEAREST_MIP_NEAREST (10 <<  1)
+#       define R200_MIN_FILTER_ANISO_NEAREST_MIP_LINEAR (11 <<  1)
+#       define R200_MIN_FILTER_MASK            (15 <<  1)
+#       define R200_MAX_ANISO_1_TO_1           (0  <<  5)
+#       define R200_MAX_ANISO_2_TO_1           (1  <<  5)
+#       define R200_MAX_ANISO_4_TO_1           (2  <<  5)
+#       define R200_MAX_ANISO_8_TO_1           (3  <<  5)
+#       define R200_MAX_ANISO_16_TO_1          (4  <<  5)
+#       define R200_MAX_ANISO_MASK             (7  <<  5)
+#       define R200_MAX_MIP_LEVEL_MASK         (0x0f << 16)
+#       define R200_MAX_MIP_LEVEL_SHIFT                16
+#       define R200_YUV_TO_RGB                 (1  << 20)
+#       define R200_YUV_TEMPERATURE_COOL       (0  << 21)
+#       define R200_YUV_TEMPERATURE_HOT                (1  << 21)
+#       define R200_YUV_TEMPERATURE_MASK       (1  << 21)
+#       define R200_WRAPEN_S                   (1  << 22)
+#       define R200_CLAMP_S_WRAP               (0  << 23)
+#       define R200_CLAMP_S_MIRROR             (1  << 23)
+#       define R200_CLAMP_S_CLAMP_LAST         (2  << 23)
+#       define R200_CLAMP_S_MIRROR_CLAMP_LAST  (3  << 23)
+#       define R200_CLAMP_S_CLAMP_BORDER       (4  << 23)
+#       define R200_CLAMP_S_MIRROR_CLAMP_BORDER        (5  << 23)
+#       define R200_CLAMP_S_CLAMP_GL           (6  << 23)
+#       define R200_CLAMP_S_MIRROR_CLAMP_GL    (7  << 23)
+#       define R200_CLAMP_S_MASK               (7  << 23)
+#       define R200_WRAPEN_T                   (1  << 26)
+#       define R200_CLAMP_T_WRAP               (0  << 27)
+#       define R200_CLAMP_T_MIRROR             (1  << 27)
+#       define R200_CLAMP_T_CLAMP_LAST         (2  << 27)
+#       define R200_CLAMP_T_MIRROR_CLAMP_LAST  (3  << 27)
+#       define R200_CLAMP_T_CLAMP_BORDER       (4  << 27)
+#       define R200_CLAMP_T_MIRROR_CLAMP_BORDER        (5  << 27)
+#       define R200_CLAMP_T_CLAMP_GL           (6  << 27)
+#       define R200_CLAMP_T_MIRROR_CLAMP_GL    (7  << 27)
+#       define R200_CLAMP_T_MASK               (7  << 27)
+#       define R200_KILL_LT_ZERO               (1  << 30)
+#       define R200_BORDER_MODE_OGL            (0  << 31)
+#       define R200_BORDER_MODE_D3D            (1  << 31)
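The six R200_PP_TXFILTER_n registers share one field layout: magnification filter in bit 0, minification filter in bits 4:1, maximum anisotropy in bits 7:5, and per-axis clamp modes in bits 25:23 (S) and 29:27 (T). A minimal sketch for trilinear filtering with wrap addressing on both axes, assuming this header and <linux/types.h> are included; the function name is illustrative only.

    /* Illustrative only: linear mag filter, linear-mip-linear min filter,
     * 1:1 anisotropy, wrap (repeat) addressing on S and T.
     */
    static u32 r200_txfilter_trilinear_wrap(void)
    {
            return R200_MAG_FILTER_LINEAR |
                   R200_MIN_FILTER_LINEAR_MIP_LINEAR |
                   R200_MAX_ANISO_1_TO_1 |
                   R200_CLAMP_S_WRAP |
                   R200_CLAMP_T_WRAP;
    }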
+#define R200_PP_TXFORMAT_0                     0x2c04
+#define R200_PP_TXFORMAT_1                     0x2c24
+#define R200_PP_TXFORMAT_2                     0x2c44
+#define R200_PP_TXFORMAT_3                     0x2c64
+#define R200_PP_TXFORMAT_4                     0x2c84
+#define R200_PP_TXFORMAT_5                     0x2ca4
+#       define R200_TXFORMAT_I8                        (0 << 0)
+#       define R200_TXFORMAT_AI88              (1 << 0)
+#       define R200_TXFORMAT_RGB332            (2 << 0)
+#       define R200_TXFORMAT_ARGB1555          (3 << 0)
+#       define R200_TXFORMAT_RGB565            (4 << 0)
+#       define R200_TXFORMAT_ARGB4444          (5 << 0)
+#       define R200_TXFORMAT_ARGB8888          (6 << 0)
+#       define R200_TXFORMAT_RGBA8888          (7 << 0)
+#       define R200_TXFORMAT_Y8                        (8 << 0)
+#       define R200_TXFORMAT_AVYU4444          (9 << 0)
+#       define R200_TXFORMAT_VYUY422           (10 << 0)
+#       define R200_TXFORMAT_YVYU422           (11 << 0)
+#       define R200_TXFORMAT_DXT1              (12 << 0)
+#       define R200_TXFORMAT_DXT23             (14 << 0)
+#       define R200_TXFORMAT_DXT45             (15 << 0)
+#       define R200_TXFORMAT_ABGR8888          (22 << 0)
+#       define R200_TXFORMAT_FORMAT_MASK       (31 <<  0)
+#       define R200_TXFORMAT_FORMAT_SHIFT      0
+#       define R200_TXFORMAT_ALPHA_IN_MAP      (1 << 6)
+#       define R200_TXFORMAT_NON_POWER2                (1 << 7)
+#       define R200_TXFORMAT_WIDTH_MASK                (15 <<  8)
+#       define R200_TXFORMAT_WIDTH_SHIFT       8
+#       define R200_TXFORMAT_HEIGHT_MASK       (15 << 12)
+#       define R200_TXFORMAT_HEIGHT_SHIFT      12
+#       define R200_TXFORMAT_F5_WIDTH_MASK     (15 << 16)      /* cube face 5 */
+#       define R200_TXFORMAT_F5_WIDTH_SHIFT    16
+#       define R200_TXFORMAT_F5_HEIGHT_MASK    (15 << 20)
+#       define R200_TXFORMAT_F5_HEIGHT_SHIFT   20
+#       define R200_TXFORMAT_ST_ROUTE_STQ0     (0 << 24)
+#       define R200_TXFORMAT_ST_ROUTE_STQ1     (1 << 24)
+#       define R200_TXFORMAT_ST_ROUTE_STQ2     (2 << 24)
+#       define R200_TXFORMAT_ST_ROUTE_STQ3     (3 << 24)
+#       define R200_TXFORMAT_ST_ROUTE_STQ4     (4 << 24)
+#       define R200_TXFORMAT_ST_ROUTE_STQ5     (5 << 24)
+#       define R200_TXFORMAT_ST_ROUTE_MASK     (7 << 24)
+#       define R200_TXFORMAT_ST_ROUTE_SHIFT    24
+#       define R200_TXFORMAT_ALPHA_MASK_ENABLE (1 << 28)
+#       define R200_TXFORMAT_CHROMA_KEY_ENABLE (1 << 29)
+#       define R200_TXFORMAT_CUBIC_MAP_ENABLE          (1 << 30)
+#define R200_PP_TXFORMAT_X_0                    0x2c08
+#define R200_PP_TXFORMAT_X_1                    0x2c28
+#define R200_PP_TXFORMAT_X_2                    0x2c48
+#define R200_PP_TXFORMAT_X_3                    0x2c68
+#define R200_PP_TXFORMAT_X_4                    0x2c88
+#define R200_PP_TXFORMAT_X_5                    0x2ca8
+
+#define R200_PP_TXSIZE_0                       0x2c0c /* NPOT only */
+#define R200_PP_TXSIZE_1                       0x2c2c /* NPOT only */
+#define R200_PP_TXSIZE_2                       0x2c4c /* NPOT only */
+#define R200_PP_TXSIZE_3                       0x2c6c /* NPOT only */
+#define R200_PP_TXSIZE_4                       0x2c8c /* NPOT only */
+#define R200_PP_TXSIZE_5                       0x2cac /* NPOT only */
+
+#define R200_PP_TXPITCH_0                       0x2c10 /* NPOT only */
+#define R200_PP_TXPITCH_1                      0x2c30 /* NPOT only */
+#define R200_PP_TXPITCH_2                      0x2c50 /* NPOT only */
+#define R200_PP_TXPITCH_3                      0x2c70 /* NPOT only */
+#define R200_PP_TXPITCH_4                      0x2c90 /* NPOT only */
+#define R200_PP_TXPITCH_5                      0x2cb0 /* NPOT only */
+
+#define R200_PP_TXOFFSET_0                     0x2d00
+#       define R200_TXO_ENDIAN_NO_SWAP         (0 << 0)
+#       define R200_TXO_ENDIAN_BYTE_SWAP       (1 << 0)
+#       define R200_TXO_ENDIAN_WORD_SWAP       (2 << 0)
+#       define R200_TXO_ENDIAN_HALFDW_SWAP     (3 << 0)
+#       define R200_TXO_MACRO_LINEAR           (0 << 2)
+#       define R200_TXO_MACRO_TILE             (1 << 2)
+#       define R200_TXO_MICRO_LINEAR           (0 << 3)
+#       define R200_TXO_MICRO_TILE             (1 << 3)
+#       define R200_TXO_OFFSET_MASK            0xffffffe0
+#       define R200_TXO_OFFSET_SHIFT           5
+#define R200_PP_TXOFFSET_1                     0x2d18
+#define R200_PP_TXOFFSET_2                     0x2d30
+#define R200_PP_TXOFFSET_3                     0x2d48
+#define R200_PP_TXOFFSET_4                     0x2d60
+#define R200_PP_TXOFFSET_5                     0x2d78
+
+#define R200_PP_TFACTOR_0                      0x2ee0
+#define R200_PP_TFACTOR_1                      0x2ee4
+#define R200_PP_TFACTOR_2                      0x2ee8
+#define R200_PP_TFACTOR_3                      0x2eec
+#define R200_PP_TFACTOR_4                      0x2ef0
+#define R200_PP_TFACTOR_5                      0x2ef4
+
+#define R200_PP_TXCBLEND_0                     0x2f00
+#       define R200_TXC_ARG_A_ZERO             (0)
+#       define R200_TXC_ARG_A_CURRENT_COLOR    (2)
+#       define R200_TXC_ARG_A_CURRENT_ALPHA    (3)
+#       define R200_TXC_ARG_A_DIFFUSE_COLOR    (4)
+#       define R200_TXC_ARG_A_DIFFUSE_ALPHA    (5)
+#       define R200_TXC_ARG_A_SPECULAR_COLOR   (6)
+#       define R200_TXC_ARG_A_SPECULAR_ALPHA   (7)
+#       define R200_TXC_ARG_A_TFACTOR_COLOR    (8)
+#       define R200_TXC_ARG_A_TFACTOR_ALPHA    (9)
+#       define R200_TXC_ARG_A_R0_COLOR         (10)
+#       define R200_TXC_ARG_A_R0_ALPHA         (11)
+#       define R200_TXC_ARG_A_R1_COLOR         (12)
+#       define R200_TXC_ARG_A_R1_ALPHA         (13)
+#       define R200_TXC_ARG_A_R2_COLOR         (14)
+#       define R200_TXC_ARG_A_R2_ALPHA         (15)
+#       define R200_TXC_ARG_A_R3_COLOR         (16)
+#       define R200_TXC_ARG_A_R3_ALPHA         (17)
+#       define R200_TXC_ARG_A_R4_COLOR         (18)
+#       define R200_TXC_ARG_A_R4_ALPHA         (19)
+#       define R200_TXC_ARG_A_R5_COLOR         (20)
+#       define R200_TXC_ARG_A_R5_ALPHA         (21)
+#       define R200_TXC_ARG_A_TFACTOR1_COLOR   (26)
+#       define R200_TXC_ARG_A_TFACTOR1_ALPHA   (27)
+#       define R200_TXC_ARG_A_MASK             (31 << 0)
+#       define R200_TXC_ARG_A_SHIFT            0
+#       define R200_TXC_ARG_B_ZERO             (0 << 5)
+#       define R200_TXC_ARG_B_CURRENT_COLOR    (2 << 5)
+#       define R200_TXC_ARG_B_CURRENT_ALPHA    (3 << 5)
+#       define R200_TXC_ARG_B_DIFFUSE_COLOR    (4 << 5)
+#       define R200_TXC_ARG_B_DIFFUSE_ALPHA    (5 << 5)
+#       define R200_TXC_ARG_B_SPECULAR_COLOR   (6 << 5)
+#       define R200_TXC_ARG_B_SPECULAR_ALPHA   (7 << 5)
+#       define R200_TXC_ARG_B_TFACTOR_COLOR    (8 << 5)
+#       define R200_TXC_ARG_B_TFACTOR_ALPHA    (9 << 5)
+#       define R200_TXC_ARG_B_R0_COLOR         (10 << 5)
+#       define R200_TXC_ARG_B_R0_ALPHA         (11 << 5)
+#       define R200_TXC_ARG_B_R1_COLOR         (12 << 5)
+#       define R200_TXC_ARG_B_R1_ALPHA         (13 << 5)
+#       define R200_TXC_ARG_B_R2_COLOR         (14 << 5)
+#       define R200_TXC_ARG_B_R2_ALPHA         (15 << 5)
+#       define R200_TXC_ARG_B_R3_COLOR         (16 << 5)
+#       define R200_TXC_ARG_B_R3_ALPHA         (17 << 5)
+#       define R200_TXC_ARG_B_R4_COLOR         (18 << 5)
+#       define R200_TXC_ARG_B_R4_ALPHA         (19 << 5)
+#       define R200_TXC_ARG_B_R5_COLOR         (20 << 5)
+#       define R200_TXC_ARG_B_R5_ALPHA         (21 << 5)
+#       define R200_TXC_ARG_B_TFACTOR1_COLOR   (26 << 5)
+#       define R200_TXC_ARG_B_TFACTOR1_ALPHA   (27 << 5)
+#       define R200_TXC_ARG_B_MASK             (31 << 5)
+#       define R200_TXC_ARG_B_SHIFT            5
+#       define R200_TXC_ARG_C_ZERO             (0 << 10)
+#       define R200_TXC_ARG_C_CURRENT_COLOR    (2 << 10)
+#       define R200_TXC_ARG_C_CURRENT_ALPHA    (3 << 10)
+#       define R200_TXC_ARG_C_DIFFUSE_COLOR    (4 << 10)
+#       define R200_TXC_ARG_C_DIFFUSE_ALPHA    (5 << 10)
+#       define R200_TXC_ARG_C_SPECULAR_COLOR   (6 << 10)
+#       define R200_TXC_ARG_C_SPECULAR_ALPHA   (7 << 10)
+#       define R200_TXC_ARG_C_TFACTOR_COLOR    (8 << 10)
+#       define R200_TXC_ARG_C_TFACTOR_ALPHA    (9 << 10)
+#       define R200_TXC_ARG_C_R0_COLOR         (10 << 10)
+#       define R200_TXC_ARG_C_R0_ALPHA         (11 << 10)
+#       define R200_TXC_ARG_C_R1_COLOR         (12 << 10)
+#       define R200_TXC_ARG_C_R1_ALPHA         (13 << 10)
+#       define R200_TXC_ARG_C_R2_COLOR         (14 << 10)
+#       define R200_TXC_ARG_C_R2_ALPHA         (15 << 10)
+#       define R200_TXC_ARG_C_R3_COLOR         (16 << 10)
+#       define R200_TXC_ARG_C_R3_ALPHA         (17 << 10)
+#       define R200_TXC_ARG_C_R4_COLOR         (18 << 10)
+#       define R200_TXC_ARG_C_R4_ALPHA         (19 << 10)
+#       define R200_TXC_ARG_C_R5_COLOR         (20 << 10)
+#       define R200_TXC_ARG_C_R5_ALPHA         (21 << 10)
+#       define R200_TXC_ARG_C_TFACTOR1_COLOR   (26 << 10)
+#       define R200_TXC_ARG_C_TFACTOR1_ALPHA   (27 << 10)
+#       define R200_TXC_ARG_C_MASK             (31 << 10)
+#       define R200_TXC_ARG_C_SHIFT            10
+#       define R200_TXC_COMP_ARG_A             (1 << 16)
+#       define R200_TXC_COMP_ARG_A_SHIFT       (16)
+#       define R200_TXC_BIAS_ARG_A             (1 << 17)
+#       define R200_TXC_SCALE_ARG_A            (1 << 18)
+#       define R200_TXC_NEG_ARG_A              (1 << 19)
+#       define R200_TXC_COMP_ARG_B             (1 << 20)
+#       define R200_TXC_COMP_ARG_B_SHIFT       (20)
+#       define R200_TXC_BIAS_ARG_B             (1 << 21)
+#       define R200_TXC_SCALE_ARG_B            (1 << 22)
+#       define R200_TXC_NEG_ARG_B              (1 << 23)
+#       define R200_TXC_COMP_ARG_C             (1 << 24)
+#       define R200_TXC_COMP_ARG_C_SHIFT       (24)
+#       define R200_TXC_BIAS_ARG_C             (1 << 25)
+#       define R200_TXC_SCALE_ARG_C            (1 << 26)
+#       define R200_TXC_NEG_ARG_C              (1 << 27)
+#       define R200_TXC_OP_MADD                        (0 << 28)
+#       define R200_TXC_OP_CND0                        (2 << 28)
+#       define R200_TXC_OP_LERP                        (3 << 28)
+#       define R200_TXC_OP_DOT3                        (4 << 28)
+#       define R200_TXC_OP_DOT4                        (5 << 28)
+#       define R200_TXC_OP_CONDITIONAL         (6 << 28)
+#       define R200_TXC_OP_DOT2_ADD            (7 << 28)
+#       define R200_TXC_OP_MASK                        (7 << 28)
+#define R200_PP_TXCBLEND2_0            0x2f04
+#       define R200_TXC_TFACTOR_SEL_SHIFT      0
+#       define R200_TXC_TFACTOR_SEL_MASK       0x7
+#       define R200_TXC_TFACTOR1_SEL_SHIFT     4
+#       define R200_TXC_TFACTOR1_SEL_MASK      (0x7 << 4)
+#       define R200_TXC_SCALE_SHIFT            8
+#       define R200_TXC_SCALE_MASK             (7 << 8)
+#       define R200_TXC_SCALE_1X               (0 << 8)
+#       define R200_TXC_SCALE_2X               (1 << 8)
+#       define R200_TXC_SCALE_4X               (2 << 8)
+#       define R200_TXC_SCALE_8X               (3 << 8)
+#       define R200_TXC_SCALE_INV2             (5 << 8)
+#       define R200_TXC_SCALE_INV4             (6 << 8)
+#       define R200_TXC_SCALE_INV8             (7 << 8)
+#       define R200_TXC_CLAMP_SHIFT            12
+#       define R200_TXC_CLAMP_MASK             (3 << 12)
+#       define R200_TXC_CLAMP_WRAP             (0 << 12)
+#       define R200_TXC_CLAMP_0_1              (1 << 12)
+#       define R200_TXC_CLAMP_8_8              (2 << 12)
+#       define R200_TXC_OUTPUT_REG_MASK                (7 << 16)
+#       define R200_TXC_OUTPUT_REG_NONE                (0 << 16)
+#       define R200_TXC_OUTPUT_REG_R0          (1 << 16)
+#       define R200_TXC_OUTPUT_REG_R1          (2 << 16)
+#       define R200_TXC_OUTPUT_REG_R2          (3 << 16)
+#       define R200_TXC_OUTPUT_REG_R3          (4 << 16)
+#       define R200_TXC_OUTPUT_REG_R4          (5 << 16)
+#       define R200_TXC_OUTPUT_REG_R5          (6 << 16)
+#       define R200_TXC_OUTPUT_MASK_MASK       (7 << 20)
+#       define R200_TXC_OUTPUT_MASK_RGB                (0 << 20)
+#       define R200_TXC_OUTPUT_MASK_RG         (1 << 20)
+#       define R200_TXC_OUTPUT_MASK_RB         (2 << 20)
+#       define R200_TXC_OUTPUT_MASK_R          (3 << 20)
+#       define R200_TXC_OUTPUT_MASK_GB         (4 << 20)
+#       define R200_TXC_OUTPUT_MASK_G          (5 << 20)
+#       define R200_TXC_OUTPUT_MASK_B          (6 << 20)
+#       define R200_TXC_OUTPUT_MASK_NONE       (7 << 20)
+#       define R200_TXC_REPL_NORMAL            0
+#       define R200_TXC_REPL_RED               1
+#       define R200_TXC_REPL_GREEN             2
+#       define R200_TXC_REPL_BLUE              3
+#       define R200_TXC_REPL_ARG_A_SHIFT       26
+#       define R200_TXC_REPL_ARG_A_MASK                (3 << 26)
+#       define R200_TXC_REPL_ARG_B_SHIFT       28
+#       define R200_TXC_REPL_ARG_B_MASK                (3 << 28)
+#       define R200_TXC_REPL_ARG_C_SHIFT       30
+#       define R200_TXC_REPL_ARG_C_MASK                (3 << 30)
+#define R200_PP_TXABLEND_0                     0x2f08
+#       define R200_TXA_ARG_A_ZERO             (0)
+#       define R200_TXA_ARG_A_CURRENT_ALPHA    (2) /* guess */
+#       define R200_TXA_ARG_A_CURRENT_BLUE     (3) /* guess */
+#       define R200_TXA_ARG_A_DIFFUSE_ALPHA    (4)
+#       define R200_TXA_ARG_A_DIFFUSE_BLUE     (5)
+#       define R200_TXA_ARG_A_SPECULAR_ALPHA   (6)
+#       define R200_TXA_ARG_A_SPECULAR_BLUE    (7)
+#       define R200_TXA_ARG_A_TFACTOR_ALPHA    (8)
+#       define R200_TXA_ARG_A_TFACTOR_BLUE     (9)
+#       define R200_TXA_ARG_A_R0_ALPHA         (10)
+#       define R200_TXA_ARG_A_R0_BLUE          (11)
+#       define R200_TXA_ARG_A_R1_ALPHA         (12)
+#       define R200_TXA_ARG_A_R1_BLUE          (13)
+#       define R200_TXA_ARG_A_R2_ALPHA         (14)
+#       define R200_TXA_ARG_A_R2_BLUE          (15)
+#       define R200_TXA_ARG_A_R3_ALPHA         (16)
+#       define R200_TXA_ARG_A_R3_BLUE          (17)
+#       define R200_TXA_ARG_A_R4_ALPHA         (18)
+#       define R200_TXA_ARG_A_R4_BLUE          (19)
+#       define R200_TXA_ARG_A_R5_ALPHA         (20)
+#       define R200_TXA_ARG_A_R5_BLUE          (21)
+#       define R200_TXA_ARG_A_TFACTOR1_ALPHA   (26)
+#       define R200_TXA_ARG_A_TFACTOR1_BLUE    (27)
+#       define R200_TXA_ARG_A_MASK             (31 << 0)
+#       define R200_TXA_ARG_A_SHIFT            0
+#       define R200_TXA_ARG_B_ZERO             (0 << 5)
+#       define R200_TXA_ARG_B_CURRENT_ALPHA    (2 << 5) /* guess */
+#       define R200_TXA_ARG_B_CURRENT_BLUE     (3 << 5) /* guess */
+#       define R200_TXA_ARG_B_DIFFUSE_ALPHA    (4 << 5)
+#       define R200_TXA_ARG_B_DIFFUSE_BLUE     (5 << 5)
+#       define R200_TXA_ARG_B_SPECULAR_ALPHA   (6 << 5)
+#       define R200_TXA_ARG_B_SPECULAR_BLUE    (7 << 5)
+#       define R200_TXA_ARG_B_TFACTOR_ALPHA    (8 << 5)
+#       define R200_TXA_ARG_B_TFACTOR_BLUE     (9 << 5)
+#       define R200_TXA_ARG_B_R0_ALPHA         (10 << 5)
+#       define R200_TXA_ARG_B_R0_BLUE          (11 << 5)
+#       define R200_TXA_ARG_B_R1_ALPHA         (12 << 5)
+#       define R200_TXA_ARG_B_R1_BLUE          (13 << 5)
+#       define R200_TXA_ARG_B_R2_ALPHA         (14 << 5)
+#       define R200_TXA_ARG_B_R2_BLUE          (15 << 5)
+#       define R200_TXA_ARG_B_R3_ALPHA         (16 << 5)
+#       define R200_TXA_ARG_B_R3_BLUE          (17 << 5)
+#       define R200_TXA_ARG_B_R4_ALPHA         (18 << 5)
+#       define R200_TXA_ARG_B_R4_BLUE          (19 << 5)
+#       define R200_TXA_ARG_B_R5_ALPHA         (20 << 5)
+#       define R200_TXA_ARG_B_R5_BLUE          (21 << 5)
+#       define R200_TXA_ARG_B_TFACTOR1_ALPHA   (26 << 5)
+#       define R200_TXA_ARG_B_TFACTOR1_BLUE    (27 << 5)
+#       define R200_TXA_ARG_B_MASK             (31 << 5)
+#       define R200_TXA_ARG_B_SHIFT                    5
+#       define R200_TXA_ARG_C_ZERO             (0 << 10)
+#       define R200_TXA_ARG_C_CURRENT_ALPHA    (2 << 10) /* guess */
+#       define R200_TXA_ARG_C_CURRENT_BLUE     (3 << 10) /* guess */
+#       define R200_TXA_ARG_C_DIFFUSE_ALPHA    (4 << 10)
+#       define R200_TXA_ARG_C_DIFFUSE_BLUE     (5 << 10)
+#       define R200_TXA_ARG_C_SPECULAR_ALPHA   (6 << 10)
+#       define R200_TXA_ARG_C_SPECULAR_BLUE    (7 << 10)
+#       define R200_TXA_ARG_C_TFACTOR_ALPHA    (8 << 10)
+#       define R200_TXA_ARG_C_TFACTOR_BLUE     (9 << 10)
+#       define R200_TXA_ARG_C_R0_ALPHA         (10 << 10)
+#       define R200_TXA_ARG_C_R0_BLUE          (11 << 10)
+#       define R200_TXA_ARG_C_R1_ALPHA         (12 << 10)
+#       define R200_TXA_ARG_C_R1_BLUE          (13 << 10)
+#       define R200_TXA_ARG_C_R2_ALPHA         (14 << 10)
+#       define R200_TXA_ARG_C_R2_BLUE          (15 << 10)
+#       define R200_TXA_ARG_C_R3_ALPHA         (16 << 10)
+#       define R200_TXA_ARG_C_R3_BLUE          (17 << 10)
+#       define R200_TXA_ARG_C_R4_ALPHA         (18 << 10)
+#       define R200_TXA_ARG_C_R4_BLUE          (19 << 10)
+#       define R200_TXA_ARG_C_R5_ALPHA         (20 << 10)
+#       define R200_TXA_ARG_C_R5_BLUE          (21 << 10)
+#       define R200_TXA_ARG_C_TFACTOR1_ALPHA   (26 << 10)
+#       define R200_TXA_ARG_C_TFACTOR1_BLUE    (27 << 10)
+#       define R200_TXA_ARG_C_MASK             (31 << 10)
+#       define R200_TXA_ARG_C_SHIFT            10
+#       define R200_TXA_COMP_ARG_A             (1 << 16)
+#       define R200_TXA_COMP_ARG_A_SHIFT       (16)
+#       define R200_TXA_BIAS_ARG_A             (1 << 17)
+#       define R200_TXA_SCALE_ARG_A            (1 << 18)
+#       define R200_TXA_NEG_ARG_A              (1 << 19)
+#       define R200_TXA_COMP_ARG_B             (1 << 20)
+#       define R200_TXA_COMP_ARG_B_SHIFT       (20)
+#       define R200_TXA_BIAS_ARG_B             (1 << 21)
+#       define R200_TXA_SCALE_ARG_B            (1 << 22)
+#       define R200_TXA_NEG_ARG_B              (1 << 23)
+#       define R200_TXA_COMP_ARG_C             (1 << 24)
+#       define R200_TXA_COMP_ARG_C_SHIFT       (24)
+#       define R200_TXA_BIAS_ARG_C             (1 << 25)
+#       define R200_TXA_SCALE_ARG_C            (1 << 26)
+#       define R200_TXA_NEG_ARG_C              (1 << 27)
+#       define R200_TXA_OP_MADD                        (0 << 28)
+#       define R200_TXA_OP_CND0                        (2 << 28)
+#       define R200_TXA_OP_LERP                        (3 << 28)
+#       define R200_TXA_OP_CONDITIONAL         (6 << 28)
+#       define R200_TXA_OP_MASK                        (7 << 28)
+#define R200_PP_TXABLEND2_0                    0x2f0c
+#       define R200_TXA_TFACTOR_SEL_SHIFT      0
+#       define R200_TXA_TFACTOR_SEL_MASK       0x7
+#       define R200_TXA_TFACTOR1_SEL_SHIFT     4
+#       define R200_TXA_TFACTOR1_SEL_MASK      (0x7 << 4)
+#       define R200_TXA_SCALE_SHIFT            8
+#       define R200_TXA_SCALE_MASK             (7 << 8)
+#       define R200_TXA_SCALE_1X               (0 << 8)
+#       define R200_TXA_SCALE_2X               (1 << 8)
+#       define R200_TXA_SCALE_4X               (2 << 8)
+#       define R200_TXA_SCALE_8X               (3 << 8)
+#       define R200_TXA_SCALE_INV2             (5 << 8)
+#       define R200_TXA_SCALE_INV4             (6 << 8)
+#       define R200_TXA_SCALE_INV8             (7 << 8)
+#       define R200_TXA_CLAMP_SHIFT            12
+#       define R200_TXA_CLAMP_MASK             (3 << 12)
+#       define R200_TXA_CLAMP_WRAP             (0 << 12)
+#       define R200_TXA_CLAMP_0_1              (1 << 12)
+#       define R200_TXA_CLAMP_8_8              (2 << 12)
+#       define R200_TXA_OUTPUT_REG_MASK                (7 << 16)
+#       define R200_TXA_OUTPUT_REG_NONE                (0 << 16)
+#       define R200_TXA_OUTPUT_REG_R0          (1 << 16)
+#       define R200_TXA_OUTPUT_REG_R1          (2 << 16)
+#       define R200_TXA_OUTPUT_REG_R2          (3 << 16)
+#       define R200_TXA_OUTPUT_REG_R3          (4 << 16)
+#       define R200_TXA_OUTPUT_REG_R4          (5 << 16)
+#       define R200_TXA_OUTPUT_REG_R5          (6 << 16)
+#       define R200_TXA_DOT_ALPHA              (1 << 20)
+#       define R200_TXA_REPL_NORMAL            0
+#       define R200_TXA_REPL_RED               1
+#       define R200_TXA_REPL_GREEN             2
+#       define R200_TXA_REPL_ARG_A_SHIFT       26
+#       define R200_TXA_REPL_ARG_A_MASK                (3 << 26)
+#       define R200_TXA_REPL_ARG_B_SHIFT       28
+#       define R200_TXA_REPL_ARG_B_MASK                (3 << 28)
+#       define R200_TXA_REPL_ARG_C_SHIFT       30
+#       define R200_TXA_REPL_ARG_C_MASK                (3 << 30)
+
+#define R200_SE_VTX_FMT_0                      0x2088
+#       define R200_VTX_XY                     0 /* always have xy */
+#       define R200_VTX_Z0                     (1<<0)
+#       define R200_VTX_W0                     (1<<1)
+#       define R200_VTX_WEIGHT_COUNT_SHIFT     (2)
+#       define R200_VTX_PV_MATRIX_SEL          (1<<5)
+#       define R200_VTX_N0                     (1<<6)
+#       define R200_VTX_POINT_SIZE             (1<<7)
+#       define R200_VTX_DISCRETE_FOG           (1<<8)
+#       define R200_VTX_SHININESS_0            (1<<9)
+#       define R200_VTX_SHININESS_1            (1<<10)
+#       define   R200_VTX_COLOR_NOT_PRESENT    0
+#       define   R200_VTX_PK_RGBA              1
+#       define   R200_VTX_FP_RGB               2
+#       define   R200_VTX_FP_RGBA              3
+#       define   R200_VTX_COLOR_MASK           3
+#       define R200_VTX_COLOR_0_SHIFT          11
+#       define R200_VTX_COLOR_1_SHIFT          13
+#       define R200_VTX_COLOR_2_SHIFT          15
+#       define R200_VTX_COLOR_3_SHIFT          17
+#       define R200_VTX_COLOR_4_SHIFT          19
+#       define R200_VTX_COLOR_5_SHIFT          21
+#       define R200_VTX_COLOR_6_SHIFT          23
+#       define R200_VTX_COLOR_7_SHIFT          25
+#       define R200_VTX_XY1                    (1<<28)
+#       define R200_VTX_Z1                     (1<<29)
+#       define R200_VTX_W1                     (1<<30)
+#       define R200_VTX_N1                     (1<<31)
+#define R200_SE_VTX_FMT_1                      0x208c
+#       define R200_VTX_TEX0_COMP_CNT_SHIFT    0
+#       define R200_VTX_TEX1_COMP_CNT_SHIFT    3
+#       define R200_VTX_TEX2_COMP_CNT_SHIFT    6
+#       define R200_VTX_TEX3_COMP_CNT_SHIFT    9
+#       define R200_VTX_TEX4_COMP_CNT_SHIFT    12
+#       define R200_VTX_TEX5_COMP_CNT_SHIFT    15
+
+#define R200_SE_TCL_OUTPUT_VTX_FMT_0           0x2090
+#define R200_SE_TCL_OUTPUT_VTX_FMT_1           0x2094
+#define R200_SE_TCL_OUTPUT_VTX_COMP_SEL                0x2250
+#       define R200_OUTPUT_XYZW                        (1<<0)
+#       define R200_OUTPUT_COLOR_0             (1<<8)
+#       define R200_OUTPUT_COLOR_1             (1<<9)
+#       define R200_OUTPUT_TEX_0               (1<<16)
+#       define R200_OUTPUT_TEX_1               (1<<17)
+#       define R200_OUTPUT_TEX_2               (1<<18)
+#       define R200_OUTPUT_TEX_3               (1<<19)
+#       define R200_OUTPUT_TEX_4               (1<<20)
+#       define R200_OUTPUT_TEX_5               (1<<21)
+#       define R200_OUTPUT_TEX_MASK            (0x3f<<16)
+#       define R200_OUTPUT_DISCRETE_FOG                (1<<24)
+#       define R200_OUTPUT_PT_SIZE             (1<<25)
+#       define R200_FORCE_INORDER_PROC         (1<<31)
+#define R200_PP_CNTL_X                         0x2cc4
+#define R200_PP_TXMULTI_CTL_0                  0x2c1c
+#define R200_SE_VTX_STATE_CNTL                 0x2180
+#       define R200_UPDATE_USER_COLOR_0_ENA_MASK (1<<16)
+
+                               /* Registers for CP and Microcode Engine */
+#define RADEON_CP_ME_RAM_ADDR               0x07d4
+#define RADEON_CP_ME_RAM_RADDR              0x07d8
+#define RADEON_CP_ME_RAM_DATAH              0x07dc
+#define RADEON_CP_ME_RAM_DATAL              0x07e0
+
+#define RADEON_CP_RB_BASE                   0x0700
+#define RADEON_CP_RB_CNTL                   0x0704
+#      define RADEON_RB_BUFSZ_SHIFT            0
+#      define RADEON_RB_BUFSZ_MASK             (0x3f << 0)
+#      define RADEON_RB_BLKSZ_SHIFT            8
+#      define RADEON_RB_BLKSZ_MASK             (0x3f << 8)
+#      define RADEON_MAX_FETCH_SHIFT           18
+#      define RADEON_MAX_FETCH_MASK            (0x3 << 18)
+#      define RADEON_RB_NO_UPDATE              (1 << 27)
+#      define RADEON_RB_RPTR_WR_ENA            (1 << 31)
+#define RADEON_CP_RB_RPTR_ADDR              0x070c
+#define RADEON_CP_RB_RPTR                   0x0710
+#define RADEON_CP_RB_WPTR                   0x0714
+#define RADEON_CP_RB_RPTR_WR                0x071c
+
+#define RADEON_CP_IB_BASE                   0x0738
+#define RADEON_CP_IB_BUFSZ                  0x073c
+
+#define RADEON_CP_CSQ_CNTL                  0x0740
+#       define RADEON_CSQ_CNT_PRIMARY_MASK     (0xff << 0)
+#       define RADEON_CSQ_PRIDIS_INDDIS        (0    << 28)
+#       define RADEON_CSQ_PRIPIO_INDDIS        (1    << 28)
+#       define RADEON_CSQ_PRIBM_INDDIS         (2    << 28)
+#       define RADEON_CSQ_PRIPIO_INDBM         (3    << 28)
+#       define RADEON_CSQ_PRIBM_INDBM          (4    << 28)
+#       define RADEON_CSQ_PRIPIO_INDPIO        (15   << 28)
+
+#define R300_CP_RESYNC_ADDR                 0x778
+#define R300_CP_RESYNC_DATA                 0x77c
+
+#define RADEON_CP_CSQ_STAT                  0x07f8
+#       define RADEON_CSQ_RPTR_PRIMARY_MASK    (0xff <<  0)
+#       define RADEON_CSQ_WPTR_PRIMARY_MASK    (0xff <<  8)
+#       define RADEON_CSQ_RPTR_INDIRECT_MASK   (0xff << 16)
+#       define RADEON_CSQ_WPTR_INDIRECT_MASK   (0xff << 24)
+#define RADEON_CP_CSQ2_STAT                  0x07fc
+#define RADEON_CP_CSQ_ADDR                  0x07f0
+#define RADEON_CP_CSQ_DATA                  0x07f4
+#define RADEON_CP_CSQ_APER_PRIMARY          0x1000
+#define RADEON_CP_CSQ_APER_INDIRECT         0x1300
+
+#define RADEON_CP_RB_WPTR_DELAY             0x0718
+#       define RADEON_PRE_WRITE_TIMER_SHIFT    0
+#       define RADEON_PRE_WRITE_LIMIT_SHIFT    23
+#define RADEON_CP_CSQ_MODE             0x0744
+#      define RADEON_INDIRECT2_START_SHIFT     0
+#      define RADEON_INDIRECT2_START_MASK      (0x7f << 0)
+#      define RADEON_INDIRECT1_START_SHIFT     8
+#      define RADEON_INDIRECT1_START_MASK      (0x7f << 8)
+
+#define RADEON_AIC_CNTL                     0x01d0
+#       define RADEON_PCIGART_TRANSLATE_EN     (1 << 0)
+#       define RADEON_DIS_OUT_OF_PCI_GART_ACCESS     (1 << 1)
+#define RADEON_AIC_LO_ADDR                  0x01dc
+#define RADEON_AIC_PT_BASE             0x01d8
+#define RADEON_AIC_HI_ADDR             0x01e0
+
+
+
+                               /* Constants */
+/* #define RADEON_LAST_FRAME_REG               RADEON_GUI_SCRATCH_REG0 */
+/* #define RADEON_LAST_CLEAR_REG               RADEON_GUI_SCRATCH_REG2 */
+
+
+
+                               /* CP packet types */
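+/*
+ * A CP packet header stores the packet type in bits 31:30; as the masks
+ * below indicate, bits 29:16 carry the dword count and, for type-0
+ * packets, bits 10:0 (12:0 on R300 and newer) select the first register
+ * to be written.
+ */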
+#define RADEON_CP_PACKET0                           0x00000000
+#define RADEON_CP_PACKET1                           0x40000000
+#define RADEON_CP_PACKET2                           0x80000000
+#define RADEON_CP_PACKET3                           0xC0000000
+#       define RADEON_CP_PACKET_MASK                0xC0000000
+#       define RADEON_CP_PACKET_COUNT_MASK          0x3fff0000
+#       define RADEON_CP_PACKET_MAX_DWORDS          (1 << 12)
+#       define RADEON_CP_PACKET0_REG_MASK           0x000007ff
+#       define R300_CP_PACKET0_REG_MASK             0x00001fff
+#       define RADEON_CP_PACKET1_REG0_MASK          0x000007ff
+#       define RADEON_CP_PACKET1_REG1_MASK          0x003ff800
+
+#define RADEON_CP_PACKET0_ONE_REG_WR                0x00008000
+
+#define RADEON_CP_PACKET3_NOP                       0xC0001000
+#define RADEON_CP_PACKET3_NEXT_CHAR                 0xC0001900
+#define RADEON_CP_PACKET3_PLY_NEXTSCAN              0xC0001D00
+#define RADEON_CP_PACKET3_SET_SCISSORS              0xC0001E00
+#define RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM     0xC0002300
+#define RADEON_CP_PACKET3_LOAD_MICROCODE            0xC0002400
+#define RADEON_CP_PACKET3_WAIT_FOR_IDLE             0xC0002600
+#define RADEON_CP_PACKET3_3D_DRAW_VBUF              0xC0002800
+#define RADEON_CP_PACKET3_3D_DRAW_IMMD              0xC0002900
+#define RADEON_CP_PACKET3_3D_DRAW_INDX              0xC0002A00
+#define RADEON_CP_PACKET3_LOAD_PALETTE              0xC0002C00
+#define R200_CP_PACKET3_3D_DRAW_IMMD_2              0xc0003500
+#define RADEON_CP_PACKET3_3D_LOAD_VBPNTR            0xC0002F00
+#define RADEON_CP_PACKET3_CNTL_PAINT                0xC0009100
+#define RADEON_CP_PACKET3_CNTL_BITBLT               0xC0009200
+#define RADEON_CP_PACKET3_CNTL_SMALLTEXT            0xC0009300
+#define RADEON_CP_PACKET3_CNTL_HOSTDATA_BLT         0xC0009400
+#define RADEON_CP_PACKET3_CNTL_POLYLINE             0xC0009500
+#define RADEON_CP_PACKET3_CNTL_POLYSCANLINES        0xC0009800
+#define RADEON_CP_PACKET3_CNTL_PAINT_MULTI          0xC0009A00
+#define RADEON_CP_PACKET3_CNTL_BITBLT_MULTI         0xC0009B00
+#define RADEON_CP_PACKET3_CNTL_TRANS_BITBLT         0xC0009C00
+
+
+#define RADEON_CP_VC_FRMT_XY                        0x00000000
+#define RADEON_CP_VC_FRMT_W0                        0x00000001
+#define RADEON_CP_VC_FRMT_FPCOLOR                   0x00000002
+#define RADEON_CP_VC_FRMT_FPALPHA                   0x00000004
+#define RADEON_CP_VC_FRMT_PKCOLOR                   0x00000008
+#define RADEON_CP_VC_FRMT_FPSPEC                    0x00000010
+#define RADEON_CP_VC_FRMT_FPFOG                     0x00000020
+#define RADEON_CP_VC_FRMT_PKSPEC                    0x00000040
+#define RADEON_CP_VC_FRMT_ST0                       0x00000080
+#define RADEON_CP_VC_FRMT_ST1                       0x00000100
+#define RADEON_CP_VC_FRMT_Q1                        0x00000200
+#define RADEON_CP_VC_FRMT_ST2                       0x00000400
+#define RADEON_CP_VC_FRMT_Q2                        0x00000800
+#define RADEON_CP_VC_FRMT_ST3                       0x00001000
+#define RADEON_CP_VC_FRMT_Q3                        0x00002000
+#define RADEON_CP_VC_FRMT_Q0                        0x00004000
+#define RADEON_CP_VC_FRMT_BLND_WEIGHT_CNT_MASK      0x00038000
+#define RADEON_CP_VC_FRMT_N0                        0x00040000
+#define RADEON_CP_VC_FRMT_XY1                       0x08000000
+#define RADEON_CP_VC_FRMT_Z1                        0x10000000
+#define RADEON_CP_VC_FRMT_W1                        0x20000000
+#define RADEON_CP_VC_FRMT_N1                        0x40000000
+#define RADEON_CP_VC_FRMT_Z                         0x80000000
+
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_NONE            0x00000000
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_POINT           0x00000001
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_LINE            0x00000002
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_LINE_STRIP      0x00000003
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_LIST        0x00000004
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_FAN         0x00000005
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_STRIP       0x00000006
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_TYPE_2      0x00000007
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_RECT_LIST       0x00000008
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_3VRT_POINT_LIST 0x00000009
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_3VRT_LINE_LIST  0x0000000a
+#define RADEON_CP_VC_CNTL_PRIM_WALK_IND             0x00000010
+#define RADEON_CP_VC_CNTL_PRIM_WALK_LIST            0x00000020
+#define RADEON_CP_VC_CNTL_PRIM_WALK_RING            0x00000030
+#define RADEON_CP_VC_CNTL_COLOR_ORDER_BGRA          0x00000000
+#define RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA          0x00000040
+#define RADEON_CP_VC_CNTL_MAOS_ENABLE               0x00000080
+#define RADEON_CP_VC_CNTL_VTX_FMT_NON_RADEON_MODE   0x00000000
+#define RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE       0x00000100
+#define RADEON_CP_VC_CNTL_TCL_DISABLE               0x00000000
+#define RADEON_CP_VC_CNTL_TCL_ENABLE                0x00000200
+#define RADEON_CP_VC_CNTL_NUM_SHIFT                 16
+
+#define RADEON_VS_MATRIX_0_ADDR                   0
+#define RADEON_VS_MATRIX_1_ADDR                   4
+#define RADEON_VS_MATRIX_2_ADDR                   8
+#define RADEON_VS_MATRIX_3_ADDR                  12
+#define RADEON_VS_MATRIX_4_ADDR                  16
+#define RADEON_VS_MATRIX_5_ADDR                  20
+#define RADEON_VS_MATRIX_6_ADDR                  24
+#define RADEON_VS_MATRIX_7_ADDR                  28
+#define RADEON_VS_MATRIX_8_ADDR                  32
+#define RADEON_VS_MATRIX_9_ADDR                  36
+#define RADEON_VS_MATRIX_10_ADDR                 40
+#define RADEON_VS_MATRIX_11_ADDR                 44
+#define RADEON_VS_MATRIX_12_ADDR                 48
+#define RADEON_VS_MATRIX_13_ADDR                 52
+#define RADEON_VS_MATRIX_14_ADDR                 56
+#define RADEON_VS_MATRIX_15_ADDR                 60
+#define RADEON_VS_LIGHT_AMBIENT_ADDR             64
+#define RADEON_VS_LIGHT_DIFFUSE_ADDR             72
+#define RADEON_VS_LIGHT_SPECULAR_ADDR            80
+#define RADEON_VS_LIGHT_DIRPOS_ADDR              88
+#define RADEON_VS_LIGHT_HWVSPOT_ADDR             96
+#define RADEON_VS_LIGHT_ATTENUATION_ADDR        104
+#define RADEON_VS_MATRIX_EYE2CLIP_ADDR          112
+#define RADEON_VS_UCP_ADDR                      116
+#define RADEON_VS_GLOBAL_AMBIENT_ADDR           122
+#define RADEON_VS_FOG_PARAM_ADDR                123
+#define RADEON_VS_EYE_VECTOR_ADDR               124
+
+#define RADEON_SS_LIGHT_DCD_ADDR                  0
+#define RADEON_SS_LIGHT_SPOT_EXPONENT_ADDR        8
+#define RADEON_SS_LIGHT_SPOT_CUTOFF_ADDR         16
+#define RADEON_SS_LIGHT_SPECULAR_THRESH_ADDR     24
+#define RADEON_SS_LIGHT_RANGE_CUTOFF_ADDR        32
+#define RADEON_SS_VERT_GUARD_CLIP_ADJ_ADDR       48
+#define RADEON_SS_VERT_GUARD_DISCARD_ADJ_ADDR    49
+#define RADEON_SS_HORZ_GUARD_CLIP_ADJ_ADDR       50
+#define RADEON_SS_HORZ_GUARD_DISCARD_ADJ_ADDR    51
+#define RADEON_SS_SHININESS                      60
+
+#define RADEON_TV_MASTER_CNTL                    0x0800
+#       define RADEON_TV_ASYNC_RST               (1 <<  0)
+#       define RADEON_CRT_ASYNC_RST              (1 <<  1)
+#       define RADEON_RESTART_PHASE_FIX          (1 <<  3)
+#      define RADEON_TV_FIFO_ASYNC_RST          (1 <<  4)
+#      define RADEON_VIN_ASYNC_RST              (1 <<  5)
+#      define RADEON_AUD_ASYNC_RST              (1 <<  6)
+#      define RADEON_DVS_ASYNC_RST              (1 <<  7)
+#       define RADEON_CRT_FIFO_CE_EN             (1 <<  9)
+#       define RADEON_TV_FIFO_CE_EN              (1 << 10)
+#       define RADEON_RE_SYNC_NOW_SEL_MASK       (3 << 14)
+#       define RADEON_TVCLK_ALWAYS_ONb           (1 << 30)
+#      define RADEON_TV_ON                      (1 << 31)
+#define RADEON_TV_PRE_DAC_MUX_CNTL               0x0888
+#       define RADEON_Y_RED_EN                   (1 << 0)
+#       define RADEON_C_GRN_EN                   (1 << 1)
+#       define RADEON_CMP_BLU_EN                 (1 << 2)
+#       define RADEON_DAC_DITHER_EN              (1 << 3)
+#       define RADEON_RED_MX_FORCE_DAC_DATA      (6 << 4)
+#       define RADEON_GRN_MX_FORCE_DAC_DATA      (6 << 8)
+#       define RADEON_BLU_MX_FORCE_DAC_DATA      (6 << 12)
+#       define RADEON_TV_FORCE_DAC_DATA_SHIFT    16
+#define RADEON_TV_RGB_CNTL                           0x0804
+#       define RADEON_SWITCH_TO_BLUE             (1 <<  4)
+#       define RADEON_RGB_DITHER_EN              (1 <<  5)
+#       define RADEON_RGB_SRC_SEL_MASK           (3 <<  8)
+#       define RADEON_RGB_SRC_SEL_CRTC1                  (0 <<  8)
+#       define RADEON_RGB_SRC_SEL_RMX            (1 <<  8)
+#       define RADEON_RGB_SRC_SEL_CRTC2                  (2 <<  8)
+#       define RADEON_RGB_CONVERT_BY_PASS        (1 << 10)
+#       define RADEON_UVRAM_READ_MARGIN_SHIFT    16
+#       define RADEON_FIFORAM_FFMACRO_READ_MARGIN_SHIFT          20
+#      define RADEON_TVOUT_SCALE_EN              (1 << 26)
+#define RADEON_TV_SYNC_CNTL                          0x0808
+#       define RADEON_SYNC_OE                     (1 <<  0)
+#       define RADEON_SYNC_OUT                    (1 <<  1)
+#       define RADEON_SYNC_IN                     (1 <<  2)
+#       define RADEON_SYNC_PUB                    (1 <<  3)
+#       define RADEON_SYNC_PD                     (1 <<  4)
+#       define RADEON_TV_SYNC_IO_DRIVE            (1 <<  5)
+#define RADEON_TV_HTOTAL                             0x080c
+#define RADEON_TV_HDISP                              0x0810
+#define RADEON_TV_HSTART                             0x0818
+#define RADEON_TV_HCOUNT                             0x081C
+#define RADEON_TV_VTOTAL                             0x0820
+#define RADEON_TV_VDISP                              0x0824
+#define RADEON_TV_VCOUNT                             0x0828
+#define RADEON_TV_FTOTAL                             0x082c
+#define RADEON_TV_FCOUNT                             0x0830
+#define RADEON_TV_FRESTART                           0x0834
+#define RADEON_TV_HRESTART                           0x0838
+#define RADEON_TV_VRESTART                           0x083c
+#define RADEON_TV_HOST_READ_DATA                     0x0840
+#define RADEON_TV_HOST_WRITE_DATA                    0x0844
+#define RADEON_TV_HOST_RD_WT_CNTL                    0x0848
+#      define RADEON_HOST_FIFO_RD               (1 << 12)
+#      define RADEON_HOST_FIFO_RD_ACK           (1 << 13)
+#      define RADEON_HOST_FIFO_WT               (1 << 14)
+#      define RADEON_HOST_FIFO_WT_ACK           (1 << 15)
+#define RADEON_TV_VSCALER_CNTL1                      0x084c
+#       define RADEON_UV_INC_MASK                0xffff
+#       define RADEON_UV_INC_SHIFT               0
+#       define RADEON_Y_W_EN                    (1 << 24)
+#       define RADEON_RESTART_FIELD              (1 << 29) /* restart on field 0 */
+#       define RADEON_Y_DEL_W_SIG_SHIFT          26
+#define RADEON_TV_TIMING_CNTL                        0x0850
+#       define RADEON_H_INC_MASK                 0xfff
+#       define RADEON_H_INC_SHIFT                0
+#       define RADEON_REQ_Y_FIRST                (1 << 19)
+#       define RADEON_FORCE_BURST_ALWAYS         (1 << 21)
+#       define RADEON_UV_POST_SCALE_BYPASS       (1 << 23)
+#       define RADEON_UV_OUTPUT_POST_SCALE_SHIFT 24
+#define RADEON_TV_VSCALER_CNTL2                      0x0854
+#       define RADEON_DITHER_MODE                (1 <<  0)
+#       define RADEON_Y_OUTPUT_DITHER_EN         (1 <<  1)
+#       define RADEON_UV_OUTPUT_DITHER_EN        (1 <<  2)
+#       define RADEON_UV_TO_BUF_DITHER_EN        (1 <<  3)
+#define RADEON_TV_Y_FALL_CNTL                        0x0858
+#       define RADEON_Y_FALL_PING_PONG           (1 << 16)
+#       define RADEON_Y_COEF_EN                  (1 << 17)
+#define RADEON_TV_Y_RISE_CNTL                        0x085c
+#       define RADEON_Y_RISE_PING_PONG           (1 << 16)
+#define RADEON_TV_Y_SAW_TOOTH_CNTL                   0x0860
+#define RADEON_TV_UPSAMP_AND_GAIN_CNTL               0x0864
+#      define RADEON_YUPSAMP_EN                 (1 <<  0)
+#      define RADEON_UVUPSAMP_EN                (1 <<  2)
+#define RADEON_TV_GAIN_LIMIT_SETTINGS                0x0868
+#       define RADEON_Y_GAIN_LIMIT_SHIFT         0
+#       define RADEON_UV_GAIN_LIMIT_SHIFT        16
+#define RADEON_TV_LINEAR_GAIN_SETTINGS               0x086c
+#       define RADEON_Y_GAIN_SHIFT               0
+#       define RADEON_UV_GAIN_SHIFT              16
+#define RADEON_TV_MODULATOR_CNTL1                    0x0870
+#      define RADEON_YFLT_EN                    (1 <<  2)
+#      define RADEON_UVFLT_EN                   (1 <<  3)
+#       define RADEON_ALT_PHASE_EN               (1 <<  6)
+#       define RADEON_SYNC_TIP_LEVEL             (1 <<  7)
+#       define RADEON_BLANK_LEVEL_SHIFT          8
+#       define RADEON_SET_UP_LEVEL_SHIFT         16
+#      define RADEON_SLEW_RATE_LIMIT            (1 << 23)
+#       define RADEON_CY_FILT_BLEND_SHIFT        28
+#define RADEON_TV_MODULATOR_CNTL2                    0x0874
+#       define RADEON_TV_U_BURST_LEVEL_MASK     0x1ff
+#       define RADEON_TV_V_BURST_LEVEL_MASK     0x1ff
+#       define RADEON_TV_V_BURST_LEVEL_SHIFT    16
+#define RADEON_TV_CRC_CNTL                           0x0890
+#define RADEON_TV_UV_ADR                             0x08ac
+#      define RADEON_MAX_UV_ADR_MASK            0x000000ff
+#      define RADEON_MAX_UV_ADR_SHIFT           0
+#      define RADEON_TABLE1_BOT_ADR_MASK        0x0000ff00
+#      define RADEON_TABLE1_BOT_ADR_SHIFT       8
+#      define RADEON_TABLE3_TOP_ADR_MASK        0x00ff0000
+#      define RADEON_TABLE3_TOP_ADR_SHIFT       16
+#      define RADEON_HCODE_TABLE_SEL_MASK       0x06000000
+#      define RADEON_HCODE_TABLE_SEL_SHIFT      25
+#      define RADEON_VCODE_TABLE_SEL_MASK       0x18000000
+#      define RADEON_VCODE_TABLE_SEL_SHIFT      27
+#      define RADEON_TV_MAX_FIFO_ADDR           0x1a7
+#      define RADEON_TV_MAX_FIFO_ADDR_INTERNAL  0x1ff
+#define RADEON_TV_PLL_FINE_CNTL                             0x0020     /* PLL */
+#define RADEON_TV_PLL_CNTL                           0x0021    /* PLL */
+#       define RADEON_TV_M0LO_MASK               0xff
+#       define RADEON_TV_M0HI_MASK               0x7
+#       define RADEON_TV_M0HI_SHIFT              18
+#       define RADEON_TV_N0LO_MASK               0x1ff
+#       define RADEON_TV_N0LO_SHIFT              8
+#       define RADEON_TV_N0HI_MASK               0x3
+#       define RADEON_TV_N0HI_SHIFT              21
+#       define RADEON_TV_P_MASK                  0xf
+#       define RADEON_TV_P_SHIFT                 24
+#       define RADEON_TV_SLIP_EN                 (1 << 23)
+#       define RADEON_TV_DTO_EN                  (1 << 28)
+#define RADEON_TV_PLL_CNTL1                          0x0022    /* PLL */
+#       define RADEON_TVPLL_RESET                (1 <<  1)
+#       define RADEON_TVPLL_SLEEP                (1 <<  3)
+#       define RADEON_TVPLL_REFCLK_SEL           (1 <<  4)
+#       define RADEON_TVPCP_SHIFT                8
+#       define RADEON_TVPCP_MASK                 (7 << 8)
+#       define RADEON_TVPVG_SHIFT                11
+#       define RADEON_TVPVG_MASK                 (7 << 11)
+#       define RADEON_TVPDC_SHIFT                14
+#       define RADEON_TVPDC_MASK                 (3 << 14)
+#       define RADEON_TVPLL_TEST_DIS             (1 << 31)
+#       define RADEON_TVCLK_SRC_SEL_TVPLL        (1 << 30)
+
+#define RS400_DISP2_REQ_CNTL1                  0xe30
+#       define RS400_DISP2_START_REQ_LEVEL_SHIFT   0
+#       define RS400_DISP2_START_REQ_LEVEL_MASK    0x3ff
+#       define RS400_DISP2_STOP_REQ_LEVEL_SHIFT    12
+#       define RS400_DISP2_STOP_REQ_LEVEL_MASK     0x3ff
+#       define RS400_DISP2_ALLOW_FID_LEVEL_SHIFT   22
+#       define RS400_DISP2_ALLOW_FID_LEVEL_MASK    0x3ff
+#define RS400_DISP2_REQ_CNTL2                  0xe34
+#       define RS400_DISP2_CRITICAL_POINT_START_SHIFT    12
+#       define RS400_DISP2_CRITICAL_POINT_START_MASK     0x3ff
+#       define RS400_DISP2_CRITICAL_POINT_STOP_SHIFT     22
+#       define RS400_DISP2_CRITICAL_POINT_STOP_MASK      0x3ff
+#define RS400_DMIF_MEM_CNTL1                   0xe38
+#       define RS400_DISP2_START_ADR_SHIFT      0
+#       define RS400_DISP2_START_ADR_MASK       0x3ff
+#       define RS400_DISP1_CRITICAL_POINT_START_SHIFT    12
+#       define RS400_DISP1_CRITICAL_POINT_START_MASK     0x3ff
+#       define RS400_DISP1_CRITICAL_POINT_STOP_SHIFT     22
+#       define RS400_DISP1_CRITICAL_POINT_STOP_MASK      0x3ff
+#define RS400_DISP1_REQ_CNTL1                  0xe3c
+#       define RS400_DISP1_START_REQ_LEVEL_SHIFT   0
+#       define RS400_DISP1_START_REQ_LEVEL_MASK    0x3ff
+#       define RS400_DISP1_STOP_REQ_LEVEL_SHIFT    12
+#       define RS400_DISP1_STOP_REQ_LEVEL_MASK     0x3ff
+#       define RS400_DISP1_ALLOW_FID_LEVEL_SHIFT   22
+#       define RS400_DISP1_ALLOW_FID_LEVEL_MASK    0x3ff
+
+#define RADEON_PCIE_INDEX               0x0030
+#define RADEON_PCIE_DATA                0x0034
+#define RADEON_PCIE_TX_GART_CNTL       0x10
+#      define RADEON_PCIE_TX_GART_EN           (1 << 0)
+#      define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_PASS_THRU (0 << 1)
+#      define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_CLAMP_LO  (1 << 1)
+#      define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD   (3 << 1)
+#      define RADEON_PCIE_TX_GART_MODE_32_128_CACHE    (0 << 3)
+#      define RADEON_PCIE_TX_GART_MODE_8_4_128_CACHE   (1 << 3)
+#      define RADEON_PCIE_TX_GART_CHK_RW_VALID_EN      (1 << 5)
+#      define RADEON_PCIE_TX_GART_INVALIDATE_TLB       (1 << 8)
+#define RADEON_PCIE_TX_DISCARD_RD_ADDR_LO 0x11
+#define RADEON_PCIE_TX_DISCARD_RD_ADDR_HI 0x12
+#define RADEON_PCIE_TX_GART_BASE       0x13
+#define RADEON_PCIE_TX_GART_START_LO   0x14
+#define RADEON_PCIE_TX_GART_START_HI   0x15
+#define RADEON_PCIE_TX_GART_END_LO     0x16
+#define RADEON_PCIE_TX_GART_END_HI     0x17
+#define RADEON_PCIE_TX_GART_ERROR      0x18
+
+#define RADEON_SCRATCH_REG0            0x15e0
+#define RADEON_SCRATCH_REG1            0x15e4
+#define RADEON_SCRATCH_REG2            0x15e8
+#define RADEON_SCRATCH_REG3            0x15ec
+#define RADEON_SCRATCH_REG4            0x15f0
+#define RADEON_SCRATCH_REG5            0x15f4
+
+#endif
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
new file mode 100644 (file)
index 0000000..a853261
--- /dev/null
@@ -0,0 +1,485 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <linux/seq_file.h>
+#include "drmP.h"
+#include "radeon_drm.h"
+#include "radeon_reg.h"
+#include "radeon.h"
+#include "atom.h"
+
+int radeon_debugfs_ib_init(struct radeon_device *rdev);
+
+/*
+ * IB.
+ */
+int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
+{
+       struct radeon_fence *fence;
+       struct radeon_ib *nib;
+       unsigned long i;
+       int r = 0;
+
+       *ib = NULL;
+       r = radeon_fence_create(rdev, &fence);
+       if (r) {
+               DRM_ERROR("failed to create fence for new IB\n");
+               return r;
+       }
+       mutex_lock(&rdev->ib_pool.mutex);
+       i = find_first_zero_bit(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
+       if (i < RADEON_IB_POOL_SIZE) {
+               set_bit(i, rdev->ib_pool.alloc_bm);
+               rdev->ib_pool.ibs[i].length_dw = 0;
+               *ib = &rdev->ib_pool.ibs[i];
+               goto out;
+       }
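+       /*
+        * No free slot in the allocation bitmap: fall back to recycling
+        * the oldest scheduled IB once its fence has signaled (below).
+        */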
+       if (list_empty(&rdev->ib_pool.scheduled_ibs)) {
+               /* nothing we can do here */
+               DRM_ERROR("all IBs allocated, none scheduled.\n");
+               r = -EINVAL;
+               goto out;
+       }
+       /* get the first ib on the scheduled list */
+       nib = list_entry(rdev->ib_pool.scheduled_ibs.next,
+                        struct radeon_ib, list);
+       if (nib->fence == NULL) {
+               /* nothing we can do here */
+               DRM_ERROR("IB %lu scheduled without a fence.\n", nib->idx);
+               r = -EINVAL;
+               goto out;
+       }
+       r = radeon_fence_wait(nib->fence, false);
+       if (r) {
+               DRM_ERROR("radeon: IB(%lu:0x%016lX:%u)\n", nib->idx,
+                         (unsigned long)nib->gpu_addr, nib->length_dw);
+               DRM_ERROR("radeon: GPU lockup detected, failed to get an IB\n");
+               goto out;
+       }
+       radeon_fence_unref(&nib->fence);
+       nib->length_dw = 0;
+       list_del(&nib->list);
+       INIT_LIST_HEAD(&nib->list);
+       *ib = nib;
+out:
+       mutex_unlock(&rdev->ib_pool.mutex);
+       if (r) {
+               radeon_fence_unref(&fence);
+       } else {
+               (*ib)->fence = fence;
+       }
+       return r;
+}
+
+void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
+{
+       struct radeon_ib *tmp = *ib;
+
+       *ib = NULL;
+       if (tmp == NULL) {
+               return;
+       }
+       mutex_lock(&rdev->ib_pool.mutex);
+       if (!list_empty(&tmp->list) && !radeon_fence_signaled(tmp->fence)) {
+               /* IB is scheduled and not yet signaled; don't do anything */
+               mutex_unlock(&rdev->ib_pool.mutex);
+               return;
+       }
+       list_del(&tmp->list);
+       INIT_LIST_HEAD(&tmp->list);
+       if (tmp->fence) {
+               radeon_fence_unref(&tmp->fence);
+       }
+       tmp->length_dw = 0;
+       clear_bit(tmp->idx, rdev->ib_pool.alloc_bm);
+       mutex_unlock(&rdev->ib_pool.mutex);
+}
+
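+/* Pad the IB with type-2 NOP packets until its length is a multiple of
+ * cp.align_mask + 1 dwords, presumably to match the CP fetch size (the
+ * ring itself is padded the same way in radeon_ring_unlock_commit()). */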
+static void radeon_ib_align(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+       while ((ib->length_dw & rdev->cp.align_mask)) {
+               ib->ptr[ib->length_dw++] = PACKET2(0);
+       }
+}
+
+static void radeon_ib_cpu_flush(struct radeon_device *rdev,
+                               struct radeon_ib *ib)
+{
+       unsigned long tmp;
+       unsigned i;
+
+       /* Force a CPU cache flush; ugly, but seems reliable */
+       for (i = 0; i < ib->length_dw; i += (rdev->cp.align_mask + 1)) {
+               tmp = readl(&ib->ptr[i]);
+       }
+}
+
+int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+       int r = 0;
+
+       mutex_lock(&rdev->ib_pool.mutex);
+       radeon_ib_align(rdev, ib);
+       radeon_ib_cpu_flush(rdev, ib);
+       if (!ib->length_dw || !rdev->cp.ready) {
+               /* TODO: nothing in the IB to schedule; this should be reported. */
+               mutex_unlock(&rdev->ib_pool.mutex);
+               DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx);
+               return -EINVAL;
+       }
+       /* 64 dwords should be enough for the fence too */
+       r = radeon_ring_lock(rdev, 64);
+       if (r) {
+               DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
+               mutex_unlock(&rdev->ib_pool.mutex);
+               return r;
+       }
+       radeon_ring_write(rdev, PACKET0(RADEON_CP_IB_BASE, 1));
+       radeon_ring_write(rdev, ib->gpu_addr);
+       radeon_ring_write(rdev, ib->length_dw);
+       radeon_fence_emit(rdev, ib->fence);
+       radeon_ring_unlock_commit(rdev);
+       list_add_tail(&ib->list, &rdev->ib_pool.scheduled_ibs);
+       mutex_unlock(&rdev->ib_pool.mutex);
+       return 0;
+}
+
+int radeon_ib_pool_init(struct radeon_device *rdev)
+{
+       void *ptr;
+       uint64_t gpu_addr;
+       int i;
+       int r = 0;
+
+       /* Allocate 1M object buffer */
+       INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs);
+       r = radeon_object_create(rdev, NULL,  RADEON_IB_POOL_SIZE*64*1024,
+                                true, RADEON_GEM_DOMAIN_GTT,
+                                false, &rdev->ib_pool.robj);
+       if (r) {
+               DRM_ERROR("radeon: failed to create ib pool (%d).\n", r);
+               return r;
+       }
+       r = radeon_object_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
+       if (r) {
+               DRM_ERROR("radeon: failed to pin ib pool (%d).\n", r);
+               return r;
+       }
+       r = radeon_object_kmap(rdev->ib_pool.robj, &ptr);
+       if (r) {
+               DRM_ERROR("radeon: failed to map ib pool (%d).\n", r);
+               return r;
+       }
+       for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
+               unsigned offset;
+
+               offset = i * 64 * 1024;
+               rdev->ib_pool.ibs[i].gpu_addr = gpu_addr + offset;
+               rdev->ib_pool.ibs[i].ptr = ptr + offset;
+               rdev->ib_pool.ibs[i].idx = i;
+               rdev->ib_pool.ibs[i].length_dw = 0;
+               INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].list);
+       }
+       bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
+       rdev->ib_pool.ready = true;
+       DRM_INFO("radeon: ib pool ready.\n");
+       if (radeon_debugfs_ib_init(rdev)) {
+               DRM_ERROR("Failed to register debugfs files for IB!\n");
+       }
+       return r;
+}
+
+void radeon_ib_pool_fini(struct radeon_device *rdev)
+{
+       if (!rdev->ib_pool.ready) {
+               return;
+       }
+       mutex_lock(&rdev->ib_pool.mutex);
+       bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
+       if (rdev->ib_pool.robj) {
+               radeon_object_kunmap(rdev->ib_pool.robj);
+               radeon_object_unref(&rdev->ib_pool.robj);
+               rdev->ib_pool.robj = NULL;
+       }
+       mutex_unlock(&rdev->ib_pool.mutex);
+}
+
+int radeon_ib_test(struct radeon_device *rdev)
+{
+       struct radeon_ib *ib;
+       uint32_t scratch;
+       uint32_t tmp = 0;
+       unsigned i;
+       int r;
+
+       r = radeon_scratch_get(rdev, &scratch);
+       if (r) {
+               DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
+               return r;
+       }
+       WREG32(scratch, 0xCAFEDEAD);
+       r = radeon_ib_get(rdev, &ib);
+       if (r) {
+               return r;
+       }
+       ib->ptr[0] = PACKET0(scratch, 0);
+       ib->ptr[1] = 0xDEADBEEF;
+       ib->ptr[2] = PACKET2(0);
+       ib->ptr[3] = PACKET2(0);
+       ib->ptr[4] = PACKET2(0);
+       ib->ptr[5] = PACKET2(0);
+       ib->ptr[6] = PACKET2(0);
+       ib->ptr[7] = PACKET2(0);
+       ib->length_dw = 8;
+       r = radeon_ib_schedule(rdev, ib);
+       if (r) {
+               radeon_scratch_free(rdev, scratch);
+               radeon_ib_free(rdev, &ib);
+               return r;
+       }
+       r = radeon_fence_wait(ib->fence, false);
+       if (r) {
+               radeon_scratch_free(rdev, scratch);
+               radeon_ib_free(rdev, &ib);
+               return r;
+       }
+       for (i = 0; i < rdev->usec_timeout; i++) {
+               tmp = RREG32(scratch);
+               if (tmp == 0xDEADBEEF) {
+                       break;
+               }
+               DRM_UDELAY(1);
+       }
+       if (i < rdev->usec_timeout) {
+               DRM_INFO("ib test succeeded in %u usecs\n", i);
+       } else {
+               DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
+                         scratch, tmp);
+               r = -EINVAL;
+       }
+       radeon_scratch_free(rdev, scratch);
+       radeon_ib_free(rdev, &ib);
+       return r;
+}
+
+
+/*
+ * Ring.
+ */
+void radeon_ring_free_size(struct radeon_device *rdev)
+{
+       rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
+       /* This works because ring_size is a power of 2 */
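+       /* Example: a 64KB ring holds 16384 dwords (ptr_mask = 16383); with
+        * rptr = 100 and wptr = 300 this yields (100 + 16384 - 300) & 16383
+        * = 16184 free dwords.  A result of 0 means rptr == wptr, i.e. the
+        * ring is empty, hence the fixup below. */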
+       rdev->cp.ring_free_dw = (rdev->cp.rptr + (rdev->cp.ring_size / 4));
+       rdev->cp.ring_free_dw -= rdev->cp.wptr;
+       rdev->cp.ring_free_dw &= rdev->cp.ptr_mask;
+       if (!rdev->cp.ring_free_dw) {
+               rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
+       }
+}
+
+int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw)
+{
+       int r;
+
+       /* Align requested size with padding so unlock_commit can
+        * pad safely */
+       ndw = (ndw + rdev->cp.align_mask) & ~rdev->cp.align_mask;
+       mutex_lock(&rdev->cp.mutex);
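+       /* One dword is always kept in reserve (the loop waits while
+        * ndw > ring_free_dw - 1), presumably so wptr never catches up
+        * with rptr and a full ring is not mistaken for an empty one. */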
+       while (ndw > (rdev->cp.ring_free_dw - 1)) {
+               radeon_ring_free_size(rdev);
+               if (ndw < rdev->cp.ring_free_dw) {
+                       break;
+               }
+               r = radeon_fence_wait_next(rdev);
+               if (r) {
+                       mutex_unlock(&rdev->cp.mutex);
+                       return r;
+               }
+       }
+       rdev->cp.count_dw = ndw;
+       rdev->cp.wptr_old = rdev->cp.wptr;
+       return 0;
+}
+
+void radeon_ring_unlock_commit(struct radeon_device *rdev)
+{
+       unsigned count_dw_pad;
+       unsigned i;
+
+       /* We pad to match fetch size */
+       count_dw_pad = (rdev->cp.align_mask + 1) -
+                      (rdev->cp.wptr & rdev->cp.align_mask);
+       for (i = 0; i < count_dw_pad; i++) {
+               radeon_ring_write(rdev, PACKET2(0));
+       }
+       DRM_MEMORYBARRIER();
+       WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
+       (void)RREG32(RADEON_CP_RB_WPTR);
+       mutex_unlock(&rdev->cp.mutex);
+}
+
+void radeon_ring_unlock_undo(struct radeon_device *rdev)
+{
+       rdev->cp.wptr = rdev->cp.wptr_old;
+       mutex_unlock(&rdev->cp.mutex);
+}
+
+int radeon_ring_test(struct radeon_device *rdev)
+{
+       uint32_t scratch;
+       uint32_t tmp = 0;
+       unsigned i;
+       int r;
+
+       r = radeon_scratch_get(rdev, &scratch);
+       if (r) {
+               DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
+               return r;
+       }
+       WREG32(scratch, 0xCAFEDEAD);
+       r = radeon_ring_lock(rdev, 2);
+       if (r) {
+               DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+               radeon_scratch_free(rdev, scratch);
+               return r;
+       }
+       radeon_ring_write(rdev, PACKET0(scratch, 0));
+       radeon_ring_write(rdev, 0xDEADBEEF);
+       radeon_ring_unlock_commit(rdev);
+       for (i = 0; i < rdev->usec_timeout; i++) {
+               tmp = RREG32(scratch);
+               if (tmp == 0xDEADBEEF) {
+                       break;
+               }
+               DRM_UDELAY(1);
+       }
+       if (i < rdev->usec_timeout) {
+               DRM_INFO("ring test succeeded in %d usecs\n", i);
+       } else {
+               DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
+                         scratch, tmp);
+               r = -EINVAL;
+       }
+       radeon_scratch_free(rdev, scratch);
+       return r;
+}
+
+int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
+{
+       int r;
+
+       rdev->cp.ring_size = ring_size;
+       /* Allocate ring buffer */
+       if (rdev->cp.ring_obj == NULL) {
+               r = radeon_object_create(rdev, NULL, rdev->cp.ring_size,
+                                        true,
+                                        RADEON_GEM_DOMAIN_GTT,
+                                        false,
+                                        &rdev->cp.ring_obj);
+               if (r) {
+                       DRM_ERROR("radeon: failed to create ring buffer (%d).\n", r);
+                       mutex_unlock(&rdev->cp.mutex);
+                       return r;
+               }
+               r = radeon_object_pin(rdev->cp.ring_obj,
+                                     RADEON_GEM_DOMAIN_GTT,
+                                     &rdev->cp.gpu_addr);
+               if (r) {
+                       DRM_ERROR("radeon: failed to pin ring buffer (%d).\n", r);
+                       mutex_unlock(&rdev->cp.mutex);
+                       return r;
+               }
+               r = radeon_object_kmap(rdev->cp.ring_obj,
+                                      (void **)&rdev->cp.ring);
+               if (r) {
+                       DRM_ERROR("radeon: failed to map ring buffer (%d).\n", r);
+                       mutex_unlock(&rdev->cp.mutex);
+                       return r;
+               }
+       }
+       rdev->cp.ptr_mask = (rdev->cp.ring_size / 4) - 1;
+       rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
+       return 0;
+}
+
+void radeon_ring_fini(struct radeon_device *rdev)
+{
+       mutex_lock(&rdev->cp.mutex);
+       if (rdev->cp.ring_obj) {
+               radeon_object_kunmap(rdev->cp.ring_obj);
+               radeon_object_unpin(rdev->cp.ring_obj);
+               radeon_object_unref(&rdev->cp.ring_obj);
+               rdev->cp.ring = NULL;
+               rdev->cp.ring_obj = NULL;
+       }
+       mutex_unlock(&rdev->cp.mutex);
+}
+
+
+/*
+ * Debugfs info
+ */
+#if defined(CONFIG_DEBUG_FS)
+static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct radeon_ib *ib = node->info_ent->data;
+       unsigned i;
+
+       if (ib == NULL) {
+               return 0;
+       }
+       seq_printf(m, "IB %04lu\n", ib->idx);
+       seq_printf(m, "IB fence %p\n", ib->fence);
+       seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
+       for (i = 0; i < ib->length_dw; i++) {
+               seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
+       }
+       return 0;
+}
+
+static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
+static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];
+#endif
+
+int radeon_debugfs_ib_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+       unsigned i;
+
+       for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
+               sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
+               radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
+               radeon_debugfs_ib_list[i].show = &radeon_debugfs_ib_info;
+               radeon_debugfs_ib_list[i].driver_features = 0;
+               radeon_debugfs_ib_list[i].data = &rdev->ib_pool.ibs[i];
+       }
+       return radeon_debugfs_add_files(rdev, radeon_debugfs_ib_list,
+                                       RADEON_IB_POOL_SIZE);
+#else
+       return 0;
+#endif
+}
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
new file mode 100644 (file)
index 0000000..4c087c1
--- /dev/null
@@ -0,0 +1,653 @@
+/*
+ * Copyright 2009 Jerome Glisse.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ *    Jerome Glisse <glisse@freedesktop.org>
+ *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
+ *    Dave Airlie
+ */
+#include <ttm/ttm_bo_api.h>
+#include <ttm/ttm_bo_driver.h>
+#include <ttm/ttm_placement.h>
+#include <ttm/ttm_module.h>
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+
+#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
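+/* mmap offsets below DRM_FILE_PAGE_OFFSET are handled by the legacy
+ * drm_mmap() path; offsets at or above it map TTM buffer objects
+ * (see radeon_mmap() below). */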
+
+static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
+{
+       struct radeon_mman *mman;
+       struct radeon_device *rdev;
+
+       mman = container_of(bdev, struct radeon_mman, bdev);
+       rdev = container_of(mman, struct radeon_device, mman);
+       return rdev;
+}
+
+
+/*
+ * Global memory.
+ */
+static int radeon_ttm_mem_global_init(struct ttm_global_reference *ref)
+{
+       return ttm_mem_global_init(ref->object);
+}
+
+static void radeon_ttm_mem_global_release(struct ttm_global_reference *ref)
+{
+       ttm_mem_global_release(ref->object);
+}
+
+static int radeon_ttm_global_init(struct radeon_device *rdev)
+{
+       struct ttm_global_reference *global_ref;
+       int r;
+
+       rdev->mman.mem_global_referenced = false;
+       global_ref = &rdev->mman.mem_global_ref;
+       global_ref->global_type = TTM_GLOBAL_TTM_MEM;
+       global_ref->size = sizeof(struct ttm_mem_global);
+       global_ref->init = &radeon_ttm_mem_global_init;
+       global_ref->release = &radeon_ttm_mem_global_release;
+       r = ttm_global_item_ref(global_ref);
+       if (r != 0) {
+               DRM_ERROR("Failed referencing a global TTM memory object.\n");
+               return r;
+       }
+       rdev->mman.mem_global_referenced = true;
+       return 0;
+}
+
+static void radeon_ttm_global_fini(struct radeon_device *rdev)
+{
+       if (rdev->mman.mem_global_referenced) {
+               ttm_global_item_unref(&rdev->mman.mem_global_ref);
+               rdev->mman.mem_global_referenced = false;
+       }
+}
+
+struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev);
+
+static struct ttm_backend*
+radeon_create_ttm_backend_entry(struct ttm_bo_device *bdev)
+{
+       struct radeon_device *rdev;
+
+       rdev = radeon_get_rdev(bdev);
+#if __OS_HAS_AGP
+       if (rdev->flags & RADEON_IS_AGP) {
+               return ttm_agp_backend_init(bdev, rdev->ddev->agp->bridge);
+       } else
+#endif
+       {
+               return radeon_ttm_backend_create(rdev);
+       }
+}
+
+static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
+{
+       return 0;
+}
+
+static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+                               struct ttm_mem_type_manager *man)
+{
+       struct radeon_device *rdev;
+
+       rdev = radeon_get_rdev(bdev);
+
+       switch (type) {
+       case TTM_PL_SYSTEM:
+               /* System memory */
+               man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+               man->available_caching = TTM_PL_MASK_CACHING;
+               man->default_caching = TTM_PL_FLAG_CACHED;
+               break;
+       case TTM_PL_TT:
+               man->gpu_offset = 0;
+               man->available_caching = TTM_PL_MASK_CACHING;
+               man->default_caching = TTM_PL_FLAG_CACHED;
+#if __OS_HAS_AGP
+               if (rdev->flags & RADEON_IS_AGP) {
+                       if (!(drm_core_has_AGP(rdev->ddev) && rdev->ddev->agp)) {
+                               DRM_ERROR("AGP is not enabled for memory type %u\n",
+                                         (unsigned)type);
+                               return -EINVAL;
+                       }
+                       man->io_offset = rdev->mc.agp_base;
+                       man->io_size = rdev->mc.gtt_size;
+                       man->io_addr = NULL;
+                       man->flags = TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
+                                    TTM_MEMTYPE_FLAG_MAPPABLE;
+                       man->available_caching = TTM_PL_FLAG_UNCACHED |
+                                                TTM_PL_FLAG_WC;
+                       man->default_caching = TTM_PL_FLAG_WC;
+               } else
+#endif
+               {
+                       man->io_offset = 0;
+                       man->io_size = 0;
+                       man->io_addr = NULL;
+                       man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
+                                    TTM_MEMTYPE_FLAG_CMA;
+               }
+               break;
+       case TTM_PL_VRAM:
+               /* "On-card" video ram */
+               man->gpu_offset = 0;
+               man->flags = TTM_MEMTYPE_FLAG_FIXED |
+                            TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
+                            TTM_MEMTYPE_FLAG_MAPPABLE;
+               man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
+               man->default_caching = TTM_PL_FLAG_WC;
+               man->io_addr = NULL;
+               man->io_offset = rdev->mc.aper_base;
+               man->io_size = rdev->mc.aper_size;
+               break;
+       default:
+               DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+               return -EINVAL;
+       }
+       return 0;
+}
+
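+/* Evicted buffers are always placed in cached system memory, whatever
+ * their current placement (the switch below only has a default case). */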
+static uint32_t radeon_evict_flags(struct ttm_buffer_object *bo)
+{
+       uint32_t cur_placement = bo->mem.placement & ~TTM_PL_MASK_MEMTYPE;
+
+       switch (bo->mem.mem_type) {
+       default:
+               return (cur_placement & ~TTM_PL_MASK_CACHING) |
+                       TTM_PL_FLAG_SYSTEM |
+                       TTM_PL_FLAG_CACHED;
+       }
+}
+
+static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
+{
+       return 0;
+}
+
+static void radeon_move_null(struct ttm_buffer_object *bo,
+                            struct ttm_mem_reg *new_mem)
+{
+       struct ttm_mem_reg *old_mem = &bo->mem;
+
+       BUG_ON(old_mem->mm_node != NULL);
+       *old_mem = *new_mem;
+       new_mem->mm_node = NULL;
+}
+
+static int radeon_move_blit(struct ttm_buffer_object *bo,
+                           bool evict, int no_wait,
+                           struct ttm_mem_reg *new_mem,
+                           struct ttm_mem_reg *old_mem)
+{
+       struct radeon_device *rdev;
+       uint64_t old_start, new_start;
+       struct radeon_fence *fence;
+       int r;
+
+       rdev = radeon_get_rdev(bo->bdev);
+       r = radeon_fence_create(rdev, &fence);
+       if (unlikely(r)) {
+               return r;
+       }
+       old_start = old_mem->mm_node->start << PAGE_SHIFT;
+       new_start = new_mem->mm_node->start << PAGE_SHIFT;
+
+       switch (old_mem->mem_type) {
+       case TTM_PL_VRAM:
+               old_start += rdev->mc.vram_location;
+               break;
+       case TTM_PL_TT:
+               old_start += rdev->mc.gtt_location;
+               break;
+       default:
+               DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
+               return -EINVAL;
+       }
+       switch (new_mem->mem_type) {
+       case TTM_PL_VRAM:
+               new_start += rdev->mc.vram_location;
+               break;
+       case TTM_PL_TT:
+               new_start += rdev->mc.gtt_location;
+               break;
+       default:
+               DRM_ERROR("Unknown placement %d\n", new_mem->mem_type);
+               return -EINVAL;
+       }
+       if (!rdev->cp.ready) {
+               DRM_ERROR("Trying to move memory with CP turned off.\n");
+               return -EINVAL;
+       }
+       r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence);
+       /* FIXME: handle copy error */
+       r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
+                                     evict, no_wait, new_mem);
+       radeon_fence_unref(&fence);
+       return r;
+}
+
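+/* VRAM -> system moves are staged through a temporary GTT placement:
+ * the blit engine copies VRAM -> GTT, after which ttm_bo_move_ttm()
+ * finishes the move to the final placement. */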
+static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
+                               bool evict, bool interruptible, bool no_wait,
+                               struct ttm_mem_reg *new_mem)
+{
+       struct radeon_device *rdev;
+       struct ttm_mem_reg *old_mem = &bo->mem;
+       struct ttm_mem_reg tmp_mem;
+       uint32_t proposed_placement;
+       int r;
+
+       rdev = radeon_get_rdev(bo->bdev);
+       tmp_mem = *new_mem;
+       tmp_mem.mm_node = NULL;
+       proposed_placement = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
+       r = ttm_bo_mem_space(bo, proposed_placement, &tmp_mem,
+                            interruptible, no_wait);
+       if (unlikely(r)) {
+               return r;
+       }
+       r = ttm_tt_bind(bo->ttm, &tmp_mem);
+       if (unlikely(r)) {
+               goto out_cleanup;
+       }
+       r = radeon_move_blit(bo, true, no_wait, &tmp_mem, old_mem);
+       if (unlikely(r)) {
+               goto out_cleanup;
+       }
+       r = ttm_bo_move_ttm(bo, true, no_wait, new_mem);
+out_cleanup:
+       if (tmp_mem.mm_node) {
+               spin_lock(&rdev->mman.bdev.lru_lock);
+               drm_mm_put_block(tmp_mem.mm_node);
+               spin_unlock(&rdev->mman.bdev.lru_lock);
+               return r;
+       }
+       return r;
+}
+
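+/* The reverse path: for system -> VRAM the buffer is first bound to a
+ * GTT placement, then the blit engine copies it into VRAM. */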
+static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
+                               bool evict, bool interruptible, bool no_wait,
+                               struct ttm_mem_reg *new_mem)
+{
+       struct radeon_device *rdev;
+       struct ttm_mem_reg *old_mem = &bo->mem;
+       struct ttm_mem_reg tmp_mem;
+       uint32_t proposed_flags;
+       int r;
+
+       rdev = radeon_get_rdev(bo->bdev);
+       tmp_mem = *new_mem;
+       tmp_mem.mm_node = NULL;
+       proposed_flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
+       r = ttm_bo_mem_space(bo, proposed_flags, &tmp_mem,
+                            interruptible, no_wait);
+       if (unlikely(r)) {
+               return r;
+       }
+       r = ttm_bo_move_ttm(bo, true, no_wait, &tmp_mem);
+       if (unlikely(r)) {
+               goto out_cleanup;
+       }
+       r = radeon_move_blit(bo, true, no_wait, new_mem, old_mem);
+       if (unlikely(r)) {
+               goto out_cleanup;
+       }
+out_cleanup:
+       if (tmp_mem.mm_node) {
+               spin_lock(&rdev->mman.bdev.lru_lock);
+               drm_mm_put_block(tmp_mem.mm_node);
+               spin_unlock(&rdev->mman.bdev.lru_lock);
+               return r;
+       }
+       return r;
+}
+
+static int radeon_bo_move(struct ttm_buffer_object *bo,
+                         bool evict, bool interruptible, bool no_wait,
+                         struct ttm_mem_reg *new_mem)
+{
+       struct radeon_device *rdev;
+       struct ttm_mem_reg *old_mem = &bo->mem;
+       int r;
+
+       rdev = radeon_get_rdev(bo->bdev);
+       if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
+               radeon_move_null(bo, new_mem);
+               return 0;
+       }
+       if ((old_mem->mem_type == TTM_PL_TT &&
+            new_mem->mem_type == TTM_PL_SYSTEM) ||
+           (old_mem->mem_type == TTM_PL_SYSTEM &&
+            new_mem->mem_type == TTM_PL_TT)) {
+               /* binding is enough */
+               radeon_move_null(bo, new_mem);
+               return 0;
+       }
+       if (!rdev->cp.ready) {
+               /* use memcpy */
+               DRM_ERROR("CP is not ready, using memcpy.\n");
+               return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+       }
+
+       if (old_mem->mem_type == TTM_PL_VRAM &&
+           new_mem->mem_type == TTM_PL_SYSTEM) {
+               return radeon_move_vram_ram(bo, evict, interruptible,
+                                           no_wait, new_mem);
+       } else if (old_mem->mem_type == TTM_PL_SYSTEM &&
+                  new_mem->mem_type == TTM_PL_VRAM) {
+               return radeon_move_ram_vram(bo, evict, interruptible,
+                                           no_wait, new_mem);
+       } else {
+               r = radeon_move_blit(bo, evict, no_wait, new_mem, old_mem);
+               if (unlikely(r)) {
+                       return r;
+               }
+       }
+       return r;
+}
+
+const uint32_t radeon_mem_prios[] = {
+       TTM_PL_VRAM,
+       TTM_PL_TT,
+       TTM_PL_SYSTEM,
+};
+
+const uint32_t radeon_busy_prios[] = {
+       TTM_PL_TT,
+       TTM_PL_VRAM,
+       TTM_PL_SYSTEM,
+};
+
+static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg,
+                               bool lazy, bool interruptible)
+{
+       return radeon_fence_wait((struct radeon_fence *)sync_obj, interruptible);
+}
+
+static int radeon_sync_obj_flush(void *sync_obj, void *sync_arg)
+{
+       return 0;
+}
+
+static void radeon_sync_obj_unref(void **sync_obj)
+{
+       radeon_fence_unref((struct radeon_fence **)sync_obj);
+}
+
+static void *radeon_sync_obj_ref(void *sync_obj)
+{
+       return radeon_fence_ref((struct radeon_fence *)sync_obj);
+}
+
+static bool radeon_sync_obj_signaled(void *sync_obj, void *sync_arg)
+{
+       return radeon_fence_signaled((struct radeon_fence *)sync_obj);
+}
+
+static struct ttm_bo_driver radeon_bo_driver = {
+       .mem_type_prio = radeon_mem_prios,
+       .mem_busy_prio = radeon_busy_prios,
+       .num_mem_type_prio = ARRAY_SIZE(radeon_mem_prios),
+       .num_mem_busy_prio = ARRAY_SIZE(radeon_busy_prios),
+       .create_ttm_backend_entry = &radeon_create_ttm_backend_entry,
+       .invalidate_caches = &radeon_invalidate_caches,
+       .init_mem_type = &radeon_init_mem_type,
+       .evict_flags = &radeon_evict_flags,
+       .move = &radeon_bo_move,
+       .verify_access = &radeon_verify_access,
+       .sync_obj_signaled = &radeon_sync_obj_signaled,
+       .sync_obj_wait = &radeon_sync_obj_wait,
+       .sync_obj_flush = &radeon_sync_obj_flush,
+       .sync_obj_unref = &radeon_sync_obj_unref,
+       .sync_obj_ref = &radeon_sync_obj_ref,
+};
+
+int radeon_ttm_init(struct radeon_device *rdev)
+{
+       int r;
+
+       r = radeon_ttm_global_init(rdev);
+       if (r) {
+               return r;
+       }
+       /* No other users of the address space, so set it to 0 */
+       r = ttm_bo_device_init(&rdev->mman.bdev,
+                              rdev->mman.mem_global_ref.object,
+                              &radeon_bo_driver, DRM_FILE_PAGE_OFFSET);
+       if (r) {
+               DRM_ERROR("failed initializing buffer object driver (%d).\n", r);
+               return r;
+       }
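+       /* Create the VRAM memory manager, sized to the visible aperture. */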
+       r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM, 0,
+                          ((rdev->mc.aper_size) >> PAGE_SHIFT));
+       if (r) {
+               DRM_ERROR("Failed initializing VRAM heap.\n");
+               return r;
+       }
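+       /* Reserve and pin a 256KB VRAM object for the stolen VGA memory. */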
+       r = radeon_object_create(rdev, NULL, 256 * 1024, true,
+                                RADEON_GEM_DOMAIN_VRAM, false,
+                                &rdev->stollen_vga_memory);
+       if (r) {
+               return r;
+       }
+       r = radeon_object_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
+       if (r) {
+               radeon_object_unref(&rdev->stollen_vga_memory);
+               return r;
+       }
+       DRM_INFO("radeon: %uM of VRAM memory ready.\n",
+                rdev->mc.vram_size / (1024 * 1024));
+       r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT, 0,
+                          ((rdev->mc.gtt_size) >> PAGE_SHIFT));
+       if (r) {
+               DRM_ERROR("Failed initializing GTT heap.\n");
+               return r;
+       }
+       DRM_INFO("radeon: %uM of GTT memory ready.\n",
+                rdev->mc.gtt_size / (1024 * 1024));
+       if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
+               rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
+       }
+       return 0;
+}
+
+void radeon_ttm_fini(struct radeon_device *rdev)
+{
+       if (rdev->stollen_vga_memory) {
+               radeon_object_unpin(rdev->stollen_vga_memory);
+               radeon_object_unref(&rdev->stollen_vga_memory);
+       }
+       ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM);
+       ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
+       ttm_bo_device_release(&rdev->mman.bdev);
+       radeon_gart_fini(rdev);
+       radeon_ttm_global_fini(rdev);
+       DRM_INFO("radeon: ttm finalized\n");
+}
+
+static struct vm_operations_struct radeon_ttm_vm_ops;
+static struct vm_operations_struct *ttm_vm_ops = NULL;
+
+static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       struct ttm_buffer_object *bo;
+       int r;
+
+       bo = (struct ttm_buffer_object *)vma->vm_private_data;
+       if (bo == NULL) {
+               return VM_FAULT_NOPAGE;
+       }
+       r = ttm_vm_ops->fault(vma, vmf);
+       return r;
+}
+
+int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+       struct drm_file *file_priv;
+       struct radeon_device *rdev;
+       int r;
+
+       if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
+               return drm_mmap(filp, vma);
+       }
+
+       file_priv = (struct drm_file *)filp->private_data;
+       rdev = file_priv->minor->dev->dev_private;
+       if (rdev == NULL) {
+               return -EINVAL;
+       }
+       r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
+       if (unlikely(r != 0)) {
+               return r;
+       }
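+       /* On the first mmap, save TTM's vm_ops and substitute our own fault handler. */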
+       if (unlikely(ttm_vm_ops == NULL)) {
+               ttm_vm_ops = vma->vm_ops;
+               radeon_ttm_vm_ops = *ttm_vm_ops;
+               radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
+       }
+       vma->vm_ops = &radeon_ttm_vm_ops;
+       return 0;
+}
+
+
+/*
+ * TTM backend functions.
+ */
+struct radeon_ttm_backend {
+       struct ttm_backend              backend;
+       struct radeon_device            *rdev;
+       unsigned long                   num_pages;
+       struct page                     **pages;
+       struct page                     *dummy_read_page;
+       bool                            populated;
+       bool                            bound;
+       unsigned                        offset;
+};
+
+static int radeon_ttm_backend_populate(struct ttm_backend *backend,
+                                      unsigned long num_pages,
+                                      struct page **pages,
+                                      struct page *dummy_read_page)
+{
+       struct radeon_ttm_backend *gtt;
+
+       gtt = container_of(backend, struct radeon_ttm_backend, backend);
+       gtt->pages = pages;
+       gtt->num_pages = num_pages;
+       gtt->dummy_read_page = dummy_read_page;
+       gtt->populated = true;
+       return 0;
+}
+
+static void radeon_ttm_backend_clear(struct ttm_backend *backend)
+{
+       struct radeon_ttm_backend *gtt;
+
+       gtt = container_of(backend, struct radeon_ttm_backend, backend);
+       gtt->pages = NULL;
+       gtt->num_pages = 0;
+       gtt->dummy_read_page = NULL;
+       gtt->populated = false;
+       gtt->bound = false;
+}
+
+
+static int radeon_ttm_backend_bind(struct ttm_backend *backend,
+                                  struct ttm_mem_reg *bo_mem)
+{
+       struct radeon_ttm_backend *gtt;
+       int r;
+
+       gtt = container_of(backend, struct radeon_ttm_backend, backend);
+       gtt->offset = bo_mem->mm_node->start << PAGE_SHIFT;
+       if (!gtt->num_pages) {
+               WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", gtt->num_pages, bo_mem, backend);
+       }
+       r = radeon_gart_bind(gtt->rdev, gtt->offset,
+                            gtt->num_pages, gtt->pages);
+       if (r) {
+               DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
+                         gtt->num_pages, gtt->offset);
+               return r;
+       }
+       gtt->bound = true;
+       return 0;
+}
+
+static int radeon_ttm_backend_unbind(struct ttm_backend *backend)
+{
+       struct radeon_ttm_backend *gtt;
+
+       gtt = container_of(backend, struct radeon_ttm_backend, backend);
+       radeon_gart_unbind(gtt->rdev, gtt->offset, gtt->num_pages);
+       gtt->bound = false;
+       return 0;
+}
+
+static void radeon_ttm_backend_destroy(struct ttm_backend *backend)
+{
+       struct radeon_ttm_backend *gtt;
+
+       gtt = container_of(backend, struct radeon_ttm_backend, backend);
+       if (gtt->bound) {
+               radeon_ttm_backend_unbind(backend);
+       }
+       kfree(gtt);
+}
+
+static struct ttm_backend_func radeon_backend_func = {
+       .populate = &radeon_ttm_backend_populate,
+       .clear = &radeon_ttm_backend_clear,
+       .bind = &radeon_ttm_backend_bind,
+       .unbind = &radeon_ttm_backend_unbind,
+       .destroy = &radeon_ttm_backend_destroy,
+};
+
+struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev)
+{
+       struct radeon_ttm_backend *gtt;
+
+       gtt = kzalloc(sizeof(struct radeon_ttm_backend), GFP_KERNEL);
+       if (gtt == NULL) {
+               return NULL;
+       }
+       gtt->backend.bdev = &rdev->mman.bdev;
+       gtt->backend.flags = 0;
+       gtt->backend.func = &radeon_backend_func;
+       gtt->rdev = rdev;
+       gtt->pages = NULL;
+       gtt->num_pages = 0;
+       gtt->dummy_read_page = NULL;
+       gtt->populated = false;
+       gtt->bound = false;
+       return &gtt->backend;
+}
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
new file mode 100644 (file)
index 0000000..cc074b5
--- /dev/null
@@ -0,0 +1,411 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <linux/seq_file.h>
+#include <drm/drmP.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+
+/* rs400,rs480 depend on: */
+void r100_hdp_reset(struct radeon_device *rdev);
+void r100_mc_disable_clients(struct radeon_device *rdev);
+int r300_mc_wait_for_idle(struct radeon_device *rdev);
+void r420_pipes_init(struct radeon_device *rdev);
+
+/* This file gathers functions specific to:
+ * rs400,rs480
+ *
+ * Some of these functions might be used by newer ASICs.
+ */
+void rs400_gpu_init(struct radeon_device *rdev);
+int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev);
+
+
+/*
+ * GART functions.
+ */
+void rs400_gart_adjust_size(struct radeon_device *rdev)
+{
+       /* Check gart size */
+       switch (rdev->mc.gtt_size/(1024*1024)) {
+       case 32:
+       case 64:
+       case 128:
+       case 256:
+       case 512:
+       case 1024:
+       case 2048:
+               break;
+       default:
+               DRM_ERROR("Unable to use IGP GART size %uM\n",
+                         rdev->mc.gtt_size >> 20);
+               DRM_ERROR("Valid GART sizes for IGP are 32M,64M,128M,256M,512M,1G,2G\n");
+               DRM_ERROR("Forcing GART size to 32M\n");
+               rdev->mc.gtt_size = 32 * 1024 * 1024;
+               return;
+       }
+       if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) {
+               /* FIXME: RS400 & RS480 seem to have issues with GART size
+                * when there is 4G of system memory (needs more testing) */
+               rdev->mc.gtt_size = 32 * 1024 * 1024;
+               DRM_ERROR("Forcing GART size to 32M (possible ASIC bug?)\n");
+       }
+}
+
+void rs400_gart_tlb_flush(struct radeon_device *rdev)
+{
+       uint32_t tmp;
+       unsigned int timeout = rdev->usec_timeout;
+
+       WREG32_MC(RS480_GART_CACHE_CNTRL, RS480_GART_CACHE_INVALIDATE);
+       do {
+               tmp = RREG32_MC(RS480_GART_CACHE_CNTRL);
+               if ((tmp & RS480_GART_CACHE_INVALIDATE) == 0)
+                       break;
+               DRM_UDELAY(1);
+               timeout--;
+       } while (timeout > 0);
+       WREG32_MC(RS480_GART_CACHE_CNTRL, 0);
+}
+
+int rs400_gart_enable(struct radeon_device *rdev)
+{
+       uint32_t size_reg;
+       uint32_t tmp;
+       int r;
+
+       /* Initialize common gart structure */
+       r = radeon_gart_init(rdev);
+       if (r) {
+               return r;
+       }
+       if (rs400_debugfs_pcie_gart_info_init(rdev)) {
+               DRM_ERROR("Failed to register debugfs file for RS400 GART!\n");
+       }
+
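+       /* Disallow GPU accesses that fall outside the PCI GART. */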
+       tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
+       tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
+       WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
+       /* Check gart size */
+       switch (rdev->mc.gtt_size / (1024 * 1024)) {
+       case 32:
+               size_reg = RS480_VA_SIZE_32MB;
+               break;
+       case 64:
+               size_reg = RS480_VA_SIZE_64MB;
+               break;
+       case 128:
+               size_reg = RS480_VA_SIZE_128MB;
+               break;
+       case 256:
+               size_reg = RS480_VA_SIZE_256MB;
+               break;
+       case 512:
+               size_reg = RS480_VA_SIZE_512MB;
+               break;
+       case 1024:
+               size_reg = RS480_VA_SIZE_1GB;
+               break;
+       case 2048:
+               size_reg = RS480_VA_SIZE_2GB;
+               break;
+       default:
+               return -EINVAL;
+       }
+       if (rdev->gart.table.ram.ptr == NULL) {
+               rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
+               r = radeon_gart_table_ram_alloc(rdev);
+               if (r) {
+                       return r;
+               }
+       }
+       /* It should be fine to program it to max value */
+       if (rdev->family == CHIP_RS690 || (rdev->family == CHIP_RS740)) {
+               WREG32_MC(RS690_MCCFG_AGP_BASE, 0xFFFFFFFF);
+               WREG32_MC(RS690_MCCFG_AGP_BASE_2, 0);
+       } else {
+               WREG32(RADEON_AGP_BASE, 0xFFFFFFFF);
+               WREG32(RS480_AGP_BASE_2, 0);
+       }
+       tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
+       tmp = REG_SET(RS690_MC_AGP_TOP, tmp >> 16);
+       tmp |= REG_SET(RS690_MC_AGP_START, rdev->mc.gtt_location >> 16);
+       if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
+               WREG32_MC(RS690_MCCFG_AGP_LOCATION, tmp);
+               tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
+               WREG32(RADEON_BUS_CNTL, tmp);
+       } else {
+               WREG32(RADEON_MC_AGP_LOCATION, tmp);
+               tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
+               WREG32(RADEON_BUS_CNTL, tmp);
+       }
+       /* The table should be in the 32-bit address space, so ignore the bits above. */
+       tmp = rdev->gart.table_addr & 0xfffff000;
+       WREG32_MC(RS480_GART_BASE, tmp);
+       /* TODO: more tweaking here */
+       WREG32_MC(RS480_GART_FEATURE_ID,
+                 (RS480_TLB_ENABLE |
+                  RS480_GTW_LAC_EN | RS480_1LEVEL_GART));
+       /* Disable snooping */
+       WREG32_MC(RS480_AGP_MODE_CNTL,
+                 (1 << RS480_REQ_TYPE_SNOOP_SHIFT) | RS480_REQ_TYPE_SNOOP_DIS);
+       /* Disable AGP mode */
+       /* FIXME: according to doc we should set HIDE_MMCFG_BAR=0,
+        * AGPMODE30=0 & AGP30ENHANCED=0 in NB_CNTL */
+       if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
+               WREG32_MC(RS480_MC_MISC_CNTL,
+                         (RS480_GART_INDEX_REG_EN | RS690_BLOCK_GFX_D3_EN));
+       } else {
+               WREG32_MC(RS480_MC_MISC_CNTL, RS480_GART_INDEX_REG_EN);
+       }
+       /* Enable gart */
+       WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | size_reg));
+       rs400_gart_tlb_flush(rdev);
+       rdev->gart.ready = true;
+       return 0;
+}
+
+void rs400_gart_disable(struct radeon_device *rdev)
+{
+       uint32_t tmp;
+
+       tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
+       tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
+       WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
+       WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, 0);
+}
+
+int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
+{
+       if (i < 0 || i >= rdev->gart.num_gpu_pages) {
+               return -EINVAL;
+       }
+       rdev->gart.table.ram.ptr[i] = cpu_to_le32(((uint32_t)addr) | 0xC);
+       return 0;
+}
+
+
+/*
+ * MC functions.
+ */
+int rs400_mc_init(struct radeon_device *rdev)
+{
+       uint32_t tmp;
+       int r;
+
+       if (r100_debugfs_rbbm_init(rdev)) {
+               DRM_ERROR("Failed to register debugfs file for RBBM!\n");
+       }
+
+       rs400_gpu_init(rdev);
+       rs400_gart_disable(rdev);
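+       /* Place the GTT right after VRAM, aligned to the GTT size. */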
+       rdev->mc.gtt_location = rdev->mc.vram_size;
+       rdev->mc.gtt_location += (rdev->mc.gtt_size - 1);
+       rdev->mc.gtt_location &= ~(rdev->mc.gtt_size - 1);
+       rdev->mc.vram_location = 0xFFFFFFFFUL;
+       r = radeon_mc_setup(rdev);
+       if (r) {
+               return r;
+       }
+
+       r100_mc_disable_clients(rdev);
+       if (r300_mc_wait_for_idle(rdev)) {
+               printk(KERN_WARNING "Failed to wait MC idle while "
+                      "programming pipes. Bad things might happen.\n");
+       }
+
+       tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1;
+       tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16);
+       tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16);
+       WREG32(RADEON_MC_FB_LOCATION, tmp);
+       tmp = RREG32(RADEON_HOST_PATH_CNTL) | RADEON_HP_LIN_RD_CACHE_DIS;
+       WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE);
+       (void)RREG32(RADEON_HOST_PATH_CNTL);
+       WREG32(RADEON_HOST_PATH_CNTL, tmp);
+       (void)RREG32(RADEON_HOST_PATH_CNTL);
+       return 0;
+}
+
+void rs400_mc_fini(struct radeon_device *rdev)
+{
+       rs400_gart_disable(rdev);
+       radeon_gart_table_ram_free(rdev);
+       radeon_gart_fini(rdev);
+}
+
+
+/*
+ * Global GPU functions
+ */
+void rs400_errata(struct radeon_device *rdev)
+{
+       rdev->pll_errata = 0;
+}
+
+void rs400_gpu_init(struct radeon_device *rdev)
+{
+       /* FIXME: HDP in the same place on rs400? */
+       r100_hdp_reset(rdev);
+       /* FIXME: is this correct? */
+       r420_pipes_init(rdev);
+       if (r300_mc_wait_for_idle(rdev)) {
+               printk(KERN_WARNING "Failed to wait MC idle while "
+                      "programming pipes. Bad things might happen.\n");
+       }
+}
+
+
+/*
+ * VRAM info.
+ */
+void rs400_vram_info(struct radeon_device *rdev)
+{
+       uint32_t tom;
+
+       rs400_gart_adjust_size(rdev);
+       /* DDR for all cards after R300 & IGP */
+       rdev->mc.vram_is_ddr = true;
+       rdev->mc.vram_width = 128;
+
+       /* read NB_TOM to get the amount of ram stolen for the GPU */
+       tom = RREG32(RADEON_NB_TOM);
+       rdev->mc.vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16);
+       WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
+
+       /* Could the aperture size report 0? */
+       rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
+       rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+}
+
+
+/*
+ * Indirect registers accessor
+ */
+uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+       uint32_t r;
+
+       WREG32(RS480_NB_MC_INDEX, reg & 0xff);
+       r = RREG32(RS480_NB_MC_DATA);
+       WREG32(RS480_NB_MC_INDEX, 0xff);
+       return r;
+}
+
+void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+       WREG32(RS480_NB_MC_INDEX, ((reg) & 0xff) | RS480_NB_MC_IND_WR_EN);
+       WREG32(RS480_NB_MC_DATA, (v));
+       WREG32(RS480_NB_MC_INDEX, 0xff);
+}
+
+
+/*
+ * Debugfs info
+ */
+#if defined(CONFIG_DEBUG_FS)
+static int rs400_debugfs_gart_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       uint32_t tmp;
+
+       tmp = RREG32(RADEON_HOST_PATH_CNTL);
+       seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp);
+       tmp = RREG32(RADEON_BUS_CNTL);
+       seq_printf(m, "BUS_CNTL 0x%08x\n", tmp);
+       tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
+       seq_printf(m, "AIC_CTRL_SCRATCH 0x%08x\n", tmp);
+       if (rdev->family == CHIP_RS690 || (rdev->family == CHIP_RS740)) {
+               tmp = RREG32_MC(RS690_MCCFG_AGP_BASE);
+               seq_printf(m, "MCCFG_AGP_BASE 0x%08x\n", tmp);
+               tmp = RREG32_MC(RS690_MCCFG_AGP_BASE_2);
+               seq_printf(m, "MCCFG_AGP_BASE_2 0x%08x\n", tmp);
+               tmp = RREG32_MC(RS690_MCCFG_AGP_LOCATION);
+               seq_printf(m, "MCCFG_AGP_LOCATION 0x%08x\n", tmp);
+               tmp = RREG32_MC(0x100);
+               seq_printf(m, "MCCFG_FB_LOCATION 0x%08x\n", tmp);
+               tmp = RREG32(0x134);
+               seq_printf(m, "HDP_FB_LOCATION 0x%08x\n", tmp);
+       } else {
+               tmp = RREG32(RADEON_AGP_BASE);
+               seq_printf(m, "AGP_BASE 0x%08x\n", tmp);
+               tmp = RREG32(RS480_AGP_BASE_2);
+               seq_printf(m, "AGP_BASE_2 0x%08x\n", tmp);
+               tmp = RREG32(RADEON_MC_AGP_LOCATION);
+               seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp);
+       }
+       tmp = RREG32_MC(RS480_GART_BASE);
+       seq_printf(m, "GART_BASE 0x%08x\n", tmp);
+       tmp = RREG32_MC(RS480_GART_FEATURE_ID);
+       seq_printf(m, "GART_FEATURE_ID 0x%08x\n", tmp);
+       tmp = RREG32_MC(RS480_AGP_MODE_CNTL);
+       seq_printf(m, "AGP_MODE_CONTROL 0x%08x\n", tmp);
+       tmp = RREG32_MC(RS480_MC_MISC_CNTL);
+       seq_printf(m, "MC_MISC_CNTL 0x%08x\n", tmp);
+       tmp = RREG32_MC(0x5F);
+       seq_printf(m, "MC_MISC_UMA_CNTL 0x%08x\n", tmp);
+       tmp = RREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE);
+       seq_printf(m, "AGP_ADDRESS_SPACE_SIZE 0x%08x\n", tmp);
+       tmp = RREG32_MC(RS480_GART_CACHE_CNTRL);
+       seq_printf(m, "GART_CACHE_CNTRL 0x%08x\n", tmp);
+       tmp = RREG32_MC(0x3B);
+       seq_printf(m, "MC_GART_ERROR_ADDRESS 0x%08x\n", tmp);
+       tmp = RREG32_MC(0x3C);
+       seq_printf(m, "MC_GART_ERROR_ADDRESS_HI 0x%08x\n", tmp);
+       tmp = RREG32_MC(0x30);
+       seq_printf(m, "GART_ERROR_0 0x%08x\n", tmp);
+       tmp = RREG32_MC(0x31);
+       seq_printf(m, "GART_ERROR_1 0x%08x\n", tmp);
+       tmp = RREG32_MC(0x32);
+       seq_printf(m, "GART_ERROR_2 0x%08x\n", tmp);
+       tmp = RREG32_MC(0x33);
+       seq_printf(m, "GART_ERROR_3 0x%08x\n", tmp);
+       tmp = RREG32_MC(0x34);
+       seq_printf(m, "GART_ERROR_4 0x%08x\n", tmp);
+       tmp = RREG32_MC(0x35);
+       seq_printf(m, "GART_ERROR_5 0x%08x\n", tmp);
+       tmp = RREG32_MC(0x36);
+       seq_printf(m, "GART_ERROR_6 0x%08x\n", tmp);
+       tmp = RREG32_MC(0x37);
+       seq_printf(m, "GART_ERROR_7 0x%08x\n", tmp);
+       return 0;
+}
+
+static struct drm_info_list rs400_gart_info_list[] = {
+       {"rs400_gart_info", rs400_debugfs_gart_info, 0, NULL},
+};
+#endif
+
+int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+       return radeon_debugfs_add_files(rdev, rs400_gart_info_list, 1);
+#else
+       return 0;
+#endif
+}
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
new file mode 100644 (file)
index 0000000..ab0c967
--- /dev/null
@@ -0,0 +1,324 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include "drmP.h"
+#include "radeon_reg.h"
+#include "radeon.h"
+
+/* rs600 depends on: */
+void r100_hdp_reset(struct radeon_device *rdev);
+int r100_gui_wait_for_idle(struct radeon_device *rdev);
+int r300_mc_wait_for_idle(struct radeon_device *rdev);
+void r420_pipes_init(struct radeon_device *rdev);
+
+/* This file gathers functions specific to:
+ * rs600
+ *
+ * Some of these functions might be used by newer ASICs.
+ */
+void rs600_gpu_init(struct radeon_device *rdev);
+int rs600_mc_wait_for_idle(struct radeon_device *rdev);
+void rs600_disable_vga(struct radeon_device *rdev);
+
+
+/*
+ * GART.
+ */
+void rs600_gart_tlb_flush(struct radeon_device *rdev)
+{
+       uint32_t tmp;
+
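+       /* Toggle the invalidate bits to flush the L1 TLBs and the L2 cache. */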
+       tmp = RREG32_MC(RS600_MC_PT0_CNTL);
+       tmp &= ~(RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE);
+       WREG32_MC(RS600_MC_PT0_CNTL, tmp);
+
+       tmp = RREG32_MC(RS600_MC_PT0_CNTL);
+       tmp |= RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE;
+       WREG32_MC(RS600_MC_PT0_CNTL, tmp);
+
+       tmp = RREG32_MC(RS600_MC_PT0_CNTL);
+       tmp &= ~(RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE);
+       WREG32_MC(RS600_MC_PT0_CNTL, tmp);
+       tmp = RREG32_MC(RS600_MC_PT0_CNTL);
+}
+
+int rs600_gart_enable(struct radeon_device *rdev)
+{
+       uint32_t tmp;
+       int i;
+       int r;
+
+       /* Initialize common gart structure */
+       r = radeon_gart_init(rdev);
+       if (r) {
+               return r;
+       }
+       rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
+       r = radeon_gart_table_vram_alloc(rdev);
+       if (r) {
+               return r;
+       }
+       /* FIXME: setup default page */
+       WREG32_MC(RS600_MC_PT0_CNTL,
+                (RS600_EFFECTIVE_L2_CACHE_SIZE(6) |
+                 RS600_EFFECTIVE_L2_QUEUE_SIZE(6)));
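+       /* Enable translation and fragment processing for every MC client. */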
+       for (i = 0; i < 19; i++) {
+               WREG32_MC(RS600_MC_PT0_CLIENT0_CNTL + i,
+                        (RS600_ENABLE_TRANSLATION_MODE_OVERRIDE |
+                         RS600_SYSTEM_ACCESS_MODE_IN_SYS |
+                         RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_DEFAULT_PAGE |
+                         RS600_EFFECTIVE_L1_CACHE_SIZE(3) |
+                         RS600_ENABLE_FRAGMENT_PROCESSING |
+                         RS600_EFFECTIVE_L1_QUEUE_SIZE(3)));
+       }
+
+       /* Map the system aperture to the GART space */
+       WREG32_MC(RS600_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.gtt_location);
+       tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
+       WREG32_MC(RS600_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, tmp);
+
+       /* enable first context */
+       WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_location);
+       tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
+       WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_END_ADDR, tmp);
+       WREG32_MC(RS600_MC_PT0_CONTEXT0_CNTL,
+                (RS600_ENABLE_PAGE_TABLE | RS600_PAGE_TABLE_TYPE_FLAT));
+       /* disable all other contexts */
+       for (i = 1; i < 8; i++) {
+               WREG32_MC(RS600_MC_PT0_CONTEXT0_CNTL + i, 0);
+       }
+
+       /* setup the page table */
+       WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_BASE_ADDR,
+                rdev->gart.table_addr);
+       WREG32_MC(RS600_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0);
+
+       /* enable page tables */
+       tmp = RREG32_MC(RS600_MC_PT0_CNTL);
+       WREG32_MC(RS600_MC_PT0_CNTL, (tmp | RS600_ENABLE_PT));
+       tmp = RREG32_MC(RS600_MC_CNTL1);
+       WREG32_MC(RS600_MC_CNTL1, (tmp | RS600_ENABLE_PAGE_TABLES));
+       rs600_gart_tlb_flush(rdev);
+       rdev->gart.ready = true;
+       return 0;
+}
+
+void rs600_gart_disable(struct radeon_device *rdev)
+{
+       uint32_t tmp;
+
+       /* FIXME: disable out of gart access */
+       WREG32_MC(RS600_MC_PT0_CNTL, 0);
+       tmp = RREG32_MC(RS600_MC_CNTL1);
+       tmp &= ~RS600_ENABLE_PAGE_TABLES;
+       WREG32_MC(RS600_MC_CNTL1, tmp);
+       radeon_object_kunmap(rdev->gart.table.vram.robj);
+       radeon_object_unpin(rdev->gart.table.vram.robj);
+}
+
+#define R600_PTE_VALID     (1 << 0)
+#define R600_PTE_SYSTEM    (1 << 1)
+#define R600_PTE_SNOOPED   (1 << 2)
+#define R600_PTE_READABLE  (1 << 5)
+#define R600_PTE_WRITEABLE (1 << 6)
+
+int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
+{
+       void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
+
+       if (i < 0 || i >= rdev->gart.num_gpu_pages) {
+               return -EINVAL;
+       }
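+       /* 4KB-align the address and mark the entry valid, system, snooped, readable and writeable. */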
+       addr = addr & 0xFFFFFFFFFFFFF000ULL;
+       addr |= R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED;
+       addr |= R600_PTE_READABLE | R600_PTE_WRITEABLE;
+       writeq(addr, ((void __iomem *)ptr) + (i * 8));
+       return 0;
+}
+
+
+/*
+ * MC.
+ */
+void rs600_mc_disable_clients(struct radeon_device *rdev)
+{
+       unsigned tmp;
+
+       if (r100_gui_wait_for_idle(rdev)) {
+               printk(KERN_WARNING "Failed to wait GUI idle while "
+                      "programming pipes. Bad things might happen.\n");
+       }
+
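+       /* Disable the VGA engines and both CRTCs. */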
+       tmp = RREG32(AVIVO_D1VGA_CONTROL);
+       WREG32(AVIVO_D1VGA_CONTROL, tmp & ~AVIVO_DVGA_CONTROL_MODE_ENABLE);
+       tmp = RREG32(AVIVO_D2VGA_CONTROL);
+       WREG32(AVIVO_D2VGA_CONTROL, tmp & ~AVIVO_DVGA_CONTROL_MODE_ENABLE);
+
+       tmp = RREG32(AVIVO_D1CRTC_CONTROL);
+       WREG32(AVIVO_D1CRTC_CONTROL, tmp & ~AVIVO_CRTC_EN);
+       tmp = RREG32(AVIVO_D2CRTC_CONTROL);
+       WREG32(AVIVO_D2CRTC_CONTROL, tmp & ~AVIVO_CRTC_EN);
+
+       /* make sure all previous writes got through */
+       tmp = RREG32(AVIVO_D2CRTC_CONTROL);
+
+       mdelay(1);
+}
+
+int rs600_mc_init(struct radeon_device *rdev)
+{
+       uint32_t tmp;
+       int r;
+
+       if (r100_debugfs_rbbm_init(rdev)) {
+               DRM_ERROR("Failed to register debugfs file for RBBM!\n");
+       }
+
+       rs600_gpu_init(rdev);
+       rs600_gart_disable(rdev);
+
+       /* Setup GPU memory space */
+       rdev->mc.vram_location = 0xFFFFFFFFUL;
+       rdev->mc.gtt_location = 0xFFFFFFFFUL;
+       r = radeon_mc_setup(rdev);
+       if (r) {
+               return r;
+       }
+
+       /* Program GPU memory space */
+       /* Enable bus master */
+       tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
+       WREG32(RADEON_BUS_CNTL, tmp);
+       /* FIXME: What does AGP mean for such a chipset? */
+       WREG32_MC(RS600_MC_AGP_LOCATION, 0x0FFFFFFF);
+       /* FIXME: are these AGP regs in the indirect MC range? */
+       WREG32_MC(RS600_MC_AGP_BASE, 0);
+       WREG32_MC(RS600_MC_AGP_BASE_2, 0);
+       rs600_mc_disable_clients(rdev);
+       if (rs600_mc_wait_for_idle(rdev)) {
+               printk(KERN_WARNING "Failed to wait MC idle while "
+                      "programming pipes. Bad things might happen.\n");
+       }
+       tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1;
+       tmp = REG_SET(RS600_MC_FB_TOP, tmp >> 16);
+       tmp |= REG_SET(RS600_MC_FB_START, rdev->mc.vram_location >> 16);
+       WREG32_MC(RS600_MC_FB_LOCATION, tmp);
+       WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16);
+       return 0;
+}
+
+void rs600_mc_fini(struct radeon_device *rdev)
+{
+       rs600_gart_disable(rdev);
+       radeon_gart_table_vram_free(rdev);
+       radeon_gart_fini(rdev);
+}
+
+
+/*
+ * Global GPU functions
+ */
+void rs600_disable_vga(struct radeon_device *rdev)
+{
+       unsigned tmp;
+
+       WREG32(0x330, 0);
+       WREG32(0x338, 0);
+       tmp = RREG32(0x300);
+       tmp &= ~(3 << 16);
+       WREG32(0x300, tmp);
+       WREG32(0x308, (1 << 8));
+       WREG32(0x310, rdev->mc.vram_location);
+       WREG32(0x594, 0);
+}
+
+int rs600_mc_wait_for_idle(struct radeon_device *rdev)
+{
+       unsigned i;
+       uint32_t tmp;
+
+       for (i = 0; i < rdev->usec_timeout; i++) {
+               /* read MC_STATUS */
+               tmp = RREG32_MC(RS600_MC_STATUS);
+               if (tmp & RS600_MC_STATUS_IDLE) {
+                       return 0;
+               }
+               DRM_UDELAY(1);
+       }
+       return -1;
+}
+
+void rs600_errata(struct radeon_device *rdev)
+{
+       rdev->pll_errata = 0;
+}
+
+void rs600_gpu_init(struct radeon_device *rdev)
+{
+       /* FIXME: HDP in the same place on rs600? */
+       r100_hdp_reset(rdev);
+       rs600_disable_vga(rdev);
+       /* FIXME: is this correct? */
+       r420_pipes_init(rdev);
+       if (rs600_mc_wait_for_idle(rdev)) {
+               printk(KERN_WARNING "Failed to wait MC idle while "
+                      "programming pipes. Bad things might happen.\n");
+       }
+}
+
+
+/*
+ * VRAM info.
+ */
+void rs600_vram_info(struct radeon_device *rdev)
+{
+       /* FIXME: to do, or are these values sane? */
+       rdev->mc.vram_is_ddr = true;
+       rdev->mc.vram_width = 128;
+}
+
+
+/*
+ * Indirect registers accessor
+ */
+uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+       uint32_t r;
+
+       WREG32(RS600_MC_INDEX,
+              ((reg & RS600_MC_ADDR_MASK) | RS600_MC_IND_CITF_ARB0));
+       r = RREG32(RS600_MC_DATA);
+       return r;
+}
+
+void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+       WREG32(RS600_MC_INDEX,
+               RS600_MC_IND_WR_EN | RS600_MC_IND_CITF_ARB0 |
+               ((reg) & RS600_MC_ADDR_MASK));
+       WREG32(RS600_MC_DATA, v);
+}
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
new file mode 100644 (file)
index 0000000..79ba850
--- /dev/null
@@ -0,0 +1,181 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include "drmP.h"
+#include "radeon_reg.h"
+#include "radeon.h"
+
+/* rs690,rs740 depend on: */
+void r100_hdp_reset(struct radeon_device *rdev);
+int r300_mc_wait_for_idle(struct radeon_device *rdev);
+void r420_pipes_init(struct radeon_device *rdev);
+void rs400_gart_disable(struct radeon_device *rdev);
+int rs400_gart_enable(struct radeon_device *rdev);
+void rs400_gart_adjust_size(struct radeon_device *rdev);
+void rs600_mc_disable_clients(struct radeon_device *rdev);
+void rs600_disable_vga(struct radeon_device *rdev);
+
+/* This file gathers functions specific to:
+ * rs690,rs740
+ *
+ * Some of these functions might be used by newer ASICs.
+ */
+void rs690_gpu_init(struct radeon_device *rdev);
+int rs690_mc_wait_for_idle(struct radeon_device *rdev);
+
+
+/*
+ * MC functions.
+ */
+int rs690_mc_init(struct radeon_device *rdev)
+{
+       uint32_t tmp;
+       int r;
+
+       if (r100_debugfs_rbbm_init(rdev)) {
+               DRM_ERROR("Failed to register debugfs file for RBBM!\n");
+       }
+
+       rs690_gpu_init(rdev);
+       rs400_gart_disable(rdev);
+
+       /* Setup GPU memory space */
+       rdev->mc.gtt_location = rdev->mc.vram_size;
+       rdev->mc.gtt_location += (rdev->mc.gtt_size - 1);
+       rdev->mc.gtt_location &= ~(rdev->mc.gtt_size - 1);
+       rdev->mc.vram_location = 0xFFFFFFFFUL;
+       r = radeon_mc_setup(rdev);
+       if (r) {
+               return r;
+       }
+
+       /* Program GPU memory space */
+       rs600_mc_disable_clients(rdev);
+       if (rs690_mc_wait_for_idle(rdev)) {
+               printk(KERN_WARNING "Failed to wait MC idle while "
+                      "programming pipes. Bad things might happen.\n");
+       }
+       tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1;
+       tmp = REG_SET(RS690_MC_FB_TOP, tmp >> 16);
+       tmp |= REG_SET(RS690_MC_FB_START, rdev->mc.vram_location >> 16);
+       WREG32_MC(RS690_MCCFG_FB_LOCATION, tmp);
+       /* FIXME: Does this reg exist on RS480/RS740? */
+       WREG32(0x310, rdev->mc.vram_location);
+       WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16);
+       return 0;
+}
+
+void rs690_mc_fini(struct radeon_device *rdev)
+{
+       rs400_gart_disable(rdev);
+       radeon_gart_table_ram_free(rdev);
+       radeon_gart_fini(rdev);
+}
+
+
+/*
+ * Global GPU functions
+ */
+int rs690_mc_wait_for_idle(struct radeon_device *rdev)
+{
+       unsigned i;
+       uint32_t tmp;
+
+       for (i = 0; i < rdev->usec_timeout; i++) {
+               /* read MC_STATUS */
+               tmp = RREG32_MC(RS690_MC_STATUS);
+               if (tmp & RS690_MC_STATUS_IDLE) {
+                       return 0;
+               }
+               DRM_UDELAY(1);
+       }
+       return -1;
+}
+
+void rs690_errata(struct radeon_device *rdev)
+{
+       rdev->pll_errata = 0;
+}
+
+void rs690_gpu_init(struct radeon_device *rdev)
+{
+       /* FIXME: HDP in the same place on rs690? */
+       r100_hdp_reset(rdev);
+       rs600_disable_vga(rdev);
+       /* FIXME: is this correct? */
+       r420_pipes_init(rdev);
+       if (rs690_mc_wait_for_idle(rdev)) {
+               printk(KERN_WARNING "Failed to wait MC idle while "
+                      "programming pipes. Bad things might happen.\n");
+       }
+}
+
+
+/*
+ * VRAM info.
+ */
+void rs690_vram_info(struct radeon_device *rdev)
+{
+       uint32_t tmp;
+
+       rs400_gart_adjust_size(rdev);
+       /* DDR for all cards after R300 & IGP */
+       rdev->mc.vram_is_ddr = true;
+       /* FIXME: is this correct for RS690/RS740? */
+       tmp = RREG32(RADEON_MEM_CNTL);
+       if (tmp & R300_MEM_NUM_CHANNELS_MASK) {
+               rdev->mc.vram_width = 128;
+       } else {
+               rdev->mc.vram_width = 64;
+       }
+       rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
+
+       rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
+       rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+}
+
+
+/*
+ * Indirect registers accessor
+ */
+uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+       uint32_t r;
+
+       WREG32(RS690_MC_INDEX, (reg & RS690_MC_INDEX_MASK));
+       r = RREG32(RS690_MC_DATA);
+       WREG32(RS690_MC_INDEX, RS690_MC_INDEX_MASK);
+       return r;
+}
+
+void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+       WREG32(RS690_MC_INDEX,
+              RS690_MC_INDEX_WR_EN | ((reg) & RS690_MC_INDEX_MASK));
+       WREG32(RS690_MC_DATA, v);
+       WREG32(RS690_MC_INDEX, RS690_MC_INDEX_WR_ACK);
+}
diff --git a/drivers/gpu/drm/radeon/rs780.c b/drivers/gpu/drm/radeon/rs780.c
new file mode 100644 (file)
index 0000000..0affcff
--- /dev/null
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include "drmP.h"
+#include "radeon_reg.h"
+#include "radeon.h"
+
+/* rs780 depends on: */
+void rs600_mc_disable_clients(struct radeon_device *rdev);
+
+/* This file gathers functions specific to:
+ * rs780
+ *
+ * Some of these functions might be used by newer ASICs.
+ */
+int rs780_mc_wait_for_idle(struct radeon_device *rdev);
+void rs780_gpu_init(struct radeon_device *rdev);
+
+
+/*
+ * MC
+ */
+int rs780_mc_init(struct radeon_device *rdev)
+{
+       rs780_gpu_init(rdev);
+       /* FIXME: implement */
+
+       rs600_mc_disable_clients(rdev);
+       if (rs780_mc_wait_for_idle(rdev)) {
+               printk(KERN_WARNING "Failed to wait MC idle while "
+                      "programming pipes. Bad things might happen.\n");
+       }
+       return 0;
+}
+
+void rs780_mc_fini(struct radeon_device *rdev)
+{
+       /* FIXME: implement */
+}
+
+
+/*
+ * Global GPU functions
+ */
+void rs780_errata(struct radeon_device *rdev)
+{
+       rdev->pll_errata = 0;
+}
+
+int rs780_mc_wait_for_idle(struct radeon_device *rdev)
+{
+       /* FIXME: implement */
+       return 0;
+}
+
+void rs780_gpu_init(struct radeon_device *rdev)
+{
+       /* FIXME: implement */
+}
+
+
+/*
+ * VRAM info
+ */
+void rs780_vram_get_type(struct radeon_device *rdev)
+{
+       /* FIXME: implement */
+}
+
+void rs780_vram_info(struct radeon_device *rdev)
+{
+       rs780_vram_get_type(rdev);
+
+       /* FIXME: implement */
+       /* Could the aperture size report 0? */
+       rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
+       rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+}
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
new file mode 100644 (file)
index 0000000..7eab95d
--- /dev/null
@@ -0,0 +1,504 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <linux/seq_file.h>
+#include "drmP.h"
+#include "radeon_reg.h"
+#include "radeon.h"
+
+/* rv515 depends on: */
+void r100_hdp_reset(struct radeon_device *rdev);
+int r100_cp_reset(struct radeon_device *rdev);
+int r100_rb2d_reset(struct radeon_device *rdev);
+int r100_gui_wait_for_idle(struct radeon_device *rdev);
+int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
+int rv370_pcie_gart_enable(struct radeon_device *rdev);
+void rv370_pcie_gart_disable(struct radeon_device *rdev);
+void r420_pipes_init(struct radeon_device *rdev);
+void rs600_mc_disable_clients(struct radeon_device *rdev);
+void rs600_disable_vga(struct radeon_device *rdev);
+
+/* This file gathers functions specific to:
+ * rv515
+ *
+ * Some of these functions might be used by newer ASICs.
+ */
+int rv515_debugfs_pipes_info_init(struct radeon_device *rdev);
+int rv515_debugfs_ga_info_init(struct radeon_device *rdev);
+void rv515_gpu_init(struct radeon_device *rdev);
+int rv515_mc_wait_for_idle(struct radeon_device *rdev);
+
+
+/*
+ * MC
+ */
+int rv515_mc_init(struct radeon_device *rdev)
+{
+       uint32_t tmp;
+       int r;
+
+       if (r100_debugfs_rbbm_init(rdev)) {
+               DRM_ERROR("Failed to register debugfs file for RBBM!\n");
+       }
+       if (rv515_debugfs_pipes_info_init(rdev)) {
+               DRM_ERROR("Failed to register debugfs file for pipes!\n");
+       }
+       if (rv515_debugfs_ga_info_init(rdev)) {
+               DRM_ERROR("Failed to register debugfs file for GA info!\n");
+       }
+
+       rv515_gpu_init(rdev);
+       rv370_pcie_gart_disable(rdev);
+
+       /* Setup GPU memory space */
+       rdev->mc.vram_location = 0xFFFFFFFFUL;
+       rdev->mc.gtt_location = 0xFFFFFFFFUL;
+       if (rdev->flags & RADEON_IS_AGP) {
+               r = radeon_agp_init(rdev);
+               if (r) {
+                       printk(KERN_WARNING "[drm] Disabling AGP\n");
+                       rdev->flags &= ~RADEON_IS_AGP;
+                       rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
+               } else {
+                       rdev->mc.gtt_location = rdev->mc.agp_base;
+               }
+       }
+       r = radeon_mc_setup(rdev);
+       if (r) {
+               return r;
+       }
+
+       /* Program GPU memory space */
+       rs600_mc_disable_clients(rdev);
+       if (rv515_mc_wait_for_idle(rdev)) {
+               printk(KERN_WARNING "Failed to wait MC idle while "
+                      "programming pipes. Bad things might happen.\n");
+       }
+       /* Write VRAM size in case we are limiting it */
+       WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
+       tmp = REG_SET(RV515_MC_FB_START, rdev->mc.vram_location >> 16);
+       WREG32(0x134, tmp);
+       tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1;
+       tmp = REG_SET(RV515_MC_FB_TOP, tmp >> 16);
+       tmp |= REG_SET(RV515_MC_FB_START, rdev->mc.vram_location >> 16);
+       WREG32_MC(RV515_MC_FB_LOCATION, tmp);
+       WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16);
+       WREG32(0x310, rdev->mc.vram_location);
+       if (rdev->flags & RADEON_IS_AGP) {
+               tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
+               tmp = REG_SET(RV515_MC_AGP_TOP, tmp >> 16);
+               tmp |= REG_SET(RV515_MC_AGP_START, rdev->mc.gtt_location >> 16);
+               WREG32_MC(RV515_MC_AGP_LOCATION, tmp);
+               WREG32_MC(RV515_MC_AGP_BASE, rdev->mc.agp_base);
+               WREG32_MC(RV515_MC_AGP_BASE_2, 0);
+       } else {
+               WREG32_MC(RV515_MC_AGP_LOCATION, 0x0FFFFFFF);
+               WREG32_MC(RV515_MC_AGP_BASE, 0);
+               WREG32_MC(RV515_MC_AGP_BASE_2, 0);
+       }
+       return 0;
+}
+
+void rv515_mc_fini(struct radeon_device *rdev)
+{
+       rv370_pcie_gart_disable(rdev);
+       radeon_gart_table_vram_free(rdev);
+       radeon_gart_fini(rdev);
+}
+
+
+/*
+ * Global GPU functions
+ */
+void rv515_ring_start(struct radeon_device *rdev)
+{
+       unsigned gb_tile_config;
+       int r;
+
+       /* Sub-pixel 1/12 so we can have 4K rendering according to the docs */
+       gb_tile_config = R300_ENABLE_TILING | R300_TILE_SIZE_16;
+       switch (rdev->num_gb_pipes) {
+       case 2:
+               gb_tile_config |= R300_PIPE_COUNT_R300;
+               break;
+       case 3:
+               gb_tile_config |= R300_PIPE_COUNT_R420_3P;
+               break;
+       case 4:
+               gb_tile_config |= R300_PIPE_COUNT_R420;
+               break;
+       case 1:
+       default:
+               gb_tile_config |= R300_PIPE_COUNT_RV350;
+               break;
+       }
+
+       r = radeon_ring_lock(rdev, 64);
+       if (r) {
+               return;
+       }
+       radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
+       radeon_ring_write(rdev,
+                         RADEON_ISYNC_ANY2D_IDLE3D |
+                         RADEON_ISYNC_ANY3D_IDLE2D |
+                         RADEON_ISYNC_WAIT_IDLEGUI |
+                         RADEON_ISYNC_CPSCRATCH_IDLEGUI);
+       radeon_ring_write(rdev, PACKET0(R300_GB_TILE_CONFIG, 0));
+       radeon_ring_write(rdev, gb_tile_config);
+       radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
+       radeon_ring_write(rdev,
+                         RADEON_WAIT_2D_IDLECLEAN |
+                         RADEON_WAIT_3D_IDLECLEAN);
+       radeon_ring_write(rdev, PACKET0(0x170C, 0));
+       radeon_ring_write(rdev, 1 << 31);
+       radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0));
+       radeon_ring_write(rdev, 0);
+       radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0));
+       radeon_ring_write(rdev, 0);
+       radeon_ring_write(rdev, PACKET0(0x42C8, 0));
+       radeon_ring_write(rdev, (1 << rdev->num_gb_pipes) - 1);
+       radeon_ring_write(rdev, PACKET0(R500_VAP_INDEX_OFFSET, 0));
+       radeon_ring_write(rdev, 0);
+       radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+       radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
+       radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
+       radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
+       radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
+       radeon_ring_write(rdev,
+                         RADEON_WAIT_2D_IDLECLEAN |
+                         RADEON_WAIT_3D_IDLECLEAN);
+       radeon_ring_write(rdev, PACKET0(R300_GB_AA_CONFIG, 0));
+       radeon_ring_write(rdev, 0);
+       radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+       radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
+       radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
+       radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
+       radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS0, 0));
+       radeon_ring_write(rdev,
+                         ((6 << R300_MS_X0_SHIFT) |
+                          (6 << R300_MS_Y0_SHIFT) |
+                          (6 << R300_MS_X1_SHIFT) |
+                          (6 << R300_MS_Y1_SHIFT) |
+                          (6 << R300_MS_X2_SHIFT) |
+                          (6 << R300_MS_Y2_SHIFT) |
+                          (6 << R300_MSBD0_Y_SHIFT) |
+                          (6 << R300_MSBD0_X_SHIFT)));
+       radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS1, 0));
+       radeon_ring_write(rdev,
+                         ((6 << R300_MS_X3_SHIFT) |
+                          (6 << R300_MS_Y3_SHIFT) |
+                          (6 << R300_MS_X4_SHIFT) |
+                          (6 << R300_MS_Y4_SHIFT) |
+                          (6 << R300_MS_X5_SHIFT) |
+                          (6 << R300_MS_Y5_SHIFT) |
+                          (6 << R300_MSBD1_SHIFT)));
+       radeon_ring_write(rdev, PACKET0(R300_GA_ENHANCE, 0));
+       radeon_ring_write(rdev, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
+       radeon_ring_write(rdev, PACKET0(R300_GA_POLY_MODE, 0));
+       radeon_ring_write(rdev,
+                         R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE);
+       radeon_ring_write(rdev, PACKET0(R300_GA_ROUND_MODE, 0));
+       radeon_ring_write(rdev,
+                         R300_GEOMETRY_ROUND_NEAREST |
+                         R300_COLOR_ROUND_NEAREST);
+       radeon_ring_unlock_commit(rdev);
+}
+
+void rv515_errata(struct radeon_device *rdev)
+{
+       rdev->pll_errata = 0;
+}
+
+int rv515_mc_wait_for_idle(struct radeon_device *rdev)
+{
+       unsigned i;
+       uint32_t tmp;
+
+       for (i = 0; i < rdev->usec_timeout; i++) {
+               /* read MC_STATUS */
+               tmp = RREG32_MC(RV515_MC_STATUS);
+               if (tmp & RV515_MC_STATUS_IDLE) {
+                       return 0;
+               }
+               DRM_UDELAY(1);
+       }
+       return -1;
+}
+
+void rv515_gpu_init(struct radeon_device *rdev)
+{
+       unsigned pipe_select_current, gb_pipe_select, tmp;
+
+       r100_hdp_reset(rdev);
+       r100_rb2d_reset(rdev);
+
+       if (r100_gui_wait_for_idle(rdev)) {
+               printk(KERN_WARNING "Failed to wait GUI idle while "
+                      "resetting GPU. Bad things might happen.\n");
+       }
+
+       rs600_disable_vga(rdev);
+
+       r420_pipes_init(rdev);
+       gb_pipe_select = RREG32(0x402C);
+       tmp = RREG32(0x170C);
+       pipe_select_current = (tmp >> 2) & 3;
+       tmp = (1 << pipe_select_current) |
+             (((gb_pipe_select >> 8) & 0xF) << 4);
+       WREG32_PLL(0x000D, tmp);
+       if (r100_gui_wait_for_idle(rdev)) {
+               printk(KERN_WARNING "Failed to wait GUI idle while "
+                      "resetting GPU. Bad things might happen.\n");
+       }
+       if (rv515_mc_wait_for_idle(rdev)) {
+               printk(KERN_WARNING "Failed to wait MC idle while "
+                      "programming pipes. Bad things might happen.\n");
+       }
+}
+
+int rv515_ga_reset(struct radeon_device *rdev)
+{
+       uint32_t tmp;
+       bool reinit_cp;
+       int i;
+
+       reinit_cp = rdev->cp.ready;
+       rdev->cp.ready = false;
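+       /* Stop the CP queues and soft reset, retrying until the VAP and CP report idle. */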
+       for (i = 0; i < rdev->usec_timeout; i++) {
+               WREG32(RADEON_CP_CSQ_MODE, 0);
+               WREG32(RADEON_CP_CSQ_CNTL, 0);
+               WREG32(RADEON_RBBM_SOFT_RESET, 0x32005);
+               (void)RREG32(RADEON_RBBM_SOFT_RESET);
+               udelay(200);
+               WREG32(RADEON_RBBM_SOFT_RESET, 0);
+               /* Wait to prevent race in RBBM_STATUS */
+               mdelay(1);
+               tmp = RREG32(RADEON_RBBM_STATUS);
+               if (tmp & ((1 << 20) | (1 << 26))) {
+                       DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)\n", tmp);
+                       /* GA still busy, soft reset it */
+                       WREG32(0x429C, 0x200);
+                       WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0);
+                       WREG32(0x43E0, 0);
+                       WREG32(0x43E4, 0);
+                       WREG32(0x24AC, 0);
+               }
+               /* Wait to prevent race in RBBM_STATUS */
+               mdelay(1);
+               tmp = RREG32(RADEON_RBBM_STATUS);
+               if (!(tmp & ((1 << 20) | (1 << 26)))) {
+                       break;
+               }
+       }
+       for (i = 0; i < rdev->usec_timeout; i++) {
+               tmp = RREG32(RADEON_RBBM_STATUS);
+               if (!(tmp & ((1 << 20) | (1 << 26)))) {
+                       DRM_INFO("GA reset succeeded (RBBM_STATUS=0x%08X)\n",
+                                tmp);
+                       DRM_INFO("GA_IDLE=0x%08X\n", RREG32(0x425C));
+                       DRM_INFO("RB3D_RESET_STATUS=0x%08X\n", RREG32(0x46f0));
+                       DRM_INFO("ISYNC_CNTL=0x%08X\n", RREG32(0x1724));
+                       if (reinit_cp) {
+                               return r100_cp_init(rdev, rdev->cp.ring_size);
+                       }
+                       return 0;
+               }
+               DRM_UDELAY(1);
+       }
+       tmp = RREG32(RADEON_RBBM_STATUS);
+       DRM_ERROR("Failed to reset GA! (RBBM_STATUS=0x%08X)\n", tmp);
+       return -1;
+}
+
+int rv515_gpu_reset(struct radeon_device *rdev)
+{
+       uint32_t status;
+
+       /* reset order likely matters */
+       status = RREG32(RADEON_RBBM_STATUS);
+       /* reset HDP */
+       r100_hdp_reset(rdev);
+       /* reset rb2d */
+       if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
+               r100_rb2d_reset(rdev);
+       }
+       /* reset GA */
+       if (status & ((1 << 20) | (1 << 26))) {
+               rv515_ga_reset(rdev);
+       }
+       /* reset CP */
+       status = RREG32(RADEON_RBBM_STATUS);
+       if (status & (1 << 16)) {
+               r100_cp_reset(rdev);
+       }
+       /* Check if GPU is idle */
+       status = RREG32(RADEON_RBBM_STATUS);
+       if (status & (1 << 31)) {
+               DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
+               return -1;
+       }
+       DRM_INFO("GPU reset succeeded (RBBM_STATUS=0x%08X)\n", status);
+       return 0;
+}
+
+
+/*
+ * VRAM info
+ */
+static void rv515_vram_get_type(struct radeon_device *rdev)
+{
+       uint32_t tmp;
+
+       rdev->mc.vram_width = 128;
+       rdev->mc.vram_is_ddr = true;
+       tmp = RREG32_MC(RV515_MC_CNTL);
+       tmp &= RV515_MEM_NUM_CHANNELS_MASK;
+       switch (tmp) {
+       case 0:
+               rdev->mc.vram_width = 64;
+               break;
+       case 1:
+               rdev->mc.vram_width = 128;
+               break;
+       default:
+               rdev->mc.vram_width = 128;
+               break;
+       }
+}
+
+void rv515_vram_info(struct radeon_device *rdev)
+{
+       rv515_vram_get_type(rdev);
+       rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
+
+       rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
+       rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+}
+
+
+/*
+ * Indirect registers accessor
+ */
+uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+       uint32_t r;
+
+       WREG32(R520_MC_IND_INDEX, 0x7f0000 | (reg & 0xffff));
+       r = RREG32(R520_MC_IND_DATA);
+       WREG32(R520_MC_IND_INDEX, 0);
+       return r;
+}
+
+void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+       WREG32(R520_MC_IND_INDEX, 0xff0000 | ((reg) & 0xffff));
+       WREG32(R520_MC_IND_DATA, (v));
+       WREG32(R520_MC_IND_INDEX, 0);
+}
+
+uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+       uint32_t r;
+
+       WREG32(RADEON_PCIE_INDEX, ((reg) & 0x7ff));
+       (void)RREG32(RADEON_PCIE_INDEX);
+       r = RREG32(RADEON_PCIE_DATA);
+       return r;
+}
+
+void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+       WREG32(RADEON_PCIE_INDEX, ((reg) & 0x7ff));
+       (void)RREG32(RADEON_PCIE_INDEX);
+       WREG32(RADEON_PCIE_DATA, (v));
+       (void)RREG32(RADEON_PCIE_DATA);
+}
+
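+/* Illustrative sketch (not part of the original patch): a read-modify-write
+ * through the indirect MC accessors defined above.  The helper name and the
+ * clr/set convention are assumptions for illustration only.
+ */
+static void rv515_mc_rmw_sketch(struct radeon_device *rdev, uint32_t reg,
+                               uint32_t clr, uint32_t set)
+{
+       uint32_t tmp;
+
+       tmp = rv515_mc_rreg(rdev, reg); /* select the index, latch the data */
+       tmp &= ~clr;
+       tmp |= set;
+       rv515_mc_wreg(rdev, reg, tmp);  /* write back through the index */
+}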
+
+/*
+ * Debugfs info
+ */
+#if defined(CONFIG_DEBUG_FS)
+static int rv515_debugfs_pipes_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       uint32_t tmp;
+
+       tmp = RREG32(R400_GB_PIPE_SELECT);
+       seq_printf(m, "GB_PIPE_SELECT 0x%08x\n", tmp);
+       tmp = RREG32(R500_SU_REG_DEST);
+       seq_printf(m, "SU_REG_DEST 0x%08x\n", tmp);
+       tmp = RREG32(R300_GB_TILE_CONFIG);
+       seq_printf(m, "GB_TILE_CONFIG 0x%08x\n", tmp);
+       tmp = RREG32(R300_DST_PIPE_CONFIG);
+       seq_printf(m, "DST_PIPE_CONFIG 0x%08x\n", tmp);
+       return 0;
+}
+
+static int rv515_debugfs_ga_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       uint32_t tmp;
+
+       tmp = RREG32(0x2140);
+       seq_printf(m, "VAP_CNTL_STATUS 0x%08x\n", tmp);
+       radeon_gpu_reset(rdev);
+       tmp = RREG32(0x425C);
+       seq_printf(m, "GA_IDLE 0x%08x\n", tmp);
+       return 0;
+}
+
+static struct drm_info_list rv515_pipes_info_list[] = {
+       {"rv515_pipes_info", rv515_debugfs_pipes_info, 0, NULL},
+};
+
+static struct drm_info_list rv515_ga_info_list[] = {
+       {"rv515_ga_info", rv515_debugfs_ga_info, 0, NULL},
+};
+#endif
+
+int rv515_debugfs_pipes_info_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+       return radeon_debugfs_add_files(rdev, rv515_pipes_info_list, 1);
+#else
+       return 0;
+#endif
+}
+
+int rv515_debugfs_ga_info_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+       return radeon_debugfs_add_files(rdev, rv515_ga_info_list, 1);
+#else
+       return 0;
+#endif
+}
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
new file mode 100644 (file)
index 0000000..da50cc5
--- /dev/null
@@ -0,0 +1,124 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include "drmP.h"
+#include "radeon_reg.h"
+#include "radeon.h"
+
+/* rv770,rv730,rv710 depend on: */
+void rs600_mc_disable_clients(struct radeon_device *rdev);
+
+/* This file gathers functions specific to:
+ * rv770,rv730,rv710
+ *
+ * Some of these functions might be used by newer ASICs.
+ */
+int rv770_mc_wait_for_idle(struct radeon_device *rdev);
+void rv770_gpu_init(struct radeon_device *rdev);
+
+
+/*
+ * MC
+ */
+int rv770_mc_init(struct radeon_device *rdev)
+{
+       uint32_t tmp;
+
+       rv770_gpu_init(rdev);
+
+       /* Set up the GART before changing its location so we can ask it to
+        * discard unmapped MC requests.
+        */
+       /* FIXME: disable out of gart access */
+       tmp = rdev->mc.gtt_location / 4096;
+       tmp = REG_SET(R700_LOGICAL_PAGE_NUMBER, tmp);
+       WREG32(R700_MC_VM_SYSTEM_APERTURE_LOW_ADDR, tmp);
+       tmp = (rdev->mc.gtt_location + rdev->mc.gtt_size) / 4096;
+       tmp = REG_SET(R700_LOGICAL_PAGE_NUMBER, tmp);
+       WREG32(R700_MC_VM_SYSTEM_APERTURE_HIGH_ADDR, tmp);
+
+       rs600_mc_disable_clients(rdev);
+       if (rv770_mc_wait_for_idle(rdev)) {
+               printk(KERN_WARNING "Failed to wait for MC idle while "
+                      "programming pipes. Bad things might happen.\n");
+       }
+
+       tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1;
+       tmp = REG_SET(R700_MC_FB_TOP, tmp >> 24);
+       tmp |= REG_SET(R700_MC_FB_BASE, rdev->mc.vram_location >> 24);
+       WREG32(R700_MC_VM_FB_LOCATION, tmp);
+       tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
+       tmp = REG_SET(R700_MC_AGP_TOP, tmp >> 22);
+       WREG32(R700_MC_VM_AGP_TOP, tmp);
+       tmp = REG_SET(R700_MC_AGP_BOT, rdev->mc.gtt_location >> 22);
+       WREG32(R700_MC_VM_AGP_BOT, tmp);
+       return 0;
+}
+
+void rv770_mc_fini(struct radeon_device *rdev)
+{
+       /* FIXME: implement */
+}
+
+
+/*
+ * Global GPU functions
+ */
+void rv770_errata(struct radeon_device *rdev)
+{
+       rdev->pll_errata = 0;
+}
+
+int rv770_mc_wait_for_idle(struct radeon_device *rdev)
+{
+       /* FIXME: implement */
+       return 0;
+}
+
+void rv770_gpu_init(struct radeon_device *rdev)
+{
+       /* FIXME: implement */
+}
+
+
+/*
+ * VRAM info
+ */
+void rv770_vram_get_type(struct radeon_device *rdev)
+{
+       /* FIXME: implement */
+}
+
+void rv770_vram_info(struct radeon_device *rdev)
+{
+       rv770_vram_get_type(rdev);
+
+       /* FIXME: implement */
+       /* Could the aperture size report 0? */
+       rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
+       rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+}
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
new file mode 100644 (file)
index 0000000..b0a9de7
--- /dev/null
@@ -0,0 +1,8 @@
+#
+# Makefile for the TTM GPU memory manager, which provides buffer object
+# and memory management services to the drm device drivers.
+
+ccflags-y := -Iinclude/drm
+ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
+       ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o
+
+obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
new file mode 100644 (file)
index 0000000..e8f6d22
--- /dev/null
@@ -0,0 +1,150 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ *          Keith Packard.
+ */
+
+#include "ttm/ttm_module.h"
+#include "ttm/ttm_bo_driver.h"
+#ifdef TTM_HAS_AGP
+#include "ttm/ttm_placement.h"
+#include <linux/agp_backend.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <asm/agp.h>
+
+struct ttm_agp_backend {
+       struct ttm_backend backend;
+       struct agp_memory *mem;
+       struct agp_bridge_data *bridge;
+};
+
+static int ttm_agp_populate(struct ttm_backend *backend,
+                           unsigned long num_pages, struct page **pages,
+                           struct page *dummy_read_page)
+{
+       struct ttm_agp_backend *agp_be =
+           container_of(backend, struct ttm_agp_backend, backend);
+       struct page **cur_page, **last_page = pages + num_pages;
+       struct agp_memory *mem;
+
+       mem = agp_allocate_memory(agp_be->bridge, num_pages, AGP_USER_MEMORY);
+       if (unlikely(mem == NULL))
+               return -ENOMEM;
+
+       mem->page_count = 0;
+       for (cur_page = pages; cur_page < last_page; ++cur_page) {
+               struct page *page = *cur_page;
+               if (!page)
+                       page = dummy_read_page;
+
+               mem->memory[mem->page_count++] =
+                   phys_to_gart(page_to_phys(page));
+       }
+       agp_be->mem = mem;
+       return 0;
+}
+
+static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
+{
+       struct ttm_agp_backend *agp_be =
+           container_of(backend, struct ttm_agp_backend, backend);
+       struct agp_memory *mem = agp_be->mem;
+       int cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
+       int ret;
+
+       mem->is_flushed = 1;
+       mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY;
+
+       ret = agp_bind_memory(mem, bo_mem->mm_node->start);
+       if (ret)
+               printk(KERN_ERR TTM_PFX "AGP Bind memory failed.\n");
+
+       return ret;
+}
+
+static int ttm_agp_unbind(struct ttm_backend *backend)
+{
+       struct ttm_agp_backend *agp_be =
+           container_of(backend, struct ttm_agp_backend, backend);
+
+       if (agp_be->mem->is_bound)
+               return agp_unbind_memory(agp_be->mem);
+       else
+               return 0;
+}
+
+static void ttm_agp_clear(struct ttm_backend *backend)
+{
+       struct ttm_agp_backend *agp_be =
+           container_of(backend, struct ttm_agp_backend, backend);
+       struct agp_memory *mem = agp_be->mem;
+
+       if (mem) {
+               ttm_agp_unbind(backend);
+               agp_free_memory(mem);
+       }
+       agp_be->mem = NULL;
+}
+
+static void ttm_agp_destroy(struct ttm_backend *backend)
+{
+       struct ttm_agp_backend *agp_be =
+           container_of(backend, struct ttm_agp_backend, backend);
+
+       if (agp_be->mem)
+               ttm_agp_clear(backend);
+       kfree(agp_be);
+}
+
+static struct ttm_backend_func ttm_agp_func = {
+       .populate = ttm_agp_populate,
+       .clear = ttm_agp_clear,
+       .bind = ttm_agp_bind,
+       .unbind = ttm_agp_unbind,
+       .destroy = ttm_agp_destroy,
+};
+
+struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
+                                        struct agp_bridge_data *bridge)
+{
+       struct ttm_agp_backend *agp_be;
+
+       agp_be = kmalloc(sizeof(*agp_be), GFP_KERNEL);
+       if (!agp_be)
+               return NULL;
+
+       agp_be->mem = NULL;
+       agp_be->bridge = bridge;
+       agp_be->backend.func = &ttm_agp_func;
+       agp_be->backend.bdev = bdev;
+       return &agp_be->backend;
+}
+EXPORT_SYMBOL(ttm_agp_backend_init);
+
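+/* Illustrative sketch (not part of the original patch): a driver-side
+ * helper returning the AGP backend for its device.  The helper name is an
+ * assumption; bdev and bridge are supplied by the calling driver.
+ */
+static struct ttm_backend *mydrv_agp_backend(struct ttm_bo_device *bdev,
+                                            struct agp_bridge_data *bridge)
+{
+       /* ttm_agp_backend_init() returns NULL if the backend allocation fails */
+       return ttm_agp_backend_init(bdev, bridge);
+}
+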
+#endif
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
new file mode 100644 (file)
index 0000000..1587aec
--- /dev/null
@@ -0,0 +1,1698 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#include "ttm/ttm_module.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_placement.h"
+#include <linux/jiffies.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/file.h>
+#include <linux/module.h>
+
+#define TTM_ASSERT_LOCKED(param)
+#define TTM_DEBUG(fmt, arg...)
+#define TTM_BO_HASH_ORDER 13
+
+static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
+static void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
+static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
+
+static inline uint32_t ttm_bo_type_flags(unsigned type)
+{
+       return 1 << (type);
+}
+
+static void ttm_bo_release_list(struct kref *list_kref)
+{
+       struct ttm_buffer_object *bo =
+           container_of(list_kref, struct ttm_buffer_object, list_kref);
+       struct ttm_bo_device *bdev = bo->bdev;
+
+       BUG_ON(atomic_read(&bo->list_kref.refcount));
+       BUG_ON(atomic_read(&bo->kref.refcount));
+       BUG_ON(atomic_read(&bo->cpu_writers));
+       BUG_ON(bo->sync_obj != NULL);
+       BUG_ON(bo->mem.mm_node != NULL);
+       BUG_ON(!list_empty(&bo->lru));
+       BUG_ON(!list_empty(&bo->ddestroy));
+
+       if (bo->ttm)
+               ttm_tt_destroy(bo->ttm);
+       if (bo->destroy)
+               bo->destroy(bo);
+       else {
+               ttm_mem_global_free(bdev->mem_glob, bo->acc_size, false);
+               kfree(bo);
+       }
+}
+
+int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
+{
+
+       if (interruptible) {
+               int ret = 0;
+
+               ret = wait_event_interruptible(bo->event_queue,
+                                              atomic_read(&bo->reserved) == 0);
+               if (unlikely(ret != 0))
+                       return -ERESTART;
+       } else {
+               wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
+       }
+       return 0;
+}
+
+static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
+{
+       struct ttm_bo_device *bdev = bo->bdev;
+       struct ttm_mem_type_manager *man;
+
+       BUG_ON(!atomic_read(&bo->reserved));
+
+       if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
+
+               BUG_ON(!list_empty(&bo->lru));
+
+               man = &bdev->man[bo->mem.mem_type];
+               list_add_tail(&bo->lru, &man->lru);
+               kref_get(&bo->list_kref);
+
+               if (bo->ttm != NULL) {
+                       list_add_tail(&bo->swap, &bdev->swap_lru);
+                       kref_get(&bo->list_kref);
+               }
+       }
+}
+
+/**
+ * Call with the lru_lock held.
+ */
+
+static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
+{
+       int put_count = 0;
+
+       if (!list_empty(&bo->swap)) {
+               list_del_init(&bo->swap);
+               ++put_count;
+       }
+       if (!list_empty(&bo->lru)) {
+               list_del_init(&bo->lru);
+               ++put_count;
+       }
+
+       /*
+        * TODO: Add a driver hook to delete from
+        * driver-specific LRU's here.
+        */
+
+       return put_count;
+}
+
+int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
+                         bool interruptible,
+                         bool no_wait, bool use_sequence, uint32_t sequence)
+{
+       struct ttm_bo_device *bdev = bo->bdev;
+       int ret;
+
+       while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
+               if (use_sequence && bo->seq_valid &&
+                       (sequence - bo->val_seq < (1 << 31))) {
+                       return -EAGAIN;
+               }
+
+               if (no_wait)
+                       return -EBUSY;
+
+               spin_unlock(&bdev->lru_lock);
+               ret = ttm_bo_wait_unreserved(bo, interruptible);
+               spin_lock(&bdev->lru_lock);
+
+               if (unlikely(ret))
+                       return ret;
+       }
+
+       if (use_sequence) {
+               bo->val_seq = sequence;
+               bo->seq_valid = true;
+       } else {
+               bo->seq_valid = false;
+       }
+
+       return 0;
+}
+
+static void ttm_bo_ref_bug(struct kref *list_kref)
+{
+       BUG();
+}
+
+int ttm_bo_reserve(struct ttm_buffer_object *bo,
+                  bool interruptible,
+                  bool no_wait, bool use_sequence, uint32_t sequence)
+{
+       struct ttm_bo_device *bdev = bo->bdev;
+       int put_count = 0;
+       int ret;
+
+       spin_lock(&bdev->lru_lock);
+       ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
+                                   sequence);
+       if (likely(ret == 0))
+               put_count = ttm_bo_del_from_lru(bo);
+       spin_unlock(&bdev->lru_lock);
+
+       while (put_count--)
+               kref_put(&bo->list_kref, ttm_bo_ref_bug);
+
+       return ret;
+}
+EXPORT_SYMBOL(ttm_bo_reserve);
+
+void ttm_bo_unreserve(struct ttm_buffer_object *bo)
+{
+       struct ttm_bo_device *bdev = bo->bdev;
+
+       spin_lock(&bdev->lru_lock);
+       ttm_bo_add_to_lru(bo);
+       atomic_set(&bo->reserved, 0);
+       wake_up_all(&bo->event_queue);
+       spin_unlock(&bdev->lru_lock);
+}
+EXPORT_SYMBOL(ttm_bo_unreserve);
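+
+/* Illustrative sketch (not part of the original patch): the intended pairing
+ * of ttm_bo_reserve() and ttm_bo_unreserve() defined above.  The buffer is
+ * reserved interruptibly with no sequence, operated on, then returned to its
+ * LRU lists; an interrupted wait surfaces as -ERESTART.
+ */
+static int mydrv_with_reserved_bo(struct ttm_buffer_object *bo)
+{
+       int ret;
+
+       ret = ttm_bo_reserve(bo, true, false, false, 0);
+       if (unlikely(ret != 0))
+               return ret;
+       /* ... the buffer is off the LRU lists and safe to manipulate ... */
+       ttm_bo_unreserve(bo);
+       return 0;
+}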
+
+/*
+ * Call bo->mutex locked.
+ */
+
+static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
+{
+       struct ttm_bo_device *bdev = bo->bdev;
+       int ret = 0;
+       uint32_t page_flags = 0;
+
+       TTM_ASSERT_LOCKED(&bo->mutex);
+       bo->ttm = NULL;
+
+       switch (bo->type) {
+       case ttm_bo_type_device:
+               if (zero_alloc)
+                       page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
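+               /* fall through */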
+       case ttm_bo_type_kernel:
+               bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
+                                       page_flags, bdev->dummy_read_page);
+               if (unlikely(bo->ttm == NULL))
+                       ret = -ENOMEM;
+               break;
+       case ttm_bo_type_user:
+               bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
+                                       page_flags | TTM_PAGE_FLAG_USER,
+                                       bdev->dummy_read_page);
+               if (unlikely(bo->ttm == NULL)) {
+                       ret = -ENOMEM;
+                       break;
+               }
+
+               ret = ttm_tt_set_user(bo->ttm, current,
+                                     bo->buffer_start, bo->num_pages);
+               if (unlikely(ret != 0))
+                       ttm_tt_destroy(bo->ttm);
+               break;
+       default:
+               printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
+static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
+                                 struct ttm_mem_reg *mem,
+                                 bool evict, bool interruptible, bool no_wait)
+{
+       struct ttm_bo_device *bdev = bo->bdev;
+       bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
+       bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
+       struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
+       struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
+       int ret = 0;
+
+       if (old_is_pci || new_is_pci ||
+           ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0))
+               ttm_bo_unmap_virtual(bo);
+
+       /*
+        * Create and bind a ttm if required.
+        */
+
+       if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) {
+               ret = ttm_bo_add_ttm(bo, false);
+               if (ret)
+                       goto out_err;
+
+               ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
+               if (ret)
+                       return ret;
+
+               if (mem->mem_type != TTM_PL_SYSTEM) {
+                       ret = ttm_tt_bind(bo->ttm, mem);
+                       if (ret)
+                               goto out_err;
+               }
+
+               if (bo->mem.mem_type == TTM_PL_SYSTEM) {
+
+                       struct ttm_mem_reg *old_mem = &bo->mem;
+                       uint32_t save_flags = old_mem->placement;
+
+                       *old_mem = *mem;
+                       mem->mm_node = NULL;
+                       ttm_flag_masked(&save_flags, mem->placement,
+                                       TTM_PL_MASK_MEMTYPE);
+                       goto moved;
+               }
+
+       }
+
+       if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
+           !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
+               ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
+       else if (bdev->driver->move)
+               ret = bdev->driver->move(bo, evict, interruptible,
+                                        no_wait, mem);
+       else
+               ret = ttm_bo_move_memcpy(bo, evict, no_wait, mem);
+
+       if (ret)
+               goto out_err;
+
+moved:
+       if (bo->evicted) {
+               ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
+               if (ret)
+                       printk(KERN_ERR TTM_PFX "Cannot flush read caches\n");
+               bo->evicted = false;
+       }
+
+       if (bo->mem.mm_node) {
+               spin_lock(&bo->lock);
+               bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
+                   bdev->man[bo->mem.mem_type].gpu_offset;
+               bo->cur_placement = bo->mem.placement;
+               spin_unlock(&bo->lock);
+       }
+
+       return 0;
+
+out_err:
+       new_man = &bdev->man[bo->mem.mem_type];
+       if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
+               ttm_tt_unbind(bo->ttm);
+               ttm_tt_destroy(bo->ttm);
+               bo->ttm = NULL;
+       }
+
+       return ret;
+}
+
+/**
+ * If bo idle, remove from delayed- and lru lists, and unref.
+ * If not idle, and already on delayed list, do nothing.
+ * If not idle, and not on delayed list, put on delayed list,
+ *   up the list_kref and schedule a delayed list check.
+ */
+
+static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
+{
+       struct ttm_bo_device *bdev = bo->bdev;
+       struct ttm_bo_driver *driver = bdev->driver;
+       int ret;
+
+       spin_lock(&bo->lock);
+       (void) ttm_bo_wait(bo, false, false, !remove_all);
+
+       if (!bo->sync_obj) {
+               int put_count;
+
+               spin_unlock(&bo->lock);
+
+               spin_lock(&bdev->lru_lock);
+               ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
+               BUG_ON(ret);
+               if (bo->ttm)
+                       ttm_tt_unbind(bo->ttm);
+
+               if (!list_empty(&bo->ddestroy)) {
+                       list_del_init(&bo->ddestroy);
+                       kref_put(&bo->list_kref, ttm_bo_ref_bug);
+               }
+               if (bo->mem.mm_node) {
+                       drm_mm_put_block(bo->mem.mm_node);
+                       bo->mem.mm_node = NULL;
+               }
+               put_count = ttm_bo_del_from_lru(bo);
+               spin_unlock(&bdev->lru_lock);
+
+               atomic_set(&bo->reserved, 0);
+
+               while (put_count--)
+                       kref_put(&bo->list_kref, ttm_bo_release_list);
+
+               return 0;
+       }
+
+       spin_lock(&bdev->lru_lock);
+       if (list_empty(&bo->ddestroy)) {
+               void *sync_obj = bo->sync_obj;
+               void *sync_obj_arg = bo->sync_obj_arg;
+
+               kref_get(&bo->list_kref);
+               list_add_tail(&bo->ddestroy, &bdev->ddestroy);
+               spin_unlock(&bdev->lru_lock);
+               spin_unlock(&bo->lock);
+
+               if (sync_obj)
+                       driver->sync_obj_flush(sync_obj, sync_obj_arg);
+               schedule_delayed_work(&bdev->wq,
+                                     ((HZ / 100) < 1) ? 1 : HZ / 100);
+               ret = 0;
+
+       } else {
+               spin_unlock(&bdev->lru_lock);
+               spin_unlock(&bo->lock);
+               ret = -EBUSY;
+       }
+
+       return ret;
+}
+
+/**
+ * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
+ * encountered buffers.
+ */
+
+static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
+{
+       struct ttm_buffer_object *entry, *nentry;
+       struct list_head *list, *next;
+       int ret;
+
+       spin_lock(&bdev->lru_lock);
+       list_for_each_safe(list, next, &bdev->ddestroy) {
+               entry = list_entry(list, struct ttm_buffer_object, ddestroy);
+               nentry = NULL;
+
+               /*
+                * Protect the next list entry from destruction while we
+                * unlock the lru_lock.
+                */
+
+               if (next != &bdev->ddestroy) {
+                       nentry = list_entry(next, struct ttm_buffer_object,
+                                           ddestroy);
+                       kref_get(&nentry->list_kref);
+               }
+               kref_get(&entry->list_kref);
+
+               spin_unlock(&bdev->lru_lock);
+               ret = ttm_bo_cleanup_refs(entry, remove_all);
+               kref_put(&entry->list_kref, ttm_bo_release_list);
+
+               spin_lock(&bdev->lru_lock);
+               if (nentry) {
+                       bool next_onlist = !list_empty(next);
+                       spin_unlock(&bdev->lru_lock);
+                       kref_put(&nentry->list_kref, ttm_bo_release_list);
+                       spin_lock(&bdev->lru_lock);
+                       /*
+                        * Someone might have raced us and removed the
+                        * next entry from the list. We don't bother restarting
+                        * list traversal.
+                        */
+
+                       if (!next_onlist)
+                               break;
+               }
+               if (ret)
+                       break;
+       }
+       ret = !list_empty(&bdev->ddestroy);
+       spin_unlock(&bdev->lru_lock);
+
+       return ret;
+}
+
+static void ttm_bo_delayed_workqueue(struct work_struct *work)
+{
+       struct ttm_bo_device *bdev =
+           container_of(work, struct ttm_bo_device, wq.work);
+
+       if (ttm_bo_delayed_delete(bdev, false)) {
+               schedule_delayed_work(&bdev->wq,
+                                     ((HZ / 100) < 1) ? 1 : HZ / 100);
+       }
+}
+
+static void ttm_bo_release(struct kref *kref)
+{
+       struct ttm_buffer_object *bo =
+           container_of(kref, struct ttm_buffer_object, kref);
+       struct ttm_bo_device *bdev = bo->bdev;
+
+       if (likely(bo->vm_node != NULL)) {
+               rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
+               drm_mm_put_block(bo->vm_node);
+               bo->vm_node = NULL;
+       }
+       write_unlock(&bdev->vm_lock);
+       ttm_bo_cleanup_refs(bo, false);
+       kref_put(&bo->list_kref, ttm_bo_release_list);
+       write_lock(&bdev->vm_lock);
+}
+
+void ttm_bo_unref(struct ttm_buffer_object **p_bo)
+{
+       struct ttm_buffer_object *bo = *p_bo;
+       struct ttm_bo_device *bdev = bo->bdev;
+
+       *p_bo = NULL;
+       write_lock(&bdev->vm_lock);
+       kref_put(&bo->kref, ttm_bo_release);
+       write_unlock(&bdev->vm_lock);
+}
+EXPORT_SYMBOL(ttm_bo_unref);
+
+static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
+                       bool interruptible, bool no_wait)
+{
+       int ret = 0;
+       struct ttm_bo_device *bdev = bo->bdev;
+       struct ttm_mem_reg evict_mem;
+       uint32_t proposed_placement;
+
+       if (bo->mem.mem_type != mem_type)
+               goto out;
+
+       spin_lock(&bo->lock);
+       ret = ttm_bo_wait(bo, false, interruptible, no_wait);
+       spin_unlock(&bo->lock);
+
+       if (ret && ret != -ERESTART) {
+               printk(KERN_ERR TTM_PFX "Failed to expire sync object before "
+                      "buffer eviction.\n");
+               goto out;
+       }
+
+       BUG_ON(!atomic_read(&bo->reserved));
+
+       evict_mem = bo->mem;
+       evict_mem.mm_node = NULL;
+
+       proposed_placement = bdev->driver->evict_flags(bo);
+
+       ret = ttm_bo_mem_space(bo, proposed_placement,
+                              &evict_mem, interruptible, no_wait);
+       if (unlikely(ret != 0 && ret != -ERESTART))
+               ret = ttm_bo_mem_space(bo, TTM_PL_FLAG_SYSTEM,
+                                      &evict_mem, interruptible, no_wait);
+
+       if (ret) {
+               if (ret != -ERESTART)
+                       printk(KERN_ERR TTM_PFX
+                              "Failed to find memory space for "
+                              "buffer 0x%p eviction.\n", bo);
+               goto out;
+       }
+
+       ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
+                                    no_wait);
+       if (ret) {
+               if (ret != -ERESTART)
+                       printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
+               goto out;
+       }
+
+       spin_lock(&bdev->lru_lock);
+       if (evict_mem.mm_node) {
+               drm_mm_put_block(evict_mem.mm_node);
+               evict_mem.mm_node = NULL;
+       }
+       spin_unlock(&bdev->lru_lock);
+       bo->evicted = true;
+out:
+       return ret;
+}
+
+/**
+ * Repeatedly evict memory from the LRU for @mem_type until we create enough
+ * space, or we've evicted everything and there isn't enough space.
+ */
+static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev,
+                                 struct ttm_mem_reg *mem,
+                                 uint32_t mem_type,
+                                 bool interruptible, bool no_wait)
+{
+       struct drm_mm_node *node;
+       struct ttm_buffer_object *entry;
+       struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+       struct list_head *lru;
+       unsigned long num_pages = mem->num_pages;
+       int put_count = 0;
+       int ret;
+
+retry_pre_get:
+       ret = drm_mm_pre_get(&man->manager);
+       if (unlikely(ret != 0))
+               return ret;
+
+       spin_lock(&bdev->lru_lock);
+       do {
+               node = drm_mm_search_free(&man->manager, num_pages,
+                                         mem->page_alignment, 1);
+               if (node)
+                       break;
+
+               lru = &man->lru;
+               if (list_empty(lru))
+                       break;
+
+               entry = list_first_entry(lru, struct ttm_buffer_object, lru);
+               kref_get(&entry->list_kref);
+
+               ret = ttm_bo_reserve_locked(entry, interruptible, no_wait,
+                                           false, 0);
+
+               if (likely(ret == 0))
+                       put_count = ttm_bo_del_from_lru(entry);
+
+               spin_unlock(&bdev->lru_lock);
+
+               if (unlikely(ret != 0))
+                       return ret;
+
+               while (put_count--)
+                       kref_put(&entry->list_kref, ttm_bo_ref_bug);
+
+               ret = ttm_bo_evict(entry, mem_type, interruptible, no_wait);
+
+               ttm_bo_unreserve(entry);
+
+               kref_put(&entry->list_kref, ttm_bo_release_list);
+               if (ret)
+                       return ret;
+
+               spin_lock(&bdev->lru_lock);
+       } while (1);
+
+       if (!node) {
+               spin_unlock(&bdev->lru_lock);
+               return -ENOMEM;
+       }
+
+       node = drm_mm_get_block_atomic(node, num_pages, mem->page_alignment);
+       if (unlikely(!node)) {
+               spin_unlock(&bdev->lru_lock);
+               goto retry_pre_get;
+       }
+
+       spin_unlock(&bdev->lru_lock);
+       mem->mm_node = node;
+       mem->mem_type = mem_type;
+       return 0;
+}
+
+static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
+                                bool disallow_fixed,
+                                uint32_t mem_type,
+                                uint32_t mask, uint32_t *res_mask)
+{
+       uint32_t cur_flags = ttm_bo_type_flags(mem_type);
+
+       if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
+               return false;
+
+       if ((cur_flags & mask & TTM_PL_MASK_MEM) == 0)
+               return false;
+
+       if ((mask & man->available_caching) == 0)
+               return false;
+       if (mask & man->default_caching)
+               cur_flags |= man->default_caching;
+       else if (mask & TTM_PL_FLAG_CACHED)
+               cur_flags |= TTM_PL_FLAG_CACHED;
+       else if (mask & TTM_PL_FLAG_WC)
+               cur_flags |= TTM_PL_FLAG_WC;
+       else
+               cur_flags |= TTM_PL_FLAG_UNCACHED;
+
+       *res_mask = cur_flags;
+       return true;
+}
+
+/**
+ * Creates space for memory region @mem according to its type.
+ *
+ * This function first searches for free space in compatible memory types in
+ * the priority order defined by the driver.  If free space isn't found, then
+ * ttm_bo_mem_force_space is attempted in priority order to evict and find
+ * space.
+ */
+int ttm_bo_mem_space(struct ttm_buffer_object *bo,
+                    uint32_t proposed_placement,
+                    struct ttm_mem_reg *mem,
+                    bool interruptible, bool no_wait)
+{
+       struct ttm_bo_device *bdev = bo->bdev;
+       struct ttm_mem_type_manager *man;
+
+       uint32_t num_prios = bdev->driver->num_mem_type_prio;
+       const uint32_t *prios = bdev->driver->mem_type_prio;
+       uint32_t i;
+       uint32_t mem_type = TTM_PL_SYSTEM;
+       uint32_t cur_flags = 0;
+       bool type_found = false;
+       bool type_ok = false;
+       bool has_eagain = false;
+       struct drm_mm_node *node = NULL;
+       int ret;
+
+       mem->mm_node = NULL;
+       for (i = 0; i < num_prios; ++i) {
+               mem_type = prios[i];
+               man = &bdev->man[mem_type];
+
+               type_ok = ttm_bo_mt_compatible(man,
+                                              bo->type == ttm_bo_type_user,
+                                              mem_type, proposed_placement,
+                                              &cur_flags);
+
+               if (!type_ok)
+                       continue;
+
+               if (mem_type == TTM_PL_SYSTEM)
+                       break;
+
+               if (man->has_type && man->use_type) {
+                       type_found = true;
+                       do {
+                               ret = drm_mm_pre_get(&man->manager);
+                               if (unlikely(ret))
+                                       return ret;
+
+                               spin_lock(&bdev->lru_lock);
+                               node = drm_mm_search_free(&man->manager,
+                                                         mem->num_pages,
+                                                         mem->page_alignment,
+                                                         1);
+                               if (unlikely(!node)) {
+                                       spin_unlock(&bdev->lru_lock);
+                                       break;
+                               }
+                               node = drm_mm_get_block_atomic(node,
+                                                              mem->num_pages,
+                                                              mem->page_alignment);
+                               spin_unlock(&bdev->lru_lock);
+                       } while (!node);
+               }
+               if (node)
+                       break;
+       }
+
+       if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || node) {
+               mem->mm_node = node;
+               mem->mem_type = mem_type;
+               mem->placement = cur_flags;
+               return 0;
+       }
+
+       if (!type_found)
+               return -EINVAL;
+
+       num_prios = bdev->driver->num_mem_busy_prio;
+       prios = bdev->driver->mem_busy_prio;
+
+       for (i = 0; i < num_prios; ++i) {
+               mem_type = prios[i];
+               man = &bdev->man[mem_type];
+
+               if (!man->has_type)
+                       continue;
+
+               if (!ttm_bo_mt_compatible(man,
+                                         bo->type == ttm_bo_type_user,
+                                         mem_type,
+                                         proposed_placement, &cur_flags))
+                       continue;
+
+               ret = ttm_bo_mem_force_space(bdev, mem, mem_type,
+                                            interruptible, no_wait);
+
+               if (ret == 0 && mem->mm_node) {
+                       mem->placement = cur_flags;
+                       return 0;
+               }
+
+               if (ret == -ERESTART)
+                       has_eagain = true;
+       }
+
+       ret = (has_eagain) ? -ERESTART : -ENOMEM;
+       return ret;
+}
+EXPORT_SYMBOL(ttm_bo_mem_space);
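+
+/* Illustrative sketch (not part of the original patch): a caller that holds
+ * a reservation on bo asks for VRAM or TT space, write-combined or uncached.
+ * TTM_PL_FLAG_VRAM and TTM_PL_FLAG_TT are assumed to come from
+ * ttm_placement.h; the wrapper name is illustrative.
+ */
+static int mydrv_find_space(struct ttm_buffer_object *bo,
+                           struct ttm_mem_reg *mem, bool interruptible)
+{
+       uint32_t placement = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_TT |
+                            TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED;
+
+       mem->num_pages = bo->num_pages;
+       mem->size = mem->num_pages << PAGE_SHIFT;
+       mem->page_alignment = bo->mem.page_alignment;
+       return ttm_bo_mem_space(bo, placement, mem, interruptible, false);
+}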
+
+int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
+{
+       int ret = 0;
+
+       if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
+               return -EBUSY;
+
+       ret = wait_event_interruptible(bo->event_queue,
+                                      atomic_read(&bo->cpu_writers) == 0);
+
+       if (ret == -ERESTARTSYS)
+               ret = -ERESTART;
+
+       return ret;
+}
+
+int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
+                      uint32_t proposed_placement,
+                      bool interruptible, bool no_wait)
+{
+       struct ttm_bo_device *bdev = bo->bdev;
+       int ret = 0;
+       struct ttm_mem_reg mem;
+
+       BUG_ON(!atomic_read(&bo->reserved));
+
+       /*
+        * FIXME: It's possible to pipeline buffer moves.
+        * Have the driver move function wait for idle when necessary,
+        * instead of doing it here.
+        */
+
+       spin_lock(&bo->lock);
+       ret = ttm_bo_wait(bo, false, interruptible, no_wait);
+       spin_unlock(&bo->lock);
+
+       if (ret)
+               return ret;
+
+       mem.num_pages = bo->num_pages;
+       mem.size = mem.num_pages << PAGE_SHIFT;
+       mem.page_alignment = bo->mem.page_alignment;
+
+       /*
+        * Determine where to move the buffer.
+        */
+
+       ret = ttm_bo_mem_space(bo, proposed_placement, &mem,
+                              interruptible, no_wait);
+       if (ret)
+               goto out_unlock;
+
+       ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
+
+out_unlock:
+       if (ret && mem.mm_node) {
+               spin_lock(&bdev->lru_lock);
+               drm_mm_put_block(mem.mm_node);
+               spin_unlock(&bdev->lru_lock);
+       }
+       return ret;
+}
+
+static int ttm_bo_mem_compat(uint32_t proposed_placement,
+                            struct ttm_mem_reg *mem)
+{
+       if ((proposed_placement & mem->placement & TTM_PL_MASK_MEM) == 0)
+               return 0;
+       if ((proposed_placement & mem->placement & TTM_PL_MASK_CACHING) == 0)
+               return 0;
+
+       return 1;
+}
+
+int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
+                              uint32_t proposed_placement,
+                              bool interruptible, bool no_wait)
+{
+       int ret;
+
+       BUG_ON(!atomic_read(&bo->reserved));
+       bo->proposed_placement = proposed_placement;
+
+       TTM_DEBUG("Proposed placement 0x%08lx, Old flags 0x%08lx\n",
+                 (unsigned long)proposed_placement,
+                 (unsigned long)bo->mem.placement);
+
+       /*
+        * Check whether we need to move buffer.
+        */
+
+       if (!ttm_bo_mem_compat(bo->proposed_placement, &bo->mem)) {
+               ret = ttm_bo_move_buffer(bo, bo->proposed_placement,
+                                        interruptible, no_wait);
+               if (ret) {
+                       if (ret != -ERESTART)
+                               printk(KERN_ERR TTM_PFX
+                                      "Failed moving buffer. "
+                                      "Proposed placement 0x%08x\n",
+                                      bo->proposed_placement);
+                       if (ret == -ENOMEM)
+                               printk(KERN_ERR TTM_PFX
+                                      "Out of aperture space or "
+                                      "DRM memory quota.\n");
+                       return ret;
+               }
+       }
+
+       /*
+        * We might need to add a TTM.
+        */
+
+       if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
+               ret = ttm_bo_add_ttm(bo, true);
+               if (ret)
+                       return ret;
+       }
+       /*
+        * Validation has succeeded, move the access and other
+        * non-mapping-related flag bits from the proposed flags to
+        * the active flags
+        */
+
+       ttm_flag_masked(&bo->mem.placement, bo->proposed_placement,
+                       ~TTM_PL_MASK_MEMTYPE);
+
+       return 0;
+}
+EXPORT_SYMBOL(ttm_buffer_object_validate);
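+
+/* Illustrative sketch (not part of the original patch): the reserve +
+ * validate sequence a driver would use to migrate a buffer into VRAM.
+ * TTM_PL_FLAG_VRAM is assumed to come from ttm_placement.h.
+ */
+static int mydrv_move_to_vram(struct ttm_buffer_object *bo)
+{
+       int ret;
+
+       ret = ttm_bo_reserve(bo, true, false, false, 0);
+       if (unlikely(ret != 0))
+               return ret;
+       ret = ttm_buffer_object_validate(bo,
+                                        TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC,
+                                        true, false);
+       ttm_bo_unreserve(bo);
+       return ret;
+}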
+
+int
+ttm_bo_check_placement(struct ttm_buffer_object *bo,
+                      uint32_t set_flags, uint32_t clr_flags)
+{
+       uint32_t new_mask = set_flags | clr_flags;
+
+       if ((bo->type == ttm_bo_type_user) &&
+           (clr_flags & TTM_PL_FLAG_CACHED)) {
+               printk(KERN_ERR TTM_PFX
+                      "User buffers require cache-coherent memory.\n");
+               return -EINVAL;
+       }
+
+       if (!capable(CAP_SYS_ADMIN)) {
+               if (new_mask & TTM_PL_FLAG_NO_EVICT) {
+                       printk(KERN_ERR TTM_PFX "Need to be root to modify"
+                              " NO_EVICT status.\n");
+                       return -EINVAL;
+               }
+
+               if ((clr_flags & bo->mem.placement & TTM_PL_MASK_MEMTYPE) &&
+                   (bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
+                       printk(KERN_ERR TTM_PFX
+                              "Incompatible memory specification"
+                              " for NO_EVICT buffer.\n");
+                       return -EINVAL;
+               }
+       }
+       return 0;
+}
+
+int ttm_buffer_object_init(struct ttm_bo_device *bdev,
+                          struct ttm_buffer_object *bo,
+                          unsigned long size,
+                          enum ttm_bo_type type,
+                          uint32_t flags,
+                          uint32_t page_alignment,
+                          unsigned long buffer_start,
+                          bool interruptible,
+                          struct file *persistant_swap_storage,
+                          size_t acc_size,
+                          void (*destroy) (struct ttm_buffer_object *))
+{
+       int ret = 0;
+       unsigned long num_pages;
+
+       size += buffer_start & ~PAGE_MASK;
+       num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+       if (num_pages == 0) {
+               printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n");
+               return -EINVAL;
+       }
+       bo->destroy = destroy;
+
+       spin_lock_init(&bo->lock);
+       kref_init(&bo->kref);
+       kref_init(&bo->list_kref);
+       atomic_set(&bo->cpu_writers, 0);
+       atomic_set(&bo->reserved, 1);
+       init_waitqueue_head(&bo->event_queue);
+       INIT_LIST_HEAD(&bo->lru);
+       INIT_LIST_HEAD(&bo->ddestroy);
+       INIT_LIST_HEAD(&bo->swap);
+       bo->bdev = bdev;
+       bo->type = type;
+       bo->num_pages = num_pages;
+       bo->mem.mem_type = TTM_PL_SYSTEM;
+       bo->mem.num_pages = bo->num_pages;
+       bo->mem.mm_node = NULL;
+       bo->mem.page_alignment = page_alignment;
+       bo->buffer_start = buffer_start & PAGE_MASK;
+       bo->priv_flags = 0;
+       bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
+       bo->seq_valid = false;
+       bo->persistant_swap_storage = persistant_swap_storage;
+       bo->acc_size = acc_size;
+
+       ret = ttm_bo_check_placement(bo, flags, 0ULL);
+       if (unlikely(ret != 0))
+               goto out_err;
+
+       /*
+        * If no caching attributes are set, accept any form of caching.
+        */
+
+       if ((flags & TTM_PL_MASK_CACHING) == 0)
+               flags |= TTM_PL_MASK_CACHING;
+
+       /*
+        * For ttm_bo_type_device buffers, allocate
+        * address space from the device.
+        */
+
+       if (bo->type == ttm_bo_type_device) {
+               ret = ttm_bo_setup_vm(bo);
+               if (ret)
+                       goto out_err;
+       }
+
+       ret = ttm_buffer_object_validate(bo, flags, interruptible, false);
+       if (ret)
+               goto out_err;
+
+       ttm_bo_unreserve(bo);
+       return 0;
+
+out_err:
+       ttm_bo_unreserve(bo);
+       ttm_bo_unref(&bo);
+
+       return ret;
+}
+EXPORT_SYMBOL(ttm_buffer_object_init);
+
+static inline size_t ttm_bo_size(struct ttm_bo_device *bdev,
+                                unsigned long num_pages)
+{
+       size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
+           PAGE_MASK;
+
+       return bdev->ttm_bo_size + 2 * page_array_size;
+}
+
+int ttm_buffer_object_create(struct ttm_bo_device *bdev,
+                            unsigned long size,
+                            enum ttm_bo_type type,
+                            uint32_t flags,
+                            uint32_t page_alignment,
+                            unsigned long buffer_start,
+                            bool interruptible,
+                            struct file *persistant_swap_storage,
+                            struct ttm_buffer_object **p_bo)
+{
+       struct ttm_buffer_object *bo;
+       int ret;
+       struct ttm_mem_global *mem_glob = bdev->mem_glob;
+
+       size_t acc_size =
+           ttm_bo_size(bdev, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
+       ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false, false);
+       if (unlikely(ret != 0))
+               return ret;
+
+       bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+
+       if (unlikely(bo == NULL)) {
+               ttm_mem_global_free(mem_glob, acc_size, false);
+               return -ENOMEM;
+       }
+
+       ret = ttm_buffer_object_init(bdev, bo, size, type, flags,
+                                    page_alignment, buffer_start,
+                                    interruptible,
+                                    persistant_swap_storage, acc_size, NULL);
+       if (likely(ret == 0))
+               *p_bo = bo;
+
+       return ret;
+}
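+
+/* Illustrative sketch (not part of the original patch): creating a cached
+ * kernel buffer object with no special page alignment using the helper
+ * above.  The wrapper name is illustrative; all TTM symbols used are defined
+ * in this file or ttm_placement.h.
+ */
+static int mydrv_create_kernel_bo(struct ttm_bo_device *bdev,
+                                 unsigned long size,
+                                 struct ttm_buffer_object **p_bo)
+{
+       return ttm_buffer_object_create(bdev, size, ttm_bo_type_kernel,
+                                       TTM_PL_FLAG_SYSTEM |
+                                       TTM_PL_FLAG_CACHED,
+                                       0, 0, false, NULL, p_bo);
+}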
+
+static int ttm_bo_leave_list(struct ttm_buffer_object *bo,
+                            uint32_t mem_type, bool allow_errors)
+{
+       int ret;
+
+       spin_lock(&bo->lock);
+       ret = ttm_bo_wait(bo, false, false, false);
+       spin_unlock(&bo->lock);
+
+       if (ret && allow_errors)
+               goto out;
+
+       if (bo->mem.mem_type == mem_type)
+               ret = ttm_bo_evict(bo, mem_type, false, false);
+
+       if (ret) {
+               if (allow_errors) {
+                       goto out;
+               } else {
+                       ret = 0;
+                       printk(KERN_ERR TTM_PFX "Cleanup eviction failed\n");
+               }
+       }
+
+out:
+       return ret;
+}
+
+static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
+                                  struct list_head *head,
+                                  unsigned mem_type, bool allow_errors)
+{
+       struct ttm_buffer_object *entry;
+       int ret;
+       int put_count;
+
+       /*
+        * Can't use standard list traversal since we're unlocking.
+        */
+
+       spin_lock(&bdev->lru_lock);
+
+       while (!list_empty(head)) {
+               entry = list_first_entry(head, struct ttm_buffer_object, lru);
+               kref_get(&entry->list_kref);
+               ret = ttm_bo_reserve_locked(entry, false, false, false, 0);
+               put_count = ttm_bo_del_from_lru(entry);
+               spin_unlock(&bdev->lru_lock);
+               while (put_count--)
+                       kref_put(&entry->list_kref, ttm_bo_ref_bug);
+               BUG_ON(ret);
+               ret = ttm_bo_leave_list(entry, mem_type, allow_errors);
+               ttm_bo_unreserve(entry);
+               kref_put(&entry->list_kref, ttm_bo_release_list);
+               spin_lock(&bdev->lru_lock);
+       }
+
+       spin_unlock(&bdev->lru_lock);
+
+       return 0;
+}
+
+int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
+{
+       struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+       int ret = -EINVAL;
+
+       if (mem_type >= TTM_NUM_MEM_TYPES) {
+               printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type);
+               return ret;
+       }
+
+       if (!man->has_type) {
+               printk(KERN_ERR TTM_PFX "Trying to take down uninitialized "
+                      "memory manager type %u\n", mem_type);
+               return ret;
+       }
+
+       man->use_type = false;
+       man->has_type = false;
+
+       ret = 0;
+       if (mem_type > 0) {
+               ttm_bo_force_list_clean(bdev, &man->lru, mem_type, false);
+
+               spin_lock(&bdev->lru_lock);
+               if (drm_mm_clean(&man->manager))
+                       drm_mm_takedown(&man->manager);
+               else
+                       ret = -EBUSY;
+
+               spin_unlock(&bdev->lru_lock);
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL(ttm_bo_clean_mm);
+
+int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
+{
+       struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+
+       if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
+               printk(KERN_ERR TTM_PFX
+                      "Illegal memory manager memory type %u.\n",
+                      mem_type);
+               return -EINVAL;
+       }
+
+       if (!man->has_type) {
+               printk(KERN_ERR TTM_PFX
+                      "Memory type %u has not been initialized.\n",
+                      mem_type);
+               return 0;
+       }
+
+       return ttm_bo_force_list_clean(bdev, &man->lru, mem_type, true);
+}
+EXPORT_SYMBOL(ttm_bo_evict_mm);
+
+int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
+                  unsigned long p_offset, unsigned long p_size)
+{
+       int ret = -EINVAL;
+       struct ttm_mem_type_manager *man;
+
+       if (type >= TTM_NUM_MEM_TYPES) {
+               printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", type);
+               return ret;
+       }
+
+       man = &bdev->man[type];
+       if (man->has_type) {
+               printk(KERN_ERR TTM_PFX
+                      "Memory manager already initialized for type %d\n",
+                      type);
+               return ret;
+       }
+
+       ret = bdev->driver->init_mem_type(bdev, type, man);
+       if (ret)
+               return ret;
+
+       ret = 0;
+       if (type != TTM_PL_SYSTEM) {
+               if (!p_size) {
+                       printk(KERN_ERR TTM_PFX
+                              "Zero size memory manager type %d\n",
+                              type);
+                       return -EINVAL;
+               }
+               ret = drm_mm_init(&man->manager, p_offset, p_size);
+               if (ret)
+                       return ret;
+       }
+       man->has_type = true;
+       man->use_type = true;
+       man->size = p_size;
+
+       INIT_LIST_HEAD(&man->lru);
+
+       return 0;
+}
+EXPORT_SYMBOL(ttm_bo_init_mm);
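+
+/* Illustrative sketch (not part of the original patch): bringing up a fixed
+ * memory type after ttm_bo_device_init().  p_offset and p_size are in pages;
+ * TTM_PL_VRAM is assumed to come from ttm_placement.h and vram_bytes from
+ * the caller.
+ */
+static int mydrv_init_vram_mm(struct ttm_bo_device *bdev,
+                             unsigned long vram_bytes)
+{
+       return ttm_bo_init_mm(bdev, TTM_PL_VRAM, 0, vram_bytes >> PAGE_SHIFT);
+}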
+
+int ttm_bo_device_release(struct ttm_bo_device *bdev)
+{
+       int ret = 0;
+       unsigned i = TTM_NUM_MEM_TYPES;
+       struct ttm_mem_type_manager *man;
+
+       while (i--) {
+               man = &bdev->man[i];
+               if (man->has_type) {
+                       man->use_type = false;
+                       if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
+                               ret = -EBUSY;
+                               printk(KERN_ERR TTM_PFX
+                                      "DRM memory manager type %d "
+                                      "is not clean.\n", i);
+                       }
+                       man->has_type = false;
+               }
+       }
+
+       if (!cancel_delayed_work(&bdev->wq))
+               flush_scheduled_work();
+
+       while (ttm_bo_delayed_delete(bdev, true))
+               ;
+
+       spin_lock(&bdev->lru_lock);
+       if (list_empty(&bdev->ddestroy))
+               TTM_DEBUG("Delayed destroy list was clean\n");
+
+       if (list_empty(&bdev->man[0].lru))
+               TTM_DEBUG("Swap list was clean\n");
+       spin_unlock(&bdev->lru_lock);
+
+       ttm_mem_unregister_shrink(bdev->mem_glob, &bdev->shrink);
+       BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
+       write_lock(&bdev->vm_lock);
+       drm_mm_takedown(&bdev->addr_space_mm);
+       write_unlock(&bdev->vm_lock);
+
+       __free_page(bdev->dummy_read_page);
+       return ret;
+}
+EXPORT_SYMBOL(ttm_bo_device_release);
+
+/*
+ * This function is intended to be called on drm driver load.
+ * If you decide to call it from firstopen, you must protect the call
+ * from a potentially racing ttm_bo_driver_finish in lastclose.
+ * (This may happen on X server restart).
+ */
+
+int ttm_bo_device_init(struct ttm_bo_device *bdev,
+                      struct ttm_mem_global *mem_glob,
+                      struct ttm_bo_driver *driver, uint64_t file_page_offset)
+{
+       int ret = -EINVAL;
+
+       bdev->dummy_read_page = NULL;
+       rwlock_init(&bdev->vm_lock);
+       spin_lock_init(&bdev->lru_lock);
+
+       bdev->driver = driver;
+       bdev->mem_glob = mem_glob;
+
+       memset(bdev->man, 0, sizeof(bdev->man));
+
+       bdev->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
+       if (unlikely(bdev->dummy_read_page == NULL)) {
+               ret = -ENOMEM;
+               goto out_err0;
+       }
+
+       /*
+        * Initialize the system memory buffer type.
+        * Other types need to be driver / IOCTL initialized.
+        */
+       ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0, 0);
+       if (unlikely(ret != 0))
+               goto out_err1;
+
+       bdev->addr_space_rb = RB_ROOT;
+       ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
+       if (unlikely(ret != 0))
+               goto out_err2;
+
+       INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
+       bdev->nice_mode = true;
+       INIT_LIST_HEAD(&bdev->ddestroy);
+       INIT_LIST_HEAD(&bdev->swap_lru);
+       bdev->dev_mapping = NULL;
+       ttm_mem_init_shrink(&bdev->shrink, ttm_bo_swapout);
+       ret = ttm_mem_register_shrink(mem_glob, &bdev->shrink);
+       if (unlikely(ret != 0)) {
+               printk(KERN_ERR TTM_PFX
+                      "Could not register buffer object swapout.\n");
+               goto out_err2;
+       }
+
+       bdev->ttm_bo_extra_size =
+               ttm_round_pot(sizeof(struct ttm_tt)) +
+               ttm_round_pot(sizeof(struct ttm_backend));
+
+       bdev->ttm_bo_size = bdev->ttm_bo_extra_size +
+               ttm_round_pot(sizeof(struct ttm_buffer_object));
+
+       return 0;
+out_err2:
+       ttm_bo_clean_mm(bdev, 0);
+out_err1:
+       __free_page(bdev->dummy_read_page);
+out_err0:
+       return ret;
+}
+EXPORT_SYMBOL(ttm_bo_device_init);
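The comment above ttm_bo_device_init() describes when a driver should call it. A minimal, hedged sketch of that load-time sequence follows; my_drv, my_bo_driver and MY_FILE_PAGE_OFFSET are illustrative names, not part of this patch.

/* Hedged sketch of a driver-load path wiring up a ttm_bo_device. */
static int my_drv_ttm_init(struct my_drv *drv)
{
        int ret;

        /* In practice the accounting object is shared via ttm_global_item_ref(). */
        ret = ttm_mem_global_init(&drv->mem_glob);
        if (ret)
                return ret;

        /* MY_FILE_PAGE_OFFSET: start of the mmap offset range this driver uses. */
        ret = ttm_bo_device_init(&drv->bdev, &drv->mem_glob,
                                 &my_bo_driver, MY_FILE_PAGE_OFFSET);
        if (ret)
                ttm_mem_global_release(&drv->mem_glob);
        return ret;
}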
+
+/*
+ * buffer object vm functions.
+ */
+
+bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+       struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+
+       if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
+               if (mem->mem_type == TTM_PL_SYSTEM)
+                       return false;
+
+               if (man->flags & TTM_MEMTYPE_FLAG_CMA)
+                       return false;
+
+               if (mem->placement & TTM_PL_FLAG_CACHED)
+                       return false;
+       }
+       return true;
+}
+
+int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
+                     struct ttm_mem_reg *mem,
+                     unsigned long *bus_base,
+                     unsigned long *bus_offset, unsigned long *bus_size)
+{
+       struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+
+       *bus_size = 0;
+       if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+               return -EINVAL;
+
+       if (ttm_mem_reg_is_pci(bdev, mem)) {
+               *bus_offset = mem->mm_node->start << PAGE_SHIFT;
+               *bus_size = mem->num_pages << PAGE_SHIFT;
+               *bus_base = man->io_offset;
+       }
+
+       return 0;
+}
+
+void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
+{
+       struct ttm_bo_device *bdev = bo->bdev;
+       loff_t offset = (loff_t) bo->addr_space_offset;
+       loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
+
+       if (!bdev->dev_mapping)
+               return;
+
+       unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
+}
+
+static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
+{
+       struct ttm_bo_device *bdev = bo->bdev;
+       struct rb_node **cur = &bdev->addr_space_rb.rb_node;
+       struct rb_node *parent = NULL;
+       struct ttm_buffer_object *cur_bo;
+       unsigned long offset = bo->vm_node->start;
+       unsigned long cur_offset;
+
+       while (*cur) {
+               parent = *cur;
+               cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
+               cur_offset = cur_bo->vm_node->start;
+               if (offset < cur_offset)
+                       cur = &parent->rb_left;
+               else if (offset > cur_offset)
+                       cur = &parent->rb_right;
+               else
+                       BUG();
+       }
+
+       rb_link_node(&bo->vm_rb, parent, cur);
+       rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
+}
+
+/**
+ * ttm_bo_setup_vm:
+ *
+ * @bo: the buffer to allocate address space for
+ *
+ * Allocate address space in the drm device so that applications
+ * can mmap the buffer and access the contents. This only
+ * applies to ttm_bo_type_device objects as others are not
+ * placed in the drm device address space.
+ */
+
+static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
+{
+       struct ttm_bo_device *bdev = bo->bdev;
+       int ret;
+
+retry_pre_get:
+       ret = drm_mm_pre_get(&bdev->addr_space_mm);
+       if (unlikely(ret != 0))
+               return ret;
+
+       write_lock(&bdev->vm_lock);
+       bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
+                                        bo->mem.num_pages, 0, 0);
+
+       if (unlikely(bo->vm_node == NULL)) {
+               ret = -ENOMEM;
+               goto out_unlock;
+       }
+
+       bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
+                                             bo->mem.num_pages, 0);
+
+       if (unlikely(bo->vm_node == NULL)) {
+               write_unlock(&bdev->vm_lock);
+               goto retry_pre_get;
+       }
+
+       ttm_bo_vm_insert_rb(bo);
+       write_unlock(&bdev->vm_lock);
+       bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;
+
+       return 0;
+out_unlock:
+       write_unlock(&bdev->vm_lock);
+       return ret;
+}
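ttm_bo_setup_vm() reserves a range in the device address space and records its byte offset in bo->addr_space_offset; user space later passes that offset to mmap() on the device file. A hedged sketch of a driver helper reporting it (the function and argument names are illustrative):

/* Sketch: report the mmap offset of a ttm_bo_type_device object. */
static int my_drv_bo_mmap_offset(struct ttm_buffer_object *bo,
                                 uint64_t *offset_out)
{
        *offset_out = bo->addr_space_offset;    /* bytes, as expected by mmap() */
        return 0;
}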
+
+int ttm_bo_wait(struct ttm_buffer_object *bo,
+               bool lazy, bool interruptible, bool no_wait)
+{
+       struct ttm_bo_driver *driver = bo->bdev->driver;
+       void *sync_obj;
+       void *sync_obj_arg;
+       int ret = 0;
+
+       if (likely(bo->sync_obj == NULL))
+               return 0;
+
+       while (bo->sync_obj) {
+
+               if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
+                       void *tmp_obj = bo->sync_obj;
+                       bo->sync_obj = NULL;
+                       clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
+                       spin_unlock(&bo->lock);
+                       driver->sync_obj_unref(&tmp_obj);
+                       spin_lock(&bo->lock);
+                       continue;
+               }
+
+               if (no_wait)
+                       return -EBUSY;
+
+               sync_obj = driver->sync_obj_ref(bo->sync_obj);
+               sync_obj_arg = bo->sync_obj_arg;
+               spin_unlock(&bo->lock);
+               ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
+                                           lazy, interruptible);
+               if (unlikely(ret != 0)) {
+                       driver->sync_obj_unref(&sync_obj);
+                       spin_lock(&bo->lock);
+                       return ret;
+               }
+               spin_lock(&bo->lock);
+               if (likely(bo->sync_obj == sync_obj &&
+                          bo->sync_obj_arg == sync_obj_arg)) {
+                       void *tmp_obj = bo->sync_obj;
+                       bo->sync_obj = NULL;
+                       clear_bit(TTM_BO_PRIV_FLAG_MOVING,
+                                 &bo->priv_flags);
+                       spin_unlock(&bo->lock);
+                       driver->sync_obj_unref(&sync_obj);
+                       driver->sync_obj_unref(&tmp_obj);
+                       spin_lock(&bo->lock);
+               }
+       }
+       return 0;
+}
+EXPORT_SYMBOL(ttm_bo_wait);
+
+void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo)
+{
+       atomic_set(&bo->reserved, 0);
+       wake_up_all(&bo->event_queue);
+}
+
+int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible,
+                            bool no_wait)
+{
+       int ret;
+
+       while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
+               if (no_wait)
+                       return -EBUSY;
+               else if (interruptible) {
+                       ret = wait_event_interruptible
+                           (bo->event_queue, atomic_read(&bo->reserved) == 0);
+                       if (unlikely(ret != 0))
+                               return -ERESTART;
+               } else {
+                       wait_event(bo->event_queue,
+                                  atomic_read(&bo->reserved) == 0);
+               }
+       }
+       return 0;
+}
+
+int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
+{
+       int ret = 0;
+
+       /*
+        * Using ttm_bo_reserve instead of ttm_bo_block_reservation
+        * makes sure the lru lists are updated.
+        */
+
+       ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
+       if (unlikely(ret != 0))
+               return ret;
+       spin_lock(&bo->lock);
+       ret = ttm_bo_wait(bo, false, true, no_wait);
+       spin_unlock(&bo->lock);
+       if (likely(ret == 0))
+               atomic_inc(&bo->cpu_writers);
+       ttm_bo_unreserve(bo);
+       return ret;
+}
+
+void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
+{
+       if (atomic_dec_and_test(&bo->cpu_writers))
+               wake_up_all(&bo->event_queue);
+}
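ttm_bo_synccpu_write_grab() waits for outstanding GPU work and then marks the buffer as having CPU writers; it must be paired with ttm_bo_synccpu_write_release(). A hedged sketch of the intended pattern (my_cpu_fill is illustrative, and a real driver would use memset_io() when the mapping is io memory):

/* Sketch: CPU writes bracketed by the synccpu helpers. */
static int my_cpu_fill(struct ttm_buffer_object *bo, int value)
{
        struct ttm_bo_kmap_obj map;
        bool is_iomem;
        int ret;

        ret = ttm_bo_synccpu_write_grab(bo, false);
        if (ret)
                return ret;

        ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
        if (ret == 0) {
                memset(ttm_kmap_obj_virtual(&map, &is_iomem), value,
                       bo->num_pages << PAGE_SHIFT);
                ttm_bo_kunmap(&map);
        }

        ttm_bo_synccpu_write_release(bo);
        return ret;
}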
+
+/**
+ * A buffer object shrink method that tries to swap out the first
+ * buffer object on the bo_global::swap_lru list.
+ */
+
+static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
+{
+       struct ttm_bo_device *bdev =
+           container_of(shrink, struct ttm_bo_device, shrink);
+       struct ttm_buffer_object *bo;
+       int ret = -EBUSY;
+       int put_count;
+       uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
+
+       spin_lock(&bdev->lru_lock);
+       while (ret == -EBUSY) {
+               if (unlikely(list_empty(&bdev->swap_lru))) {
+                       spin_unlock(&bdev->lru_lock);
+                       return -EBUSY;
+               }
+
+               bo = list_first_entry(&bdev->swap_lru,
+                                     struct ttm_buffer_object, swap);
+               kref_get(&bo->list_kref);
+
+               /**
+                * Reserve buffer. Since we unlock while sleeping, we need
+                * to re-check that nobody removed us from the swap-list while
+                * we slept.
+                */
+
+               ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+               if (unlikely(ret == -EBUSY)) {
+                       spin_unlock(&bdev->lru_lock);
+                       ttm_bo_wait_unreserved(bo, false);
+                       kref_put(&bo->list_kref, ttm_bo_release_list);
+                       spin_lock(&bdev->lru_lock);
+               }
+       }
+
+       BUG_ON(ret != 0);
+       put_count = ttm_bo_del_from_lru(bo);
+       spin_unlock(&bdev->lru_lock);
+
+       while (put_count--)
+               kref_put(&bo->list_kref, ttm_bo_ref_bug);
+
+       /**
+        * Wait for GPU, then move to system cached.
+        */
+
+       spin_lock(&bo->lock);
+       ret = ttm_bo_wait(bo, false, false, false);
+       spin_unlock(&bo->lock);
+
+       if (unlikely(ret != 0))
+               goto out;
+
+       if ((bo->mem.placement & swap_placement) != swap_placement) {
+               struct ttm_mem_reg evict_mem;
+
+               evict_mem = bo->mem;
+               evict_mem.mm_node = NULL;
+               evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
+               evict_mem.mem_type = TTM_PL_SYSTEM;
+
+               ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
+                                            false, false);
+               if (unlikely(ret != 0))
+                       goto out;
+       }
+
+       ttm_bo_unmap_virtual(bo);
+
+       /**
+        * Swap out. Buffer will be swapped in again as soon as
+        * anyone tries to access a ttm page.
+        */
+
+       ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
+out:
+
+       /**
+        *
+        * Unreserve without putting on LRU to avoid swapping out an
+        * already swapped buffer.
+        */
+
+       atomic_set(&bo->reserved, 0);
+       wake_up_all(&bo->event_queue);
+       kref_put(&bo->list_kref, ttm_bo_release_list);
+       return ret;
+}
+
+void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
+{
+       while (ttm_bo_swapout(&bdev->shrink) == 0)
+               ;
+}
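ttm_bo_swapout() is the shrink callback registered with the memory accounting code in ttm_bo_device_init(); ttm_bo_swapout_all() simply drains the whole swap LRU. A hedged sketch of where a driver might want the latter, for example before hibernation so buffer contents end up in swappable shmem pages (the hook name is illustrative):

/* Sketch: flush all buffer objects to swappable system memory. */
static int my_drv_prepare_hibernation(struct my_drv *drv)
{
        ttm_bo_swapout_all(&drv->bdev);
        return 0;
}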
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
new file mode 100644 (file)
index 0000000..517c845
--- /dev/null
@@ -0,0 +1,561 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_placement.h"
+#include <linux/io.h>
+#include <linux/highmem.h>
+#include <linux/wait.h>
+#include <linux/vmalloc.h>
+#include <linux/version.h>
+#include <linux/module.h>
+
+void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
+{
+       struct ttm_mem_reg *old_mem = &bo->mem;
+
+       if (old_mem->mm_node) {
+               spin_lock(&bo->bdev->lru_lock);
+               drm_mm_put_block(old_mem->mm_node);
+               spin_unlock(&bo->bdev->lru_lock);
+       }
+       old_mem->mm_node = NULL;
+}
+
+int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
+                   bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
+{
+       struct ttm_tt *ttm = bo->ttm;
+       struct ttm_mem_reg *old_mem = &bo->mem;
+       uint32_t save_flags = old_mem->placement;
+       int ret;
+
+       if (old_mem->mem_type != TTM_PL_SYSTEM) {
+               ttm_tt_unbind(ttm);
+               ttm_bo_free_old_node(bo);
+               ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
+                               TTM_PL_MASK_MEM);
+               old_mem->mem_type = TTM_PL_SYSTEM;
+               save_flags = old_mem->placement;
+       }
+
+       ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
+       if (unlikely(ret != 0))
+               return ret;
+
+       if (new_mem->mem_type != TTM_PL_SYSTEM) {
+               ret = ttm_tt_bind(ttm, new_mem);
+               if (unlikely(ret != 0))
+                       return ret;
+       }
+
+       *old_mem = *new_mem;
+       new_mem->mm_node = NULL;
+       ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
+       return 0;
+}
+EXPORT_SYMBOL(ttm_bo_move_ttm);
+
+int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
+                       void **virtual)
+{
+       struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+       unsigned long bus_offset;
+       unsigned long bus_size;
+       unsigned long bus_base;
+       int ret;
+       void *addr;
+
+       *virtual = NULL;
+       ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset, &bus_size);
+       if (ret || bus_size == 0)
+               return ret;
+
+       if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
+               addr = (void *)(((u8 *) man->io_addr) + bus_offset);
+       else {
+               if (mem->placement & TTM_PL_FLAG_WC)
+                       addr = ioremap_wc(bus_base + bus_offset, bus_size);
+               else
+                       addr = ioremap_nocache(bus_base + bus_offset, bus_size);
+               if (!addr)
+                       return -ENOMEM;
+       }
+       *virtual = addr;
+       return 0;
+}
+
+void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
+                        void *virtual)
+{
+       struct ttm_mem_type_manager *man;
+
+       man = &bdev->man[mem->mem_type];
+
+       if (virtual && (man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
+               iounmap(virtual);
+}
+
+static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
+{
+       uint32_t *dstP =
+           (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
+       uint32_t *srcP =
+           (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
+
+       int i;
+       for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
+               iowrite32(ioread32(srcP++), dstP++);
+       return 0;
+}
+
+static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
+                               unsigned long page)
+{
+       struct page *d = ttm_tt_get_page(ttm, page);
+       void *dst;
+
+       if (!d)
+               return -ENOMEM;
+
+       src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
+       dst = kmap(d);
+       if (!dst)
+               return -ENOMEM;
+
+       memcpy_fromio(dst, src, PAGE_SIZE);
+       kunmap(d);
+       return 0;
+}
+
+static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
+                               unsigned long page)
+{
+       struct page *s = ttm_tt_get_page(ttm, page);
+       void *src;
+
+       if (!s)
+               return -ENOMEM;
+
+       dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
+       src = kmap(s);
+       if (!src)
+               return -ENOMEM;
+
+       memcpy_toio(dst, src, PAGE_SIZE);
+       kunmap(s);
+       return 0;
+}
+
+int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
+                      bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
+{
+       struct ttm_bo_device *bdev = bo->bdev;
+       struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
+       struct ttm_tt *ttm = bo->ttm;
+       struct ttm_mem_reg *old_mem = &bo->mem;
+       struct ttm_mem_reg old_copy = *old_mem;
+       void *old_iomap;
+       void *new_iomap;
+       int ret;
+       uint32_t save_flags = old_mem->placement;
+       unsigned long i;
+       unsigned long page;
+       unsigned long add = 0;
+       int dir;
+
+       ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
+       if (ret)
+               return ret;
+       ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
+       if (ret)
+               goto out;
+
+       if (old_iomap == NULL && new_iomap == NULL)
+               goto out2;
+       if (old_iomap == NULL && ttm == NULL)
+               goto out2;
+
+       add = 0;
+       dir = 1;
+
+       if ((old_mem->mem_type == new_mem->mem_type) &&
+           (new_mem->mm_node->start <
+            old_mem->mm_node->start + old_mem->mm_node->size)) {
+               dir = -1;
+               add = new_mem->num_pages - 1;
+       }
+
+       for (i = 0; i < new_mem->num_pages; ++i) {
+               page = i * dir + add;
+               if (old_iomap == NULL)
+                       ret = ttm_copy_ttm_io_page(ttm, new_iomap, page);
+               else if (new_iomap == NULL)
+                       ret = ttm_copy_io_ttm_page(ttm, old_iomap, page);
+               else
+                       ret = ttm_copy_io_page(new_iomap, old_iomap, page);
+               if (ret)
+                       goto out1;
+       }
+       mb();
+out2:
+       ttm_bo_free_old_node(bo);
+
+       *old_mem = *new_mem;
+       new_mem->mm_node = NULL;
+       ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
+
+       if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
+               ttm_tt_unbind(ttm);
+               ttm_tt_destroy(ttm);
+               bo->ttm = NULL;
+       }
+
+out1:
+       ttm_mem_reg_iounmap(bdev, new_mem, new_iomap);
+out:
+       ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
+       return ret;
+}
+EXPORT_SYMBOL(ttm_bo_move_memcpy);
+
+static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
+{
+       kfree(bo);
+}
+
+/**
+ * ttm_buffer_object_transfer
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
+ * holding the data of @bo with the old placement.
+ *
+ * This is a utility function that may be called after an accelerated move
+ * has been scheduled. A new buffer object is created as a placeholder for
+ * the old data while it's being copied. When that buffer object is idle,
+ * it can be destroyed, releasing the space of the old placement.
+ * Returns:
+ * !0: Failure.
+ */
+
+static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
+                                     struct ttm_buffer_object **new_obj)
+{
+       struct ttm_buffer_object *fbo;
+       struct ttm_bo_device *bdev = bo->bdev;
+       struct ttm_bo_driver *driver = bdev->driver;
+
+       fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
+       if (!fbo)
+               return -ENOMEM;
+
+       *fbo = *bo;
+
+       /**
+        * Fix up members that we shouldn't copy directly:
+        * TODO: Explicit member copy would probably be better here.
+        */
+
+       spin_lock_init(&fbo->lock);
+       init_waitqueue_head(&fbo->event_queue);
+       INIT_LIST_HEAD(&fbo->ddestroy);
+       INIT_LIST_HEAD(&fbo->lru);
+       INIT_LIST_HEAD(&fbo->swap);
+       fbo->vm_node = NULL;
+
+       fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
+       if (fbo->mem.mm_node)
+               fbo->mem.mm_node->private = (void *)fbo;
+       kref_init(&fbo->list_kref);
+       kref_init(&fbo->kref);
+       fbo->destroy = &ttm_transfered_destroy;
+
+       *new_obj = fbo;
+       return 0;
+}
+
+pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
+{
+#if defined(__i386__) || defined(__x86_64__)
+       if (caching_flags & TTM_PL_FLAG_WC)
+               tmp = pgprot_writecombine(tmp);
+       else if (boot_cpu_data.x86 > 3)
+               tmp = pgprot_noncached(tmp);
+
+#elif defined(__powerpc__)
+       if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
+               pgprot_val(tmp) |= _PAGE_NO_CACHE;
+               if (caching_flags & TTM_PL_FLAG_UNCACHED)
+                       pgprot_val(tmp) |= _PAGE_GUARDED;
+       }
+#endif
+#if defined(__ia64__)
+       if (caching_flags & TTM_PL_FLAG_WC)
+               tmp = pgprot_writecombine(tmp);
+       else
+               tmp = pgprot_noncached(tmp);
+#endif
+#if defined(__sparc__)
+       if (!(caching_flags & TTM_PL_FLAG_CACHED))
+               tmp = pgprot_noncached(tmp);
+#endif
+       return tmp;
+}
+
+static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
+                         unsigned long bus_base,
+                         unsigned long bus_offset,
+                         unsigned long bus_size,
+                         struct ttm_bo_kmap_obj *map)
+{
+       struct ttm_bo_device *bdev = bo->bdev;
+       struct ttm_mem_reg *mem = &bo->mem;
+       struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+
+       if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) {
+               map->bo_kmap_type = ttm_bo_map_premapped;
+               map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);
+       } else {
+               map->bo_kmap_type = ttm_bo_map_iomap;
+               if (mem->placement & TTM_PL_FLAG_WC)
+                       map->virtual = ioremap_wc(bus_base + bus_offset,
+                                                 bus_size);
+               else
+                       map->virtual = ioremap_nocache(bus_base + bus_offset,
+                                                      bus_size);
+       }
+       return (!map->virtual) ? -ENOMEM : 0;
+}
+
+static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
+                          unsigned long start_page,
+                          unsigned long num_pages,
+                          struct ttm_bo_kmap_obj *map)
+{
+       struct ttm_mem_reg *mem = &bo->mem;
+       pgprot_t prot;
+       struct ttm_tt *ttm = bo->ttm;
+       struct page *d;
+       int i;
+
+       BUG_ON(!ttm);
+       if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
+               /*
+                * We're mapping a single page, and the desired
+                * page protection is consistent with the bo.
+                */
+
+               map->bo_kmap_type = ttm_bo_map_kmap;
+               map->page = ttm_tt_get_page(ttm, start_page);
+               map->virtual = kmap(map->page);
+       } else {
+               /*
+                * Populate the part we're mapping.
+                */
+               for (i = start_page; i < start_page + num_pages; ++i) {
+                       d = ttm_tt_get_page(ttm, i);
+                       if (!d)
+                               return -ENOMEM;
+               }
+
+               /*
+                * We need to use vmap to get the desired page protection
+                * or to make the buffer object look contiguous.
+                */
+               prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
+                       PAGE_KERNEL :
+                       ttm_io_prot(mem->placement, PAGE_KERNEL);
+               map->bo_kmap_type = ttm_bo_map_vmap;
+               map->virtual = vmap(ttm->pages + start_page, num_pages,
+                                   0, prot);
+       }
+       return (!map->virtual) ? -ENOMEM : 0;
+}
+
+int ttm_bo_kmap(struct ttm_buffer_object *bo,
+               unsigned long start_page, unsigned long num_pages,
+               struct ttm_bo_kmap_obj *map)
+{
+       int ret;
+       unsigned long bus_base;
+       unsigned long bus_offset;
+       unsigned long bus_size;
+
+       BUG_ON(!list_empty(&bo->swap));
+       map->virtual = NULL;
+       if (num_pages > bo->num_pages)
+               return -EINVAL;
+       if (start_page > bo->num_pages)
+               return -EINVAL;
+#if 0
+       if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
+               return -EPERM;
+#endif
+       ret = ttm_bo_pci_offset(bo->bdev, &bo->mem, &bus_base,
+                               &bus_offset, &bus_size);
+       if (ret)
+               return ret;
+       if (bus_size == 0) {
+               return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
+       } else {
+               bus_offset += start_page << PAGE_SHIFT;
+               bus_size = num_pages << PAGE_SHIFT;
+               return ttm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
+       }
+}
+EXPORT_SYMBOL(ttm_bo_kmap);
+
+void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
+{
+       if (!map->virtual)
+               return;
+       switch (map->bo_kmap_type) {
+       case ttm_bo_map_iomap:
+               iounmap(map->virtual);
+               break;
+       case ttm_bo_map_vmap:
+               vunmap(map->virtual);
+               break;
+       case ttm_bo_map_kmap:
+               kunmap(map->page);
+               break;
+       case ttm_bo_map_premapped:
+               break;
+       default:
+               BUG();
+       }
+       map->virtual = NULL;
+       map->page = NULL;
+}
+EXPORT_SYMBOL(ttm_bo_kunmap);
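ttm_bo_kmap() picks the cheapest mapping that satisfies the request: the premapped aperture, an ioremap, a single-page kmap, or a vmap. Callers should therefore treat the returned pointer as possibly pointing into io memory, which is what the is_iomem flag of ttm_kmap_obj_virtual() reports. A hedged sketch of a read-back helper (the function name is illustrative):

/* Sketch: copy the first n_dw dwords of a buffer object to a kernel buffer. */
static int my_readback(struct ttm_buffer_object *bo, u32 *out, size_t n_dw)
{
        struct ttm_bo_kmap_obj map;
        bool is_iomem;
        void *ptr;
        int ret;

        ret = ttm_bo_kmap(bo, 0, 1, &map);      /* first page only */
        if (ret)
                return ret;

        ptr = ttm_kmap_obj_virtual(&map, &is_iomem);
        if (is_iomem)
                memcpy_fromio(out, (void __iomem *)ptr, n_dw * sizeof(u32));
        else
                memcpy(out, ptr, n_dw * sizeof(u32));

        ttm_bo_kunmap(&map);
        return 0;
}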
+
+int ttm_bo_pfn_prot(struct ttm_buffer_object *bo,
+                   unsigned long dst_offset,
+                   unsigned long *pfn, pgprot_t *prot)
+{
+       struct ttm_mem_reg *mem = &bo->mem;
+       struct ttm_bo_device *bdev = bo->bdev;
+       unsigned long bus_offset;
+       unsigned long bus_size;
+       unsigned long bus_base;
+       int ret;
+       ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset,
+                       &bus_size);
+       if (ret)
+               return -EINVAL;
+       if (bus_size != 0)
+               *pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT;
+       else if (!bo->ttm)
+               return -EINVAL;
+       else
+               *pfn = page_to_pfn(ttm_tt_get_page(bo->ttm,
+                                                  dst_offset >> PAGE_SHIFT));
+       *prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
+               PAGE_KERNEL : ttm_io_prot(mem->placement, PAGE_KERNEL);
+
+       return 0;
+}
+
+int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
+                             void *sync_obj,
+                             void *sync_obj_arg,
+                             bool evict, bool no_wait,
+                             struct ttm_mem_reg *new_mem)
+{
+       struct ttm_bo_device *bdev = bo->bdev;
+       struct ttm_bo_driver *driver = bdev->driver;
+       struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
+       struct ttm_mem_reg *old_mem = &bo->mem;
+       int ret;
+       uint32_t save_flags = old_mem->placement;
+       struct ttm_buffer_object *ghost_obj;
+       void *tmp_obj = NULL;
+
+       spin_lock(&bo->lock);
+       if (bo->sync_obj) {
+               tmp_obj = bo->sync_obj;
+               bo->sync_obj = NULL;
+       }
+       bo->sync_obj = driver->sync_obj_ref(sync_obj);
+       bo->sync_obj_arg = sync_obj_arg;
+       if (evict) {
+               ret = ttm_bo_wait(bo, false, false, false);
+               spin_unlock(&bo->lock);
+               driver->sync_obj_unref(&bo->sync_obj);
+
+               if (ret)
+                       return ret;
+
+               ttm_bo_free_old_node(bo);
+               if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
+                   (bo->ttm != NULL)) {
+                       ttm_tt_unbind(bo->ttm);
+                       ttm_tt_destroy(bo->ttm);
+                       bo->ttm = NULL;
+               }
+       } else {
+               /**
+                * This should help pipeline ordinary buffer moves.
+                *
+                * Hang old buffer memory on a new buffer object,
+                * and leave it to be released when the GPU
+                * operation has completed.
+                */
+
+               set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
+               spin_unlock(&bo->lock);
+
+               ret = ttm_buffer_object_transfer(bo, &ghost_obj);
+               if (ret)
+                       return ret;
+
+               /**
+                * If we're not moving to fixed memory, the TTM object
+                * needs to stay alive. Otherwise hang it on the ghost
+                * bo to be unbound and destroyed.
+                */
+
+               if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
+                       ghost_obj->ttm = NULL;
+               else
+                       bo->ttm = NULL;
+
+               ttm_bo_unreserve(ghost_obj);
+               ttm_bo_unref(&ghost_obj);
+       }
+
+       *old_mem = *new_mem;
+       new_mem->mm_node = NULL;
+       ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
+       return 0;
+}
+EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
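ttm_bo_move_accel_cleanup() is what a driver's move hook calls once it has queued a hardware copy: it attaches the fence to the buffer and, for non-eviction moves, hangs the old placement on a ghost object created by ttm_buffer_object_transfer() so the copy can finish asynchronously. A hedged sketch of such a hook; my_copy_blit and struct my_fence are hypothetical:

/* Sketch: a bo_driver.move implementation built around an asynchronous blit. */
static int my_bo_move_blit(struct ttm_buffer_object *bo,
                           bool evict, bool no_wait,
                           struct ttm_mem_reg *new_mem)
{
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct my_fence *fence;
        int ret;

        /* Queue the copy on the GPU and get a fence for its completion. */
        ret = my_copy_blit(bo->bdev, old_mem, new_mem, &fence);
        if (ret)
                return ret;

        /* Hand the fence to TTM; the old placement is freed when it signals. */
        return ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
                                         evict, no_wait, new_mem);
}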
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
new file mode 100644 (file)
index 0000000..27b146c
--- /dev/null
@@ -0,0 +1,454 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#include <ttm/ttm_module.h>
+#include <ttm/ttm_bo_driver.h>
+#include <ttm/ttm_placement.h>
+#include <linux/mm.h>
+#include <linux/version.h>
+#include <linux/rbtree.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+
+#define TTM_BO_VM_NUM_PREFAULT 16
+
+static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
+                                                    unsigned long page_start,
+                                                    unsigned long num_pages)
+{
+       struct rb_node *cur = bdev->addr_space_rb.rb_node;
+       unsigned long cur_offset;
+       struct ttm_buffer_object *bo;
+       struct ttm_buffer_object *best_bo = NULL;
+
+       while (likely(cur != NULL)) {
+               bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
+               cur_offset = bo->vm_node->start;
+               if (page_start >= cur_offset) {
+                       cur = cur->rb_right;
+                       best_bo = bo;
+                       if (page_start == cur_offset)
+                               break;
+               } else
+                       cur = cur->rb_left;
+       }
+
+       if (unlikely(best_bo == NULL))
+               return NULL;
+
+       if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
+                    (page_start + num_pages)))
+               return NULL;
+
+       return best_bo;
+}
+
+static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
+           vma->vm_private_data;
+       struct ttm_bo_device *bdev = bo->bdev;
+       unsigned long bus_base;
+       unsigned long bus_offset;
+       unsigned long bus_size;
+       unsigned long page_offset;
+       unsigned long page_last;
+       unsigned long pfn;
+       struct ttm_tt *ttm = NULL;
+       struct page *page;
+       int ret;
+       int i;
+       bool is_iomem;
+       unsigned long address = (unsigned long)vmf->virtual_address;
+       int retval = VM_FAULT_NOPAGE;
+
+       /*
+        * Work around locking order reversal in fault / nopfn
+        * between mmap_sem and bo_reserve: Perform a trylock operation
+        * for reserve, and if it fails, retry the fault after scheduling.
+        */
+
+       ret = ttm_bo_reserve(bo, true, true, false, 0);
+       if (unlikely(ret != 0)) {
+               if (ret == -EBUSY)
+                       set_need_resched();
+               return VM_FAULT_NOPAGE;
+       }
+
+       /*
+        * Wait for buffer data in transit, due to a pipelined
+        * move.
+        */
+
+       spin_lock(&bo->lock);
+       if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
+               ret = ttm_bo_wait(bo, false, true, false);
+               spin_unlock(&bo->lock);
+               if (unlikely(ret != 0)) {
+                       retval = (ret != -ERESTART) ?
+                           VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
+                       goto out_unlock;
+               }
+       } else
+               spin_unlock(&bo->lock);
+
+
+       ret = ttm_bo_pci_offset(bdev, &bo->mem, &bus_base, &bus_offset,
+                               &bus_size);
+       if (unlikely(ret != 0)) {
+               retval = VM_FAULT_SIGBUS;
+               goto out_unlock;
+       }
+
+       is_iomem = (bus_size != 0);
+
+       page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
+           bo->vm_node->start - vma->vm_pgoff;
+       page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
+           bo->vm_node->start - vma->vm_pgoff;
+
+       if (unlikely(page_offset >= bo->num_pages)) {
+               retval = VM_FAULT_SIGBUS;
+               goto out_unlock;
+       }
+
+       /*
+        * Strictly, we're not allowed to modify vma->vm_page_prot here,
+        * since the mmap_sem is only held in read mode. However, we
+        * modify only the caching bits of vma->vm_page_prot and
+        * consider those bits protected by
+        * the bo->mutex, as we should be the only writers.
+        * There shouldn't really be any readers of these bits except
+        * within vm_insert_mixed()? fork?
+        *
+        * TODO: Add a list of vmas to the bo, and change the
+        * vma->vm_page_prot when the object changes caching policy, with
+        * the correct locks held.
+        */
+
+       if (is_iomem) {
+               vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
+                                               vma->vm_page_prot);
+       } else {
+               ttm = bo->ttm;
+               vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
+                   vm_get_page_prot(vma->vm_flags) :
+                   ttm_io_prot(bo->mem.placement, vma->vm_page_prot);
+       }
+
+       /*
+        * Speculatively prefault a number of pages. Only error on
+        * first page.
+        */
+
+       for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
+
+               if (is_iomem)
+                       pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) +
+                           page_offset;
+               else {
+                       page = ttm_tt_get_page(ttm, page_offset);
+                       if (unlikely(!page && i == 0)) {
+                               retval = VM_FAULT_OOM;
+                               goto out_unlock;
+                       } else if (unlikely(!page)) {
+                               break;
+                       }
+                       pfn = page_to_pfn(page);
+               }
+
+               ret = vm_insert_mixed(vma, address, pfn);
+               /*
+                * Somebody may have beaten us to this PTE, we may be
+                * prefaulting into an already populated PTE, or the
+                * prefault may simply have failed.
+                */
+
+               if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
+                       break;
+               else if (unlikely(ret != 0)) {
+                       retval =
+                           (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
+                       goto out_unlock;
+
+               }
+
+               address += PAGE_SIZE;
+               if (unlikely(++page_offset >= page_last))
+                       break;
+       }
+
+out_unlock:
+       ttm_bo_unreserve(bo);
+       return retval;
+}
+
+static void ttm_bo_vm_open(struct vm_area_struct *vma)
+{
+       struct ttm_buffer_object *bo =
+           (struct ttm_buffer_object *)vma->vm_private_data;
+
+       (void)ttm_bo_reference(bo);
+}
+
+static void ttm_bo_vm_close(struct vm_area_struct *vma)
+{
+       struct ttm_buffer_object *bo =
+           (struct ttm_buffer_object *)vma->vm_private_data;
+
+       ttm_bo_unref(&bo);
+       vma->vm_private_data = NULL;
+}
+
+static struct vm_operations_struct ttm_bo_vm_ops = {
+       .fault = ttm_bo_vm_fault,
+       .open = ttm_bo_vm_open,
+       .close = ttm_bo_vm_close
+};
+
+int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
+               struct ttm_bo_device *bdev)
+{
+       struct ttm_bo_driver *driver;
+       struct ttm_buffer_object *bo;
+       int ret;
+
+       read_lock(&bdev->vm_lock);
+       bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
+                                (vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
+       if (likely(bo != NULL))
+               ttm_bo_reference(bo);
+       read_unlock(&bdev->vm_lock);
+
+       if (unlikely(bo == NULL)) {
+               printk(KERN_ERR TTM_PFX
+                      "Could not find buffer object to map.\n");
+               return -EINVAL;
+       }
+
+       driver = bo->bdev->driver;
+       if (unlikely(!driver->verify_access)) {
+               ret = -EPERM;
+               goto out_unref;
+       }
+       ret = driver->verify_access(bo, filp);
+       if (unlikely(ret != 0))
+               goto out_unref;
+
+       vma->vm_ops = &ttm_bo_vm_ops;
+
+       /*
+        * Note: We're transferring the bo reference to
+        * vma->vm_private_data here.
+        */
+
+       vma->vm_private_data = bo;
+       vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
+       return 0;
+out_unref:
+       ttm_bo_unref(&bo);
+       return ret;
+}
+EXPORT_SYMBOL(ttm_bo_mmap);
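ttm_bo_mmap() looks the buffer object up from vma->vm_pgoff in the address-space rb-tree and takes over the vma, so a driver's file-operations mmap hook only needs to forward the call. A hedged sketch; my_drv_from_file() and MY_FILE_PAGE_OFFSET are illustrative, the latter standing for whatever file_page_offset the driver passed to ttm_bo_device_init():

/* Sketch: a driver fops->mmap delegating TTM object ranges to ttm_bo_mmap(). */
static int my_drv_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct my_drv *drv = my_drv_from_file(filp);

        if (vma->vm_pgoff < MY_FILE_PAGE_OFFSET)
                return -EINVAL; /* not a TTM object range in this sketch */

        return ttm_bo_mmap(filp, vma, &drv->bdev);
}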
+
+int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
+{
+       if (vma->vm_pgoff != 0)
+               return -EACCES;
+
+       vma->vm_ops = &ttm_bo_vm_ops;
+       vma->vm_private_data = ttm_bo_reference(bo);
+       vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
+       return 0;
+}
+EXPORT_SYMBOL(ttm_fbdev_mmap);
+
+
+ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
+                 const char __user *wbuf, char __user *rbuf, size_t count,
+                 loff_t *f_pos, bool write)
+{
+       struct ttm_buffer_object *bo;
+       struct ttm_bo_driver *driver;
+       struct ttm_bo_kmap_obj map;
+       unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
+       unsigned long kmap_offset;
+       unsigned long kmap_end;
+       unsigned long kmap_num;
+       size_t io_size;
+       unsigned int page_offset;
+       char *virtual;
+       int ret;
+       bool no_wait = false;
+       bool dummy;
+
+       read_lock(&bdev->vm_lock);
+       bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
+       if (likely(bo != NULL))
+               ttm_bo_reference(bo);
+       read_unlock(&bdev->vm_lock);
+
+       if (unlikely(bo == NULL))
+               return -EFAULT;
+
+       driver = bo->bdev->driver;
+       if (unlikely(!driver->verify_access)) {
+               ret = -EPERM;
+               goto out_unref;
+       }
+
+       ret = driver->verify_access(bo, filp);
+       if (unlikely(ret != 0))
+               goto out_unref;
+
+       kmap_offset = dev_offset - bo->vm_node->start;
+       if (unlikely(kmap_offset >= bo->num_pages)) {
+               ret = -EFBIG;
+               goto out_unref;
+       }
+
+       page_offset = *f_pos & ~PAGE_MASK;
+       io_size = bo->num_pages - kmap_offset;
+       io_size = (io_size << PAGE_SHIFT) - page_offset;
+       if (count < io_size)
+               io_size = count;
+
+       kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
+       kmap_num = kmap_end - kmap_offset + 1;
+
+       ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
+
+       switch (ret) {
+       case 0:
+               break;
+       case -ERESTART:
+               ret = -EINTR;
+               goto out_unref;
+       case -EBUSY:
+               ret = -EAGAIN;
+               goto out_unref;
+       default:
+               goto out_unref;
+       }
+
+       ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
+       if (unlikely(ret != 0)) {
+               ttm_bo_unreserve(bo);
+               goto out_unref;
+       }
+
+       virtual = ttm_kmap_obj_virtual(&map, &dummy);
+       virtual += page_offset;
+
+       if (write)
+               ret = copy_from_user(virtual, wbuf, io_size);
+       else
+               ret = copy_to_user(rbuf, virtual, io_size);
+
+       ttm_bo_kunmap(&map);
+       ttm_bo_unreserve(bo);
+       ttm_bo_unref(&bo);
+
+       if (unlikely(ret != 0))
+               return -EFAULT;
+
+       *f_pos += io_size;
+
+       return io_size;
+out_unref:
+       ttm_bo_unref(&bo);
+       return ret;
+}
+
+ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
+                       char __user *rbuf, size_t count, loff_t *f_pos,
+                       bool write)
+{
+       struct ttm_bo_kmap_obj map;
+       unsigned long kmap_offset;
+       unsigned long kmap_end;
+       unsigned long kmap_num;
+       size_t io_size;
+       unsigned int page_offset;
+       char *virtual;
+       int ret;
+       bool no_wait = false;
+       bool dummy;
+
+       kmap_offset = (*f_pos >> PAGE_SHIFT);
+       if (unlikely(kmap_offset >= bo->num_pages))
+               return -EFBIG;
+
+       page_offset = *f_pos & ~PAGE_MASK;
+       io_size = bo->num_pages - kmap_offset;
+       io_size = (io_size << PAGE_SHIFT) - page_offset;
+       if (count < io_size)
+               io_size = count;
+
+       kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
+       kmap_num = kmap_end - kmap_offset + 1;
+
+       ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
+
+       switch (ret) {
+       case 0:
+               break;
+       case -ERESTART:
+               return -EINTR;
+       case -EBUSY:
+               return -EAGAIN;
+       default:
+               return ret;
+       }
+
+       ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
+       if (unlikely(ret != 0)) {
+               ttm_bo_unreserve(bo);
+               return ret;
+       }
+
+       virtual = ttm_kmap_obj_virtual(&map, &dummy);
+       virtual += page_offset;
+
+       if (write)
+               ret = copy_from_user(virtual, wbuf, io_size);
+       else
+               ret = copy_to_user(rbuf, virtual, io_size);
+
+       ttm_bo_kunmap(&map);
+       ttm_bo_unreserve(bo);
+       ttm_bo_unref(&bo);
+
+       if (unlikely(ret != 0))
+               return -EFAULT;
+
+       *f_pos += io_size;
+
+       return io_size;
+}
diff --git a/drivers/gpu/drm/ttm/ttm_global.c b/drivers/gpu/drm/ttm/ttm_global.c
new file mode 100644 (file)
index 0000000..0b14eb1
--- /dev/null
@@ -0,0 +1,114 @@
+/**************************************************************************
+ *
+ * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#include "ttm/ttm_module.h"
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+struct ttm_global_item {
+       struct mutex mutex;
+       void *object;
+       int refcount;
+};
+
+static struct ttm_global_item glob[TTM_GLOBAL_NUM];
+
+void ttm_global_init(void)
+{
+       int i;
+
+       for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
+               struct ttm_global_item *item = &glob[i];
+               mutex_init(&item->mutex);
+               item->object = NULL;
+               item->refcount = 0;
+       }
+}
+
+void ttm_global_release(void)
+{
+       int i;
+       for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
+               struct ttm_global_item *item = &glob[i];
+               BUG_ON(item->object != NULL);
+               BUG_ON(item->refcount != 0);
+       }
+}
+
+int ttm_global_item_ref(struct ttm_global_reference *ref)
+{
+       int ret;
+       struct ttm_global_item *item = &glob[ref->global_type];
+       void *object;
+
+       mutex_lock(&item->mutex);
+       if (item->refcount == 0) {
+               item->object = kmalloc(ref->size, GFP_KERNEL);
+               if (unlikely(item->object == NULL)) {
+                       ret = -ENOMEM;
+                       goto out_err;
+               }
+
+               ref->object = item->object;
+               ret = ref->init(ref);
+               if (unlikely(ret != 0))
+                       goto out_err;
+
+       }
+       ++item->refcount;
+       ref->object = item->object;
+       object = item->object;
+       mutex_unlock(&item->mutex);
+       return 0;
+out_err:
+       kfree(item->object);
+       item->object = NULL;
+       mutex_unlock(&item->mutex);
+       return ret;
+}
+EXPORT_SYMBOL(ttm_global_item_ref);
+
+void ttm_global_item_unref(struct ttm_global_reference *ref)
+{
+       struct ttm_global_item *item = &glob[ref->global_type];
+
+       mutex_lock(&item->mutex);
+       BUG_ON(item->refcount == 0);
+       BUG_ON(ref->object != item->object);
+       if (--item->refcount == 0) {
+               ref->release(ref);
+               kfree(item->object);
+               item->object = NULL;
+       }
+       mutex_unlock(&item->mutex);
+}
+EXPORT_SYMBOL(ttm_global_item_unref);
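The global items let several devices share a single object such as the struct ttm_mem_global accounting state: each driver fills in a struct ttm_global_reference with the object size plus init and release hooks, and refs it. A hedged sketch of that pattern (the my_* helpers and the mem_global_ref member are illustrative):

/* Sketch: obtain the shared ttm_mem_global through a global reference. */
static int my_mem_global_init(struct ttm_global_reference *ref)
{
        return ttm_mem_global_init(ref->object);
}

static void my_mem_global_release(struct ttm_global_reference *ref)
{
        ttm_mem_global_release(ref->object);
}

static int my_drv_get_mem_global(struct my_drv *drv)
{
        drv->mem_global_ref.global_type = TTM_GLOBAL_TTM_MEM;
        drv->mem_global_ref.size = sizeof(struct ttm_mem_global);
        drv->mem_global_ref.init = &my_mem_global_init;
        drv->mem_global_ref.release = &my_mem_global_release;
        return ttm_global_item_ref(&drv->mem_global_ref);
}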
+
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
new file mode 100644 (file)
index 0000000..87323d4
--- /dev/null
@@ -0,0 +1,234 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "ttm/ttm_memory.h"
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+
+#define TTM_PFX "[TTM] "
+#define TTM_MEMORY_ALLOC_RETRIES 4
+
+/**
+ * At this point we only support a single shrink callback.
+ * Extend this if needed, perhaps using a linked list of callbacks.
+ * Note that this function is reentrant:
+ * many threads may try to swap out at any given time.
+ */
+
+static void ttm_shrink(struct ttm_mem_global *glob, bool from_workqueue,
+                      uint64_t extra)
+{
+       int ret;
+       struct ttm_mem_shrink *shrink;
+       uint64_t target;
+       uint64_t total_target;
+
+       spin_lock(&glob->lock);
+       if (glob->shrink == NULL)
+               goto out;
+
+       if (from_workqueue) {
+               target = glob->swap_limit;
+               total_target = glob->total_memory_swap_limit;
+       } else if (capable(CAP_SYS_ADMIN)) {
+               total_target = glob->emer_total_memory;
+               target = glob->emer_memory;
+       } else {
+               total_target = glob->max_total_memory;
+               target = glob->max_memory;
+       }
+
+       total_target = (extra >= total_target) ? 0 : total_target - extra;
+       target = (extra >= target) ? 0 : target - extra;
+
+       while (glob->used_memory > target ||
+              glob->used_total_memory > total_target) {
+               shrink = glob->shrink;
+               spin_unlock(&glob->lock);
+               ret = shrink->do_shrink(shrink);
+               spin_lock(&glob->lock);
+               if (unlikely(ret != 0))
+                       goto out;
+       }
+out:
+       spin_unlock(&glob->lock);
+}
+
+static void ttm_shrink_work(struct work_struct *work)
+{
+       struct ttm_mem_global *glob =
+           container_of(work, struct ttm_mem_global, work);
+
+       ttm_shrink(glob, true, 0ULL);
+}
+
+int ttm_mem_global_init(struct ttm_mem_global *glob)
+{
+       struct sysinfo si;
+       uint64_t mem;
+
+       spin_lock_init(&glob->lock);
+       glob->swap_queue = create_singlethread_workqueue("ttm_swap");
+       INIT_WORK(&glob->work, ttm_shrink_work);
+       init_waitqueue_head(&glob->queue);
+
+       si_meminfo(&si);
+
+       mem = si.totalram - si.totalhigh;
+       mem *= si.mem_unit;
+
+       glob->max_memory = mem >> 1;
+       glob->emer_memory = (mem >> 1) + (mem >> 2);
+       glob->swap_limit = glob->max_memory - (mem >> 3);
+       glob->used_memory = 0;
+       glob->used_total_memory = 0;
+       glob->shrink = NULL;
+
+       mem = si.totalram;
+       mem *= si.mem_unit;
+
+       glob->max_total_memory = mem >> 1;
+       glob->emer_total_memory = (mem >> 1) + (mem >> 2);
+
+       glob->total_memory_swap_limit = glob->max_total_memory - (mem >> 3);
+
+       printk(KERN_INFO TTM_PFX "TTM available graphics memory: %llu MiB\n",
+              glob->max_total_memory >> 20);
+       printk(KERN_INFO TTM_PFX "TTM available object memory: %llu MiB\n",
+              glob->max_memory >> 20);
+
+       return 0;
+}
+EXPORT_SYMBOL(ttm_mem_global_init);
+
+void ttm_mem_global_release(struct ttm_mem_global *glob)
+{
+       printk(KERN_INFO TTM_PFX "Used total memory is %llu bytes.\n",
+              (unsigned long long)glob->used_total_memory);
+       flush_workqueue(glob->swap_queue);
+       destroy_workqueue(glob->swap_queue);
+       glob->swap_queue = NULL;
+}
+EXPORT_SYMBOL(ttm_mem_global_release);
+
+static inline void ttm_check_swapping(struct ttm_mem_global *glob)
+{
+       bool needs_swapping;
+
+       spin_lock(&glob->lock);
+       needs_swapping = (glob->used_memory > glob->swap_limit ||
+                         glob->used_total_memory >
+                         glob->total_memory_swap_limit);
+       spin_unlock(&glob->lock);
+
+       if (unlikely(needs_swapping))
+               (void)queue_work(glob->swap_queue, &glob->work);
+
+}
+
+void ttm_mem_global_free(struct ttm_mem_global *glob,
+                        uint64_t amount, bool himem)
+{
+       spin_lock(&glob->lock);
+       glob->used_total_memory -= amount;
+       if (!himem)
+               glob->used_memory -= amount;
+       wake_up_all(&glob->queue);
+       spin_unlock(&glob->lock);
+}
+
+static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
+                                 uint64_t amount, bool himem, bool reserve)
+{
+       uint64_t limit;
+       uint64_t lomem_limit;
+       int ret = -ENOMEM;
+
+       spin_lock(&glob->lock);
+
+       if (capable(CAP_SYS_ADMIN)) {
+               limit = glob->emer_total_memory;
+               lomem_limit = glob->emer_memory;
+       } else {
+               limit = glob->max_total_memory;
+               lomem_limit = glob->max_memory;
+       }
+
+       if (unlikely(glob->used_total_memory + amount > limit))
+               goto out_unlock;
+       if (unlikely(!himem && glob->used_memory + amount > lomem_limit))
+               goto out_unlock;
+
+       if (reserve) {
+               glob->used_total_memory += amount;
+               if (!himem)
+                       glob->used_memory += amount;
+       }
+       ret = 0;
+out_unlock:
+       spin_unlock(&glob->lock);
+       ttm_check_swapping(glob);
+
+       return ret;
+}
+
+int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
+                        bool no_wait, bool interruptible, bool himem)
+{
+       int count = TTM_MEMORY_ALLOC_RETRIES;
+
+       while (unlikely(ttm_mem_global_reserve(glob, memory, himem, true)
+                       != 0)) {
+               if (no_wait)
+                       return -ENOMEM;
+               if (unlikely(count-- == 0))
+                       return -ENOMEM;
+               ttm_shrink(glob, false, memory + (memory >> 2) + 16);
+       }
+
+       return 0;
+}
+
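+/*
+ * Illustrative examples (assuming 4 KiB pages): ttm_round_pot(100) returns
+ * 128, ttm_round_pot(4096) returns 4096 unchanged and ttm_round_pot(5000)
+ * returns 8192.
+ */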
+size_t ttm_round_pot(size_t size)
+{
+       if ((size & (size - 1)) == 0)
+               return size;
+       else if (size > PAGE_SIZE)
+               return PAGE_ALIGN(size);
+       else {
+               size_t tmp_size = 4;
+
+               while (tmp_size < size)
+                       tmp_size <<= 1;
+
+               return tmp_size;
+       }
+       return 0;
+}
diff --git a/drivers/gpu/drm/ttm/ttm_module.c b/drivers/gpu/drm/ttm/ttm_module.c
new file mode 100644 (file)
index 0000000..59ce819
--- /dev/null
@@ -0,0 +1,50 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ *         Jerome Glisse
+ */
+#include <linux/module.h>
+#include <ttm/ttm_module.h>
+
+static int __init ttm_init(void)
+{
+       ttm_global_init();
+       return 0;
+}
+
+static void __exit ttm_exit(void)
+{
+       ttm_global_release();
+}
+
+module_init(ttm_init);
+module_exit(ttm_exit);
+
+MODULE_AUTHOR("Thomas Hellstrom, Jerome Glisse");
+MODULE_DESCRIPTION("TTM memory manager subsystem (for DRM device)");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
new file mode 100644 (file)
index 0000000..c27ab3a
--- /dev/null
@@ -0,0 +1,635 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#include <linux/version.h>
+#include <linux/vmalloc.h>
+#include <linux/sched.h>
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <linux/file.h>
+#include <linux/swap.h>
+#include "ttm/ttm_module.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_placement.h"
+
+static int ttm_tt_swapin(struct ttm_tt *ttm);
+
+#if defined(CONFIG_X86)
+static void ttm_tt_clflush_page(struct page *page)
+{
+       uint8_t *page_virtual;
+       unsigned int i;
+
+       if (unlikely(page == NULL))
+               return;
+
+       page_virtual = kmap_atomic(page, KM_USER0);
+
+       for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
+               clflush(page_virtual + i);
+
+       kunmap_atomic(page_virtual, KM_USER0);
+}
+
+static void ttm_tt_cache_flush_clflush(struct page *pages[],
+                                      unsigned long num_pages)
+{
+       unsigned long i;
+
+       mb();
+       for (i = 0; i < num_pages; ++i)
+               ttm_tt_clflush_page(*pages++);
+       mb();
+}
+#else
+static void ttm_tt_ipi_handler(void *null)
+{
+       ;
+}
+#endif
+
+void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages)
+{
+
+#if defined(CONFIG_X86)
+       if (cpu_has_clflush) {
+               ttm_tt_cache_flush_clflush(pages, num_pages);
+               return;
+       }
+#else
+       if (on_each_cpu(ttm_tt_ipi_handler, NULL, 1) != 0)
+               printk(KERN_ERR TTM_PFX
+                      "Timed out waiting for drm cache flush.\n");
+#endif
+}
+
+/**
+ * Allocates storage for pointers to the pages that back the ttm.
+ *
+ * Uses kmalloc if possible. Otherwise falls back to vmalloc.
+ */
+static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
+{
+       unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
+       ttm->pages = NULL;
+
+       if (size <= PAGE_SIZE)
+               ttm->pages = kzalloc(size, GFP_KERNEL);
+
+       if (!ttm->pages) {
+               ttm->pages = vmalloc_user(size);
+               if (ttm->pages)
+                       ttm->page_flags |= TTM_PAGE_FLAG_VMALLOC;
+       }
+}
+
+static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
+{
+       if (ttm->page_flags & TTM_PAGE_FLAG_VMALLOC) {
+               vfree(ttm->pages);
+               ttm->page_flags &= ~TTM_PAGE_FLAG_VMALLOC;
+       } else {
+               kfree(ttm->pages);
+       }
+       ttm->pages = NULL;
+}
+
+static struct page *ttm_tt_alloc_page(unsigned page_flags)
+{
+       if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
+               return alloc_page(GFP_HIGHUSER | __GFP_ZERO);
+
+       return alloc_page(GFP_HIGHUSER);
+}
+
+static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
+{
+       int write;
+       int dirty;
+       struct page *page;
+       int i;
+       struct ttm_backend *be = ttm->be;
+
+       BUG_ON(!(ttm->page_flags & TTM_PAGE_FLAG_USER));
+       write = ((ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0);
+       dirty = ((ttm->page_flags & TTM_PAGE_FLAG_USER_DIRTY) != 0);
+
+       if (be)
+               be->func->clear(be);
+
+       for (i = 0; i < ttm->num_pages; ++i) {
+               page = ttm->pages[i];
+               if (page == NULL)
+                       continue;
+
+               if (page == ttm->dummy_read_page) {
+                       BUG_ON(write);
+                       continue;
+               }
+
+               if (write && dirty && !PageReserved(page))
+                       set_page_dirty_lock(page);
+
+               ttm->pages[i] = NULL;
+               ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE, false);
+               put_page(page);
+       }
+       ttm->state = tt_unpopulated;
+       ttm->first_himem_page = ttm->num_pages;
+       ttm->last_lomem_page = -1;
+}
+
+static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
+{
+       struct page *p;
+       struct ttm_bo_device *bdev = ttm->bdev;
+       struct ttm_mem_global *mem_glob = bdev->mem_glob;
+       int ret;
+
+       while (NULL == (p = ttm->pages[index])) {
+               p = ttm_tt_alloc_page(ttm->page_flags);
+
+               if (!p)
+                       return NULL;
+
+               if (PageHighMem(p)) {
+                       ret =
+                           ttm_mem_global_alloc(mem_glob, PAGE_SIZE,
+                                                false, false, true);
+                       if (unlikely(ret != 0))
+                               goto out_err;
+                       ttm->pages[--ttm->first_himem_page] = p;
+               } else {
+                       ret =
+                           ttm_mem_global_alloc(mem_glob, PAGE_SIZE,
+                                                false, false, false);
+                       if (unlikely(ret != 0))
+                               goto out_err;
+                       ttm->pages[++ttm->last_lomem_page] = p;
+               }
+       }
+       return p;
+out_err:
+       put_page(p);
+       return NULL;
+}
+
+struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)
+{
+       int ret;
+
+       if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
+               ret = ttm_tt_swapin(ttm);
+               if (unlikely(ret != 0))
+                       return NULL;
+       }
+       return __ttm_tt_get_page(ttm, index);
+}
+
+int ttm_tt_populate(struct ttm_tt *ttm)
+{
+       struct page *page;
+       unsigned long i;
+       struct ttm_backend *be;
+       int ret;
+
+       if (ttm->state != tt_unpopulated)
+               return 0;
+
+       if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
+               ret = ttm_tt_swapin(ttm);
+               if (unlikely(ret != 0))
+                       return ret;
+       }
+
+       be = ttm->be;
+
+       for (i = 0; i < ttm->num_pages; ++i) {
+               page = __ttm_tt_get_page(ttm, i);
+               if (!page)
+                       return -ENOMEM;
+       }
+
+       be->func->populate(be, ttm->num_pages, ttm->pages,
+                          ttm->dummy_read_page);
+       ttm->state = tt_unbound;
+       return 0;
+}
+
+#ifdef CONFIG_X86
+static inline int ttm_tt_set_page_caching(struct page *p,
+                                         enum ttm_caching_state c_state)
+{
+       if (PageHighMem(p))
+               return 0;
+
+       switch (c_state) {
+       case tt_cached:
+               return set_pages_wb(p, 1);
+       case tt_wc:
+               return set_memory_wc((unsigned long) page_address(p), 1);
+       default:
+               return set_pages_uc(p, 1);
+       }
+}
+#else /* CONFIG_X86 */
+static inline int ttm_tt_set_page_caching(struct page *p,
+                                         enum ttm_caching_state c_state)
+{
+       return 0;
+}
+#endif /* CONFIG_X86 */
+
+/*
+ * Change caching policy for the linear kernel map
+ * for range of pages in a ttm.
+ */
+
+static int ttm_tt_set_caching(struct ttm_tt *ttm,
+                             enum ttm_caching_state c_state)
+{
+       int i, j;
+       struct page *cur_page;
+       int ret;
+
+       if (ttm->caching_state == c_state)
+               return 0;
+
+       if (c_state != tt_cached) {
+               ret = ttm_tt_populate(ttm);
+               if (unlikely(ret != 0))
+                       return ret;
+       }
+
+       if (ttm->caching_state == tt_cached)
+               ttm_tt_cache_flush(ttm->pages, ttm->num_pages);
+
+       for (i = 0; i < ttm->num_pages; ++i) {
+               cur_page = ttm->pages[i];
+               if (likely(cur_page != NULL)) {
+                       ret = ttm_tt_set_page_caching(cur_page, c_state);
+                       if (unlikely(ret != 0))
+                               goto out_err;
+               }
+       }
+
+       ttm->caching_state = c_state;
+
+       return 0;
+
+out_err:
+       for (j = 0; j < i; ++j) {
+               cur_page = ttm->pages[j];
+               if (likely(cur_page != NULL)) {
+                       (void)ttm_tt_set_page_caching(cur_page,
+                                                     ttm->caching_state);
+               }
+       }
+
+       return ret;
+}
+
+int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
+{
+       enum ttm_caching_state state;
+
+       if (placement & TTM_PL_FLAG_WC)
+               state = tt_wc;
+       else if (placement & TTM_PL_FLAG_UNCACHED)
+               state = tt_uncached;
+       else
+               state = tt_cached;
+
+       return ttm_tt_set_caching(ttm, state);
+}
+
+static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
+{
+       int i;
+       struct page *cur_page;
+       struct ttm_backend *be = ttm->be;
+
+       if (be)
+               be->func->clear(be);
+       (void)ttm_tt_set_caching(ttm, tt_cached);
+       for (i = 0; i < ttm->num_pages; ++i) {
+               cur_page = ttm->pages[i];
+               ttm->pages[i] = NULL;
+               if (cur_page) {
+                       if (page_count(cur_page) != 1)
+                               printk(KERN_ERR TTM_PFX
+                                      "Erroneous page count. "
+                                      "Leaking pages.\n");
+                       ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE,
+                                           PageHighMem(cur_page));
+                       __free_page(cur_page);
+               }
+       }
+       ttm->state = tt_unpopulated;
+       ttm->first_himem_page = ttm->num_pages;
+       ttm->last_lomem_page = -1;
+}
+
+void ttm_tt_destroy(struct ttm_tt *ttm)
+{
+       struct ttm_backend *be;
+
+       if (unlikely(ttm == NULL))
+               return;
+
+       be = ttm->be;
+       if (likely(be != NULL)) {
+               be->func->destroy(be);
+               ttm->be = NULL;
+       }
+
+       if (likely(ttm->pages != NULL)) {
+               if (ttm->page_flags & TTM_PAGE_FLAG_USER)
+                       ttm_tt_free_user_pages(ttm);
+               else
+                       ttm_tt_free_alloced_pages(ttm);
+
+               ttm_tt_free_page_directory(ttm);
+       }
+
+       if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP) &&
+           ttm->swap_storage)
+               fput(ttm->swap_storage);
+
+       kfree(ttm);
+}
+
+int ttm_tt_set_user(struct ttm_tt *ttm,
+                   struct task_struct *tsk,
+                   unsigned long start, unsigned long num_pages)
+{
+       struct mm_struct *mm = tsk->mm;
+       int ret;
+       int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0;
+       struct ttm_mem_global *mem_glob = ttm->bdev->mem_glob;
+
+       BUG_ON(num_pages != ttm->num_pages);
+       BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0);
+
+       /**
+        * Account user pages as lowmem pages for now.
+        */
+
+       ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE,
+                                  false, false, false);
+       if (unlikely(ret != 0))
+               return ret;
+
+       down_read(&mm->mmap_sem);
+       ret = get_user_pages(tsk, mm, start, num_pages,
+                            write, 0, ttm->pages, NULL);
+       up_read(&mm->mmap_sem);
+
+       if (ret != num_pages && write) {
+               ttm_tt_free_user_pages(ttm);
+               ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE, false);
+               return -ENOMEM;
+       }
+
+       ttm->tsk = tsk;
+       ttm->start = start;
+       ttm->state = tt_unbound;
+
+       return 0;
+}
+
+struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
+                            uint32_t page_flags, struct page *dummy_read_page)
+{
+       struct ttm_bo_driver *bo_driver = bdev->driver;
+       struct ttm_tt *ttm;
+
+       if (!bo_driver)
+               return NULL;
+
+       ttm = kzalloc(sizeof(*ttm), GFP_KERNEL);
+       if (!ttm)
+               return NULL;
+
+       ttm->bdev = bdev;
+
+       ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+       ttm->first_himem_page = ttm->num_pages;
+       ttm->last_lomem_page = -1;
+       ttm->caching_state = tt_cached;
+       ttm->page_flags = page_flags;
+
+       ttm->dummy_read_page = dummy_read_page;
+
+       ttm_tt_alloc_page_directory(ttm);
+       if (!ttm->pages) {
+               ttm_tt_destroy(ttm);
+               printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
+               return NULL;
+       }
+       ttm->be = bo_driver->create_ttm_backend_entry(bdev);
+       if (!ttm->be) {
+               ttm_tt_destroy(ttm);
+               printk(KERN_ERR TTM_PFX "Failed creating ttm backend entry\n");
+               return NULL;
+       }
+       ttm->state = tt_unpopulated;
+       return ttm;
+}
+
+void ttm_tt_unbind(struct ttm_tt *ttm)
+{
+       int ret;
+       struct ttm_backend *be = ttm->be;
+
+       if (ttm->state == tt_bound) {
+               ret = be->func->unbind(be);
+               BUG_ON(ret);
+               ttm->state = tt_unbound;
+       }
+}
+
+int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
+{
+       int ret = 0;
+       struct ttm_backend *be;
+
+       if (!ttm)
+               return -EINVAL;
+
+       if (ttm->state == tt_bound)
+               return 0;
+
+       be = ttm->be;
+
+       ret = ttm_tt_populate(ttm);
+       if (ret)
+               return ret;
+
+       ret = be->func->bind(be, bo_mem);
+       if (ret) {
+               printk(KERN_ERR TTM_PFX "Couldn't bind backend.\n");
+               return ret;
+       }
+
+       ttm->state = tt_bound;
+
+       if (ttm->page_flags & TTM_PAGE_FLAG_USER)
+               ttm->page_flags |= TTM_PAGE_FLAG_USER_DIRTY;
+       return 0;
+}
+EXPORT_SYMBOL(ttm_tt_bind);
+
+static int ttm_tt_swapin(struct ttm_tt *ttm)
+{
+       struct address_space *swap_space;
+       struct file *swap_storage;
+       struct page *from_page;
+       struct page *to_page;
+       void *from_virtual;
+       void *to_virtual;
+       int i;
+       int ret;
+
+       if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
+               ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
+                                     ttm->num_pages);
+               if (unlikely(ret != 0))
+                       return ret;
+
+               ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
+               return 0;
+       }
+
+       swap_storage = ttm->swap_storage;
+       BUG_ON(swap_storage == NULL);
+
+       swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;
+
+       for (i = 0; i < ttm->num_pages; ++i) {
+               from_page = read_mapping_page(swap_space, i, NULL);
+               if (IS_ERR(from_page))
+                       goto out_err;
+               to_page = __ttm_tt_get_page(ttm, i);
+               if (unlikely(to_page == NULL))
+                       goto out_err;
+
+               preempt_disable();
+               from_virtual = kmap_atomic(from_page, KM_USER0);
+               to_virtual = kmap_atomic(to_page, KM_USER1);
+               memcpy(to_virtual, from_virtual, PAGE_SIZE);
+               kunmap_atomic(to_virtual, KM_USER1);
+               kunmap_atomic(from_virtual, KM_USER0);
+               preempt_enable();
+               page_cache_release(from_page);
+       }
+
+       if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP))
+               fput(swap_storage);
+       ttm->swap_storage = NULL;
+       ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
+
+       return 0;
+out_err:
+       ttm_tt_free_alloced_pages(ttm);
+       return -ENOMEM;
+}
+
+int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
+{
+       struct address_space *swap_space;
+       struct file *swap_storage;
+       struct page *from_page;
+       struct page *to_page;
+       void *from_virtual;
+       void *to_virtual;
+       int i;
+
+       BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
+       BUG_ON(ttm->caching_state != tt_cached);
+
+       /*
+        * For user buffers, just unpin the pages, as there should be
+        * vma references.
+        */
+
+       if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
+               ttm_tt_free_user_pages(ttm);
+               ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
+               ttm->swap_storage = NULL;
+               return 0;
+       }
+
+       if (!persistant_swap_storage) {
+               swap_storage = shmem_file_setup("ttm swap",
+                                               ttm->num_pages << PAGE_SHIFT,
+                                               0);
+               if (unlikely(IS_ERR(swap_storage))) {
+                       printk(KERN_ERR "Failed allocating swap storage.\n");
+                       return -ENOMEM;
+               }
+       } else
+               swap_storage = persistant_swap_storage;
+
+       swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;
+
+       for (i = 0; i < ttm->num_pages; ++i) {
+               from_page = ttm->pages[i];
+               if (unlikely(from_page == NULL))
+                       continue;
+               to_page = read_mapping_page(swap_space, i, NULL);
+               if (unlikely(IS_ERR(to_page)))
+                       goto out_err;
+
+               preempt_disable();
+               from_virtual = kmap_atomic(from_page, KM_USER0);
+               to_virtual = kmap_atomic(to_page, KM_USER1);
+               memcpy(to_virtual, from_virtual, PAGE_SIZE);
+               kunmap_atomic(to_virtual, KM_USER1);
+               kunmap_atomic(from_virtual, KM_USER0);
+               preempt_enable();
+               set_page_dirty(to_page);
+               mark_page_accessed(to_page);
+               page_cache_release(to_page);
+       }
+
+       ttm_tt_free_alloced_pages(ttm);
+       ttm->swap_storage = swap_storage;
+       ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
+       if (persistant_swap_storage)
+               ttm->page_flags |= TTM_PAGE_FLAG_PERSISTANT_SWAP;
+
+       return 0;
+out_err:
+       if (!persistant_swap_storage)
+               fput(swap_storage);
+
+       return -ENOMEM;
+}
index 0dcf9ca..d0fcf36 100644 (file)
@@ -115,5 +115,7 @@ source "drivers/staging/line6/Kconfig"
 
 source "drivers/staging/serqt_usb/Kconfig"
 
+source "drivers/gpu/drm/radeon/Kconfig"
+
 endif # !STAGING_EXCLUDE_BUILD
 endif # STAGING
index f8634ab..45c1867 100644 (file)
        {0x1002, 0x940A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x940B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x940F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
-       {0x1002, 0x94A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \
-       {0x1002, 0x94A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x94A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x94A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x94B1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x94B3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x94B5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x9456, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x945A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x945B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
-       {0x1002, 0x9460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
-       {0x1002, 0x9462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x9460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x9462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x946A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x946B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x947A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
index fe3e3a4..41862e9 100644 (file)
@@ -496,6 +496,16 @@ typedef struct {
 #define DRM_RADEON_SETPARAM   0x19
 #define DRM_RADEON_SURF_ALLOC 0x1a
 #define DRM_RADEON_SURF_FREE  0x1b
+/* KMS ioctl */
+#define DRM_RADEON_GEM_INFO            0x1c
+#define DRM_RADEON_GEM_CREATE          0x1d
+#define DRM_RADEON_GEM_MMAP            0x1e
+#define DRM_RADEON_GEM_PREAD           0x21
+#define DRM_RADEON_GEM_PWRITE          0x22
+#define DRM_RADEON_GEM_SET_DOMAIN      0x23
+#define DRM_RADEON_GEM_WAIT_IDLE       0x24
+#define DRM_RADEON_CS                  0x26
+#define DRM_RADEON_INFO                        0x27
 
 #define DRM_IOCTL_RADEON_CP_INIT    DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t)
 #define DRM_IOCTL_RADEON_CP_START   DRM_IO(  DRM_COMMAND_BASE + DRM_RADEON_CP_START)
@@ -524,6 +534,17 @@ typedef struct {
 #define DRM_IOCTL_RADEON_SETPARAM   DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SETPARAM, drm_radeon_setparam_t)
 #define DRM_IOCTL_RADEON_SURF_ALLOC DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SURF_ALLOC, drm_radeon_surface_alloc_t)
 #define DRM_IOCTL_RADEON_SURF_FREE  DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SURF_FREE, drm_radeon_surface_free_t)
+/* KMS */
+#define DRM_IOCTL_RADEON_GEM_INFO      DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_INFO, struct drm_radeon_gem_info)
+#define DRM_IOCTL_RADEON_GEM_CREATE    DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_CREATE, struct drm_radeon_gem_create)
+#define DRM_IOCTL_RADEON_GEM_MMAP      DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_MMAP, struct drm_radeon_gem_mmap)
+#define DRM_IOCTL_RADEON_GEM_PREAD     DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_PREAD, struct drm_radeon_gem_pread)
+#define DRM_IOCTL_RADEON_GEM_PWRITE    DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_PWRITE, struct drm_radeon_gem_pwrite)
+#define DRM_IOCTL_RADEON_GEM_SET_DOMAIN        DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_SET_DOMAIN, struct drm_radeon_gem_set_domain)
+#define DRM_IOCTL_RADEON_GEM_WAIT_IDLE DRM_IOW(DRM_COMMAND_BASE + DRM_RADEON_GEM_WAIT_IDLE, struct drm_radeon_gem_wait_idle)
+#define DRM_IOCTL_RADEON_CS            DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_CS, struct drm_radeon_cs)
+#define DRM_IOCTL_RADEON_INFO          DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_INFO, struct drm_radeon_info)
+
 
 typedef struct drm_radeon_init {
        enum {
@@ -682,6 +703,7 @@ typedef struct drm_radeon_indirect {
 #define RADEON_PARAM_VBLANK_CRTC           13   /* VBLANK CRTC */
 #define RADEON_PARAM_FB_LOCATION           14   /* FB location */
 #define RADEON_PARAM_NUM_GB_PIPES          15   /* num GB pipes */
+#define RADEON_PARAM_DEVICE_ID             16
 
 typedef struct drm_radeon_getparam {
        int param;
@@ -751,4 +773,112 @@ typedef struct drm_radeon_surface_free {
 #define        DRM_RADEON_VBLANK_CRTC1         1
 #define        DRM_RADEON_VBLANK_CRTC2         2
 
+/*
+ * Kernel modesetting world below.
+ */
+#define RADEON_GEM_DOMAIN_CPU          0x1
+#define RADEON_GEM_DOMAIN_GTT          0x2
+#define RADEON_GEM_DOMAIN_VRAM         0x4
+
+struct drm_radeon_gem_info {
+       uint64_t        gart_size;
+       uint64_t        vram_size;
+       uint64_t        vram_visible;
+};
+
+#define RADEON_GEM_NO_BACKING_STORE 1
+
+struct drm_radeon_gem_create {
+       uint64_t        size;
+       uint64_t        alignment;
+       uint32_t        handle;
+       uint32_t        initial_domain;
+       uint32_t        flags;
+};
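+
+/*
+ * Illustrative user-space sketch (hypothetical values and helper, error
+ * handling omitted): allocate a 4 KiB buffer object in the GTT domain;
+ * on success, args.handle names the new object.
+ *
+ *     struct drm_radeon_gem_create args = {
+ *             .size = 4096,
+ *             .alignment = 4096,
+ *             .initial_domain = RADEON_GEM_DOMAIN_GTT,
+ *     };
+ *     if (ioctl(fd, DRM_IOCTL_RADEON_GEM_CREATE, &args) == 0)
+ *             use_handle(args.handle);
+ */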
+
+struct drm_radeon_gem_mmap {
+       uint32_t        handle;
+       uint32_t        pad;
+       uint64_t        offset;
+       uint64_t        size;
+       uint64_t        addr_ptr;
+};
+
+struct drm_radeon_gem_set_domain {
+       uint32_t        handle;
+       uint32_t        read_domains;
+       uint32_t        write_domain;
+};
+
+struct drm_radeon_gem_wait_idle {
+       uint32_t        handle;
+       uint32_t        pad;
+};
+
+struct drm_radeon_gem_busy {
+       uint32_t        handle;
+       uint32_t        busy;
+};
+
+struct drm_radeon_gem_pread {
+       /** Handle for the object being read. */
+       uint32_t handle;
+       uint32_t pad;
+       /** Offset into the object to read from */
+       uint64_t offset;
+       /** Length of data to read */
+       uint64_t size;
+       /** Pointer to write the data into. */
+       /* void *, but pointers are not 32/64 compatible */
+       uint64_t data_ptr;
+};
+
+struct drm_radeon_gem_pwrite {
+       /** Handle for the object being written to. */
+       uint32_t handle;
+       uint32_t pad;
+       /** Offset into the object to write to */
+       uint64_t offset;
+       /** Length of data to write */
+       uint64_t size;
+       /** Pointer to read the data from. */
+       /* void *, but pointers are not 32/64 compatible */
+       uint64_t data_ptr;
+};
+
+#define RADEON_CHUNK_ID_RELOCS 0x01
+#define RADEON_CHUNK_ID_IB     0x02
+
+struct drm_radeon_cs_chunk {
+       uint32_t                chunk_id;
+       uint32_t                length_dw;
+       uint64_t                chunk_data;
+};
+
+struct drm_radeon_cs_reloc {
+       uint32_t                handle;
+       uint32_t                read_domains;
+       uint32_t                write_domain;
+       uint32_t                flags;
+};
+
+struct drm_radeon_cs {
+       uint32_t                num_chunks;
+       uint32_t                cs_id;
+       /* points to an array of uint64_t chunk pointers; see the sketch below */
+       uint64_t                chunks;
+       /* updates to the limits after this CS ioctl */
+       uint64_t                gart_limit;
+       uint64_t                vram_limit;
+};
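+
+/*
+ * Illustrative layout sketch (hypothetical names): user space builds an
+ * array of chunks and an array of uint64_t pointers to them, then passes
+ * the address of the pointer array in drm_radeon_cs.chunks:
+ *
+ *     struct drm_radeon_cs_chunk ib = {
+ *             .chunk_id = RADEON_CHUNK_ID_IB,
+ *             .length_dw = ib_len_dw,
+ *             .chunk_data = (uint64_t)(uintptr_t)ib_dwords,
+ *     };
+ *     uint64_t chunk_array[] = { (uint64_t)(uintptr_t)&ib };
+ *     struct drm_radeon_cs cs = {
+ *             .num_chunks = 1,
+ *             .chunks = (uint64_t)(uintptr_t)chunk_array,
+ *     };
+ *     ioctl(fd, DRM_IOCTL_RADEON_CS, &cs);
+ */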
+
+#define RADEON_INFO_DEVICE_ID          0x00
+#define RADEON_INFO_NUM_GB_PIPES       0x01
+
+struct drm_radeon_info {
+       uint32_t                request;
+       uint32_t                pad;
+       uint64_t                value;
+};
+
 #endif
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
new file mode 100644 (file)
index 0000000..cd22ab4
--- /dev/null
@@ -0,0 +1,618 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#ifndef _TTM_BO_API_H_
+#define _TTM_BO_API_H_
+
+#include "drm_hashtab.h"
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/mutex.h>
+#include <linux/mm.h>
+#include <linux/rbtree.h>
+#include <linux/bitmap.h>
+
+struct ttm_bo_device;
+
+struct drm_mm_node;
+
+/**
+ * struct ttm_mem_reg
+ *
+ * @mm_node: Memory manager node.
+ * @size: Requested size of memory region.
+ * @num_pages: Actual size of memory region in pages.
+ * @page_alignment: Page alignment.
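+ * @mem_type: Memory type of the placement.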
+ * @placement: Placement flags.
+ *
+ * Structure indicating the placement and space resources used by a
+ * buffer object.
+ */
+
+struct ttm_mem_reg {
+       struct drm_mm_node *mm_node;
+       unsigned long size;
+       unsigned long num_pages;
+       uint32_t page_alignment;
+       uint32_t mem_type;
+       uint32_t placement;
+};
+
+/**
+ * enum ttm_bo_type
+ *
+ * @ttm_bo_type_device:        These are 'normal' buffers that can
+ * be mmapped by user space. Each of these bos occupy a slot in the
+ * device address space, that can be used for normal vm operations.
+ *
+ * @ttm_bo_type_user: These are user-space memory areas that are made
+ * available to the GPU by mapping the buffer pages into the GPU aperture
+ * space. These buffers cannot be mmapped from the device address space.
+ *
+ * @ttm_bo_type_kernel: These buffers are like ttm_bo_type_device buffers,
+ * but they cannot be accessed from user-space. For kernel-only use.
+ */
+
+enum ttm_bo_type {
+       ttm_bo_type_device,
+       ttm_bo_type_user,
+       ttm_bo_type_kernel
+};
+
+struct ttm_tt;
+
+/**
+ * struct ttm_buffer_object
+ *
+ * @bdev: Pointer to the buffer object device structure.
+ * @buffer_start: The virtual user-space start address of ttm_bo_type_user
+ * buffers.
+ * @type: The bo type.
+ * @destroy: Destruction function. If NULL, kfree is used.
+ * @num_pages: Actual number of pages.
+ * @addr_space_offset: Address space offset.
+ * @acc_size: Accounted size for this object.
+ * @kref: Reference count of this buffer object. When this refcount reaches
+ * zero, the object is put on the delayed delete list.
+ * @list_kref: List reference count of this buffer object. This member is
+ * used to avoid destruction while the buffer object is still on a list.
+ * LRU lists may keep one refcount, the delayed delete list keeps one, and
+ * kref != 0 keeps one refcount. When this refcount reaches zero,
+ * the object is destroyed.
+ * @event_queue: Queue for processes waiting on buffer object status change.
+ * @lock: spinlock protecting mostly synchronization members.
+ * @proposed_placement: Proposed placement for the buffer. Changed only by the
+ * creator prior to validation as opposed to bo->mem.proposed_flags which is
+ * changed by the implementation prior to a buffer move if it wants to outsmart
+ * the buffer creator / user. This latter happens, for example, at eviction.
+ * @mem: structure describing current placement.
+ * @persistant_swap_storage: Usually the swap storage is deleted for buffers
+ * pinned in physical memory. If this behaviour is not desired, this member
+ * holds a pointer to a persistant shmem object.
+ * @ttm: TTM structure holding system pages.
+ * @evicted: Whether the object was evicted without user-space knowing.
+ * @cpu_writes: For synchronization. Number of cpu writers.
+ * @lru: List head for the lru list.
+ * @ddestroy: List head for the delayed destroy list.
+ * @swap: List head for swap LRU list.
+ * @val_seq: Sequence of the validation holding the @reserved lock.
+ * Used to avoid starvation when many processes compete to validate the
+ * buffer. This member is protected by the bo_device::lru_lock.
+ * @seq_valid: The value of @val_seq is valid. This value is protected by
+ * the bo_device::lru_lock.
+ * @reserved: Deadlock-free lock used for synchronization state transitions.
+ * @sync_obj_arg: Opaque argument to synchronization object function.
+ * @sync_obj: Pointer to a synchronization object.
+ * @priv_flags: Flags describing buffer object internal state.
+ * @vm_rb: Rb node for the vm rb tree.
+ * @vm_node: Address space manager node.
+ * @offset: The current GPU offset, which can have different meanings
+ * depending on the memory type. For SYSTEM type memory, it should be 0.
+ * @cur_placement: Hint of current placement.
+ *
+ * Base class for TTM buffer object, that deals with data placement and CPU
+ * mappings. GPU mappings are really up to the driver, but for simpler GPUs
+ * the driver can usually use the placement offset @offset directly as the
+ * GPU virtual address. For drivers implementing multiple
+ * GPU memory manager contexts, the driver should manage the address space
+ * in these contexts separately and use these objects to get the correct
+ * placement and caching for these GPU maps. This makes it possible to use
+ * these objects for even quite elaborate memory management schemes.
+ * The destroy member and the API visibility of this object make it possible
+ * to derive driver-specific types.
+ */
+
+struct ttm_buffer_object {
+       /**
+        * Members constant at init.
+        */
+
+       struct ttm_bo_device *bdev;
+       unsigned long buffer_start;
+       enum ttm_bo_type type;
+       void (*destroy) (struct ttm_buffer_object *);
+       unsigned long num_pages;
+       uint64_t addr_space_offset;
+       size_t acc_size;
+
+       /**
+        * Members not needing protection.
+        */
+
+       struct kref kref;
+       struct kref list_kref;
+       wait_queue_head_t event_queue;
+       spinlock_t lock;
+
+       /**
+        * Members protected by the bo::reserved lock.
+        */
+
+       uint32_t proposed_placement;
+       struct ttm_mem_reg mem;
+       struct file *persistant_swap_storage;
+       struct ttm_tt *ttm;
+       bool evicted;
+
+       /**
+        * Members protected by the bo::reserved lock only when written to.
+        */
+
+       atomic_t cpu_writers;
+
+       /**
+        * Members protected by the bdev::lru_lock.
+        */
+
+       struct list_head lru;
+       struct list_head ddestroy;
+       struct list_head swap;
+       uint32_t val_seq;
+       bool seq_valid;
+
+       /**
+        * Members protected by the bdev::lru_lock
+        * only when written to.
+        */
+
+       atomic_t reserved;
+
+
+       /**
+        * Members protected by the bo::lock
+        */
+
+       void *sync_obj_arg;
+       void *sync_obj;
+       unsigned long priv_flags;
+
+       /**
+        * Members protected by the bdev::vm_lock
+        */
+
+       struct rb_node vm_rb;
+       struct drm_mm_node *vm_node;
+
+
+       /**
+        * Special members that are protected by the reserve lock
+        * and the bo::lock when written to. Can be read with
+        * either of these locks held.
+        */
+
+       unsigned long offset;
+       uint32_t cur_placement;
+};
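+
+/*
+ * Illustrative derivation sketch (hypothetical "foo" driver), relying on the
+ * @destroy member described above: a driver object embeds a
+ * ttm_buffer_object and frees the containing structure in its destroy
+ * callback.
+ *
+ *     struct foo_bo {
+ *             struct ttm_buffer_object tbo;
+ *             ... driver private state ...
+ *     };
+ *
+ *     static void foo_bo_destroy(struct ttm_buffer_object *tbo)
+ *     {
+ *             struct foo_bo *fbo = container_of(tbo, struct foo_bo, tbo);
+ *
+ *             kfree(fbo);
+ *     }
+ */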
+
+/**
+ * struct ttm_bo_kmap_obj
+ *
+ * @virtual: The current kernel virtual address.
+ * @page: The page when kmap'ing a single page.
+ * @bo_kmap_type: Type of bo_kmap.
+ *
+ * Object describing a kernel mapping. Since a TTM bo may be located
+ * in various memory types with various caching policies, the
+ * mapping can either be an ioremap, a vmap, a kmap or part of a
+ * premapped region.
+ */
+
+struct ttm_bo_kmap_obj {
+       void *virtual;
+       struct page *page;
+       enum {
+               ttm_bo_map_iomap,
+               ttm_bo_map_vmap,
+               ttm_bo_map_kmap,
+               ttm_bo_map_premapped,
+       } bo_kmap_type;
+};
+
+/**
+ * ttm_bo_reference - reference a struct ttm_buffer_object
+ *
+ * @bo: The buffer object.
+ *
+ * Returns a refcounted pointer to a buffer object.
+ */
+
+static inline struct ttm_buffer_object *
+ttm_bo_reference(struct ttm_buffer_object *bo)
+{
+       kref_get(&bo->kref);
+       return bo;
+}
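+
+/*
+ * Illustrative pairing sketch: a reference taken with ttm_bo_reference() is
+ * dropped with ttm_bo_unref(), which also clears the pointer.
+ *
+ *     struct ttm_buffer_object *tmp = ttm_bo_reference(bo);
+ *     ...
+ *     ttm_bo_unref(&tmp);
+ */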
+
+/**
+ * ttm_bo_wait - wait for buffer idle.
+ *
+ * @bo:  The buffer object.
+ * @interruptible:  Use interruptible wait.
+ * @no_wait:  Return immediately if buffer is busy.
+ *
+ * This function must be called with the bo::mutex held, and makes
+ * sure any previous rendering to the buffer is completed.
+ * Note: It might be necessary to block validations before the
+ * wait by reserving the buffer.
+ * Returns -EBUSY if no_wait is true and the buffer is busy.
+ * Returns -ERESTART if interrupted by a signal.
+ */
+extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
+                      bool interruptible, bool no_wait);
+/**
+ * ttm_buffer_object_validate
+ *
+ * @bo: The buffer object.
+ * @proposed_placement: Proposed_placement for the buffer object.
+ * @interruptible: Sleep interruptible if sleeping.
+ * @no_wait: Return immediately if the buffer is busy.
+ *
+ * Changes placement and caching policy of the buffer object
+ * according to bo::proposed_flags.
+ * Returns
+ * -EINVAL on invalid proposed_flags.
+ * -ENOMEM on out-of-memory condition.
+ * -EBUSY if no_wait is true and buffer busy.
+ * -ERESTART if interrupted by a signal.
+ */
+extern int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
+                                     uint32_t proposed_placement,
+                                     bool interruptible, bool no_wait);
+/**
+ * ttm_bo_unref
+ *
+ * @bo: The buffer object.
+ *
+ * Unreference and clear a pointer to a buffer object.
+ */
+extern void ttm_bo_unref(struct ttm_buffer_object **bo);
+
+/**
+ * ttm_bo_synccpu_write_grab
+ *
+ * @bo: The buffer object:
+ * @no_wait: Return immediately if buffer is busy.
+ *
+ * Synchronizes a buffer object for CPU RW access. This means
+ * blocking command submission that affects the buffer and
+ * waiting for buffer idle. This lock is recursive.
+ * Returns
+ * -EBUSY if the buffer is busy and no_wait is true.
+ * -ERESTART if interrupted by a signal.
+ */
+
+extern int
+ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait);
+/**
+ * ttm_bo_synccpu_write_release:
+ *
+ * @bo : The buffer object.
+ *
+ * Releases a synccpu lock.
+ */
+extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
+
+/**
+ * ttm_buffer_object_init
+ *
+ * @bdev: Pointer to a ttm_bo_device struct.
+ * @bo: Pointer to a ttm_buffer_object to be initialized.
+ * @size: Requested size of buffer object.
+ * @type: Requested type of buffer object.
+ * @flags: Initial placement flags.
+ * @page_alignment: Data alignment in pages.
+ * @buffer_start: Virtual address of user space data backing a
+ * user buffer object.
+ * @interruptible: If needing to sleep to wait for GPU resources,
+ * sleep interruptible.
+ * @persistant_swap_storage: Usually the swap storage is deleted for buffers
+ * pinned in physical memory. If this behaviour is not desired, this member
+ * holds a pointer to a persistant shmem object. Typically, this would
+ * point to the shmem object backing a GEM object if TTM is used to back a
+ * GEM user interface.
+ * @acc_size: Accounted size for this object.
+ * @destroy: Destroy function. Use NULL for kfree().
+ *
+ * This function initializes a pre-allocated struct ttm_buffer_object.
+ * As this object may be part of a larger structure, this function,
+ * together with the @destroy function,
+ * enables driver-specific objects derived from a ttm_buffer_object.
+ * On successful return, the object kref and list_kref are set to 1.
+ * Returns
+ * -ENOMEM: Out of memory.
+ * -EINVAL: Invalid placement flags.
+ * -ERESTART: Interrupted by signal while sleeping waiting for resources.
+ */
+
+extern int ttm_buffer_object_init(struct ttm_bo_device *bdev,
+                                 struct ttm_buffer_object *bo,
+                                 unsigned long size,
+                                 enum ttm_bo_type type,
+                                 uint32_t flags,
+                                 uint32_t page_alignment,
+                                 unsigned long buffer_start,
+                                 bool interruptible,
+                                 struct file *persistant_swap_storage,
+                                 size_t acc_size,
+                                 void (*destroy) (struct ttm_buffer_object *));
+/**
+ * ttm_buffer_object_create
+ *
+ * @bdev: Pointer to a ttm_bo_device struct.
+ * @bo: Pointer to a ttm_buffer_object to be initialized.
+ * @size: Requested size of buffer object.
+ * @type: Requested type of buffer object.
+ * @flags: Initial placement flags.
+ * @page_alignment: Data alignment in pages.
+ * @buffer_start: Virtual address of user space data backing a
+ * user buffer object.
+ * @interruptible: If needing to sleep while waiting for GPU resources,
+ * sleep interruptible.
+ * @persistant_swap_storage: Usually the swap storage is deleted for buffers
+ * pinned in physical memory. If this behaviour is not desired, this member
+ * holds a pointer to a persistant shmem object. Typically, this would
+ * point to the shmem object backing a GEM object if TTM is used to back a
+ * GEM user interface.
+ * @p_bo: On successful completion *p_bo points to the created object.
+ *
+ * This function allocates a ttm_buffer_object, and then calls
+ * ttm_buffer_object_init on that object.
+ * The destroy function is set to kfree().
+ * Returns
+ * -ENOMEM: Out of memory.
+ * -EINVAL: Invalid placement flags.
+ * -ERESTART: Interrupted by signal while waiting for resources.
+ */
+
+extern int ttm_buffer_object_create(struct ttm_bo_device *bdev,
+                                   unsigned long size,
+                                   enum ttm_bo_type type,
+                                   uint32_t flags,
+                                   uint32_t page_alignment,
+                                   unsigned long buffer_start,
+                                   bool interruptible,
+                                   struct file *persistant_swap_storage,
+                                   struct ttm_buffer_object **p_bo);
+
+/**
+ * ttm_bo_check_placement
+ *
+ * @bo: the buffer object.
+ * @set_flags: placement flags to set.
+ * @clr_flags: placement flags to clear.
+ *
+ * Performs minimal validity checking on an intended change of
+ * placement flags.
+ * Returns
+ * -EINVAL: Intended change is invalid or not allowed.
+ */
+
+extern int ttm_bo_check_placement(struct ttm_buffer_object *bo,
+                                 uint32_t set_flags, uint32_t clr_flags);
+
+/**
+ * ttm_bo_init_mm
+ *
+ * @bdev: Pointer to a ttm_bo_device struct.
+ * @mem_type: The memory type.
+ * @p_offset: offset of the managed area in pages.
+ * @p_size: size of the managed area in pages.
+ *
+ * Initialize a manager for a given memory type.
+ * Note: if part of driver firstopen, it must be protected from a
+ * potentially racing lastclose.
+ * Returns:
+ * -EINVAL: invalid size or memory type.
+ * -ENOMEM: Not enough memory.
+ * May also return driver-specified errors.
+ */
+
+extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
+                         unsigned long p_offset, unsigned long p_size);
+/**
+ * ttm_bo_clean_mm
+ *
+ * @bdev: Pointer to a ttm_bo_device struct.
+ * @mem_type: The memory type.
+ *
+ * Take down a manager for a given memory type after first walking
+ * the LRU list to evict any buffers left alive.
+ *
+ * Normally, this function is part of lastclose() or unload(), and at that
+ * point there shouldn't be any buffers left created by user-space, since
+ * they should've been removed by the file descriptor release() method.
+ * However, before this function is run, make sure to signal all sync objects,
+ * and verify that the delayed delete queue is empty. The driver must also
+ * make sure that there are no NO_EVICT buffers present in this memory type
+ * when the call is made.
+ *
+ * If this function is part of a VT switch, the caller must make sure that
+ * there are no applications currently validating buffers before this
+ * function is called. The caller can do that by first taking the
+ * struct ttm_bo_device::ttm_lock in write mode.
+ *
+ * Returns:
+ * -EINVAL: invalid or uninitialized memory type.
+ * -EBUSY: There are still buffers left in this memory type.
+ */
+
+extern int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type);
+
+/**
+ * ttm_bo_evict_mm
+ *
+ * @bdev: Pointer to a ttm_bo_device struct.
+ * @mem_type: The memory type.
+ *
+ * Evicts all buffers on the lru list of the memory type.
+ * This is normally part of a VT switch or an
+ * out-of-memory-space-due-to-fragmentation handler.
+ * The caller must make sure that there are no other processes
+ * currently validating buffers, and can do that by taking the
+ * struct ttm_bo_device::ttm_lock in write mode.
+ *
+ * Returns:
+ * -EINVAL: Invalid or uninitialized memory type.
+ * -ERESTART: The call was interrupted by a signal while waiting to
+ * evict a buffer.
+ */
+
+extern int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type);
+
+/**
+ * ttm_kmap_obj_virtual
+ *
+ * @map: A struct ttm_bo_kmap_obj returned from ttm_bo_kmap.
+ * @is_iomem: Pointer to an integer that on return indicates 1 if the
+ * virtual map is io memory, 0 if normal memory.
+ *
+ * Returns the virtual address of a buffer object area mapped by ttm_bo_kmap.
+ * If *is_iomem is 1 on return, the virtual address points to an io memory
+ * area that should strictly be accessed using the iowriteXX() and similar
+ * functions.
+ */
+
+static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map,
+                                        bool *is_iomem)
+{
+       *is_iomem = (map->bo_kmap_type == ttm_bo_map_iomap ||
+                    map->bo_kmap_type == ttm_bo_map_premapped);
+       return map->virtual;
+}
+
+/**
+ * ttm_bo_kmap
+ *
+ * @bo: The buffer object.
+ * @start_page: The first page to map.
+ * @num_pages: Number of pages to map.
+ * @map: pointer to a struct ttm_bo_kmap_obj representing the map.
+ *
+ * Sets up a kernel virtual mapping, using ioremap, vmap or kmap to the
+ * data in the buffer object. The ttm_kmap_obj_virtual function can then be
+ * used to obtain a virtual address to the data.
+ *
+ * Returns
+ * -ENOMEM: Out of memory.
+ * -EINVAL: Invalid range.
+ */
+
+extern int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
+                      unsigned long num_pages, struct ttm_bo_kmap_obj *map);
+
+/**
+ * ttm_bo_kunmap
+ *
+ * @map: Object describing the map to unmap.
+ *
+ * Unmaps a kernel map set up by ttm_bo_kmap.
+ */
+
+extern void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
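+
+/*
+ * Illustrative usage sketch (error handling abbreviated): map the first page
+ * of a buffer object, query the mapping type, then unmap it.
+ *
+ *     struct ttm_bo_kmap_obj map;
+ *     bool is_iomem;
+ *     void *virtual;
+ *
+ *     if (ttm_bo_kmap(bo, 0, 1, &map) == 0) {
+ *             virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
+ *             ... access the page, using iowriteXX() when is_iomem ...
+ *             ttm_bo_kunmap(&map);
+ *     }
+ */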
+
+#if 0
+#endif
+
+/**
+ * ttm_fbdev_mmap - mmap fbdev memory backed by a ttm buffer object.
+ *
+ * @vma:       vma as input from the fbdev mmap method.
+ * @bo:        The bo backing the address space. The address space will
+ * have the same size as the bo, and start at offset 0.
+ *
+ * This function is intended to be called by the fbdev mmap method
+ * if the fbdev address space is to be backed by a bo.
+ */
+
+extern int ttm_fbdev_mmap(struct vm_area_struct *vma,
+                         struct ttm_buffer_object *bo);
+
+/**
+ * ttm_bo_mmap - mmap out of the ttm device address space.
+ *
+ * @filp:      filp as input from the mmap method.
+ * @vma:       vma as input from the mmap method.
+ * @bdev:      Pointer to the ttm_bo_device with the address space manager.
+ *
+ * This function is intended to be called by the device mmap method
+ * if the device address space is to be backed by the bo manager.
+ */
+
+extern int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
+                      struct ttm_bo_device *bdev);
+
+/**
+ * ttm_bo_io
+ *
+ * @bdev:      Pointer to the struct ttm_bo_device.
+ * @filp:      Pointer to the struct file attempting to read / write.
+ * @wbuf:      User-space pointer to address of buffer to write. NULL on read.
+ * @rbuf:      User-space pointer to address of buffer to read into.
+ * NULL on write.
+ * @count:     Number of bytes to read / write.
+ * @f_pos:     Pointer to current file position.
+ * @write:     1 for write, 0 for read.
+ *
+ * This function implements read / write into ttm buffer objects, and is
+ * intended to be called from the fops::read and fops::write methods.
+ * Returns:
+ * See man (2) write, man(2) read. In particular,
+ * the function may return -EINTR if
+ * interrupted by a signal.
+ */
+
+extern ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
+                        const char __user *wbuf, char __user *rbuf,
+                        size_t count, loff_t *f_pos, bool write);
+
+extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev);
+
+#endif
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
new file mode 100644 (file)
index 0000000..62ed733
--- /dev/null
@@ -0,0 +1,867 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+#ifndef _TTM_BO_DRIVER_H_
+#define _TTM_BO_DRIVER_H_
+
+#include "ttm/ttm_bo_api.h"
+#include "ttm/ttm_memory.h"
+#include "drm_mm.h"
+#include "linux/workqueue.h"
+#include "linux/fs.h"
+#include "linux/spinlock.h"
+
+struct ttm_backend;
+
+struct ttm_backend_func {
+       /**
+        * struct ttm_backend_func member populate
+        *
+        * @backend: Pointer to a struct ttm_backend.
+        * @num_pages: Number of pages to populate.
+        * @pages: Array of pointers to ttm pages.
+        * @dummy_read_page: Page to be used instead of NULL pages in the
+        * array @pages.
+        *
+        * Populate the backend with ttm pages. Depending on the backend,
+        * it may or may not copy the @pages array.
+        */
+       int (*populate) (struct ttm_backend *backend,
+                        unsigned long num_pages, struct page **pages,
+                        struct page *dummy_read_page);
+       /**
+        * struct ttm_backend_func member clear
+        *
+        * @backend: Pointer to a struct ttm_backend.
+        *
+        * This is an "unpopulate" function. Release all resources
+        * allocated with populate.
+        */
+       void (*clear) (struct ttm_backend *backend);
+
+       /**
+        * struct ttm_backend_func member bind
+        *
+        * @backend: Pointer to a struct ttm_backend.
+        * @bo_mem: Pointer to a struct ttm_mem_reg describing the
+        * memory type and location for binding.
+        *
+        * Bind the backend pages into the aperture in the location
+        * indicated by @bo_mem. This function should be able to handle
+        * differences between aperture- and system page sizes.
+        */
+       int (*bind) (struct ttm_backend *backend, struct ttm_mem_reg *bo_mem);
+
+       /**
+        * struct ttm_backend_func member unbind
+        *
+        * @backend: Pointer to a struct ttm_backend.
+        *
+        * Unbind previously bound backend pages. This function should be
+        * able to handle differences between aperture- and system page sizes.
+        */
+       int (*unbind) (struct ttm_backend *backend);
+
+       /**
+        * struct ttm_backend_func member destroy
+        *
+        * @backend: Pointer to a struct ttm_backend.
+        *
+        * Destroy the backend.
+        */
+       void (*destroy) (struct ttm_backend *backend);
+};
+
+/**
+ * struct ttm_backend
+ *
+ * @bdev: Pointer to a struct ttm_bo_device.
+ * @flags: For driver use.
+ * @func: Pointer to a struct ttm_backend_func that describes
+ * the backend methods.
+ *
+ */
+
+struct ttm_backend {
+       struct ttm_bo_device *bdev;
+       uint32_t flags;
+       struct ttm_backend_func *func;
+};
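
As a rough sketch of how a driver plugs into this interface, the stub below fills in a ttm_backend_func table and hands it to a struct ttm_backend; every mydrv_* name is invented for illustration, and the real work (remembering the page array, programming the GART) is only hinted at in comments.

/* All mydrv_* names are invented for this sketch. */
static int mydrv_backend_populate(struct ttm_backend *backend,
                                  unsigned long num_pages, struct page **pages,
                                  struct page *dummy_read_page)
{
        /* Remember (or copy) the page array for a later bind(). */
        return 0;
}

static void mydrv_backend_clear(struct ttm_backend *backend)
{
        /* Release whatever populate() stored. */
}

static int mydrv_backend_bind(struct ttm_backend *backend,
                              struct ttm_mem_reg *bo_mem)
{
        /* Program the GART / aperture entries described by @bo_mem,
         * handling any aperture vs. system page-size difference. */
        return 0;
}

static int mydrv_backend_unbind(struct ttm_backend *backend)
{
        /* Clear the GART / aperture entries set up by bind(). */
        return 0;
}

static void mydrv_backend_destroy(struct ttm_backend *backend)
{
        kfree(backend);         /* assuming it was kzalloc()ed by the driver */
}

static struct ttm_backend_func mydrv_backend_func = {
        .populate = mydrv_backend_populate,
        .clear    = mydrv_backend_clear,
        .bind     = mydrv_backend_bind,
        .unbind   = mydrv_backend_unbind,
        .destroy  = mydrv_backend_destroy,
};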
+
+#define TTM_PAGE_FLAG_VMALLOC         (1 << 0)
+#define TTM_PAGE_FLAG_USER            (1 << 1)
+#define TTM_PAGE_FLAG_USER_DIRTY      (1 << 2)
+#define TTM_PAGE_FLAG_WRITE           (1 << 3)
+#define TTM_PAGE_FLAG_SWAPPED         (1 << 4)
+#define TTM_PAGE_FLAG_PERSISTANT_SWAP (1 << 5)
+#define TTM_PAGE_FLAG_ZERO_ALLOC      (1 << 6)
+
+enum ttm_caching_state {
+       tt_uncached,
+       tt_wc,
+       tt_cached
+};
+
+/**
+ * struct ttm_tt
+ *
+ * @dummy_read_page: Page to map where the ttm_tt page array contains a NULL
+ * pointer.
+ * @pages: Array of pages backing the data.
+ * @first_himem_page: Himem pages are put last in the page array, which
+ * enables us to run caching attribute changes on only the first part
+ * of the page array containing lomem pages. This is the index of the
+ * first himem page.
+ * @last_lomem_page: Index of the last lomem page in the page array.
+ * @num_pages: Number of pages in the page array.
+ * @bdev: Pointer to the current struct ttm_bo_device.
+ * @be: Pointer to the ttm backend.
+ * @tsk: The task for user ttm.
+ * @start: virtual address for user ttm.
+ * @swap_storage: Pointer to shmem struct file for swap storage.
+ * @caching_state: The current caching state of the pages.
+ * @state: The current binding state of the pages.
+ *
+ * This is a structure holding the pages, caching- and aperture binding
+ * status for a buffer object that isn't backed by fixed (VRAM / AGP)
+ * memory.
+ */
+
+struct ttm_tt {
+       struct page *dummy_read_page;
+       struct page **pages;
+       long first_himem_page;
+       long last_lomem_page;
+       uint32_t page_flags;
+       unsigned long num_pages;
+       struct ttm_bo_device *bdev;
+       struct ttm_backend *be;
+       struct task_struct *tsk;
+       unsigned long start;
+       struct file *swap_storage;
+       enum ttm_caching_state caching_state;
+       enum {
+               tt_bound,
+               tt_unbound,
+               tt_unpopulated,
+       } state;
+};
+
+#define TTM_MEMTYPE_FLAG_FIXED         (1 << 0)        /* Fixed (on-card) PCI memory */
+#define TTM_MEMTYPE_FLAG_MAPPABLE      (1 << 1)        /* Memory mappable */
+#define TTM_MEMTYPE_FLAG_NEEDS_IOREMAP (1 << 2)        /* Fixed memory needs ioremap
+                                                  before kernel access. */
+#define TTM_MEMTYPE_FLAG_CMA           (1 << 3)        /* Can't map aperture */
+
+/**
+ * struct ttm_mem_type_manager
+ *
+ * @has_type: The memory type has been initialized.
+ * @use_type: The memory type is enabled.
+ * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
+ * managed by this memory type.
+ * @gpu_offset: If used, the GPU offset of the first managed page of
+ * fixed memory or the first managed location in an aperture.
+ * @io_offset: The io_offset of the first managed page of IO memory or
+ * the first managed location in an aperture. For TTM_MEMTYPE_FLAG_CMA
+ * memory, this should be set to NULL.
+ * @io_size: The size of a managed IO region (fixed memory or aperture).
+ * @io_addr: Virtual kernel address if the io region is pre-mapped. For
+ * TTM_MEMTYPE_FLAG_NEEDS_IOREMAP there is no pre-mapped io map and
+ * @io_addr should be set to NULL.
+ * @size: Size of the managed region.
+ * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
+ * as defined in ttm_placement_common.h
+ * @default_caching: The default caching policy used for a buffer object
+ * placed in this memory type if the user doesn't provide one.
+ * @manager: The range manager used for this memory type. FIXME: If the aperture
+ * has a page size different from the underlying system, the granularity
+ * of this manager should take care of this. But the range allocating code
+ * in ttm_bo.c needs to be modified for this.
+ * @lru: The lru list for this memory type.
+ *
+ * This structure is used to identify and manage memory types for a device.
+ * It's set up by the ttm_bo_driver::init_mem_type method.
+ */
+
+struct ttm_mem_type_manager {
+
+       /*
+        * No protection. Constant from start.
+        */
+
+       bool has_type;
+       bool use_type;
+       uint32_t flags;
+       unsigned long gpu_offset;
+       unsigned long io_offset;
+       unsigned long io_size;
+       void *io_addr;
+       uint64_t size;
+       uint32_t available_caching;
+       uint32_t default_caching;
+
+       /*
+        * Protected by the bdev->lru_lock.
+        * TODO: Consider one lru_lock per ttm_mem_type_manager.
+        * Plays ill with list removal, though.
+        */
+
+       struct drm_mm manager;
+       struct list_head lru;
+};
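
A hedged sketch of an init_mem_type hook that fills in the system and VRAM managers; the TTM_PL_* placement and caching flags come from ttm_placement.h added later in this patch, and the VRAM base and size values are made up for the example.

static int mydrv_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                               struct ttm_mem_type_manager *man)
{
        switch (type) {
        case TTM_PL_SYSTEM:
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_VRAM:
                man->flags = TTM_MEMTYPE_FLAG_FIXED |
                             TTM_MEMTYPE_FLAG_MAPPABLE |
                             TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
                man->gpu_offset = 0;
                man->io_offset = 0xd0000000;            /* made-up BAR base  */
                man->io_size = 256 * 1024 * 1024;       /* made-up VRAM size */
                man->io_addr = NULL;                    /* ioremapped later  */
                man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
                man->default_caching = TTM_PL_FLAG_WC;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}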
+
+/**
+ * struct ttm_bo_driver
+ *
+ * @mem_type_prio: Priority array of memory types to place a buffer object in
+ * if it fits without evicting buffers from any of these memory types.
+ * @mem_busy_prio: Priority array of memory types to place a buffer object in
+ * if it needs to evict buffers to make room.
+ * @num_mem_type_prio: Number of elements in the @mem_type_prio array.
+ * @num_mem_busy_prio: Number of elements in the @mem_busy_prio array.

+ * @create_ttm_backend_entry: Callback to create a struct ttm_backend.
+ * @invalidate_caches: Callback to invalidate read caches when a buffer object
+ * has been evicted.
+ * @init_mem_type: Callback to initialize a struct ttm_mem_type_manager
+ * structure.
+ * @evict_flags: Callback to obtain placement flags when a buffer is evicted.
+ * @move: Callback for a driver to hook in accelerated functions to
+ * move a buffer.
+ * If set to NULL, a potentially slow memcpy() move is used.
+ * @sync_obj_signaled: See ttm_fence_api.h
+ * @sync_obj_wait: See ttm_fence_api.h
+ * @sync_obj_flush: See ttm_fence_api.h
+ * @sync_obj_unref: See ttm_fence_api.h
+ * @sync_obj_ref: See ttm_fence_api.h
+ */
+
+struct ttm_bo_driver {
+       const uint32_t *mem_type_prio;
+       const uint32_t *mem_busy_prio;
+       uint32_t num_mem_type_prio;
+       uint32_t num_mem_busy_prio;
+
+       /**
+        * struct ttm_bo_driver member create_ttm_backend_entry
+        *
+        * @bdev: The buffer object device.
+        *
+        * Create a driver specific struct ttm_backend.
+        */
+
+       struct ttm_backend *(*create_ttm_backend_entry)
+        (struct ttm_bo_device *bdev);
+
+       /**
+        * struct ttm_bo_driver member invalidate_caches
+        *
+        * @bdev: the buffer object device.
+        * @flags: new placement of the rebound buffer object.
+        *
+        * A previously evicted buffer has been rebound in a
+        * potentially new location. Tell the driver that it might
+        * consider invalidating read (texture) caches on the next command
+        * submission as a consequence.
+        */
+
+       int (*invalidate_caches) (struct ttm_bo_device *bdev, uint32_t flags);
+       int (*init_mem_type) (struct ttm_bo_device *bdev, uint32_t type,
+                             struct ttm_mem_type_manager *man);
+       /**
+        * struct ttm_bo_driver member evict_flags:
+        *
+        * @bo: the buffer object to be evicted
+        *
+        * Return the bo flags for a buffer which is not mapped to the hardware.
+        * These will be placed in proposed_flags so that when the move is
+        * finished, they'll end up in bo->mem.flags
+        */
+
+        uint32_t(*evict_flags) (struct ttm_buffer_object *bo);
+       /**
+        * struct ttm_bo_driver member move:
+        *
+        * @bo: the buffer to move
+        * @evict: whether this motion is evicting the buffer from
+        * the graphics address space
+        * @interruptible: Use interruptible sleeps if possible when sleeping.
+        * @no_wait: whether this should give up and return -EBUSY
+        * if this move would require sleeping
+        * @new_mem: the new memory region receiving the buffer
+        *
+        * Move a buffer between two memory regions.
+        */
+       int (*move) (struct ttm_buffer_object *bo,
+                    bool evict, bool interruptible,
+                    bool no_wait, struct ttm_mem_reg *new_mem);
+
+       /**
+        * struct ttm_bo_driver_member verify_access
+        *
+        * @bo: Pointer to a buffer object.
+        * @filp: Pointer to a struct file trying to access the object.
+        *
+        * Called from the map / write / read methods to verify that the
+        * caller is permitted to access the buffer object.
+        * This member may be set to NULL, which will refuse this kind of
+        * access for all buffer objects.
+        * This function should return 0 if access is granted, -EPERM otherwise.
+        */
+       int (*verify_access) (struct ttm_buffer_object *bo,
+                             struct file *filp);
+
+       /**
+        * In case a driver writer dislikes the TTM fence objects,
+        * the driver writer can replace those with sync objects of
+        * his / her own. If it turns out that no driver writer is
+        * using these, I suggest we remove these hooks and plug in
+        * fences directly. The bo driver needs the following functionality:
+        * See the corresponding functions in the fence object API
+        * documentation.
+        */
+
+       bool (*sync_obj_signaled) (void *sync_obj, void *sync_arg);
+       int (*sync_obj_wait) (void *sync_obj, void *sync_arg,
+                             bool lazy, bool interruptible);
+       int (*sync_obj_flush) (void *sync_obj, void *sync_arg);
+       void (*sync_obj_unref) (void **sync_obj);
+       void *(*sync_obj_ref) (void *sync_obj);
+};
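
A sketch of how the pieces above are typically tied together in a driver-side vtable; all mydrv_* handlers are hypothetical, and .move is left NULL so that TTM falls back to the memcpy path.

static uint32_t mydrv_mem_prio[] = { TTM_PL_VRAM, TTM_PL_TT, TTM_PL_SYSTEM };

static struct ttm_bo_driver mydrv_bo_driver = {
        .mem_type_prio = mydrv_mem_prio,
        .mem_busy_prio = mydrv_mem_prio,
        .num_mem_type_prio = ARRAY_SIZE(mydrv_mem_prio),
        .num_mem_busy_prio = ARRAY_SIZE(mydrv_mem_prio),
        .create_ttm_backend_entry = mydrv_create_backend,
        .invalidate_caches = mydrv_invalidate_caches,
        .init_mem_type = mydrv_init_mem_type,
        .evict_flags = mydrv_evict_flags,
        .move = NULL,                   /* fall back to the memcpy move */
        .verify_access = mydrv_verify_access,
        .sync_obj_signaled = mydrv_fence_signaled,
        .sync_obj_wait = mydrv_fence_wait,
        .sync_obj_flush = mydrv_fence_flush,
        .sync_obj_unref = mydrv_fence_unref,
        .sync_obj_ref = mydrv_fence_ref,
};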
+
+#define TTM_NUM_MEM_TYPES 8
+
+#define TTM_BO_PRIV_FLAG_MOVING  0     /* Buffer object is moving and needs
+                                          idling before CPU mapping */
+#define TTM_BO_PRIV_FLAG_MAX 1
+/**
+ * struct ttm_bo_device - Buffer object driver device-specific data.
+ *
+ * @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
+ * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
+ * @count: Current number of buffer objects.
+ * @pages: Current number of pinned pages.
+ * @dummy_read_page: Pointer to a dummy page used for mapping requests
+ * of unpopulated pages.
+ * @shrink: A shrink callback object used for buffer object swap.
+ * @ttm_bo_extra_size: Extra size (sizeof(struct ttm_buffer_object) excluded)
+ * used by a buffer object. This is excluding page arrays and backing pages.
+ * @ttm_bo_size: This is @ttm_bo_extra_size + sizeof(struct ttm_buffer_object).
+ * @man: An array of mem_type_managers.
+ * @addr_space_mm: Range manager for the device address space.
+ * @lru_lock: Spinlock that protects the buffer+device lru lists and
+ * ddestroy lists.
+ * @nice_mode: Try nicely to wait for buffer idle when cleaning a manager.
+ * If a GPU lockup has been detected, this is forced to 0.
+ * @dev_mapping: A pointer to the struct address_space representing the
+ * device address space.
+ * @wq: Work queue structure for the delayed delete workqueue.
+ *
+ */
+
+struct ttm_bo_device {
+
+       /*
+        * Constant after bo device init / atomic.
+        */
+
+       struct ttm_mem_global *mem_glob;
+       struct ttm_bo_driver *driver;
+       struct page *dummy_read_page;
+       struct ttm_mem_shrink shrink;
+
+       size_t ttm_bo_extra_size;
+       size_t ttm_bo_size;
+
+       rwlock_t vm_lock;
+       /*
+        * Protected by the vm lock.
+        */
+       struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
+       struct rb_root addr_space_rb;
+       struct drm_mm addr_space_mm;
+
+       /*
+        * Might want to change this to one lock per manager.
+        */
+       spinlock_t lru_lock;
+       /*
+        * Protected by the lru lock.
+        */
+       struct list_head ddestroy;
+       struct list_head swap_lru;
+
+       /*
+        * Protected by load / firstopen / lastclose /unload sync.
+        */
+
+       bool nice_mode;
+       struct address_space *dev_mapping;
+
+       /*
+        * Internal protection.
+        */
+
+       struct delayed_work wq;
+};
+
+/**
+ * ttm_flag_masked
+ *
+ * @old: Pointer to the result and original value.
+ * @new: New value of bits.
+ * @mask: Mask of bits to change.
+ *
+ * Convenience function to change a number of bits identified by a mask.
+ */
+
+static inline uint32_t
+ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
+{
+       *old ^= (*old ^ new) & mask;
+       return *old;
+}
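
A small usage sketch: the TTM_PL_* values come from ttm_placement.h (added later in this patch), and only the caching bits selected by the mask are rewritten.

static void caching_update_example(void)
{
        uint32_t flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED;

        /* Switch only the caching bits to write-combined; the memory-type
         * bit (TTM_PL_FLAG_VRAM) is untouched because it is outside @mask. */
        ttm_flag_masked(&flags, TTM_PL_FLAG_WC, TTM_PL_MASK_CACHING);
        /* flags is now TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC. */
}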
+
+/**
+ * ttm_tt_create
+ *
+ * @bdev: pointer to a struct ttm_bo_device:
+ * @size: Size of the data that needs backing.
+ * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+ * @dummy_read_page: See struct ttm_bo_device.
+ *
+ * Create a struct ttm_tt to back data with system memory pages.
+ * No pages are actually allocated.
+ * Returns:
+ * NULL: Out of memory.
+ */
+extern struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev,
+                                   unsigned long size,
+                                   uint32_t page_flags,
+                                   struct page *dummy_read_page);
+
+/**
+ * ttm_tt_set_user:
+ *
+ * @ttm: The struct ttm_tt to populate.
+ * @tsk: A struct task_struct for which @start is a valid user-space address.
+ * @start: A valid user-space address.
+ * @num_pages: Size in pages of the user memory area.
+ *
+ * Populate a struct ttm_tt with a user-space memory area after first pinning
+ * the pages backing it.
+ * Returns:
+ * !0: Error.
+ */
+
+extern int ttm_tt_set_user(struct ttm_tt *ttm,
+                          struct task_struct *tsk,
+                          unsigned long start, unsigned long num_pages);
+
+/**
+ * ttm_tt_bind:
+ *
+ * @ttm: The struct ttm_tt containing backing pages.
+ * @bo_mem: The struct ttm_mem_reg identifying the binding location.
+ *
+ * Bind the pages of @ttm to an aperture location identified by @bo_mem
+ */
+extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
+
+/**
+ * ttm_tt_destroy:
+ *
+ * @ttm: The struct ttm_tt.
+ *
+ * Unbind, unpopulate and destroy a struct ttm_tt.
+ */
+extern void ttm_tt_destroy(struct ttm_tt *ttm);
+
+/**
+ * ttm_tt_unbind:
+ *
+ * @ttm: The struct ttm_tt.
+ *
+ * Unbind a struct ttm_tt.
+ */
+extern void ttm_tt_unbind(struct ttm_tt *ttm);
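
A minimal sketch of the ttm_tt life cycle using only the calls declared above; @bdev, @size and @bo_mem are assumed to come from the caller, and error handling is kept to the bare minimum.

static int mydrv_tt_example(struct ttm_bo_device *bdev, unsigned long size,
                            struct ttm_mem_reg *bo_mem)
{
        struct ttm_tt *ttm;
        int ret;

        ttm = ttm_tt_create(bdev, size, 0, bdev->dummy_read_page);
        if (unlikely(ttm == NULL))
                return -ENOMEM;

        /* For a user buffer the user pages would be pinned first:
         *   ttm_tt_set_user(ttm, current, start, num_pages);  */

        ret = ttm_tt_bind(ttm, bo_mem);         /* populate + bind to aperture */
        if (unlikely(ret != 0)) {
                ttm_tt_destroy(ttm);
                return ret;
        }

        ttm_tt_unbind(ttm);                     /* unbind again ...            */
        ttm_tt_destroy(ttm);                    /* ... unpopulate and free     */
        return 0;
}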
+
+/**
+ * ttm_tt_get_page:
+ *
+ * @ttm: The struct ttm_tt.
+ * @index: Index of the desired page.
+ *
+ * Return a pointer to the struct page backing @ttm at page
+ * index @index. If the page is unpopulated, one will be allocated to
+ * populate that index.
+ *
+ * Returns:
+ * NULL on OOM.
+ */
+extern struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index);
+
+/**
+ * ttm_tt_cache_flush:
+ *
+ * @pages: An array of pointers to struct page to flush.
+ * @num_pages: Number of pages to flush.
+ *
+ * Flush the data of the indicated pages from the cpu caches.
+ * This is used when changing caching attributes of the pages from
+ * cache-coherent.
+ */
+extern void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages);
+
+/**
+ * ttm_tt_set_placement_caching:
+ *
+ * @ttm: A struct ttm_tt whose backing pages will change caching policy.
+ * @placement: Flag indicating the desired caching policy.
+ *
+ * This function will change caching policy of any default kernel mappings of
+ * the pages backing @ttm. If changing from cached to uncached or
+ * write-combined, all CPU caches will first be flushed to make sure the
+ * data of the pages hit RAM. This function may be very costly as it
+ * involves global TLB
+ * and cache flushes and potential page splitting / combining.
+ */
+extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
+extern int ttm_tt_swapout(struct ttm_tt *ttm,
+                         struct file *persistant_swap_storage);
+
+/*
+ * ttm_bo.c
+ */
+
+/**
+ * ttm_mem_reg_is_pci
+ *
+ * @bdev: Pointer to a struct ttm_bo_device.
+ * @mem: A valid struct ttm_mem_reg.
+ *
+ * Returns true if the memory described by @mem is PCI memory,
+ * false otherwise.
+ */
+extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
+                                  struct ttm_mem_reg *mem);
+
+/**
+ * ttm_bo_mem_space
+ *
+ * @bo: Pointer to a struct ttm_buffer_object, the data of which
+ * we want to allocate space for.
+ * @proposed_placement: Proposed new placement for the buffer object.
+ * @mem: A struct ttm_mem_reg.
+ * @interruptible: Use interruptible sleeps while waiting for space.
+ * @no_wait: Don't sleep waiting for space to become available.
+ *
+ * Allocate memory space for the buffer object pointed to by @bo, using
+ * the placement flags in @mem, potentially evicting other idle buffer objects.
+ * This function may sleep while waiting for space to become available.
+ * Returns:
+ * -EBUSY: No space available (only if no_wait == 1).
+ * -ENOMEM: Could not allocate memory for the buffer object, either due to
+ * fragmentation or concurrent allocators.
+ * -ERESTART: An interruptible sleep was interrupted by a signal.
+ */
+extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
+                           uint32_t proposed_placement,
+                           struct ttm_mem_reg *mem,
+                           bool interruptible, bool no_wait);
+/**
+ * ttm_bo_wait_for_cpu
+ *
+ * @bo: Pointer to a struct ttm_buffer_object.
+ * @no_wait: Don't sleep while waiting.
+ *
+ * Wait until a buffer object is no longer sync'ed for CPU access.
+ * Returns:
+ * -EBUSY: Buffer object was sync'ed for CPU access. (only if no_wait == 1).
+ * -ERESTART: An interruptible sleep was interrupted by a signal.
+ */
+
+extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait);
+
+/**
+ * ttm_bo_pci_offset - Get the PCI offset for the buffer object memory.
+ *
+ * @bdev: Pointer to a struct ttm_bo_device.
+ * @mem: A valid struct ttm_mem_reg describing the buffer object memory.
+ * @bus_base: On return the base of the PCI region.
+ * @bus_offset: On return the byte offset into the PCI region.
+ * @bus_size: On return the byte size of the buffer object or zero if
+ * the buffer object memory is not accessible through a PCI region.
+ *
+ * Returns:
+ * -EINVAL if the buffer object is currently not mappable.
+ * 0 otherwise.
+ */
+
+extern int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
+                            struct ttm_mem_reg *mem,
+                            unsigned long *bus_base,
+                            unsigned long *bus_offset,
+                            unsigned long *bus_size);
+
+extern int ttm_bo_device_release(struct ttm_bo_device *bdev);
+
+/**
+ * ttm_bo_device_init
+ *
+ * @bdev: A pointer to a struct ttm_bo_device to initialize.
+ * @mem_global: A pointer to an initialized struct ttm_mem_global.
+ * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
+ * @file_page_offset: Offset into the device address space that is available
+ * for buffer data. This ensures compatibility with other users of the
+ * address space.
+ *
+ * Initializes a struct ttm_bo_device.
+ * Returns:
+ * !0: Failure.
+ */
+extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
+                             struct ttm_mem_global *mem_glob,
+                             struct ttm_bo_driver *driver,
+                             uint64_t file_page_offset);
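
A hedged initialization sketch; struct mydrv_device with embedded bdev and mem_glob members is an assumption for the example, as is DRM_FILE_PAGE_OFFSET, which stands in for whatever mmap-offset split the driver uses.

static int mydrv_ttm_init(struct mydrv_device *mdev)
{
        int ret;

        ret = ttm_mem_global_init(&mdev->mem_glob);     /* from ttm_memory.h */
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_bo_device_init(&mdev->bdev, &mdev->mem_glob,
                                 &mydrv_bo_driver, DRM_FILE_PAGE_OFFSET);
        if (unlikely(ret != 0))
                ttm_mem_global_release(&mdev->mem_glob);
        return ret;
}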
+
+/**
+ * ttm_bo_reserve:
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @interruptible: Sleep interruptible if waiting.
+ * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
+ * @use_sequence: If @bo is already reserved, only sleep waiting for
+ * it to become unreserved if @sequence < (@bo)->sequence.
+ *
+ * Locks a buffer object for validation (or prevents other processes from
+ * locking it for validation) and removes it from lru lists, while taking
+ * a number of measures to prevent deadlocks.
+ *
+ * Deadlocks may occur when two processes try to reserve multiple buffers in
+ * different order, either by will or as a result of a buffer being evicted
+ * to make room for a buffer already reserved. (Buffers are reserved before
+ * they are evicted). The following algorithm prevents such deadlocks from
+ * occurring:
+ * 1) Buffers are reserved with the lru spinlock held. Upon successful
+ * reservation they are removed from the lru list. This stops a reserved buffer
+ * from being evicted. However the lru spinlock is released between the time
+ * a buffer is selected for eviction and the time it is reserved.
+ * Therefore a check is made when a buffer is reserved for eviction, that it
+ * is still the first buffer in the lru list, before it is removed from the
+ * list. @check_lru == 1 forces this check. If it fails, the function returns
+ * -EINVAL, and the caller should then choose a new buffer to evict and repeat
+ * the procedure.
+ * 2) Processes attempting to reserve multiple buffers other than for eviction,
+ * (typically execbuf), should first obtain a unique 32-bit
+ * validation sequence number,
+ * and call this function with @use_sequence == 1 and @sequence == the unique
+ * sequence number. If upon call of this function, the buffer object is already
+ * reserved, the validation sequence is checked against the validation
+ * sequence of the process currently reserving the buffer,
+ * and if the current validation sequence is greater than that of the process
+ * holding the reservation, the function returns -EAGAIN. Otherwise it sleeps
+ * waiting for the buffer to become unreserved, after which it retries
+ * reserving.
+ * The caller should, when receiving an -EAGAIN error
+ * release all its buffer reservations, wait for @bo to become unreserved, and
+ * then rerun the validation with the same validation sequence. This procedure
+ * will always guarantee that the process with the lowest validation sequence
+ * will eventually succeed, preventing both deadlocks and starvation.
+ *
+ * Returns:
+ * -EAGAIN: The reservation may cause a deadlock.
+ * Release all buffer reservations, wait for @bo to become unreserved and
+ * try again. (only if use_sequence == 1).
+ * -ERESTART: A wait for the buffer to become unreserved was interrupted by
+ * a signal. Release all buffer reservations and return to user-space.
+ */
+extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
+                         bool interruptible,
+                         bool no_wait, bool use_sequence, uint32_t sequence);
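
A sketch of the retry protocol described above, for a hypothetical execbuf path reserving a single buffer with a per-submission validation sequence; releasing the reservations already held before waiting is left out for brevity.

static int mydrv_reserve_for_validation(struct ttm_buffer_object *bo,
                                        uint32_t val_seq)
{
        int ret;

retry:
        ret = ttm_bo_reserve(bo, true, false, true, val_seq);
        if (ret == -EAGAIN) {
                /* A process with a lower sequence holds the buffer: release
                 * the reservations already taken (not shown), wait for @bo
                 * to become unreserved and retry with the same sequence. */
                ret = ttm_bo_wait_unreserved(bo, true);
                if (unlikely(ret != 0))
                        return ret;
                goto retry;
        }
        return ret;     /* 0 on success, -ERESTART if a signal arrived */
}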
+
+/**
+ * ttm_bo_unreserve
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ *
+ * Unreserve a previous reservation of @bo.
+ */
+extern void ttm_bo_unreserve(struct ttm_buffer_object *bo);
+
+/**
+ * ttm_bo_wait_unreserved
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ *
+ * Wait for a struct ttm_buffer_object to become unreserved.
+ * This is typically used in the execbuf code to relax cpu usage while
+ * backing off from a potential deadlock condition.
+ */
+extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
+                                 bool interruptible);
+
+/**
+ * ttm_bo_block_reservation
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @interruptible: Use interruptible sleep when waiting.
+ * @no_wait: Don't sleep, but rather return -EBUSY.
+ *
+ * Block reservation for validation by simply reserving the buffer.
+ * This is intended for single buffer use only without eviction,
+ * and thus needs no deadlock protection.
+ *
+ * Returns:
+ * -EBUSY: If no_wait == 1 and the buffer is already reserved.
+ * -ERESTART: If interruptible == 1 and the process received a signal
+ * while sleeping.
+ */
+extern int ttm_bo_block_reservation(struct ttm_buffer_object *bo,
+                                   bool interruptible, bool no_wait);
+
+/**
+ * ttm_bo_unblock_reservation
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ *
+ * Unblocks reservation leaving lru lists untouched.
+ */
+extern void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo);
+
+/*
+ * ttm_bo_util.c
+ */
+
+/**
+ * ttm_bo_move_ttm
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @evict: 1: This is an eviction. Don't try to pipeline.
+ * @no_wait: Never sleep, but rather return with -EBUSY.
+ * @new_mem: struct ttm_mem_reg indicating where to move.
+ *
+ * Optimized move function for a buffer object with both old and
+ * new placement backed by a TTM. The function will, if successful,
+ * free any old aperture space, and set (@new_mem)->mm_node to NULL,
+ * and update the (@bo)->mem placement flags. If unsuccessful, the old
+ * data remains untouched, and it's up to the caller to free the
+ * memory space indicated by @new_mem.
+ * Returns:
+ * !0: Failure.
+ */
+
+extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
+                          bool evict, bool no_wait,
+                          struct ttm_mem_reg *new_mem);
+
+/**
+ * ttm_bo_move_memcpy
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @evict: 1: This is an eviction. Don't try to pipeline.
+ * @no_wait: Never sleep, but rather return with -EBUSY.
+ * @new_mem: struct ttm_mem_reg indicating where to move.
+ *
+ * Fallback move function for a mappable buffer object in mappable memory.
+ * The function will, if successful,
+ * free any old aperture space, and set (@new_mem)->mm_node to NULL,
+ * and update the (@bo)->mem placement flags. If unsuccessful, the old
+ * data remains untouched, and it's up to the caller to free the
+ * memory space indicated by @new_mem.
+ * Returns:
+ * !0: Failure.
+ */
+
+extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
+                             bool evict,
+                             bool no_wait, struct ttm_mem_reg *new_mem);
+
+/**
+ * ttm_bo_free_old_node
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ *
+ * Utility function to free an old placement after a successful move.
+ */
+extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
+
+/**
+ * ttm_bo_move_accel_cleanup.
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @sync_obj: A sync object that signals when moving is complete.
+ * @sync_obj_arg: An argument to pass to the sync object idle / wait
+ * functions.
+ * @evict: This is an evict move. Don't return until the buffer is idle.
+ * @no_wait: Never sleep, but rather return with -EBUSY.
+ * @new_mem: struct ttm_mem_reg indicating where to move.
+ *
+ * Accelerated move function to be called when an accelerated move
+ * has been scheduled. The function will create a new temporary buffer object
+ * representing the old placement, and put the sync object on both buffer
+ * objects. After that the newly created buffer object is unref'd to be
+ * destroyed when the move is complete. This will help pipeline
+ * buffer moves.
+ */
+
+extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
+                                    void *sync_obj,
+                                    void *sync_obj_arg,
+                                    bool evict, bool no_wait,
+                                    struct ttm_mem_reg *new_mem);
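
A sketch of a driver move() hook built on these helpers: mydrv_have_blitter() and mydrv_copy() are hypothetical, with mydrv_copy() assumed to schedule a GPU copy and return a driver sync object (or an ERR_PTR value).

static int mydrv_bo_move(struct ttm_buffer_object *bo, bool evict,
                         bool interruptible, bool no_wait,
                         struct ttm_mem_reg *new_mem)
{
        void *fence;

        if (!mydrv_have_blitter())
                return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);

        fence = mydrv_copy(bo, new_mem);        /* schedule the GPU copy */
        if (IS_ERR(fence))
                return PTR_ERR(fence);

        /* Hand the sync object to TTM so the old placement can be freed
         * (and the move pipelined) once the copy has really finished. */
        return ttm_bo_move_accel_cleanup(bo, fence, NULL, evict,
                                         no_wait, new_mem);
}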
+/**
+ * ttm_io_prot
+ *
+ * @c_state: Caching state.
+ * @tmp: Page protection flag for a normal, cached mapping.
+ *
+ * Utility function that returns the pgprot_t that should be used for
+ * setting up a PTE with the caching model indicated by @c_state.
+ */
+extern pgprot_t ttm_io_prot(enum ttm_caching_state c_state, pgprot_t tmp);
+
+#if (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
+#define TTM_HAS_AGP
+#include <linux/agp_backend.h>
+
+/**
+ * ttm_agp_backend_init
+ *
+ * @bdev: Pointer to a struct ttm_bo_device.
+ * @bridge: The agp bridge this device is sitting on.
+ *
+ * Create a TTM backend that uses the indicated AGP bridge as an aperture
+ * for TT memory. This function uses the linux agpgart interface to
+ * bind and unbind memory backing a ttm_tt.
+ */
+extern struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
+                                               struct agp_bridge_data *bridge);
+#endif
+
+#endif
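
A sketch of a create_ttm_backend_entry hook that returns the stock AGP backend when an AGP bridge is present; struct mydrv_device, its agp_bridge member and the PCIE fallback are assumptions for the example.

static struct ttm_backend *mydrv_create_backend(struct ttm_bo_device *bdev)
{
#ifdef TTM_HAS_AGP
        struct mydrv_device *mdev =
                container_of(bdev, struct mydrv_device, bdev);

        if (mdev->agp_bridge)
                return ttm_agp_backend_init(bdev, mdev->agp_bridge);
#endif
        return mydrv_pcie_backend_init(bdev);   /* hypothetical GART path */
}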
diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
new file mode 100644 (file)
index 0000000..d8b8f04
--- /dev/null
@@ -0,0 +1,153 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef TTM_MEMORY_H
+#define TTM_MEMORY_H
+
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/errno.h>
+
+/**
+ * struct ttm_mem_shrink - callback to shrink TTM memory usage.
+ *
+ * @do_shrink: The callback function.
+ *
+ * Arguments to the do_shrink functions are intended to be passed using
+ * inheritance. That is, the argument class derives from struct ttm_mem_shrink,
+ * and can be accessed using container_of().
+ */
+
+struct ttm_mem_shrink {
+       int (*do_shrink) (struct ttm_mem_shrink *);
+};
+
+/**
+ * struct ttm_mem_global - Global memory accounting structure.
+ *
+ * @shrink: A single callback to shrink TTM memory usage. Extend this
+ * to a linked list to be able to handle multiple callbacks when needed.
+ * @swap_queue: A workqueue to handle shrinking in low memory situations. We
+ * need a separate workqueue since it will spend a lot of time waiting
+ * for the GPU, and this will otherwise block other workqueue tasks(?)
+ * At this point we use only a single-threaded workqueue.
+ * @work: The workqueue callback for the shrink queue.
+ * @queue: Wait queue for processes suspended waiting for memory.
+ * @lock: Lock to protect the @shrink callback and the memory accounting
+ * members, that is, essentially the whole structure with some exceptions.
+ * @emer_memory: Lowmem memory limit available for root.
+ * @max_memory: Lowmem memory limit available for non-root.
+ * @swap_limit: Lowmem memory limit where the shrink workqueue kicks in.
+ * @used_memory: Currently used lowmem memory.
+ * @used_total_memory: Currently used total (lowmem + highmem) memory.
+ * @total_memory_swap_limit: Total memory limit where the shrink workqueue
+ * kicks in.
+ * @max_total_memory: Total memory available to non-root processes.
+ * @emer_total_memory: Total memory available to root processes.
+ *
+ * Note that this structure is not per device. It should be global for all
+ * graphics devices.
+ */
+
+struct ttm_mem_global {
+       struct ttm_mem_shrink *shrink;
+       struct workqueue_struct *swap_queue;
+       struct work_struct work;
+       wait_queue_head_t queue;
+       spinlock_t lock;
+       uint64_t emer_memory;
+       uint64_t max_memory;
+       uint64_t swap_limit;
+       uint64_t used_memory;
+       uint64_t used_total_memory;
+       uint64_t total_memory_swap_limit;
+       uint64_t max_total_memory;
+       uint64_t emer_total_memory;
+};
+
+/**
+ * ttm_mem_init_shrink - initialize a struct ttm_mem_shrink object
+ *
+ * @shrink: The object to initialize.
+ * @func: The callback function.
+ */
+
+static inline void ttm_mem_init_shrink(struct ttm_mem_shrink *shrink,
+                                      int (*func) (struct ttm_mem_shrink *))
+{
+       shrink->do_shrink = func;
+}
+
+/**
+ * ttm_mem_register_shrink - register a struct ttm_mem_shrink object.
+ *
+ * @glob: The struct ttm_mem_global object to register with.
+ * @shrink: An initialized struct ttm_mem_shrink object to register.
+ *
+ * Returns:
+ * -EBUSY: There's already a callback registered. (May change).
+ */
+
+static inline int ttm_mem_register_shrink(struct ttm_mem_global *glob,
+                                         struct ttm_mem_shrink *shrink)
+{
+       spin_lock(&glob->lock);
+       if (glob->shrink != NULL) {
+               spin_unlock(&glob->lock);
+               return -EBUSY;
+       }
+       glob->shrink = shrink;
+       spin_unlock(&glob->lock);
+       return 0;
+}
+
+/**
+ * ttm_mem_unregister_shrink - unregister a struct ttm_mem_shrink object.
+ *
+ * @glob: The struct ttm_mem_global object to unregister from.
+ * @shrink: A previously registered struct ttm_mem_shrink object.
+ *
+ */
+
+static inline void ttm_mem_unregister_shrink(struct ttm_mem_global *glob,
+                                            struct ttm_mem_shrink *shrink)
+{
+       spin_lock(&glob->lock);
+       BUG_ON(glob->shrink != shrink);
+       glob->shrink = NULL;
+       spin_unlock(&glob->lock);
+}
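
A sketch of the inheritance pattern mentioned in the struct ttm_mem_shrink description: the shrink object is embedded in a driver structure and recovered with container_of() in the callback. The mydrv_* names are illustrative.

struct mydrv_mem_priv {
        struct ttm_mem_global mem_glob;
        struct ttm_mem_shrink shrink;
};

static int mydrv_do_shrink(struct ttm_mem_shrink *shrink)
{
        struct mydrv_mem_priv *priv =
                container_of(shrink, struct mydrv_mem_priv, shrink);

        /* Swap out or release buffers accounted against priv->mem_glob. */
        return 0;
}

static int mydrv_register_shrink(struct mydrv_mem_priv *priv)
{
        ttm_mem_init_shrink(&priv->shrink, mydrv_do_shrink);
        return ttm_mem_register_shrink(&priv->mem_glob, &priv->shrink);
}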
+
+extern int ttm_mem_global_init(struct ttm_mem_global *glob);
+extern void ttm_mem_global_release(struct ttm_mem_global *glob);
+extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
+                               bool no_wait, bool interruptible, bool himem);
+extern void ttm_mem_global_free(struct ttm_mem_global *glob,
+                               uint64_t amount, bool himem);
+extern size_t ttm_round_pot(size_t size);
+#endif
diff --git a/include/drm/ttm/ttm_module.h b/include/drm/ttm/ttm_module.h
new file mode 100644 (file)
index 0000000..889a4c7
--- /dev/null
@@ -0,0 +1,58 @@
+/**************************************************************************
+ *
+ * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#ifndef _TTM_MODULE_H_
+#define _TTM_MODULE_H_
+
+#include <linux/kernel.h>
+
+#define TTM_PFX "[TTM]"
+
+enum ttm_global_types {
+       TTM_GLOBAL_TTM_MEM = 0,
+       TTM_GLOBAL_TTM_BO,
+       TTM_GLOBAL_TTM_OBJECT,
+       TTM_GLOBAL_NUM
+};
+
+struct ttm_global_reference {
+       enum ttm_global_types global_type;
+       size_t size;
+       void *object;
+       int (*init) (struct ttm_global_reference *);
+       void (*release) (struct ttm_global_reference *);
+};
+
+extern void ttm_global_init(void);
+extern void ttm_global_release(void);
+extern int ttm_global_item_ref(struct ttm_global_reference *ref);
+extern void ttm_global_item_unref(struct ttm_global_reference *ref);
+
+#endif /* _TTM_MODULE_H_ */
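
A sketch of how a driver would take a reference on the global TTM memory object through this interface; the mydrv_* wrappers are illustrative and assume that ttm_global_item_ref() hands back a shared object, running .init the first time it is created.

static int mydrv_mem_global_init(struct ttm_global_reference *ref)
{
        return ttm_mem_global_init(ref->object);
}

static void mydrv_mem_global_release(struct ttm_global_reference *ref)
{
        ttm_mem_global_release(ref->object);
}

static struct ttm_global_reference mydrv_mem_global_ref = {
        .global_type = TTM_GLOBAL_TTM_MEM,
        .size = sizeof(struct ttm_mem_global),
        .init = &mydrv_mem_global_init,
        .release = &mydrv_mem_global_release,
};

static int mydrv_global_init(void)
{
        return ttm_global_item_ref(&mydrv_mem_global_ref);
}

static void mydrv_global_fini(void)
{
        ttm_global_item_unref(&mydrv_mem_global_ref);
}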
diff --git a/include/drm/ttm/ttm_placement.h b/include/drm/ttm/ttm_placement.h
new file mode 100644 (file)
index 0000000..c84ff15
--- /dev/null
@@ -0,0 +1,92 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#ifndef _TTM_PLACEMENT_H_
+#define _TTM_PLACEMENT_H_
+/*
+ * Memory regions for data placement.
+ */
+
+#define TTM_PL_SYSTEM           0
+#define TTM_PL_TT               1
+#define TTM_PL_VRAM             2
+#define TTM_PL_PRIV0            3
+#define TTM_PL_PRIV1            4
+#define TTM_PL_PRIV2            5
+#define TTM_PL_PRIV3            6
+#define TTM_PL_PRIV4            7
+#define TTM_PL_PRIV5            8
+#define TTM_PL_SWAPPED          15
+
+#define TTM_PL_FLAG_SYSTEM      (1 << TTM_PL_SYSTEM)
+#define TTM_PL_FLAG_TT          (1 << TTM_PL_TT)
+#define TTM_PL_FLAG_VRAM        (1 << TTM_PL_VRAM)
+#define TTM_PL_FLAG_PRIV0       (1 << TTM_PL_PRIV0)
+#define TTM_PL_FLAG_PRIV1       (1 << TTM_PL_PRIV1)
+#define TTM_PL_FLAG_PRIV2       (1 << TTM_PL_PRIV2)
+#define TTM_PL_FLAG_PRIV3       (1 << TTM_PL_PRIV3)
+#define TTM_PL_FLAG_PRIV4       (1 << TTM_PL_PRIV4)
+#define TTM_PL_FLAG_PRIV5       (1 << TTM_PL_PRIV5)
+#define TTM_PL_FLAG_SWAPPED     (1 << TTM_PL_SWAPPED)
+#define TTM_PL_MASK_MEM         0x0000FFFF
+
+/*
+ * Other flags that affect data placement.
+ * TTM_PL_FLAG_CACHED indicates cache-coherent mappings
+ * if available.
+ * TTM_PL_FLAG_SHARED means that another application may
+ * reference the buffer.
+ * TTM_PL_FLAG_NO_EVICT means that the buffer may never
+ * be evicted to make room for other buffers.
+ */
+
+#define TTM_PL_FLAG_CACHED      (1 << 16)
+#define TTM_PL_FLAG_UNCACHED    (1 << 17)
+#define TTM_PL_FLAG_WC          (1 << 18)
+#define TTM_PL_FLAG_SHARED      (1 << 20)
+#define TTM_PL_FLAG_NO_EVICT    (1 << 21)
+
+#define TTM_PL_MASK_CACHING     (TTM_PL_FLAG_CACHED | \
+                                TTM_PL_FLAG_UNCACHED | \
+                                TTM_PL_FLAG_WC)
+
+#define TTM_PL_MASK_MEMTYPE     (TTM_PL_MASK_MEM | TTM_PL_MASK_CACHING)
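
A small example of how these flags are meant to be combined into a placement word and taken apart again with the masks; the function is illustrative only.

static void placement_example(void)
{
        /* "Put this buffer in VRAM or TT, write-combined, never evict it",
         * roughly what a scanout buffer would ask for. */
        uint32_t placement = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_TT |
                             TTM_PL_FLAG_WC | TTM_PL_FLAG_NO_EVICT;
        uint32_t mem_types = placement & TTM_PL_MASK_MEM;      /* VRAM | TT */
        uint32_t caching = placement & TTM_PL_MASK_CACHING;    /* WC only   */

        (void)mem_types;
        (void)caching;
}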
+
+/*
+ * Access flags to be used for CPU- and GPU- mappings.
+ * The idea is that the TTM synchronization mechanism will
+ * allow concurrent READ access and exclusive write access.
+ * Currently GPU- and CPU accesses are exclusive.
+ */
+
+#define TTM_ACCESS_READ         (1 << 0)
+#define TTM_ACCESS_WRITE        (1 << 1)
+
+#endif