1.6.16.4117/4.04.00.01
author: Imagination Technologies/TI <imgtec>
Mon, 11 Feb 2013 00:18:29 +0000 (02:18 +0200)
committer: Grazvydas Ignotas <notasas@gmail.com>
Mon, 11 Feb 2013 00:18:29 +0000 (02:18 +0200)
169 files changed:
Kbuild
Makefile
eurasiacon/build/linux/makefile.shared_conf
eurasiacon/build/linux/omap3430_linux/ignored_symbols.txt [changed mode: 0755->0644]
eurasiacon/build/linux/omap3430_linux/kbuild/build_gfx_kernelmodules [changed mode: 0755->0644]
eurasiacon/build/linux/omap3630_linux/kbuild/build_gfx_kernelmodules [changed mode: 0755->0644]
eurasiacon/build/linux/omap4430_linux/kbuild/Makefile [new file with mode: 0644]
eurasiacon/build/linux/omap4430_linux/makefile.core [new file with mode: 0644]
eurasiacon/build/linux/omap4430_linux/makefile.shared_conf [new file with mode: 0644]
include4/pvrversion.h
include4/servicesext.h
services4/3rdparty/bufferclass_ti/bc_cat.c
services4/3rdparty/bufferclass_ti/bc_cat.h
services4/3rdparty/dc_omapfb3_linux/3rdparty_dc_drm_shared.h [new file with mode: 0644]
services4/3rdparty/dc_omapfb3_linux/Kbuild [new file with mode: 0644]
services4/3rdparty/dc_omapfb3_linux/kbuild/Makefile [moved from services4/3rdparty/dc_ti8168_linux/kbuild/Makefile with 81% similarity]
services4/3rdparty/dc_omapfb3_linux/makefile.linux.common [new file with mode: 0644]
services4/3rdparty/dc_omapfb3_linux/omaplfb.h [new file with mode: 0644]
services4/3rdparty/dc_omapfb3_linux/omaplfb_displayclass.c [new file with mode: 0644]
services4/3rdparty/dc_omapfb3_linux/omaplfb_linux.c [new file with mode: 0644]
services4/3rdparty/dc_ti8168_linux/Kbuild [deleted file]
services4/3rdparty/dc_ti8168_linux/omaplfb.h [deleted file]
services4/3rdparty/dc_ti8168_linux/omaplfb_displayclass.c [deleted file]
services4/3rdparty/dc_ti8168_linux/omaplfb_linux.c [deleted file]
services4/3rdparty/dc_ti81xx_linux/3rdparty_dc_drm_shared.h [new file with mode: 0644]
services4/3rdparty/dc_ti81xx_linux/Kbuild
services4/3rdparty/dc_ti81xx_linux/kbuild/Makefile
services4/3rdparty/dc_ti81xx_linux/makefile.linux.common [new file with mode: 0644]
services4/3rdparty/dc_ti81xx_linux/modules.order [deleted file]
services4/3rdparty/dc_ti81xx_linux/omaplfb.h
services4/3rdparty/dc_ti81xx_linux/omaplfb_displayclass.c
services4/3rdparty/dc_ti81xx_linux/omaplfb_linux.c
services4/3rdparty/linux_drm/Kbuild [new file with mode: 0644]
services4/3rdparty/linux_drm/Kbuild_org [new file with mode: 0644]
services4/3rdparty/linux_drm/ati_pcigart.c [new file with mode: 0644]
services4/3rdparty/linux_drm/drm_agpsupport.c [new file with mode: 0644]
services4/3rdparty/linux_drm/drm_auth.c [new file with mode: 0644]
services4/3rdparty/linux_drm/drm_buffer.c [new file with mode: 0644]
services4/3rdparty/linux_drm/drm_bufs.c [new file with mode: 0644]
services4/3rdparty/linux_drm/drm_cache.c [new file with mode: 0644]
services4/3rdparty/linux_drm/drm_context.c [new file with mode: 0644]
services4/3rdparty/linux_drm/drm_crtc.c [new file with mode: 0644]
services4/3rdparty/linux_drm/drm_crtc_helper.c [new file with mode: 0644]
services4/3rdparty/linux_drm/drm_debugfs.c [new file with mode: 0644]
services4/3rdparty/linux_drm/drm_dma.c [new file with mode: 0644]
services4/3rdparty/linux_drm/drm_dp_i2c_helper.c [new file with mode: 0644]
services4/3rdparty/linux_drm/drm_drawable.c [new file with mode: 0644]
services4/3rdparty/linux_drm/drm_drv.c [new file with mode: 0644]
services4/3rdparty/linux_drm/drm_edid.c [new file with mode: 0644]
services4/3rdparty/linux_drm/drm_encoder_slave.c [new file with mode: 0644]
services4/3rdparty/linux_drm/drm_fb_helper.c [new file with mode: 0644]
services4/3rdparty/linux_drm/drm_fops.c [new file with mode: 0644]
services4/3rdparty/linux_drm/drm_gem.c [new file with mode: 0644]
services4/3rdparty/linux_drm/drm_global.c [new file with mode: 0644]
services4/3rdparty/linux_drm/drm_hashtab.c [new file with mode: 0644]
services4/3rdparty/linux_drm/drm_info.c [new file with mode: 0644]
services4/3rdparty/linux_drm/drm_ioc32.c [new file with mode: 0644]
services4/3rdparty/linux_drm/drm_ioctl.c [new file with mode: 0644]
services4/3rdparty/linux_drm/drm_irq.c [new file with mode: 0644]
services4/3rdparty/linux_drm/drm_lock.c [new file with mode: 0644]
services4/3rdparty/linux_drm/drm_memory.c [new file with mode: 0644]
services4/3rdparty/linux_drm/drm_mm.c [new file with mode: 0644]
services4/3rdparty/linux_drm/drm_modes.c [new file with mode: 0644]
services4/3rdparty/linux_drm/drm_pci.c [new file with mode: 0644]
services4/3rdparty/linux_drm/drm_platform.c [new file with mode: 0644]
services4/3rdparty/linux_drm/drm_proc.c [new file with mode: 0644]
services4/3rdparty/linux_drm/drm_scatter.c [new file with mode: 0644]
services4/3rdparty/linux_drm/drm_sman.c [new file with mode: 0644]
services4/3rdparty/linux_drm/drm_stub.c [new file with mode: 0644]
services4/3rdparty/linux_drm/drm_sysfs.c [new file with mode: 0644]
services4/3rdparty/linux_drm/drm_trace_points.c [new file with mode: 0644]
services4/3rdparty/linux_drm/drm_vm.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/Makefile [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/ati_pcigart.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_agpsupport.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_auth.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_bufs.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_cache.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_context.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_crtc.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_debugfs.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_dma.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_drawable.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_drv.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_edid.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_encoder_slave.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_fops.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_gem.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_hashtab.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_info.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_ioctl.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_irq.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_lock.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_memory.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_mm.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_modes.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_pci.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_proc.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_scatter.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_sman.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_stub.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_sysfs.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_vm.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/ati_pcigart.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_agpsupport.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_auth.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_bufs.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_cache.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_context.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_crtc.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_debugfs.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_dma.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_drawable.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_drv.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_edid.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_encoder_slave.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_fops.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_gem.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_hashtab.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_info.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_ioctl.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_irq.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_lock.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_memory.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_mm.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_modes.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_pci.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_proc.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_scatter.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_sman.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_stub.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_sysfs.c [new file with mode: 0644]
services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_vm.c [new file with mode: 0644]
services4/3rdparty/linux_drm/pvr_drm_mod.h [moved from services4/srvkm/hwdefs/sgxcoretypes.h with 80% similarity]
services4/3rdparty/linux_drm/pvr_drm_stubs.c [new file with mode: 0644]
services4/include/sgxinfo.h
services4/srvkm/bridged/bridged_pvr_bridge.c
services4/srvkm/bridged/sgx/bridged_sgx_bridge.c
services4/srvkm/common/metrics.c
services4/srvkm/common/pvrsrv.c
services4/srvkm/common/queue.c
services4/srvkm/devices/sgx/sgxconfig.h
services4/srvkm/devices/sgx/sgxinit.c
services4/srvkm/devices/sgx/sgxkick.c
services4/srvkm/devices/sgx/sgxpower.c
services4/srvkm/devices/sgx/sgxreset.c
services4/srvkm/devices/sgx/sgxtransfer.c
services4/srvkm/devices/sgx/sgxutils.c
services4/srvkm/devices/sgx/sgxutils.h
services4/srvkm/env/linux/mutils.h
services4/srvkm/env/linux/osfunc.c
services4/srvkm/env/linux/pvr_bridge_k.c
services4/srvkm/env/linux/pvr_drm.c
services4/srvkm/hwdefs/sgx531defs.h [deleted file]
services4/srvkm/include/device.h
services4/srvkm/include/osfunc.h
services4/system/omap4/oemfuncs.h [moved from services4/system/ti8168/oemfuncs.h with 97% similarity]
services4/system/omap4/sysconfig.c [moved from services4/system/ti8168/sysconfig.c with 80% similarity]
services4/system/omap4/sysconfig.h [moved from services4/system/ti8168/sysconfig.h with 65% similarity]
services4/system/omap4/sysinfo.h [new file with mode: 0644]
services4/system/omap4/syslocal.h [moved from services4/system/ti8168/syslocal.h with 94% similarity]
services4/system/omap4/sysutils.c [moved from services4/system/ti8168/sysutils.c with 97% similarity]
services4/system/omap4/sysutils_linux.c [moved from services4/system/ti81xx/sysutils_linux_wqueue_compat_orig.c with 86% similarity]
services4/system/ti8168/sysinfo.h [deleted file]
services4/system/ti8168/sysutils_linux.c [deleted file]
services4/system/ti81xx/.sysconfig.h.swp [deleted file]
services4/system/ti81xx/sysutils_linux_wqueue_compat.c
tools/intern/debug/dbgdriv/linux/hostfunc.c
tools/intern/debug/dbgdriv/linux/main.c

diff --git a/Kbuild b/Kbuild
index a34841f..b79c74d 100644 (file)
--- a/Kbuild
+++ b/Kbuild
@@ -41,6 +41,17 @@ services4/srvkm/bridged/sgx/bridged_sgx_bridge.c \
 services4/system/$(TI_PLATFORM)/sysutils.c \
 services4/system/$(TI_PLATFORM)/sysconfig.c \
 
+ifneq ($(FBDEV),no)
+EXTRA_CFLAGS += -DFBDEV_PRESENT
+endif
+
+
+ifeq ($(TI_PLATFORM),ti81xx)
+DRIFILES = services4/srvkm/env/linux/pvr_drm.c services4/3rdparty/dc_ti81xx_linux/omaplfb_linux.c services4/3rdparty/dc_ti81xx_linux/omaplfb_displayclass.c 
+else
+DRIFILES = services4/srvkm/env/linux/pvr_drm.c services4/3rdparty/dc_omapfb3_linux/omaplfb_linux.c services4/3rdparty/dc_omapfb3_linux/omaplfb_displayclass.c
+endif
+
 EXTRA_CFLAGS += -I$(src)/include4
 EXTRA_CFLAGS += -I$(src)/services4/include
 EXTRA_CFLAGS += -I$(src)/services4/srvkm/include
@@ -52,14 +63,35 @@ EXTRA_CFLAGS += -I$(src)/services4/system/include
 EXTRA_CFLAGS += -I$(src)/services4/system/$(TI_PLATFORM)
 EXTRA_CFLAGS += -I$(src)/services4/srvkm/bridged/sgx
 
+
+ifeq ($(SUPPORT_XORG),1)
+EXTRA_CFLAGS += -I$(KERNELDIR)/include/drm 
+EXTRA_CFLAGS += -I$(src)/services4/3rdparty/linux_drm
+EXTRA_CFLAGS += -I$(src)/services4/include/env/linux
+EXTRA_CFLAGS += -I$(KERNELDIR)/drivers/video/omap2
+EXTRA_CFLAGS += -I$(KERNELDIR)/arch/arm/plat-omap/include
+ifeq ($(TI_PLATFORM),omap4)
+EXTRA_CFLAGS += -DCONFIG_SLOW_WORK 
+endif
+endif
+
 EXTRA_CFLAGS += $(ALL_CFLAGS)
 
 pvrsrvkm-y     := $(FILES:.c=.o)
 
+ifeq ($(SUPPORT_XORG),1)
+pvrsrvkm-y +=  $(DRIFILES:.c=.o)
+endif
+
+ifneq ($(SUPPORT_XORG),1)
 ifeq ($(TI_PLATFORM),ti81xx)
 obj-y := services4/3rdparty/dc_ti81xx_linux/
 else
-obj-y := services4/3rdparty/dc_omap3430_linux/
+obj-y := services4/3rdparty/dc_omapfb3_linux/
+endif
 endif
 obj-y += services4/3rdparty/bufferclass_ti/
 
+ifeq ($(SUPPORT_XORG),1)
+obj-y += services4/3rdparty/linux_drm/
+endif
index 7fe1828..2e13203 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -26,8 +26,15 @@ SYS_CFLAGS += -DPVR_HAS_BROKEN_OMAPFB_H
 endif
 OPTIM                   = -Os
 
-
+ifeq ($(TI_PLATFORM),omap4)
+SGXCORE = 540
+else
 SGXCORE = 530
+endif
+
+ifeq ($(TI_PLATFORM),omap4)
+CORE = -DSGX540 -DSUPPORT_SGX540 -DSGX_CORE_REV=120
+else
 ifeq ($(TI_PLATFORM),ti81xx)
 CORE = -DPLAT_TI81xx -DSGX530 -DSUPPORT_SGX530 -DSGX_CORE_REV=125
 else
@@ -43,6 +50,7 @@ endif
 endif
 endif
 endif
+endif
 
 SUPPORT_SGX = 1
 SUPPORT_HW_RECOVERY = 1
@@ -53,6 +61,31 @@ SUPPORT_TI_PM = 0
 PVR2D_ALT_2DHW = 1
 
 LDM_PLATFORM ?= 1
+SUPPORT_XORG ?=0
+SUPPORT_DRI_DRM_NOT_PCI ?= 0
+
+ifeq ($(SUPPORT_XORG),1)
+SUPPORT_DRI_DRM = 1
+SUPPORT_DRI_DRM_NOT_PCI = 1
+endif
+
+
+ifeq ($(SUPPORT_DRI_DRM_NOT_PCI),1)
+KBUILD_EXTRA_SYMBOLS = `pwd`/services4/3rdparty/linux_drm/kbuild/Module.symvers
+endif
+
+SUPPORT_DRI_DRM ?= $(SUPPORT_XORG)
+SUPPORT_DRI_DRM_EXT ?= 0
+SUPPORT_DRI_DRM_NO_DROPMASTER ?= 0
+#SUPPORT_SECURE_DRM_AUTH_EXPORT ?= $(SUPPORT_XORG)
+
+SUPPORT_DRI_DRM_NO_LIBDRM ?= 0
+ifneq ($(SUPPORT_XORG),1)
+ifeq ($(SUPPORT_DRI_DRM),1)
+SUPPORT_DRI_DRM_NO_LIBDRM = 1
+endif
+endif
+
 
 # Only enable active power management if passive power management is
 # enabled, as indicated by LDM_PLATFORM being set to 1.  On OMAP,
@@ -70,11 +103,25 @@ else
 SUPPORT_ACTIVE_POWER_MANAGEMENT = 0
 endif
 
+#if 0
+ifeq ($(LDM_PLATFORM),1)
+DISPLAY_CONTROLLER                      = omaplfb
+OMAP_NON_FLIP_DISPLAY                   = 0
+else
+DISPLAY_CONTROLLER                      = pvrlfb
+DISPLAY_CONTROLLER_DIR                  = 3rdparty/linux_framebuffer
+OMAP_NON_FLIP_DISPLAY                   = 1
+endif
+#endif
+
+
 
 PVRSRV_MODNAME ?= pvrsrvkm
 
 SYS_CFLAGS += -DPVRSRV_MODNAME="\"$(PVRSRV_MODNAME)"\"
 
+#ARCH_CFLAGS             += -ftree-vectorize -mfpu=neon -mfloat-abi=hard
+ARCH_CFLAGS     += -Wno-sign-conversion
 
 
 export PVR_BUILD_DIR := $(shell pwd)
@@ -143,7 +190,6 @@ PVRSRV_LOG_MEMORY_ALLOCS ?= 0
 PVRSRV_DEBUG_OS_MEMORY ?= 0
 endif
 
-SUPPORT_XORG ?= 0
 ifneq ($(SUPPORT_XORG),1)
 SUPPORT_XWS        ?= 1
 XWS_SERVER_ONLY    ?= 0
@@ -164,13 +210,11 @@ else
 SUPPORT_SECURE_FD_EXPORT        ?= 0
 endif
 
-
-
+SUPPORT_DRI_DRM_NO_LIBDRM ?= 0
 
 TRANSFER_QUEUE                         ?= 1
 SUPPORT_SGX_EVENT_OBJECT               ?= 1
 SUPPORT_SECURE_HANDLES                 = 1
-SUPPORT_SECURE_FD_EXPORT               = 1
 SUPPORT_SRVINIT                                = 1
 SUPPORT_PERCONTEXT_PB                  = 1
 DISABLE_SGX_PB_GROW_SHRINK             ?= 1
@@ -178,6 +222,11 @@ SUPPORT_LINUX_X86_PAT                      ?=1
 SUPPORT_LINUX_X86_WRITECOMBINE                 ?=1
 SUPPORT_SGX_LOW_LATENCY_SCHEDULING     ?=1
 
+ifeq ($(SUPPORT_XORG),1)
+SUPPORT_PDUMP_MULTI_PROCESS = 1
+endif
+
+
 SUPPORT_OMAP3430_SGXFCLK_96M ?= 0
 SUPPORT_OMAP3430_OMAPFB3 ?= 0
 
@@ -272,11 +321,17 @@ SYS_CFLAGS.$(PVRSRV_RESET_ON_HWTIMEOUT)                 += -DPVRSRV_RESET_ON_HWT
 SYS_CFLAGS.$(PVRSRV_CLIENT_RESET_ON_HWTIMEOUT)  += -DPVRSRV_CLIENT_RESET_ON_HWTIMEOUT
 SYS_CFLAGS.$(NO_HARDWARE)                                               += -DNO_HARDWARE
 
+SYS_CFLAGS.$(SUPPORT_DRI_DRM)                                   += -DSUPPORT_DRI_DRM
+SYS_CFLAGS.$(SUPPORT_DRI_DRM_EXT)                               += -DSUPPORT_DRI_DRM_EXT
+SYS_CFLAGS.$(SUPPORT_DRI_DRM_NOT_PCI)                           += -DPVR_DRI_DRM_NOT_PCI
+SYS_CFLAGS.$(SUPPORT_DRI_DRM_NO_DROPMASTER)             += -DSUPPORT_DRI_DRM_NO_DROPMASTER
+SYS_CFLAGS.$(SUPPORT_DRI_DRM_NO_LIBDRM)                         += -DSUPPORT_DRI_DRM_NO_LIBDRM
+SYS_CFLAGS.$(DRM_PVR_RESERVED_INTEL_ORDER)              += -DDRM_PVR_RESERVED_INTEL_ORDER
+SYS_CFLAGS.$(DRM_PVR_USE_INTEL_FB)                              += -DDRM_PVR_USE_INTEL_FB
+
 
 
 
-SYS_CFLAGS.$(SUPPORT_DRI_DRM)                          += -DSUPPORT_DRI_DRM
-SYS_CFLAGS.$(SUPPORT_DRI_DRM_EXT)                               += -DSUPPORT_DRI_DRM_EXT
 SYS_CFLAGS.$(SUPPORT_LIBDRM_LITE)                               += -DSUPPORT_LIBDRM_LITE
 
 ifneq ("$(NO_HARDWARE)", "1")
@@ -349,8 +404,9 @@ SYS_CFLAGS.$(SUPPORT_PVR_PDP_LINUX_FB) += -DPVR_PDP_LINUX_FB
 SYS_CFLAGS.$(SUPPORT_LINUX_USING_WORKQUEUES) += -DPVR_LINUX_USING_WORKQUEUES \
                                 -DPVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE \
                                 -DPVR_LINUX_TIMERS_USING_WORKQUEUES \
-                                -DSYS_CUSTOM_POWERLOCK_WRAP
-
+                                -DSYS_CUSTOM_POWERLOCK_WRAP \
+                               -DPVR_NO_FULL_CACHE_OPS \
+                               -DSGX_CLK_CORE_DIV5
 
 
 SYS_CFLAGS.$(SUPPORT_SGX_NEW_STATUS_VALS)       += -DSUPPORT_SGX_NEW_STATUS_VALS
@@ -384,7 +440,10 @@ export ALL_CFLAGS =        -DLINUX \
                        $(SYS_CFLAGS) $(SYS_CFLAGS.1) \
                        $(MODULE_CFLAGS) $(MODULE_CFLAGS.$(BUILD)) \
                        $(CORE) -fno-strict-aliasing -Wno-pointer-arith \
-                       $(CFLAGS)
+                       $(CFLAGS) $(ARCH_CFLAGS)
+ifdef SUPPORT_DRI_DRM_NO_TTM
+export SUPPORT_DRI_DRM_NO_TTM
+endif
 
 all:
        $(MAKE) -C $(KERNELDIR) M=`pwd` $*
index 01bf4a0..04a4faf 100644 (file)
@@ -148,6 +148,7 @@ PVRVERSION_BUILD    = $(shell echo $(PVRVERSION) | $(CUT) -d '.' -f4,5,6)
 
 # Linux kernel defines
 #
+ifneq ($(NO_KERNEL_MODULES),1)
 KERNEL_VER             = $(shell grep "^VERSION = " \
                                        $(KERNELDIR)/Makefile | $(CUT) -f3 -d' ')
 KERNEL_REL             = $(shell grep "^PATCHLEVEL = " \
@@ -164,29 +165,19 @@ KERNEL_ID         ?= $(shell grep -h '\#define UTS_RELEASE' $(KERNELDIR)/include/linux/
                                $(CUT) -f3 -d' ' | \
                                $(SED) s/\"//g)
 
+KM_SUFFIX              = ko
+
 ifeq ("$(KERNEL_ID)", "")
 # For Linux 2.6.33, the above method of finding the KERNEL ID no longer
 # works, as UTS_RELEASE is no longer defined anywhere.
 KERNEL_ID              := $(KERNELVERSION)$(KERNEL_EXTRAVER)
 endif
+endif # !NO_KERNEL_MODULES
 
 # Get checksum from env variables
 #
 ENV_CHECKSUM   =       $(shell echo $(ALL_CFLAGS_$(MODSUFFIX)) | $(MD5SUM) - | $(CUT) -d' ' -f 1)
 
-# Linux kernel defines
-#
-ifeq ("$(KERNEL_VER)", "2")
-ifeq ("$(KERNEL_REL)", "6")
-KM_SUFFIX              = ko
-else
-KM_SUFFIX              = o
-CFLAGS_.o      += -DEXPORT_SYMTAB $(CFLAGS_.ko)
-endif
-else 
-KM_SUFFIX              = o
-endif
-
 # The standard CFLAGS macro can be overridden on the 'make' command line.  We
 # put CBUILD in a separate macro so its setting doesn't get lost when a user
 # *does* override CFLAGS.
@@ -318,7 +309,7 @@ SUPPORT_SGX_EVENT_OBJECT ?=1
 SUPPORT_SECURE_HANDLES         = 1
 SUPPORT_SRVINIT = 1
 SUPPORT_PERCONTEXT_PB = 1
-DISABLE_SGX_PB_GROW_SHRINK ?= 0
+DISABLE_SGX_PB_GROW_SHRINK ?= 1
 SUPPORT_LINUX_X86_PAT ?=1
 SUPPORT_LINUX_X86_WRITECOMBINE ?=1
 SUPPORT_SGX_LOW_LATENCY_SCHEDULING ?=1
@@ -514,8 +505,10 @@ SYS_CFLAGS += -DDEBUG_LOG_PATH_TRUNCATE=\"$(EURASIAROOT)\"
 
 SYS_INCLUDES   =       -I$(EURASIAROOT)/include4 \
                                        -I$(EURASIAROOT)/eurasiacon/includeext \
-                                       -I$(SYSBIN) \
-                                       -isystem $(KERNELDIR)/include
+                                       -I$(SYSBIN)
+ifneq ($(NO_KERNEL_MODULES),1)
+SYS_INCLUDES += -isystem $(KERNELDIR)/include
+endif
 
 
 ALL_CFLAGS_kbuild      =       $(CCFLAGS_KERNEL) -DLINUX \
@@ -534,9 +527,12 @@ export SUPPORT_XORG_SENSOR_FRAMEWORK
 endif
 
 # If we do not specify direst path to external 3pdd sources, use tarball
+EXTERNAL_3PDD_TARBALL_PATH :=
 ifeq ($(MRST_DRIVER_SOURCE),)
-EXTERNAL_3PDD_TARBALL_PATH = $(EURASIAROOT)/eurasiacon/external/$(EXTERNAL_3PDD_TARBALL)
+ifneq ($(EXTERNAL_3PDD_TARBALL),)
+EXTERNAL_3PDD_TARBALL_PATH := $(EURASIAROOT)/eurasiacon/external/$(EXTERNAL_3PDD_TARBALL)
 export EXTERNAL_3PDD_TARBALL_PATH
 endif
+endif
 
 
diff --git a/eurasiacon/build/linux/omap4430_linux/kbuild/Makefile b/eurasiacon/build/linux/omap4430_linux/kbuild/Makefile
new file mode 100644 (file)
index 0000000..079f52e
--- /dev/null
@@ -0,0 +1,35 @@
+#
+# Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+# 
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+# 
+# This program is distributed in the hope it will be useful but, except 
+# as otherwise stated in writing, without any warranty; without even the 
+# implied warranty of merchantability or fitness for a particular purpose. 
+# See the GNU General Public License for more details.
+# 
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+# 
+# The full GNU General Public License is included in this distribution in
+# the file called "COPYING".
+#
+# Contact Information:
+# Imagination Technologies Ltd. <gpl-support@imgtec.com>
+# Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
+# 
+#
+#
+
+include ../../kbuild/Makefile.kbuild
+
+EXTRA_SUBDIRS =
+
+ifneq ($(SUPPORT_DRI_DRM),1)
+EXTRA_SUBDIRS += $(EURASIAROOT)/services4/$(DISPLAY_CONTROLLER_DIR)
+endif
+
+EXTRA_SUBDIRS += $(EURASIAROOT)/services4/3rdparty/bufferclass_example
diff --git a/eurasiacon/build/linux/omap4430_linux/makefile.core b/eurasiacon/build/linux/omap4430_linux/makefile.core
new file mode 100644 (file)
index 0000000..1bfa10e
--- /dev/null
@@ -0,0 +1,37 @@
+#
+# Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+# 
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+# 
+# This program is distributed in the hope it will be useful but, except 
+# as otherwise stated in writing, without any warranty; without even the 
+# implied warranty of merchantability or fitness for a particular purpose. 
+# See the GNU General Public License for more details.
+# 
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+# 
+# The full GNU General Public License is included in this distribution in
+# the file called "COPYING".
+#
+# Contact Information:
+# Imagination Technologies Ltd. <gpl-support@imgtec.com>
+# Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
+# 
+#
+
+CORE = -DSGX$(SGXCORE) -DSUPPORT_SGX$(SGXCORE)
+
+ifeq ("$(SGXCOREREV)","")
+ifeq ("$(SGXCORE)","540")
+CORE += -DSGX_CORE_REV=110
+else
+CORE += -DUSE_SGX_CORE_REV_HEAD
+endif
+else
+CORE += -DSGX_CORE_REV=$(SGXCOREREV) 
+endif
+
diff --git a/eurasiacon/build/linux/omap4430_linux/makefile.shared_conf b/eurasiacon/build/linux/omap4430_linux/makefile.shared_conf
new file mode 100644 (file)
index 0000000..539a723
--- /dev/null
@@ -0,0 +1,90 @@
+#
+# Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+# 
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+# 
+# This program is distributed in the hope it will be useful but, except 
+# as otherwise stated in writing, without any warranty; without even the 
+# implied warranty of merchantability or fitness for a particular purpose. 
+# See the GNU General Public License for more details.
+# 
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+# 
+# The full GNU General Public License is included in this distribution in
+# the file called "COPYING".
+#
+# Contact Information:
+# Imagination Technologies Ltd. <gpl-support@imgtec.com>
+# Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
+# 
+#
+
+# Tool chain and cross-compile settings.  gcc must be in the path.
+#
+CROSS_COMPILE          ?= arm-none-linux-gnueabi-
+TOOLCHAIN              ?= $(shell dirname $(shell which $(CROSS_COMPILE)gcc))/../
+OBJCOPYFORMAT          = elf32-littlearm
+
+CROSSPLATFORM_PATHS    =
+CROSSPLATFORM_LIBS     =-ldl 
+
+SYS_EXE_LDFLAGS = -Xlinker -rpath-link=$(TOOLCHAIN)/arm-none-linux-gnueabi/lib
+
+# Cross-compile extra settings.
+#
+PVR_SYSTEM                             = omap4
+
+ARCH_CFLAGS                            = -march=armv7-a
+
+# SYS_FLAGS contains any flags specific to this system
+SYS_CFLAGS                     = -DSGX_DYNAMIC_TIMING_INFO \
+                               -DSYS_CUSTOM_POWERLOCK_WRAP \
+                               -DPVR_NO_FULL_CACHE_OPS \
+                               -DSGX_CLK_CORE_DIV5
+
+# The version of the kernel that is required for compilation
+REQUIREDKERNELVERSION = 2.6.33
+
+LDM_PLATFORM ?= 1
+
+# Only enable active power management if passive power management is
+# enabled, as indicated by LDM_PLATFORM being set to 1.  On OMAP,
+# the system can suspend in the case where active power management is
+# enabled in the SGX driver, but passive power management isn't. As
+# passive power management isn't enabled, the driver won't see the
+# system suspend/resume events, and so won't take appropriate action.
+ifeq ($(LDM_PLATFORM),1)
+SUPPORT_ACTIVE_POWER_MANAGEMENT                ?= 1
+SUPPORT_LINUX_USING_WORKQUEUES                 = 1
+DISPLAY_CONTROLLER                     = omaplfb
+DISPLAY_CONTROLLER_DIR                         = 3rdparty/dc_omapfb3_linux
+OMAP_NON_FLIP_DISPLAY                  = 0
+else
+SUPPORT_ACTIVE_POWER_MANAGEMENT        = 0
+SUPPORT_LINUX_USING_SHARED_WORKQUEUES  = 1
+DISPLAY_CONTROLLER                     = pvrlfb
+DISPLAY_CONTROLLER_DIR                         = 3rdparty/linux_framebuffer
+OMAP_NON_FLIP_DISPLAY                  = 1
+endif
+
+#
+# OPTIM contains the optimisation level in timing and release builds
+OPTIM                  = -Os
+
+SGXCORE = 540
+SUPPORT_SGX = 1
+
+SUPPORT_HW_RECOVERY = 1
+SUPPORT_SGX_HWPERF = 1
+SYS_USING_INTERRUPTS = 1
+
+PVR2D_ALT_2DHW = 1
+
+ifeq ($(SUPPORT_XORG),1)
+SUPPORT_DRI_DRM = 1
+SUPPORT_DRI_DRM_NOT_PCI = 1
+endif
index 3288915..78181d9 100644 (file)
@@ -30,8 +30,8 @@
 #define PVRVERSION_MAJ 1
 #define PVRVERSION_MIN 6
 #define PVRVERSION_BRANCH 16
-#define PVRVERSION_BUILD 3977
-#define PVRVERSION_STRING "1.6.16.3977"
+#define PVRVERSION_BUILD 4117
+#define PVRVERSION_STRING "1.6.16.4117"
 #define PVRVERSION_FILE "eurasiacon.pj"
 
 #endif 
index 2f81b11..c2c14af 100644 (file)
@@ -804,6 +804,16 @@ typedef struct PVRSRV_CURSOR_INFO_TAG
 
 } PVRSRV_CURSOR_INFO;
 
+#if defined(PDUMP_SUSPEND_IS_PER_THREAD)
+typedef struct {
+       IMG_UINT32 threadId;
+       int suspendCount;
+} PVRSRV_THREAD_SUSPEND_COUNT;
+
+#define PVRSRV_PDUMP_SUSPEND_Q_NAME "PVRSRVPDumpSuspendMsgQ"
+#define PVRSRV_PDUMP_SUSPEND_Q_LENGTH 8
+
+#endif 
 
 typedef struct _PVRSRV_REGISTRY_INFO_
 {
index 79e7b85..6cb1e7a 100644 (file)
@@ -110,7 +110,7 @@ static int bc_release(struct inode *i, struct file *f);
 static int bc_ioctl(struct inode *inode, struct file *file,
                     unsigned int cmd, unsigned long arg);
 #else
-static int bc_ioctl(struct file *file,
+static long bc_ioctl(struct file *file,
                     unsigned int cmd, unsigned long arg);
 #endif
 static int bc_mmap(struct file *filp, struct vm_area_struct *vma);
@@ -216,7 +216,7 @@ OPEN_FXN(8)
 OPEN_FXN(9)
 #endif
 
-static PVRSRV_ERROR CloseBCDevice(IMG_HANDLE hDevice)
+static PVRSRV_ERROR CloseBCDevice(IMG_UINT32 handle , IMG_HANDLE hDevice)
 {
     PVR_UNREFERENCED_PARAMETER(hDevice);
 
@@ -328,6 +328,12 @@ static int BC_CreateBuffers(int id, bc_buf_params_t *p)
         pixel_fmt = PVRSRV_PIXEL_FORMAT_FOURCC_ORG_YUYV;
         stride = p->width << 1;
         break;
+    
+    case BC_PIX_FMT_ARGB:
+        pixel_fmt = PVRSRV_PIXEL_FORMAT_ARGB8888;
+        stride = p->width << 2;
+        break;
+
     default:
         return -EINVAL;
         break;
@@ -922,7 +928,7 @@ static int bc_mmap(struct file *filp, struct vm_area_struct *vma)
 static int bc_ioctl(struct inode *inode, struct file *file,
                     unsigned int cmd, unsigned long arg)
 #else
-static int bc_ioctl(struct file *file,
+static long  bc_ioctl(struct file *file,
                     unsigned int cmd, unsigned long arg)
 #endif
 {
index 3ca0b69..388af28 100644 (file)
@@ -37,6 +37,7 @@
 #define BC_PIX_FMT_UYVY     BC_FOURCC('U', 'Y', 'V', 'Y') /*YUV 4:2:2*/
 #define BC_PIX_FMT_YUYV     BC_FOURCC('Y', 'U', 'Y', 'V') /*YUV 4:2:2*/
 #define BC_PIX_FMT_RGB565   BC_FOURCC('R', 'G', 'B', 'P') /*RGB 5:6:5*/
+#define BC_PIX_FMT_ARGB     BC_FOURCC('A', 'R', 'G', 'B') /*ARGB 8:8:8:8*/
 
 enum BC_memory {
     BC_MEMORY_MMAP          = 1,
diff --git a/services4/3rdparty/dc_omapfb3_linux/3rdparty_dc_drm_shared.h b/services4/3rdparty/dc_omapfb3_linux/3rdparty_dc_drm_shared.h
new file mode 100644 (file)
index 0000000..9b6d240
--- /dev/null
@@ -0,0 +1,45 @@
+/**********************************************************************
+ *
+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ * 
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ * 
+ * This program is distributed in the hope it will be useful but, except 
+ * as otherwise stated in writing, without any warranty; without even the 
+ * implied warranty of merchantability or fitness for a particular purpose. 
+ * See the GNU General Public License for more details.
+ * 
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * 
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
+ *
+ ******************************************************************************/
+
+#ifndef __3RDPARTY_DC_DRM_SHARED_H__
+#define __3RDPARTY_DC_DRM_SHARED_H__
+#if defined(SUPPORT_DRI_DRM)
+
+#define        PVR_DRM_DISP_CMD_ENTER_VT       1
+#define        PVR_DRM_DISP_CMD_LEAVE_VT       2
+
+#define        PVR_DRM_DISP_CMD_ON             3
+#define        PVR_DRM_DISP_CMD_STANDBY        4
+#define        PVR_DRM_DISP_CMD_SUSPEND        5
+#define        PVR_DRM_DISP_CMD_OFF            6
+
+#define        PVR_DRM_DISP_ARG_CMD            0
+#define        PVR_DRM_DISP_ARG_DEV            1
+#define        PVR_DRM_DISP_NUM_ARGS           2
+
+#endif 
+#endif 
+
diff --git a/services4/3rdparty/dc_omapfb3_linux/Kbuild b/services4/3rdparty/dc_omapfb3_linux/Kbuild
new file mode 100644 (file)
index 0000000..1e6e052
--- /dev/null
@@ -0,0 +1,29 @@
+SYS_USING_INTERRUPTS = 1
+SUPPORT_OMAP3430_OMAPFB3 =1
+SUPPORT_TI_DSS_FW = 0
+PVR_LINUX_USING_WORKQUEUES = 1
+SYS_CFLAGS.$(SYS_USING_INTERRUPTS)                      += -DSYS_USING_INTERRUPTS
+SYS_CFLAGS.$(SUPPORT_OMAP3430_OMAPFB3)                         += -DSUPPORT_OMAP3430_OMAPFB3
+SYS_CFLAGS.$(SUPPORT_TI_DSS_FW)                         += -DSUPPORT_TI_DSS_FW
+SYS_CFLAGS.$(PVR_LINUX_USING_WORKQUEUES)             += -DPVR_LINUX_USING_WORKQUEUES
+SYS_CFLAGS += -DDISPLAY_CONTROLLER=omaplfb
+
+
+EXTRA_CFLAGS = -DLINUX \
+               -I$(PVR_BUILD_DIR)/include4 \
+               -I$(PVR_BUILD_DIR)/services4/include \
+               -I$(PVR_BUILD_DIR)/services4/system/$(PVR_SYSTEM) \
+               -I$(PVR_BUILD_DIR)/services4/srvkm/env/linux \
+               -I$(PVR_BUILD_DIR)/services4/include/env/linux \
+               -I$(PVR_BUILD_DIR)/services4/system/include \
+               -I$(KERNELDIR)/drivers/video/omap2 \
+               -I$(KERNELDIR)/arch/arm/plat-omap/include \
+               $(SYS_CFLAGS.1) \
+
+ifeq ($(SUPPORT_XORG),1)
+EXTRA_CFLAGS += -DSUPPORT_DRI_DRM
+EXTRA_CFLAGS += -DPVR_DISPLAY_CONTROLLER_DRM_IOCTL
+endif
+
+obj-m := omaplfb.o
+omaplfb-y := omaplfb_displayclass.o omaplfb_linux.o
 # Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
 # 
 #
-#
-
-include $(EURASIAROOT)/eurasiacon/build/linux/kbuild/Makefile.kbuild_subdir_common
 
-MODULE         = omaplfb
+MODULE         = $(DISPLAY_CONTROLLER)
 
-INCLUDES =     -I$(EURASIAROOT)/include4 \
-               -I$(EURASIAROOT)/services4/include \
-               -I$(EURASIAROOT)/services4/system/$(PVR_SYSTEM) \
-               -I$(EURASIAROOT)/services4/system/include \
+INCLUDES =
 
-SOURCES        =       ../omaplfb_displayclass.c \
-                       ../omaplfb_linux.c
+SOURCES        =
 
 SYM_VERS_DEPS = $(EURASIAROOT)/services4/srvkm/env/linux
 
+include $(EURASIAROOT)/services4/$(DISPLAY_CONTROLLER_DIR)/makefile.linux.common
 
-
-
+include $(EURASIAROOT)/eurasiacon/build/linux/kbuild/Makefile.kbuild_subdir_common
diff --git a/services4/3rdparty/dc_omapfb3_linux/makefile.linux.common b/services4/3rdparty/dc_omapfb3_linux/makefile.linux.common
new file mode 100644 (file)
index 0000000..d5b4a30
--- /dev/null
@@ -0,0 +1,42 @@
+#
+# Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+# 
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+# 
+# This program is distributed in the hope it will be useful but, except 
+# as otherwise stated in writing, without any warranty; without even the 
+# implied warranty of merchantability or fitness for a particular purpose. 
+# See the GNU General Public License for more details.
+# 
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+# 
+# The full GNU General Public License is included in this distribution in
+# the file called "COPYING".
+#
+# Contact Information:
+# Imagination Technologies Ltd. <gpl-support@imgtec.com>
+# Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
+# 
+#
+#
+
+ifeq ($(SUPPORT_DRI_DRM),1)
+DISPLAY_CONTROLLER_SOURCES_ROOT = $(KBUILDROOT)/$(DISPLAY_CONTROLLER_DIR)
+MODULE_CFLAGS += -DPVR_DISPLAY_CONTROLLER_DRM_IOCTL
+else
+DISPLAY_CONTROLLER_SOURCES_ROOT = ..
+endif
+
+INCLUDES +=    -I$(EURASIAROOT)/include4 \
+               -I$(EURASIAROOT)/services4/include \
+               -I$(EURASIAROOT)/services4/system/$(PVR_SYSTEM) \
+               -I$(EURASIAROOT)/services4/system/include \
+               -I$(KERNELDIR)/drivers/video/omap2 \
+               -I$(KERNELDIR)/arch/arm/plat-omap/include
+
+SOURCES        +=      $(DISPLAY_CONTROLLER_SOURCES_ROOT)/omaplfb_displayclass.c \
+                       $(DISPLAY_CONTROLLER_SOURCES_ROOT)/omaplfb_linux.c
diff --git a/services4/3rdparty/dc_omapfb3_linux/omaplfb.h b/services4/3rdparty/dc_omapfb3_linux/omaplfb.h
new file mode 100644 (file)
index 0000000..881a49d
--- /dev/null
@@ -0,0 +1,269 @@
+/**********************************************************************
+ *
+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ * 
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ * 
+ * This program is distributed in the hope it will be useful but, except 
+ * as otherwise stated in writing, without any warranty; without even the 
+ * implied warranty of merchantability or fitness for a particular purpose. 
+ * See the GNU General Public License for more details.
+ * 
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * 
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
+ *
+ ******************************************************************************/
+
+#ifndef __OMAPLFB_H__
+#define __OMAPLFB_H__
+
+#include <linux/version.h>
+
+#include <asm/atomic.h>
+
+#include <linux/kernel.h>
+#include <linux/console.h>
+#include <linux/fb.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/notifier.h>
+#include <linux/mutex.h>
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#endif
+
+#define unref__ __attribute__ ((unused))
+
+typedef void *       OMAPLFB_HANDLE;
+
+typedef bool OMAPLFB_BOOL, *OMAPLFB_PBOOL;
+#define        OMAPLFB_FALSE false
+#define OMAPLFB_TRUE true
+
+typedef        atomic_t        OMAPLFB_ATOMIC_BOOL;
+
+typedef atomic_t       OMAPLFB_ATOMIC_INT;
+
+typedef struct OMAPLFB_BUFFER_TAG
+{
+       struct OMAPLFB_BUFFER_TAG       *psNext;
+       struct OMAPLFB_DEVINFO_TAG      *psDevInfo;
+
+       struct work_struct sWork;
+
+       
+       unsigned long                   ulYOffset;
+
+       
+       
+       IMG_SYS_PHYADDR                 sSysAddr;
+       IMG_CPU_VIRTADDR                sCPUVAddr;
+       PVRSRV_SYNC_DATA                *psSyncData;
+
+       OMAPLFB_HANDLE                  hCmdComplete;
+       unsigned long                   ulSwapInterval;
+} OMAPLFB_BUFFER;
+
+typedef struct OMAPLFB_SWAPCHAIN_TAG
+{
+       
+       unsigned int                    uiSwapChainID;
+
+       
+       unsigned long                   ulBufferCount;
+
+       
+       OMAPLFB_BUFFER                  *psBuffer;
+
+       
+       struct workqueue_struct         *psWorkQueue;
+
+       
+       OMAPLFB_BOOL                    bNotVSynced;
+
+       
+       int                             iBlankEvents;
+
+       
+       unsigned int                    uiFBDevID;
+} OMAPLFB_SWAPCHAIN;
+
+typedef struct OMAPLFB_FBINFO_TAG
+{
+       unsigned long       ulFBSize;
+       unsigned long       ulBufferSize;
+       unsigned long       ulRoundedBufferSize;
+       unsigned long       ulWidth;
+       unsigned long       ulHeight;
+       unsigned long       ulByteStride;
+       unsigned long       ulPhysicalWidthmm;
+       unsigned long       ulPhysicalHeightmm;
+
+       
+       
+       IMG_SYS_PHYADDR     sSysAddr;
+       IMG_CPU_VIRTADDR    sCPUVAddr;
+
+       
+       PVRSRV_PIXEL_FORMAT ePixelFormat;
+}OMAPLFB_FBINFO;
+
+typedef struct OMAPLFB_DEVINFO_TAG
+{
+       
+       unsigned int            uiFBDevID;
+
+       
+       unsigned int            uiPVRDevID;
+
+       
+       struct mutex            sCreateSwapChainMutex;
+
+       
+       OMAPLFB_BUFFER          sSystemBuffer;
+
+       
+       PVRSRV_DC_DISP2SRV_KMJTABLE     sPVRJTable;
+       
+       
+       PVRSRV_DC_SRV2DISP_KMJTABLE     sDCJTable;
+
+       
+       OMAPLFB_FBINFO          sFBInfo;
+
+       
+       OMAPLFB_SWAPCHAIN      *psSwapChain;
+
+       
+       unsigned int            uiSwapChainID;
+
+       
+       OMAPLFB_ATOMIC_BOOL     sFlushCommands;
+
+       
+       struct fb_info         *psLINFBInfo;
+
+       
+       struct notifier_block   sLINNotifBlock;
+
+       
+       
+
+       
+       IMG_DEV_VIRTADDR        sDisplayDevVAddr;
+
+       DISPLAY_INFO            sDisplayInfo;
+
+       
+       DISPLAY_FORMAT          sDisplayFormat;
+       
+       
+       DISPLAY_DIMS            sDisplayDim;
+
+       
+       OMAPLFB_ATOMIC_BOOL     sBlanked;
+
+       
+       OMAPLFB_ATOMIC_INT      sBlankEvents;
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+       
+       OMAPLFB_ATOMIC_BOOL     sEarlySuspendFlag;
+
+       struct early_suspend    sEarlySuspend;
+#endif
+
+#if defined(SUPPORT_DRI_DRM)
+       OMAPLFB_ATOMIC_BOOL     sLeaveVT;
+#endif
+
+}  OMAPLFB_DEVINFO;
+
+#define        OMAPLFB_PAGE_SIZE 4096
+
+#ifdef DEBUG
+#define        DEBUG_PRINTK(x) printk x
+#else
+#define        DEBUG_PRINTK(x)
+#endif
+
+#define DISPLAY_DEVICE_NAME "PowerVR OMAP Linux Display Driver"
+#define        DRVNAME "omaplfb"
+#define        DEVNAME DRVNAME
+#define        DRIVER_PREFIX DRVNAME
+
+typedef enum _OMAPLFB_ERROR_
+{
+       OMAPLFB_OK                             =  0,
+       OMAPLFB_ERROR_GENERIC                  =  1,
+       OMAPLFB_ERROR_OUT_OF_MEMORY            =  2,
+       OMAPLFB_ERROR_TOO_FEW_BUFFERS          =  3,
+       OMAPLFB_ERROR_INVALID_PARAMS           =  4,
+       OMAPLFB_ERROR_INIT_FAILURE             =  5,
+       OMAPLFB_ERROR_CANT_REGISTER_CALLBACK   =  6,
+       OMAPLFB_ERROR_INVALID_DEVICE           =  7,
+       OMAPLFB_ERROR_DEVICE_REGISTER_FAILED   =  8,
+       OMAPLFB_ERROR_SET_UPDATE_MODE_FAILED   =  9
+} OMAPLFB_ERROR;
+
+typedef enum _OMAPLFB_UPDATE_MODE_
+{
+       OMAPLFB_UPDATE_MODE_UNDEFINED                   = 0,
+       OMAPLFB_UPDATE_MODE_MANUAL                      = 1,
+       OMAPLFB_UPDATE_MODE_AUTO                        = 2,
+       OMAPLFB_UPDATE_MODE_DISABLED                    = 3
+} OMAPLFB_UPDATE_MODE;
+
+#ifndef UNREFERENCED_PARAMETER
+#define        UNREFERENCED_PARAMETER(param) (param) = (param)
+#endif
+
+OMAPLFB_ERROR OMAPLFBInit(void);
+OMAPLFB_ERROR OMAPLFBDeInit(void);
+
+OMAPLFB_DEVINFO *OMAPLFBGetDevInfoPtr(unsigned uiFBDevID);
+unsigned OMAPLFBMaxFBDevIDPlusOne(void);
+void *OMAPLFBAllocKernelMem(unsigned long ulSize);
+void OMAPLFBFreeKernelMem(void *pvMem);
+OMAPLFB_ERROR OMAPLFBGetLibFuncAddr(char *szFunctionName, PFN_DC_GET_PVRJTABLE *ppfnFuncTable);
+OMAPLFB_ERROR OMAPLFBCreateSwapQueue (OMAPLFB_SWAPCHAIN *psSwapChain);
+void OMAPLFBDestroySwapQueue(OMAPLFB_SWAPCHAIN *psSwapChain);
+void OMAPLFBInitBufferForSwap(OMAPLFB_BUFFER *psBuffer);
+void OMAPLFBSwapHandler(OMAPLFB_BUFFER *psBuffer);
+void OMAPLFBQueueBufferForSwap(OMAPLFB_SWAPCHAIN *psSwapChain, OMAPLFB_BUFFER *psBuffer);
+void OMAPLFBFlip(OMAPLFB_DEVINFO *psDevInfo, OMAPLFB_BUFFER *psBuffer);
+OMAPLFB_UPDATE_MODE OMAPLFBGetUpdateMode(OMAPLFB_DEVINFO *psDevInfo);
+OMAPLFB_BOOL OMAPLFBSetUpdateMode(OMAPLFB_DEVINFO *psDevInfo, OMAPLFB_UPDATE_MODE eMode);
+OMAPLFB_BOOL OMAPLFBWaitForVSync(OMAPLFB_DEVINFO *psDevInfo);
+OMAPLFB_BOOL OMAPLFBManualSync(OMAPLFB_DEVINFO *psDevInfo);
+OMAPLFB_BOOL OMAPLFBCheckModeAndSync(OMAPLFB_DEVINFO *psDevInfo);
+OMAPLFB_ERROR OMAPLFBUnblankDisplay(OMAPLFB_DEVINFO *psDevInfo);
+OMAPLFB_ERROR OMAPLFBEnableLFBEventNotification(OMAPLFB_DEVINFO *psDevInfo);
+OMAPLFB_ERROR OMAPLFBDisableLFBEventNotification(OMAPLFB_DEVINFO *psDevInfo);
+void OMAPLFBCreateSwapChainLockInit(OMAPLFB_DEVINFO *psDevInfo);
+void OMAPLFBCreateSwapChainLockDeInit(OMAPLFB_DEVINFO *psDevInfo);
+void OMAPLFBCreateSwapChainLock(OMAPLFB_DEVINFO *psDevInfo);
+void OMAPLFBCreateSwapChainUnLock(OMAPLFB_DEVINFO *psDevInfo);
+void OMAPLFBAtomicBoolInit(OMAPLFB_ATOMIC_BOOL *psAtomic, OMAPLFB_BOOL bVal);
+void OMAPLFBAtomicBoolDeInit(OMAPLFB_ATOMIC_BOOL *psAtomic);
+void OMAPLFBAtomicBoolSet(OMAPLFB_ATOMIC_BOOL *psAtomic, OMAPLFB_BOOL bVal);
+OMAPLFB_BOOL OMAPLFBAtomicBoolRead(OMAPLFB_ATOMIC_BOOL *psAtomic);
+void OMAPLFBAtomicIntInit(OMAPLFB_ATOMIC_INT *psAtomic, int iVal);
+void OMAPLFBAtomicIntDeInit(OMAPLFB_ATOMIC_INT *psAtomic);
+void OMAPLFBAtomicIntSet(OMAPLFB_ATOMIC_INT *psAtomic, int iVal);
+int OMAPLFBAtomicIntRead(OMAPLFB_ATOMIC_INT *psAtomic);
+void OMAPLFBAtomicIntInc(OMAPLFB_ATOMIC_INT *psAtomic);
+
+#endif 
+
diff --git a/services4/3rdparty/dc_omapfb3_linux/omaplfb_displayclass.c b/services4/3rdparty/dc_omapfb3_linux/omaplfb_displayclass.c
new file mode 100644 (file)
index 0000000..0e035ad
--- /dev/null
@@ -0,0 +1,1228 @@
+/**********************************************************************
+ *
+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ * 
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ * 
+ * This program is distributed in the hope it will be useful but, except 
+ * as otherwise stated in writing, without any warranty; without even the 
+ * implied warranty of merchantability or fitness for a particular purpose. 
+ * See the GNU General Public License for more details.
+ * 
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * 
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/console.h>
+#include <linux/fb.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/notifier.h>
+
+#include "img_defs.h"
+#include "servicesext.h"
+#include "kerneldisplay.h"
+#include "omaplfb.h"
+
+#define OMAPLFB_COMMAND_COUNT          1
+
+#define        OMAPLFB_VSYNC_SETTLE_COUNT      5
+
+//#define      OMAPLFB_MAX_NUM_DEVICES         FB_MAX
+#define        OMAPLFB_MAX_NUM_DEVICES 1
+
+#if (OMAPLFB_MAX_NUM_DEVICES > FB_MAX)
+#error "OMAPLFB_MAX_NUM_DEVICES must not be greater than FB_MAX"
+#endif
+
+static OMAPLFB_DEVINFO *gapsDevInfo[OMAPLFB_MAX_NUM_DEVICES];
+
+static PFN_DC_GET_PVRJTABLE gpfnGetPVRJTable = NULL;
+
+static inline unsigned long RoundUpToMultiple(unsigned long x, unsigned long y)
+{
+       unsigned long div = x / y;
+       unsigned long rem = x % y;
+
+       return (div + ((rem == 0) ? 0 : 1)) * y;
+}
+
+static unsigned long GCD(unsigned long x, unsigned long y)
+{
+       while (y != 0)
+       {
+               unsigned long r = x % y;
+               x = y;
+               y = r;
+       }
+
+       return x;
+}
+
+static unsigned long LCM(unsigned long x, unsigned long y)
+{
+       unsigned long gcd = GCD(x, y);
+
+       return (gcd == 0) ? 0 : ((x / gcd) * y);
+}
+
+unsigned OMAPLFBMaxFBDevIDPlusOne(void)
+{
+       return OMAPLFB_MAX_NUM_DEVICES;
+}
+
+OMAPLFB_DEVINFO *OMAPLFBGetDevInfoPtr(unsigned uiFBDevID)
+{
+       WARN_ON(uiFBDevID >= OMAPLFBMaxFBDevIDPlusOne());
+
+       if (uiFBDevID >= OMAPLFB_MAX_NUM_DEVICES)
+       {
+               return NULL;
+       }
+
+       return gapsDevInfo[uiFBDevID];
+}
+
+static inline void OMAPLFBSetDevInfoPtr(unsigned uiFBDevID, OMAPLFB_DEVINFO *psDevInfo)
+{
+       WARN_ON(uiFBDevID >= OMAPLFB_MAX_NUM_DEVICES);
+
+       if (uiFBDevID < OMAPLFB_MAX_NUM_DEVICES)
+       {
+               gapsDevInfo[uiFBDevID] = psDevInfo;
+       }
+}
+
+static inline OMAPLFB_BOOL SwapChainHasChanged(OMAPLFB_DEVINFO *psDevInfo, OMAPLFB_SWAPCHAIN *psSwapChain)
+{
+       return (psDevInfo->psSwapChain != psSwapChain) ||
+               (psDevInfo->uiSwapChainID != psSwapChain->uiSwapChainID);
+}
+
+static inline OMAPLFB_BOOL DontWaitForVSync(OMAPLFB_DEVINFO *psDevInfo)
+{
+       OMAPLFB_BOOL bDontWait;
+
+       bDontWait = OMAPLFBAtomicBoolRead(&psDevInfo->sBlanked) ||
+                       OMAPLFBAtomicBoolRead(&psDevInfo->sFlushCommands);
+
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+       bDontWait = bDontWait || OMAPLFBAtomicBoolRead(&psDevInfo->sEarlySuspendFlag);
+#endif
+#if defined(SUPPORT_DRI_DRM)
+       bDontWait = bDontWait || OMAPLFBAtomicBoolRead(&psDevInfo->sLeaveVT);
+#endif
+       return bDontWait;
+}
+
+static IMG_VOID SetDCState(IMG_HANDLE hDevice, IMG_UINT32 ui32State)
+{
+       OMAPLFB_DEVINFO *psDevInfo = (OMAPLFB_DEVINFO *)hDevice;
+
+       switch (ui32State)
+       {
+               case DC_STATE_FLUSH_COMMANDS:
+                       OMAPLFBAtomicBoolSet(&psDevInfo->sFlushCommands, OMAPLFB_TRUE);
+                       break;
+               case DC_STATE_NO_FLUSH_COMMANDS:
+                       OMAPLFBAtomicBoolSet(&psDevInfo->sFlushCommands, OMAPLFB_FALSE);
+                       break;
+               default:
+                       break;
+       }
+}
+
+static PVRSRV_ERROR OpenDCDevice(IMG_UINT32 uiPVRDevID,
+                                 IMG_HANDLE *phDevice,
+                                 PVRSRV_SYNC_DATA* psSystemBufferSyncData)
+{
+       OMAPLFB_DEVINFO *psDevInfo;
+       OMAPLFB_ERROR eError;
+       unsigned uiMaxFBDevIDPlusOne = OMAPLFBMaxFBDevIDPlusOne();
+       unsigned i;
+
+       for (i = 0; i < uiMaxFBDevIDPlusOne; i++)
+       {
+               psDevInfo = OMAPLFBGetDevInfoPtr(i);
+               if (psDevInfo != NULL && psDevInfo->uiPVRDevID == uiPVRDevID)
+               {
+                       break;
+               }
+       }
+       if (i == uiMaxFBDevIDPlusOne)
+       {
+               DEBUG_PRINTK((KERN_WARNING DRIVER_PREFIX
+                       ": %s: PVR Device %u not found\n", __FUNCTION__, uiPVRDevID));
+               return PVRSRV_ERROR_INVALID_DEVICE;
+       }
+
+       
+       psDevInfo->sSystemBuffer.psSyncData = psSystemBufferSyncData;
+       
+       eError = OMAPLFBUnblankDisplay(psDevInfo);
+       if (eError != OMAPLFB_OK)
+       {
+               DEBUG_PRINTK((KERN_WARNING DRIVER_PREFIX
+                       ": %s: Device %u: OMAPLFBUnblankDisplay failed (%d)\n", __FUNCTION__, psDevInfo->uiFBDevID, eError));
+               return PVRSRV_ERROR_UNBLANK_DISPLAY_FAILED;
+       }
+
+       
+       *phDevice = (IMG_HANDLE)psDevInfo;
+       
+       return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR CloseDCDevice(IMG_HANDLE hDevice)
+{
+#if defined(SUPPORT_DRI_DRM)
+       OMAPLFB_DEVINFO *psDevInfo = (OMAPLFB_DEVINFO *)hDevice;
+
+       OMAPLFBAtomicBoolSet(&psDevInfo->sLeaveVT, OMAPLFB_FALSE);
+       (void) OMAPLFBUnblankDisplay(psDevInfo);
+#else
+       UNREFERENCED_PARAMETER(hDevice);
+#endif
+       return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR EnumDCFormats(IMG_HANDLE hDevice,
+                                  IMG_UINT32 *pui32NumFormats,
+                                  DISPLAY_FORMAT *psFormat)
+{
+       OMAPLFB_DEVINFO *psDevInfo;
+       
+       if(!hDevice || !pui32NumFormats)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
+       
+       *pui32NumFormats = 1;
+       
+       if(psFormat)
+       {
+               psFormat[0] = psDevInfo->sDisplayFormat;
+       }
+
+       return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR EnumDCDims(IMG_HANDLE hDevice, 
+                               DISPLAY_FORMAT *psFormat,
+                               IMG_UINT32 *pui32NumDims,
+                               DISPLAY_DIMS *psDim)
+{
+       OMAPLFB_DEVINFO *psDevInfo;
+
+       if(!hDevice || !psFormat || !pui32NumDims)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
+
+       *pui32NumDims = 1;
+
+       
+       if(psDim)
+       {
+               psDim[0] = psDevInfo->sDisplayDim;
+       }
+       
+       return PVRSRV_OK;
+}
+
+
+static PVRSRV_ERROR GetDCSystemBuffer(IMG_HANDLE hDevice, IMG_HANDLE *phBuffer)
+{
+       OMAPLFB_DEVINFO *psDevInfo;
+       
+       if(!hDevice || !phBuffer)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
+
+       *phBuffer = (IMG_HANDLE)&psDevInfo->sSystemBuffer;
+
+       return PVRSRV_OK;
+}
+
+
+static PVRSRV_ERROR GetDCInfo(IMG_HANDLE hDevice, DISPLAY_INFO *psDCInfo)
+{
+       OMAPLFB_DEVINFO *psDevInfo;
+       
+       if(!hDevice || !psDCInfo)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
+
+       *psDCInfo = psDevInfo->sDisplayInfo;
+
+       return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR GetDCBufferAddr(IMG_HANDLE        hDevice,
+                                    IMG_HANDLE        hBuffer, 
+                                    IMG_SYS_PHYADDR   **ppsSysAddr,
+                                    IMG_UINT32        *pui32ByteSize,
+                                    IMG_VOID          **ppvCpuVAddr,
+                                    IMG_HANDLE        *phOSMapInfo,
+                                    IMG_BOOL          *pbIsContiguous,
+                                       IMG_UINT32                *pui32TilingStride)
+{
+       OMAPLFB_DEVINFO *psDevInfo;
+       OMAPLFB_BUFFER *psSystemBuffer;
+
+       UNREFERENCED_PARAMETER(pui32TilingStride);
+
+       if(!hDevice)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       if(!hBuffer)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       if (!ppsSysAddr)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       if (!pui32ByteSize)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
+
+       psSystemBuffer = (OMAPLFB_BUFFER *)hBuffer;
+
+       *ppsSysAddr = &psSystemBuffer->sSysAddr;
+
+       *pui32ByteSize = (IMG_UINT32)psDevInfo->sFBInfo.ulBufferSize;
+
+       if (ppvCpuVAddr)
+       {
+               *ppvCpuVAddr = psSystemBuffer->sCPUVAddr;
+       }
+
+       if (phOSMapInfo)
+       {
+               *phOSMapInfo = (IMG_HANDLE)0;
+       }
+
+       if (pbIsContiguous)
+       {
+               *pbIsContiguous = IMG_TRUE;
+       }
+
+       return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR CreateDCSwapChain(IMG_HANDLE hDevice,
+                                      IMG_UINT32 ui32Flags,
+                                      DISPLAY_SURF_ATTRIBUTES *psDstSurfAttrib,
+                                      DISPLAY_SURF_ATTRIBUTES *psSrcSurfAttrib,
+                                      IMG_UINT32 ui32BufferCount,
+                                      PVRSRV_SYNC_DATA **ppsSyncData,
+                                      IMG_UINT32 ui32OEMFlags,
+                                      IMG_HANDLE *phSwapChain,
+                                      IMG_UINT32 *pui32SwapChainID)
+{
+       OMAPLFB_DEVINFO *psDevInfo;
+       OMAPLFB_SWAPCHAIN *psSwapChain;
+       OMAPLFB_BUFFER *psBuffer;
+       IMG_UINT32 i;
+       PVRSRV_ERROR eError;
+       IMG_UINT32 ui32BuffersToSkip;
+
+       UNREFERENCED_PARAMETER(ui32OEMFlags);
+       
+       
+       if(!hDevice
+       || !psDstSurfAttrib
+       || !psSrcSurfAttrib
+       || !ppsSyncData
+       || !phSwapChain)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
+       
+       
+       if (psDevInfo->sDisplayInfo.ui32MaxSwapChains == 0)
+       {
+               return PVRSRV_ERROR_NOT_SUPPORTED;
+       }
+
+       OMAPLFBCreateSwapChainLock(psDevInfo);
+
+       
+       if(psDevInfo->psSwapChain != NULL)
+       {
+               eError = PVRSRV_ERROR_FLIP_CHAIN_EXISTS;
+               goto ExitUnLock;
+       }
+       
+       
+       if(ui32BufferCount > psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers)
+       {
+               eError = PVRSRV_ERROR_TOOMANYBUFFERS;
+               goto ExitUnLock;
+       }
+       
+       if ((psDevInfo->sFBInfo.ulRoundedBufferSize * (unsigned long)ui32BufferCount) > psDevInfo->sFBInfo.ulFBSize)
+       {
+               eError = PVRSRV_ERROR_TOOMANYBUFFERS;
+               goto ExitUnLock;
+       }
+
+       
+       ui32BuffersToSkip = psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers - ui32BufferCount;
+
+       
+       if(psDstSurfAttrib->pixelformat != psDevInfo->sDisplayFormat.pixelformat
+       || psDstSurfAttrib->sDims.ui32ByteStride != psDevInfo->sDisplayDim.ui32ByteStride
+       || psDstSurfAttrib->sDims.ui32Width != psDevInfo->sDisplayDim.ui32Width
+       || psDstSurfAttrib->sDims.ui32Height != psDevInfo->sDisplayDim.ui32Height)
+       {
+               
+               eError = PVRSRV_ERROR_INVALID_PARAMS;
+               goto ExitUnLock;
+       }               
+
+       if(psDstSurfAttrib->pixelformat != psSrcSurfAttrib->pixelformat
+       || psDstSurfAttrib->sDims.ui32ByteStride != psSrcSurfAttrib->sDims.ui32ByteStride
+       || psDstSurfAttrib->sDims.ui32Width != psSrcSurfAttrib->sDims.ui32Width
+       || psDstSurfAttrib->sDims.ui32Height != psSrcSurfAttrib->sDims.ui32Height)
+       {
+               
+               eError = PVRSRV_ERROR_INVALID_PARAMS;
+               goto ExitUnLock;
+       }               
+
+       
+       UNREFERENCED_PARAMETER(ui32Flags);
+       
+#if defined(PVR_OMAPFB3_UPDATE_MODE)
+       if (!OMAPLFBSetUpdateMode(psDevInfo, PVR_OMAPFB3_UPDATE_MODE))
+       {
+               printk(KERN_WARNING DRIVER_PREFIX ": %s: Device %u: Couldn't set frame buffer update mode %d\n", __FUNCTION__, psDevInfo->uiFBDevID, PVR_OMAPFB3_UPDATE_MODE);
+       }
+#endif
+       
+       psSwapChain = (OMAPLFB_SWAPCHAIN*)OMAPLFBAllocKernelMem(sizeof(OMAPLFB_SWAPCHAIN));
+       if(!psSwapChain)
+       {
+               eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+               goto ExitUnLock;
+       }
+
+       psBuffer = (OMAPLFB_BUFFER*)OMAPLFBAllocKernelMem(sizeof(OMAPLFB_BUFFER) * ui32BufferCount);
+       if(!psBuffer)
+       {
+               eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+               goto ErrorFreeSwapChain;
+       }
+
+       psSwapChain->ulBufferCount = (unsigned long)ui32BufferCount;
+       psSwapChain->psBuffer = psBuffer;
+       psSwapChain->bNotVSynced = OMAPLFB_TRUE;
+       psSwapChain->uiFBDevID = psDevInfo->uiFBDevID;
+
+       
+       for(i=0; i<ui32BufferCount-1; i++)
+       {
+               psBuffer[i].psNext = &psBuffer[i+1];
+       }
+       
+       psBuffer[i].psNext = &psBuffer[0];
+
+       
+       for(i=0; i<ui32BufferCount; i++)
+       {
+               IMG_UINT32 ui32SwapBuffer = i + ui32BuffersToSkip;
+               IMG_UINT32 ui32BufferOffset = ui32SwapBuffer * (IMG_UINT32)psDevInfo->sFBInfo.ulRoundedBufferSize;
+
+               psBuffer[i].psSyncData = ppsSyncData[i];
+
+               psBuffer[i].sSysAddr.uiAddr = psDevInfo->sFBInfo.sSysAddr.uiAddr + ui32BufferOffset;
+               psBuffer[i].sCPUVAddr = psDevInfo->sFBInfo.sCPUVAddr + ui32BufferOffset;
+               psBuffer[i].ulYOffset = ui32BufferOffset / psDevInfo->sFBInfo.ulByteStride;
+               psBuffer[i].psDevInfo = psDevInfo;
+
+               OMAPLFBInitBufferForSwap(&psBuffer[i]);
+       }
+
+       if (OMAPLFBCreateSwapQueue(psSwapChain) != OMAPLFB_OK)
+       { 
+               printk(KERN_WARNING DRIVER_PREFIX ": %s: Device %u: Failed to create workqueue\n", __FUNCTION__, psDevInfo->uiFBDevID);
+               eError = PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR;
+               goto ErrorFreeBuffers;
+       }
+
+       if (OMAPLFBEnableLFBEventNotification(psDevInfo)!= OMAPLFB_OK)
+       {
+               eError = PVRSRV_ERROR_UNABLE_TO_ENABLE_EVENT;
+               printk(KERN_WARNING DRIVER_PREFIX ": %s: Device %u: Couldn't enable framebuffer event notification\n", __FUNCTION__, psDevInfo->uiFBDevID);
+               goto ErrorDestroySwapQueue;
+       }
+
+       psDevInfo->uiSwapChainID++;
+       if (psDevInfo->uiSwapChainID == 0)
+       {
+               psDevInfo->uiSwapChainID++;
+       }
+
+       psSwapChain->uiSwapChainID = psDevInfo->uiSwapChainID;
+
+       psDevInfo->psSwapChain = psSwapChain;
+
+       *pui32SwapChainID = psDevInfo->uiSwapChainID;
+
+       *phSwapChain = (IMG_HANDLE)psSwapChain;
+
+       eError = PVRSRV_OK;
+       goto ExitUnLock;
+
+ErrorDestroySwapQueue:
+       OMAPLFBDestroySwapQueue(psSwapChain);
+ErrorFreeBuffers:
+       OMAPLFBFreeKernelMem(psBuffer);
+ErrorFreeSwapChain:
+       OMAPLFBFreeKernelMem(psSwapChain);
+ExitUnLock:
+       OMAPLFBCreateSwapChainUnLock(psDevInfo);
+       return eError;
+}
+
+/* Display class entry point: tear down a swap chain created by
+ * CreateDCSwapChain.  Drains and destroys the swap work queue, disables
+ * framebuffer event notification, frees the buffer array and chain, and
+ * flips the display back to the system (console) buffer.
+ * NOTE(review): eError is declared OMAPLFB_ERROR but also holds
+ * PVRSRV_ERROR values and is returned as PVRSRV_ERROR - this relies on
+ * implicit enum conversion; confirm the enum values do not collide.
+ */
+static PVRSRV_ERROR DestroyDCSwapChain(IMG_HANDLE hDevice,
+       IMG_HANDLE hSwapChain)
+{
+       OMAPLFB_DEVINFO *psDevInfo;
+       OMAPLFB_SWAPCHAIN *psSwapChain;
+       OMAPLFB_ERROR eError;
+
+       
+       if(!hDevice || !hSwapChain)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+       
+       psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
+       psSwapChain = (OMAPLFB_SWAPCHAIN*)hSwapChain;
+
+       OMAPLFBCreateSwapChainLock(psDevInfo);
+
+       /* Reject a stale handle: the chain recorded in psDevInfo must match. */
+       if (SwapChainHasChanged(psDevInfo, psSwapChain))
+       {
+               printk(KERN_WARNING DRIVER_PREFIX
+                       ": %s: Device %u: Swap chain mismatch\n", __FUNCTION__, psDevInfo->uiFBDevID);
+
+               eError = PVRSRV_ERROR_INVALID_PARAMS;
+               goto ExitUnLock;
+       }
+
+       /* Flush any outstanding swaps before freeing the buffers they use. */
+       OMAPLFBDestroySwapQueue(psSwapChain);
+
+       /* Failure to disable notification is logged but not fatal. */
+       eError = OMAPLFBDisableLFBEventNotification(psDevInfo);
+       if (eError != OMAPLFB_OK)
+       {
+               printk(KERN_WARNING DRIVER_PREFIX ": %s: Device %u: Couldn't disable framebuffer event notification\n", __FUNCTION__, psDevInfo->uiFBDevID);
+       }
+
+       
+       OMAPLFBFreeKernelMem(psSwapChain->psBuffer);
+       OMAPLFBFreeKernelMem(psSwapChain);
+
+       psDevInfo->psSwapChain = NULL;
+
+       /* Return the display to the system buffer and resync the mode. */
+       OMAPLFBFlip(psDevInfo, &psDevInfo->sSystemBuffer);
+       (void) OMAPLFBCheckModeAndSync(psDevInfo);
+
+       eError = PVRSRV_OK;
+
+ExitUnLock:
+       OMAPLFBCreateSwapChainUnLock(psDevInfo);
+
+       return eError;
+}
+
+/* Setting a destination rectangle is not supported by this display class
+ * device; all parameters are ignored.
+ */
+static PVRSRV_ERROR SetDCDstRect(IMG_HANDLE hDevice,
+       IMG_HANDLE hSwapChain,
+       IMG_RECT *psRect)
+{
+       UNREFERENCED_PARAMETER(psRect);
+       UNREFERENCED_PARAMETER(hSwapChain);
+       UNREFERENCED_PARAMETER(hDevice);
+
+       return PVRSRV_ERROR_NOT_SUPPORTED;
+}
+
+/* Setting a source rectangle is not supported by this display class
+ * device; all parameters are ignored.
+ */
+static PVRSRV_ERROR SetDCSrcRect(IMG_HANDLE hDevice,
+                                 IMG_HANDLE hSwapChain,
+                                 IMG_RECT *psRect)
+{
+       UNREFERENCED_PARAMETER(psRect);
+       UNREFERENCED_PARAMETER(hSwapChain);
+       UNREFERENCED_PARAMETER(hDevice);
+
+       return PVRSRV_ERROR_NOT_SUPPORTED;
+}
+
+/* Destination colour keying is not supported by this display class
+ * device; all parameters are ignored.
+ */
+static PVRSRV_ERROR SetDCDstColourKey(IMG_HANDLE hDevice,
+                                      IMG_HANDLE hSwapChain,
+                                      IMG_UINT32 ui32CKColour)
+{
+       UNREFERENCED_PARAMETER(ui32CKColour);
+       UNREFERENCED_PARAMETER(hSwapChain);
+       UNREFERENCED_PARAMETER(hDevice);
+
+       return PVRSRV_ERROR_NOT_SUPPORTED;
+}
+
+/* Source colour keying is not supported by this display class device;
+ * all parameters are ignored.
+ */
+static PVRSRV_ERROR SetDCSrcColourKey(IMG_HANDLE hDevice,
+                                      IMG_HANDLE hSwapChain,
+                                      IMG_UINT32 ui32CKColour)
+{
+       UNREFERENCED_PARAMETER(ui32CKColour);
+       UNREFERENCED_PARAMETER(hSwapChain);
+       UNREFERENCED_PARAMETER(hDevice);
+
+       return PVRSRV_ERROR_NOT_SUPPORTED;
+}
+
+/* Display class entry point: report the number of buffers in the swap
+ * chain and return a handle for each one.  phBuffer must be able to hold
+ * at least ulBufferCount handles (PVR services sizes it from the value
+ * returned by CreateDCSwapChain).
+ */
+static PVRSRV_ERROR GetDCBuffers(IMG_HANDLE hDevice,
+                                 IMG_HANDLE hSwapChain,
+                                 IMG_UINT32 *pui32BufferCount,
+                                 IMG_HANDLE *phBuffer)
+{
+       OMAPLFB_DEVINFO   *psDevInfo;
+       OMAPLFB_SWAPCHAIN *psSwapChain;
+       PVRSRV_ERROR eError;
+       unsigned i;
+       
+       
+       if(!hDevice 
+       || !hSwapChain
+       || !pui32BufferCount
+       || !phBuffer)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+       
+       psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
+       psSwapChain = (OMAPLFB_SWAPCHAIN*)hSwapChain;
+
+       OMAPLFBCreateSwapChainLock(psDevInfo);
+
+       /* Reject a handle to a swap chain that has since been destroyed. */
+       if (SwapChainHasChanged(psDevInfo, psSwapChain))
+       {
+               printk(KERN_WARNING DRIVER_PREFIX
+                       ": %s: Device %u: Swap chain mismatch\n", __FUNCTION__, psDevInfo->uiFBDevID);
+
+               eError = PVRSRV_ERROR_INVALID_PARAMS;
+               goto Exit;
+       }
+       
+       
+       *pui32BufferCount = (IMG_UINT32)psSwapChain->ulBufferCount;
+       
+       /* Hand back one opaque handle per buffer in the chain. */
+       for(i=0; i<psSwapChain->ulBufferCount; i++)
+       {
+               phBuffer[i] = (IMG_HANDLE)&psSwapChain->psBuffer[i];
+       }
+       
+       eError = PVRSRV_OK;
+
+Exit:
+       OMAPLFBCreateSwapChainUnLock(psDevInfo);
+
+       return eError;
+}
+
+/* No action is required here; buffer swaps for this device are handled
+ * through the flip command path instead.  The call succeeds with no side
+ * effects.
+ */
+static PVRSRV_ERROR SwapToDCBuffer(IMG_HANDLE hDevice,
+                                   IMG_HANDLE hBuffer,
+                                   IMG_UINT32 ui32SwapInterval,
+                                   IMG_HANDLE hPrivateTag,
+                                   IMG_UINT32 ui32ClipRectCount,
+                                   IMG_RECT *psClipRect)
+{
+       UNREFERENCED_PARAMETER(psClipRect);
+       UNREFERENCED_PARAMETER(ui32ClipRectCount);
+       UNREFERENCED_PARAMETER(hPrivateTag);
+       UNREFERENCED_PARAMETER(ui32SwapInterval);
+       UNREFERENCED_PARAMETER(hBuffer);
+       UNREFERENCED_PARAMETER(hDevice);
+
+       return PVRSRV_OK;
+}
+
+/* No action is required to swap back to the system buffer here; the call
+ * succeeds with no side effects.
+ */
+static PVRSRV_ERROR SwapToDCSystem(IMG_HANDLE hDevice,
+                                   IMG_HANDLE hSwapChain)
+{
+       UNREFERENCED_PARAMETER(hSwapChain);
+       UNREFERENCED_PARAMETER(hDevice);
+
+       return PVRSRV_OK;
+}
+
+/* Wait for OMAPLFB_VSYNC_SETTLE_COUNT consecutive vertical syncs so the
+ * display has time to settle.  Returns OMAPLFB_FALSE if vsync waiting is
+ * currently pointless (see DontWaitForVSync) or a wait fails.
+ */
+static OMAPLFB_BOOL WaitForVSyncSettle(OMAPLFB_DEVINFO *psDevInfo)
+{
+       unsigned uiSync;
+
+       for (uiSync = 0; uiSync < OMAPLFB_VSYNC_SETTLE_COUNT; uiSync++)
+       {
+               if (DontWaitForVSync(psDevInfo) || !OMAPLFBWaitForVSync(psDevInfo))
+               {
+                       return OMAPLFB_FALSE;
+               }
+       }
+
+       return OMAPLFB_TRUE;
+}
+
+/* Work queue handler body: perform the flip for one queued buffer and then
+ * wait for vsync (or a manual-update sync) as appropriate before telling
+ * PVR services the swap command has completed.
+ */
+void OMAPLFBSwapHandler(OMAPLFB_BUFFER *psBuffer)
+{
+       OMAPLFB_DEVINFO *psDevInfo = psBuffer->psDevInfo;
+       OMAPLFB_SWAPCHAIN *psSwapChain = psDevInfo->psSwapChain;
+       OMAPLFB_BOOL bPreviouslyNotVSynced;
+
+#if defined(SUPPORT_DRI_DRM)
+       /* Don't touch the display while another VT owns it. */
+       if (!OMAPLFBAtomicBoolRead(&psDevInfo->sLeaveVT))
+#endif
+       {
+               OMAPLFBFlip(psDevInfo, psBuffer);
+       }
+
+       /* Assume no vsync until one of the cases below proves otherwise. */
+       bPreviouslyNotVSynced = psSwapChain->bNotVSynced;
+       psSwapChain->bNotVSynced = OMAPLFB_TRUE;
+
+
+       if (!DontWaitForVSync(psDevInfo))
+       {
+               OMAPLFB_UPDATE_MODE eMode = OMAPLFBGetUpdateMode(psDevInfo);
+               int iBlankEvents = OMAPLFBAtomicIntRead(&psDevInfo->sBlankEvents);
+
+               switch(eMode)
+               {
+                       case OMAPLFB_UPDATE_MODE_AUTO:
+                               psSwapChain->bNotVSynced = OMAPLFB_FALSE;
+
+                               /* After a missed vsync or a (un)blank event, let the
+                                * display settle over several vsyncs; otherwise a
+                                * single wait honours a non-zero swap interval.
+                                */
+                               if (bPreviouslyNotVSynced || psSwapChain->iBlankEvents != iBlankEvents)
+                               {
+                                       psSwapChain->iBlankEvents = iBlankEvents;
+                                       psSwapChain->bNotVSynced = !WaitForVSyncSettle(psDevInfo);
+                               } else if (psBuffer->ulSwapInterval != 0)
+                               {
+                                       psSwapChain->bNotVSynced = !OMAPLFBWaitForVSync(psDevInfo);
+                               }
+                               break;
+#if defined(PVR_OMAPFB3_MANUAL_UPDATE_SYNC_IN_SWAP)
+                       case OMAPLFB_UPDATE_MODE_MANUAL:
+                               if (psBuffer->ulSwapInterval != 0)
+                               {
+                                       (void) OMAPLFBManualSync(psDevInfo);
+                               }
+                               break;
+#endif
+                       default:
+                               break;
+               }
+       }
+
+       /* Signal PVR services that this swap command has been processed. */
+       psDevInfo->sPVRJTable.pfnPVRSRVCmdComplete((IMG_HANDLE)psBuffer->hCmdComplete, IMG_TRUE);
+}
+
+/* PVR services command handler for DC_FLIP_COMMAND: validate the command
+ * packet and queue the target buffer on the swap chain's work queue for
+ * asynchronous flipping (completion is signalled by OMAPLFBSwapHandler).
+ * Returns IMG_FALSE only for malformed commands.
+ */
+static IMG_BOOL ProcessFlip(IMG_HANDLE  hCmdCookie,
+                            IMG_UINT32  ui32DataSize,
+                            IMG_VOID   *pvData)
+{
+       DISPLAYCLASS_FLIP_COMMAND *psFlipCmd;
+       OMAPLFB_DEVINFO *psDevInfo;
+       OMAPLFB_BUFFER *psBuffer;
+       OMAPLFB_SWAPCHAIN *psSwapChain;
+
+       
+       if(!hCmdCookie || !pvData)
+       {
+               return IMG_FALSE;
+       }
+
+       
+       psFlipCmd = (DISPLAYCLASS_FLIP_COMMAND*)pvData;
+
+       /* NOTE(review): psFlipCmd cannot be IMG_NULL here (pvData was checked
+        * above); only the size check is live. */
+       if (psFlipCmd == IMG_NULL || sizeof(DISPLAYCLASS_FLIP_COMMAND) != ui32DataSize)
+       {
+               return IMG_FALSE;
+       }
+
+       
+       psDevInfo = (OMAPLFB_DEVINFO*)psFlipCmd->hExtDevice;
+       psBuffer = (OMAPLFB_BUFFER*)psFlipCmd->hExtBuffer;
+       psSwapChain = (OMAPLFB_SWAPCHAIN*) psFlipCmd->hExtSwapChain;
+
+       OMAPLFBCreateSwapChainLock(psDevInfo);
+
+       if (SwapChainHasChanged(psDevInfo, psSwapChain))
+       {
+               /* Chain destroyed while the command was in flight: drop it. */
+               DEBUG_PRINTK((KERN_WARNING DRIVER_PREFIX
+                       ": %s: Device %u (PVR Device ID %u): The swap chain has been destroyed\n",
+                       __FUNCTION__, psDevInfo->uiFBDevID, psDevInfo->uiPVRDevID));
+       }
+       else
+       {
+               psBuffer->hCmdComplete = (OMAPLFB_HANDLE)hCmdCookie;
+               psBuffer->ulSwapInterval = (unsigned long)psFlipCmd->ui32SwapInterval;
+
+               OMAPLFBQueueBufferForSwap(psSwapChain, psBuffer);
+       }
+
+       OMAPLFBCreateSwapChainUnLock(psDevInfo);
+
+       return IMG_TRUE;
+}
+
+
+/* Attach to the Linux framebuffer device uiFBDevID: take a reference on
+ * the fbdev module, open the framebuffer, and record its geometry, pixel
+ * format and addresses in psDevInfo->sFBInfo.  Runs under the console
+ * semaphore to keep the fb state stable while it is examined.
+ * Returns OMAPLFB_OK on success.
+ */
+static OMAPLFB_ERROR OMAPLFBInitFBDev(OMAPLFB_DEVINFO *psDevInfo)
+{
+       struct fb_info *psLINFBInfo;
+       struct module *psLINFBOwner;
+       OMAPLFB_FBINFO *psPVRFBInfo = &psDevInfo->sFBInfo;
+       OMAPLFB_ERROR eError = OMAPLFB_ERROR_GENERIC;
+       unsigned long FBSize;
+       unsigned long ulLCM;
+       unsigned uiFBDevID = psDevInfo->uiFBDevID;
+
+       acquire_console_sem();
+
+       psLINFBInfo = registered_fb[uiFBDevID];
+       if (psLINFBInfo == NULL)
+       {
+               eError = OMAPLFB_ERROR_INVALID_DEVICE;
+               goto ErrorRelSem;
+       }
+
+       /* Prefer screen_size; fall back to the fixed-info memory length. */
+       FBSize = (psLINFBInfo->screen_size) != 0 ?
+                                       psLINFBInfo->screen_size :
+                                       psLINFBInfo->fix.smem_len;
+
+       
+       if (FBSize == 0 || psLINFBInfo->fix.line_length == 0)
+       {
+               eError = OMAPLFB_ERROR_INVALID_DEVICE;
+               goto ErrorRelSem;
+       }
+
+       /* Pin the fbdev driver module for the lifetime of this device. */
+       psLINFBOwner = psLINFBInfo->fbops->owner;
+       if (!try_module_get(psLINFBOwner))
+       {
+               printk(KERN_INFO DRIVER_PREFIX
+                       ": %s: Device %u: Couldn't get framebuffer module\n", __FUNCTION__, uiFBDevID);
+
+               goto ErrorRelSem;
+       }
+
+       if (psLINFBInfo->fbops->fb_open != NULL)
+       {
+               int res;
+
+               res = psLINFBInfo->fbops->fb_open(psLINFBInfo, 0);
+               if (res != 0)
+               {
+                       printk(KERN_INFO DRIVER_PREFIX
+                               " %s: Device %u: Couldn't open framebuffer(%d)\n", __FUNCTION__, uiFBDevID, res);
+
+                       goto ErrorModPut;
+               }
+       }
+
+       psDevInfo->psLINFBInfo = psLINFBInfo;
+
+       /* Buffers are rounded so each starts both line- and page-aligned. */
+       ulLCM = LCM(psLINFBInfo->fix.line_length, OMAPLFB_PAGE_SIZE);
+
+       DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
+                       ": Device %u: Framebuffer physical address: 0x%lx\n",
+                       psDevInfo->uiFBDevID, psLINFBInfo->fix.smem_start));
+       DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
+                       ": Device %u: Framebuffer virtual address: 0x%lx\n",
+                       psDevInfo->uiFBDevID, (unsigned long)psLINFBInfo->screen_base));
+       DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
+                       ": Device %u: Framebuffer size: %lu\n",
+                       psDevInfo->uiFBDevID, FBSize));
+       DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
+                       ": Device %u: Framebuffer virtual width: %u\n",
+                       psDevInfo->uiFBDevID, psLINFBInfo->var.xres_virtual));
+       DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
+                       ": Device %u: Framebuffer virtual height: %u\n",
+                       psDevInfo->uiFBDevID, psLINFBInfo->var.yres_virtual));
+       DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
+                       ": Device %u: Framebuffer width: %u\n",
+                       psDevInfo->uiFBDevID, psLINFBInfo->var.xres));
+       DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
+                       ": Device %u: Framebuffer height: %u\n",
+                       psDevInfo->uiFBDevID, psLINFBInfo->var.yres));
+       DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
+                       ": Device %u: Framebuffer stride: %u\n",
+                       psDevInfo->uiFBDevID, psLINFBInfo->fix.line_length));
+       DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
+                       ": Device %u: LCM of stride and page size: %lu\n",
+                       psDevInfo->uiFBDevID, ulLCM));
+
+       /* Record the framebuffer geometry and addresses for later use. */
+       psPVRFBInfo->sSysAddr.uiAddr = psLINFBInfo->fix.smem_start;
+       psPVRFBInfo->sCPUVAddr = psLINFBInfo->screen_base;
+
+       psPVRFBInfo->ulWidth = psLINFBInfo->var.xres;
+       psPVRFBInfo->ulHeight = psLINFBInfo->var.yres;
+       psPVRFBInfo->ulByteStride =  psLINFBInfo->fix.line_length;
+       psPVRFBInfo->ulFBSize = FBSize;
+       psPVRFBInfo->ulBufferSize = psPVRFBInfo->ulHeight * psPVRFBInfo->ulByteStride;
+       
+       psPVRFBInfo->ulRoundedBufferSize = RoundUpToMultiple(psPVRFBInfo->ulBufferSize, ulLCM);
+
+       /* Only RGB565 (16bpp) and ARGB8888 (32bpp) layouts are recognised. */
+       if(psLINFBInfo->var.bits_per_pixel == 16)
+       {
+               if((psLINFBInfo->var.red.length == 5) &&
+                       (psLINFBInfo->var.green.length == 6) && 
+                       (psLINFBInfo->var.blue.length == 5) && 
+                       (psLINFBInfo->var.red.offset == 11) &&
+                       (psLINFBInfo->var.green.offset == 5) && 
+                       (psLINFBInfo->var.blue.offset == 0) && 
+                       (psLINFBInfo->var.red.msb_right == 0))
+               {
+                       psPVRFBInfo->ePixelFormat = PVRSRV_PIXEL_FORMAT_RGB565;
+               }
+               else
+               {
+                       printk(KERN_INFO DRIVER_PREFIX ": %s: Device %u: Unknown FB format\n", __FUNCTION__, uiFBDevID);
+               }
+       }
+       else if(psLINFBInfo->var.bits_per_pixel == 32)
+       {
+               if((psLINFBInfo->var.red.length == 8) &&
+                       (psLINFBInfo->var.green.length == 8) && 
+                       (psLINFBInfo->var.blue.length == 8) && 
+                       (psLINFBInfo->var.red.offset == 16) &&
+                       (psLINFBInfo->var.green.offset == 8) && 
+                       (psLINFBInfo->var.blue.offset == 0) && 
+                       (psLINFBInfo->var.red.msb_right == 0))
+               {
+                       psPVRFBInfo->ePixelFormat = PVRSRV_PIXEL_FORMAT_ARGB8888;
+               }
+               else
+               {
+                       printk(KERN_INFO DRIVER_PREFIX ": %s: Device %u: Unknown FB format\n", __FUNCTION__, uiFBDevID);
+               }
+       }       
+       else
+       {
+               printk(KERN_INFO DRIVER_PREFIX ": %s: Device %u: Unknown FB format\n", __FUNCTION__, uiFBDevID);
+       }
+
+       /* Fall back to 90x54mm when the fbdev reports no physical size. */
+       psDevInfo->sFBInfo.ulPhysicalWidthmm =
+               ((int)psLINFBInfo->var.width  > 0) ? psLINFBInfo->var.width  : 90;
+
+       psDevInfo->sFBInfo.ulPhysicalHeightmm =
+               ((int)psLINFBInfo->var.height > 0) ? psLINFBInfo->var.height : 54;
+
+       
+       psDevInfo->sFBInfo.sSysAddr.uiAddr = psPVRFBInfo->sSysAddr.uiAddr;
+       psDevInfo->sFBInfo.sCPUVAddr = psPVRFBInfo->sCPUVAddr;
+
+       eError = OMAPLFB_OK;
+       goto ErrorRelSem;
+
+ErrorModPut:
+       module_put(psLINFBOwner);
+ErrorRelSem:
+       release_console_sem();
+
+       return eError;
+}
+
+/* Undo OMAPLFBInitFBDev: release the framebuffer (if the fbdev driver
+ * provides fb_release) and drop the module reference taken at init time,
+ * all under the console semaphore.
+ */
+static void OMAPLFBDeInitFBDev(OMAPLFB_DEVINFO *psDevInfo)
+{
+       struct fb_info *psFBInfo = psDevInfo->psLINFBInfo;
+       struct module *psFBOwner;
+
+       acquire_console_sem();
+
+       psFBOwner = psFBInfo->fbops->owner;
+
+       if (psFBInfo->fbops->fb_release != NULL)
+       {
+               (void) psFBInfo->fbops->fb_release(psFBInfo, 0);
+       }
+
+       module_put(psFBOwner);
+
+       release_console_sem();
+}
+
+/* Create and register the device information structure for framebuffer
+ * device uiFBDevID: fetch the PVR jump table, attach to the fbdev,
+ * populate the display info, register the device and its flip command
+ * handler with PVR services, and initialise the per-device locks and
+ * flags.  Returns NULL on any failure (all partial state is unwound).
+ */
+static OMAPLFB_DEVINFO *OMAPLFBInitDev(unsigned uiFBDevID)
+{
+       PFN_CMD_PROC            pfnCmdProcList[OMAPLFB_COMMAND_COUNT];
+       IMG_UINT32              aui32SyncCountList[OMAPLFB_COMMAND_COUNT][2];
+       OMAPLFB_DEVINFO         *psDevInfo = NULL;
+
+       
+       psDevInfo = (OMAPLFB_DEVINFO *)OMAPLFBAllocKernelMem(sizeof(OMAPLFB_DEVINFO));
+
+       if(psDevInfo == NULL)
+       {
+               printk(KERN_ERR DRIVER_PREFIX
+                       ": %s: Device %u: Couldn't allocate device information structure\n", __FUNCTION__, uiFBDevID);
+
+               goto ErrorExit;
+       }
+
+       
+       memset(psDevInfo, 0, sizeof(OMAPLFB_DEVINFO));
+
+       psDevInfo->uiFBDevID = uiFBDevID;
+
+       /* Fetch the PVR services display-class jump table. */
+       if(!(*gpfnGetPVRJTable)(&psDevInfo->sPVRJTable))
+       {
+               goto ErrorFreeDevInfo;
+       }
+
+       
+       if(OMAPLFBInitFBDev(psDevInfo) != OMAPLFB_OK)
+       {
+               
+               goto ErrorFreeDevInfo;
+       }
+
+       /* One swap chain, as many buffers as fit in the framebuffer memory. */
+       psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers = (IMG_UINT32)(psDevInfo->sFBInfo.ulFBSize / psDevInfo->sFBInfo.ulRoundedBufferSize);
+       if (psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers != 0)
+       {
+               psDevInfo->sDisplayInfo.ui32MaxSwapChains = 1;
+               psDevInfo->sDisplayInfo.ui32MaxSwapInterval = 1;
+       }
+
+       psDevInfo->sDisplayInfo.ui32PhysicalWidthmm = psDevInfo->sFBInfo.ulPhysicalWidthmm;
+       psDevInfo->sDisplayInfo.ui32PhysicalHeightmm = psDevInfo->sFBInfo.ulPhysicalHeightmm;
+
+       strncpy(psDevInfo->sDisplayInfo.szDisplayName, DISPLAY_DEVICE_NAME, MAX_DISPLAY_NAME_SIZE);
+
+       psDevInfo->sDisplayFormat.pixelformat = psDevInfo->sFBInfo.ePixelFormat;
+       psDevInfo->sDisplayDim.ui32Width      = (IMG_UINT32)psDevInfo->sFBInfo.ulWidth;
+       psDevInfo->sDisplayDim.ui32Height     = (IMG_UINT32)psDevInfo->sFBInfo.ulHeight;
+       psDevInfo->sDisplayDim.ui32ByteStride = (IMG_UINT32)psDevInfo->sFBInfo.ulByteStride;
+
+       DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
+               ": Device %u: Maximum number of swap chain buffers: %u\n",
+               psDevInfo->uiFBDevID, psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers));
+
+       /* The system buffer is the framebuffer's own front buffer. */
+       psDevInfo->sSystemBuffer.sSysAddr = psDevInfo->sFBInfo.sSysAddr;
+       psDevInfo->sSystemBuffer.sCPUVAddr = psDevInfo->sFBInfo.sCPUVAddr;
+       psDevInfo->sSystemBuffer.psDevInfo = psDevInfo;
+
+       OMAPLFBInitBufferForSwap(&psDevInfo->sSystemBuffer);
+
+       /* Fill in the services-to-display jump table. */
+
+       psDevInfo->sDCJTable.ui32TableSize = sizeof(PVRSRV_DC_SRV2DISP_KMJTABLE);
+       psDevInfo->sDCJTable.pfnOpenDCDevice = OpenDCDevice;
+       psDevInfo->sDCJTable.pfnCloseDCDevice = CloseDCDevice;
+       psDevInfo->sDCJTable.pfnEnumDCFormats = EnumDCFormats;
+       psDevInfo->sDCJTable.pfnEnumDCDims = EnumDCDims;
+       psDevInfo->sDCJTable.pfnGetDCSystemBuffer = GetDCSystemBuffer;
+       psDevInfo->sDCJTable.pfnGetDCInfo = GetDCInfo;
+       psDevInfo->sDCJTable.pfnGetBufferAddr = GetDCBufferAddr;
+       psDevInfo->sDCJTable.pfnCreateDCSwapChain = CreateDCSwapChain;
+       psDevInfo->sDCJTable.pfnDestroyDCSwapChain = DestroyDCSwapChain;
+       psDevInfo->sDCJTable.pfnSetDCDstRect = SetDCDstRect;
+       psDevInfo->sDCJTable.pfnSetDCSrcRect = SetDCSrcRect;
+       psDevInfo->sDCJTable.pfnSetDCDstColourKey = SetDCDstColourKey;
+       psDevInfo->sDCJTable.pfnSetDCSrcColourKey = SetDCSrcColourKey;
+       psDevInfo->sDCJTable.pfnGetDCBuffers = GetDCBuffers;
+       psDevInfo->sDCJTable.pfnSwapToDCBuffer = SwapToDCBuffer;
+       psDevInfo->sDCJTable.pfnSwapToDCSystem = SwapToDCSystem;
+       psDevInfo->sDCJTable.pfnSetDCState = SetDCState;
+
+       
+       if(psDevInfo->sPVRJTable.pfnPVRSRVRegisterDCDevice(
+               &psDevInfo->sDCJTable,
+               &psDevInfo->uiPVRDevID) != PVRSRV_OK)
+       {
+               printk(KERN_ERR DRIVER_PREFIX
+                       ": %s: Device %u: PVR Services device registration failed\n", __FUNCTION__, uiFBDevID);
+
+               goto ErrorDeInitFBDev;
+       }
+       DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
+               ": Device %u: PVR Device ID: %u\n",
+               psDevInfo->uiFBDevID, psDevInfo->uiPVRDevID));
+       
+       /* Register the flip command handler. */
+       pfnCmdProcList[DC_FLIP_COMMAND] = ProcessFlip;
+
+       /* Sync counts for DC_FLIP_COMMAND - presumably [0] = writes and
+        * [1] = reads per the PVR services command interface; TODO confirm. */
+       aui32SyncCountList[DC_FLIP_COMMAND][0] = 0; 
+       aui32SyncCountList[DC_FLIP_COMMAND][1] = 2; 
+
+       
+
+
+
+       if (psDevInfo->sPVRJTable.pfnPVRSRVRegisterCmdProcList(psDevInfo->uiPVRDevID,
+                                                                                                                       &pfnCmdProcList[0],
+                                                                                                                       aui32SyncCountList,
+                                                                                                                       OMAPLFB_COMMAND_COUNT) != PVRSRV_OK)
+       {
+               printk(KERN_ERR DRIVER_PREFIX
+                       ": %s: Device %u: Couldn't register command processing functions with PVR Services\n", __FUNCTION__, uiFBDevID);
+               goto ErrorUnregisterDevice;
+       }
+
+       OMAPLFBCreateSwapChainLockInit(psDevInfo);
+
+       OMAPLFBAtomicBoolInit(&psDevInfo->sBlanked, OMAPLFB_FALSE);
+       OMAPLFBAtomicIntInit(&psDevInfo->sBlankEvents, 0);
+       OMAPLFBAtomicBoolInit(&psDevInfo->sFlushCommands, OMAPLFB_FALSE);
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+       OMAPLFBAtomicBoolInit(&psDevInfo->sEarlySuspendFlag, OMAPLFB_FALSE);
+#endif
+#if defined(SUPPORT_DRI_DRM)
+       OMAPLFBAtomicBoolInit(&psDevInfo->sLeaveVT, OMAPLFB_FALSE);
+#endif
+       return psDevInfo;
+
+ErrorUnregisterDevice:
+       (void)psDevInfo->sPVRJTable.pfnPVRSRVRemoveDCDevice(psDevInfo->uiPVRDevID);
+ErrorDeInitFBDev:
+       OMAPLFBDeInitFBDev(psDevInfo);
+ErrorFreeDevInfo:
+       OMAPLFBFreeKernelMem(psDevInfo);
+ErrorExit:
+       return NULL;
+}
+
+/* Module-wide initialisation: resolve the PVR jump table accessor, then
+ * probe every registered framebuffer device.  Succeeds if at least one
+ * device initialises.
+ */
+OMAPLFB_ERROR OMAPLFBInit(void)
+{
+       unsigned uiMaxFBDevIDPlusOne = OMAPLFBMaxFBDevIDPlusOne();
+       unsigned i;
+       unsigned uiDevicesFound = 0;
+
+       if(OMAPLFBGetLibFuncAddr ("PVRGetDisplayClassJTable", &gpfnGetPVRJTable) != OMAPLFB_OK)
+       {
+               return OMAPLFB_ERROR_INIT_FAILURE;
+       }
+
+       /* Probe devices in descending ID order (i counts down to 0). */
+       for(i = uiMaxFBDevIDPlusOne; i-- != 0;)
+       {
+               OMAPLFB_DEVINFO *psDevInfo = OMAPLFBInitDev(i);
+
+               if (psDevInfo != NULL)
+               {
+                       /* Record the per-device pointer for later lookup. */
+                       OMAPLFBSetDevInfoPtr(psDevInfo->uiFBDevID, psDevInfo);
+                       uiDevicesFound++;
+               }
+       }
+
+       return (uiDevicesFound != 0) ? OMAPLFB_OK : OMAPLFB_ERROR_INIT_FAILURE;
+}
+
+/* Tear down one device: release the per-device lock and atomics,
+ * unregister the command handlers and the device from PVR services,
+ * detach from the framebuffer, clear the device pointer and free the
+ * device structure.  Returns OMAPLFB_FALSE if PVR services refuses
+ * either unregistration (the structure is then left allocated).
+ */
+static OMAPLFB_BOOL OMAPLFBDeInitDev(OMAPLFB_DEVINFO *psDevInfo)
+{
+       PVRSRV_DC_DISP2SRV_KMJTABLE *psPVRJTable = &psDevInfo->sPVRJTable;
+
+       OMAPLFBCreateSwapChainLockDeInit(psDevInfo);
+
+       OMAPLFBAtomicBoolDeInit(&psDevInfo->sBlanked);
+       OMAPLFBAtomicIntDeInit(&psDevInfo->sBlankEvents);
+       OMAPLFBAtomicBoolDeInit(&psDevInfo->sFlushCommands);
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+       OMAPLFBAtomicBoolDeInit(&psDevInfo->sEarlySuspendFlag);
+#endif
+#if defined(SUPPORT_DRI_DRM)
+       OMAPLFBAtomicBoolDeInit(&psDevInfo->sLeaveVT);
+#endif
+       /* (A redundant re-assignment of psPVRJTable was removed here; it is
+        * already initialised at its declaration above.) */
+
+       if (psPVRJTable->pfnPVRSRVRemoveCmdProcList (psDevInfo->uiPVRDevID, OMAPLFB_COMMAND_COUNT) != PVRSRV_OK)
+       {
+               printk(KERN_ERR DRIVER_PREFIX
+                       ": %s: Device %u: PVR Device %u: Couldn't unregister command processing functions\n", __FUNCTION__, psDevInfo->uiFBDevID, psDevInfo->uiPVRDevID);
+               return OMAPLFB_FALSE;
+       }
+
+       
+       if (psPVRJTable->pfnPVRSRVRemoveDCDevice(psDevInfo->uiPVRDevID) != PVRSRV_OK)
+       {
+               printk(KERN_ERR DRIVER_PREFIX
+                       ": %s: Device %u: PVR Device %u: Couldn't remove device from PVR Services\n", __FUNCTION__, psDevInfo->uiFBDevID, psDevInfo->uiPVRDevID);
+               return OMAPLFB_FALSE;
+       }
+       
+       OMAPLFBDeInitFBDev(psDevInfo);
+
+       OMAPLFBSetDevInfoPtr(psDevInfo->uiFBDevID, NULL);
+
+       
+       OMAPLFBFreeKernelMem(psDevInfo);
+
+       return OMAPLFB_TRUE;
+}
+
+/* Module-wide teardown: de-initialise every device that was successfully
+ * initialised.  Reports failure if any device failed to tear down.
+ */
+OMAPLFB_ERROR OMAPLFBDeInit(void)
+{
+       unsigned uiMaxFBDevIDPlusOne = OMAPLFBMaxFBDevIDPlusOne();
+       unsigned uiDevID;
+       OMAPLFB_BOOL bAllOK = OMAPLFB_TRUE;
+
+       for (uiDevID = 0; uiDevID < uiMaxFBDevIDPlusOne; uiDevID++)
+       {
+               OMAPLFB_DEVINFO *psDevInfo = OMAPLFBGetDevInfoPtr(uiDevID);
+
+               if (psDevInfo != NULL && !OMAPLFBDeInitDev(psDevInfo))
+               {
+                       bAllOK = OMAPLFB_FALSE;
+               }
+       }
+
+       return bAllOK ? OMAPLFB_OK : OMAPLFB_ERROR_INIT_FAILURE;
+}
+
diff --git a/services4/3rdparty/dc_omapfb3_linux/omaplfb_linux.c b/services4/3rdparty/dc_omapfb3_linux/omaplfb_linux.c
new file mode 100644 (file)
index 0000000..171e735
--- /dev/null
@@ -0,0 +1,746 @@
+/**********************************************************************
+ *
+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ * 
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ * 
+ * This program is distributed in the hope it will be useful but, except 
+ * as otherwise stated in writing, without any warranty; without even the 
+ * implied warranty of merchantability or fitness for a particular purpose. 
+ * See the GNU General Public License for more details.
+ * 
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * 
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
+ *
+ ******************************************************************************/
+
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+
+#include <linux/version.h>
+
+#include <asm/atomic.h>
+
+#if defined(SUPPORT_DRI_DRM)
+#include <drm/drmP.h>
+#else
+#include <linux/module.h>
+#endif
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/hardirq.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/fb.h>
+#include <linux/console.h>
+#include <linux/omapfb.h>
+#include <linux/mutex.h>
+
+# include <plat/vrfb.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34))
+#define PVR_OMAPFB3_NEEDS_PLAT_VRFB_H
+#endif
+
+#if defined(PVR_OMAPFB3_NEEDS_PLAT_VRFB_H)
+# include <plat/vrfb.h>
+#else
+# if defined(PVR_OMAPFB3_NEEDS_MACH_VRFB_H)
+#  include <mach/vrfb.h>
+# endif
+#endif
+
+#if defined(DEBUG)
+#define        PVR_DEBUG DEBUG
+#undef DEBUG
+#endif
+#include <omapfb/omapfb.h>
+#if defined(DEBUG)
+#undef DEBUG
+#endif
+#if defined(PVR_DEBUG)
+#define        DEBUG PVR_DEBUG
+#undef PVR_DEBUG
+#endif
+
+#include "img_defs.h"
+#include "servicesext.h"
+#include "kerneldisplay.h"
+#include "omaplfb.h"
+#include "pvrmodule.h"
+#if defined(SUPPORT_DRI_DRM)
+#include "pvr_drm.h"
+#include "3rdparty_dc_drm_shared.h"
+#endif
+
+#if !defined(PVR_LINUX_USING_WORKQUEUES)
+#error "PVR_LINUX_USING_WORKQUEUES must be defined"
+#endif
+
+MODULE_SUPPORTED_DEVICE(DEVNAME);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34))
+#define OMAP_DSS_DRIVER(drv, dev) struct omap_dss_driver *drv = (dev) != NULL ? (dev)->driver : NULL
+#define OMAP_DSS_MANAGER(man, dev) struct omap_overlay_manager *man = (dev) != NULL ? (dev)->manager : NULL
+#define        WAIT_FOR_VSYNC(man)     ((man)->wait_for_vsync)
+#else
+#define OMAP_DSS_DRIVER(drv, dev) struct omap_dss_device *drv = (dev)
+#define OMAP_DSS_MANAGER(man, dev) struct omap_dss_device *man = (dev)
+#define        WAIT_FOR_VSYNC(man)     ((man)->wait_vsync)
+#endif
+
+
+/* OS abstraction: allocate kernel memory (GFP_KERNEL, may sleep). */
+void *OMAPLFBAllocKernelMem(unsigned long ulSize)
+{
+       return kmalloc(ulSize, GFP_KERNEL);
+}
+
+/* OS abstraction: free memory from OMAPLFBAllocKernelMem (NULL is a no-op). */
+void OMAPLFBFreeKernelMem(void *pvMem)
+{
+       kfree(pvMem);
+}
+
+/* Initialise the per-device mutex guarding swap chain create/destroy and
+ * the psSwapChain pointer. */
+void OMAPLFBCreateSwapChainLockInit(OMAPLFB_DEVINFO *psDevInfo)
+{
+       mutex_init(&psDevInfo->sCreateSwapChainMutex);
+}
+
+/* Destroy the swap chain mutex (device teardown only). */
+void OMAPLFBCreateSwapChainLockDeInit(OMAPLFB_DEVINFO *psDevInfo)
+{
+       mutex_destroy(&psDevInfo->sCreateSwapChainMutex);
+}
+
+/* Take the swap chain mutex (sleeps; not for atomic context). */
+void OMAPLFBCreateSwapChainLock(OMAPLFB_DEVINFO *psDevInfo)
+{
+       mutex_lock(&psDevInfo->sCreateSwapChainMutex);
+}
+
+/* Release the swap chain mutex. */
+void OMAPLFBCreateSwapChainUnLock(OMAPLFB_DEVINFO *psDevInfo)
+{
+       mutex_unlock(&psDevInfo->sCreateSwapChainMutex);
+}
+
+/* Thin wrappers mapping OMAPLFB atomic bool/int types onto Linux atomic_t.
+ * The DeInit functions are intentionally empty: atomic_t needs no cleanup,
+ * but the symmetry keeps the OS abstraction uniform. */
+void OMAPLFBAtomicBoolInit(OMAPLFB_ATOMIC_BOOL *psAtomic, OMAPLFB_BOOL bVal)
+{
+       atomic_set(psAtomic, (int)bVal);
+}
+
+void OMAPLFBAtomicBoolDeInit(OMAPLFB_ATOMIC_BOOL *psAtomic)
+{
+}
+
+void OMAPLFBAtomicBoolSet(OMAPLFB_ATOMIC_BOOL *psAtomic, OMAPLFB_BOOL bVal)
+{
+       atomic_set(psAtomic, (int)bVal);
+}
+
+OMAPLFB_BOOL OMAPLFBAtomicBoolRead(OMAPLFB_ATOMIC_BOOL *psAtomic)
+{
+       return (OMAPLFB_BOOL)atomic_read(psAtomic);
+}
+
+void OMAPLFBAtomicIntInit(OMAPLFB_ATOMIC_INT *psAtomic, int iVal)
+{
+       atomic_set(psAtomic, iVal);
+}
+
+void OMAPLFBAtomicIntDeInit(OMAPLFB_ATOMIC_INT *psAtomic)
+{
+}
+
+void OMAPLFBAtomicIntSet(OMAPLFB_ATOMIC_INT *psAtomic, int iVal)
+{
+       atomic_set(psAtomic, iVal);
+}
+
+int OMAPLFBAtomicIntRead(OMAPLFB_ATOMIC_INT *psAtomic)
+{
+       return atomic_read(psAtomic);
+}
+
+void OMAPLFBAtomicIntInc(OMAPLFB_ATOMIC_INT *psAtomic)
+{
+       atomic_inc(psAtomic);
+}
+
+/* Resolve a PVR services entry point by name.  The only name recognised
+ * is "PVRGetDisplayClassJTable"; anything else is rejected.
+ */
+OMAPLFB_ERROR OMAPLFBGetLibFuncAddr (char *szFunctionName, PFN_DC_GET_PVRJTABLE *ppfnFuncTable)
+{
+       if (strcmp(szFunctionName, "PVRGetDisplayClassJTable") != 0)
+       {
+               return OMAPLFB_ERROR_INVALID_PARAMS;
+       }
+
+       *ppfnFuncTable = PVRGetDisplayClassJTable;
+
+       return OMAPLFB_OK;
+}
+
+/* Queue a buffer on the swap chain's work queue for asynchronous
+ * processing by WorkQueueHandler.  queue_work returns 0 when the work
+ * item is already pending, which indicates the same buffer was queued
+ * twice - logged as a warning.
+ */
+void OMAPLFBQueueBufferForSwap(OMAPLFB_SWAPCHAIN *psSwapChain, OMAPLFB_BUFFER *psBuffer)
+{
+       int res = queue_work(psSwapChain->psWorkQueue, &psBuffer->sWork);
+
+       if (res == 0)
+       {
+               printk(KERN_WARNING DRIVER_PREFIX ": %s: Device %u: Buffer already on work queue\n", __FUNCTION__, psSwapChain->uiFBDevID);
+       }
+}
+
+/* Work queue callback: recover the buffer embedding this work item and
+ * run the swap for it. */
+static void WorkQueueHandler(struct work_struct *psWork)
+{
+       OMAPLFB_BUFFER *psBuffer = container_of(psWork, OMAPLFB_BUFFER, sWork);
+
+       OMAPLFBSwapHandler(psBuffer);
+}
+
+/* Create the work queue used to process queued swaps for this swap chain.
+ * On kernels <= 2.6.34 the queue is created via __create_workqueue with
+ * explicit flags (presumably single-threaded/freezable - confirm against
+ * the target kernel's workqueue API); later kernels use create_workqueue.
+ */
+OMAPLFB_ERROR OMAPLFBCreateSwapQueue(OMAPLFB_SWAPCHAIN *psSwapChain)
+{
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,34))
+       psSwapChain->psWorkQueue = __create_workqueue(DEVNAME, 1, 1, 1);
+#else
+       psSwapChain->psWorkQueue = create_workqueue(DEVNAME);
+#endif
+       if (psSwapChain->psWorkQueue == NULL)
+       {
+               /* Fixed: the old message named "create_singlethreaded_workqueue",
+                * a function that does not exist and is not what is called. */
+               printk(KERN_WARNING DRIVER_PREFIX ": %s: Device %u: Couldn't create workqueue\n", __FUNCTION__, psSwapChain->uiFBDevID);
+
+               return (OMAPLFB_ERROR_INIT_FAILURE);
+       }
+
+       return (OMAPLFB_OK);
+}
+
+/* Prepare a buffer's embedded work item so it can be queued for swap. */
+void OMAPLFBInitBufferForSwap(OMAPLFB_BUFFER *psBuffer)
+{
+       INIT_WORK(&psBuffer->sWork, WorkQueueHandler);
+}
+
+/* Flush and destroy the swap chain's work queue; any queued swaps are
+ * processed before this returns. */
+void OMAPLFBDestroySwapQueue(OMAPLFB_SWAPCHAIN *psSwapChain)
+{
+       destroy_workqueue(psSwapChain->psWorkQueue);
+}
+
+/* Display psBuffer by panning the framebuffer to its Y offset.  If the
+ * current virtual resolution cannot accommodate the offset, the mode is
+ * reprogrammed with fb_set_var (which also pans); otherwise the cheaper
+ * fb_pan_display is used.  Runs under the console semaphore.
+ */
+void OMAPLFBFlip(OMAPLFB_DEVINFO *psDevInfo, OMAPLFB_BUFFER *psBuffer)
+{
+       struct fb_var_screeninfo sFBVar;
+       int res;
+       unsigned long ulYResVirtual;
+
+       acquire_console_sem();
+
+       sFBVar = psDevInfo->psLINFBInfo->var;
+
+       sFBVar.xoffset = 0;
+       sFBVar.yoffset = psBuffer->ulYOffset;
+
+       ulYResVirtual = psBuffer->ulYOffset + sFBVar.yres;
+
+       /* Grow the virtual resolution if the buffer lies outside it. */
+       if (sFBVar.xres_virtual != sFBVar.xres || sFBVar.yres_virtual < ulYResVirtual)
+       {
+               sFBVar.xres_virtual = sFBVar.xres;
+               sFBVar.yres_virtual = ulYResVirtual;
+
+               sFBVar.activate = FB_ACTIVATE_NOW | FB_ACTIVATE_FORCE;
+
+               res = fb_set_var(psDevInfo->psLINFBInfo, &sFBVar);
+               if (res != 0)
+               {
+                       printk(KERN_INFO DRIVER_PREFIX ": %s: Device %u: fb_set_var failed (Y Offset: %lu, Error: %d)\n", __FUNCTION__, psDevInfo->uiFBDevID, psBuffer->ulYOffset, res);
+               }
+       }
+       else
+       {
+               res = fb_pan_display(psDevInfo->psLINFBInfo, &sFBVar);
+               if (res != 0)
+               {
+                       printk(KERN_INFO DRIVER_PREFIX ": %s: Device %u: fb_pan_display failed (Y Offset: %lu, Error: %d)\n", __FUNCTION__, psDevInfo->uiFBDevID, psBuffer->ulYOffset, res);
+               }
+       }
+
+       release_console_sem();
+}
+
+OMAPLFB_UPDATE_MODE OMAPLFBGetUpdateMode(OMAPLFB_DEVINFO *psDevInfo)
+{
+       struct omap_dss_device *psDSSDev = fb2display(psDevInfo->psLINFBInfo);
+       OMAP_DSS_DRIVER(psDSSDrv, psDSSDev);
+
+       enum omap_dss_update_mode eMode;
+
+       if (psDSSDrv == NULL || psDSSDrv->get_update_mode == NULL)
+       {
+               DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX ": %s: Device %u: Can't get update mode\n", __FUNCTION__, psDevInfo->uiFBDevID));
+               return OMAPLFB_UPDATE_MODE_UNDEFINED;
+       }
+
+       eMode = psDSSDrv->get_update_mode(psDSSDev);
+       switch(eMode)
+       {
+               case OMAP_DSS_UPDATE_AUTO:
+                       return OMAPLFB_UPDATE_MODE_AUTO;
+               case OMAP_DSS_UPDATE_MANUAL:
+                       return OMAPLFB_UPDATE_MODE_MANUAL;
+               case OMAP_DSS_UPDATE_DISABLED:
+                       return OMAPLFB_UPDATE_MODE_DISABLED;
+               default:
+                       DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX ": %s: Device %u: Unknown update mode (%d)\n", __FUNCTION__, psDevInfo->uiFBDevID, eMode));
+                       break;
+       }
+
+       return OMAPLFB_UPDATE_MODE_UNDEFINED;
+//return OMAPLFB_UPDATE_MODE_AUTO;
+}
+
+OMAPLFB_BOOL OMAPLFBSetUpdateMode(OMAPLFB_DEVINFO *psDevInfo, OMAPLFB_UPDATE_MODE eMode)
+{
+       struct omap_dss_device *psDSSDev = fb2display(psDevInfo->psLINFBInfo);
+       OMAP_DSS_DRIVER(psDSSDrv, psDSSDev);
+       enum omap_dss_update_mode eDSSMode;
+       int res;
+
+       if (psDSSDrv == NULL || psDSSDrv->set_update_mode == NULL)
+       {
+               DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX ": %s: Device %u: Can't set update mode\n", __FUNCTION__, psDevInfo->uiFBDevID));
+               return OMAPLFB_FALSE;
+       }
+
+       switch(eMode)
+       {
+               case OMAPLFB_UPDATE_MODE_AUTO:
+                       eDSSMode = OMAP_DSS_UPDATE_AUTO;
+                       break;
+               case OMAPLFB_UPDATE_MODE_MANUAL:
+                       eDSSMode = OMAP_DSS_UPDATE_MANUAL;
+                       break;
+               case OMAPLFB_UPDATE_MODE_DISABLED:
+                       eDSSMode = OMAP_DSS_UPDATE_DISABLED;
+                       break;
+               default:
+                       DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX ": %s: Device %u: Unknown update mode (%d)\n", __FUNCTION__, psDevInfo->uiFBDevID, eMode));
+                       return OMAPLFB_FALSE;
+       }
+
+       res = psDSSDrv->set_update_mode(psDSSDev, eDSSMode);
+       if (res != 0)
+       {
+               DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX ": %s: Device %u: set_update_mode failed (%d)\n", __FUNCTION__, psDevInfo->uiFBDevID, res));
+       }
+
+       return (res == 0);
+//return 1;
+}
+
+OMAPLFB_BOOL OMAPLFBWaitForVSync(OMAPLFB_DEVINFO *psDevInfo)
+{
+       struct omap_dss_device *psDSSDev = fb2display(psDevInfo->psLINFBInfo);
+       OMAP_DSS_MANAGER(psDSSMan, psDSSDev);
+
+       if (psDSSMan != NULL && WAIT_FOR_VSYNC(psDSSMan) != NULL)
+       {
+               int res = WAIT_FOR_VSYNC(psDSSMan)(psDSSMan);
+               if (res != 0)
+               {
+                       DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX ": %s: Device %u: Wait for vsync failed (%d)\n", __FUNCTION__, psDevInfo->uiFBDevID, res));
+                       return OMAPLFB_FALSE;
+               }
+       }
+
+       return OMAPLFB_TRUE;
+}
+
+OMAPLFB_BOOL OMAPLFBManualSync(OMAPLFB_DEVINFO *psDevInfo)
+{
+       struct omap_dss_device *psDSSDev = fb2display(psDevInfo->psLINFBInfo);
+       OMAP_DSS_DRIVER(psDSSDrv, psDSSDev);
+
+       if (psDSSDrv != NULL && psDSSDrv->sync != NULL)
+       {
+               int res = psDSSDrv->sync(psDSSDev);
+               if (res != 0)
+               {
+                       printk(KERN_INFO DRIVER_PREFIX ": %s: Device %u: Sync failed (%d)\n", __FUNCTION__, psDevInfo->uiFBDevID, res);
+                       return OMAPLFB_FALSE;
+               }
+       }
+       return OMAPLFB_TRUE;
+}
+
+OMAPLFB_BOOL OMAPLFBCheckModeAndSync(OMAPLFB_DEVINFO *psDevInfo)
+{
+       OMAPLFB_UPDATE_MODE eMode = OMAPLFBGetUpdateMode(psDevInfo);
+
+       switch(eMode)
+       {
+               case OMAPLFB_UPDATE_MODE_AUTO:
+               case OMAPLFB_UPDATE_MODE_MANUAL:
+                       return OMAPLFBManualSync(psDevInfo);
+               default:
+                       break;
+       }
+
+       return OMAPLFB_TRUE;
+}
+
+static int OMAPLFBFrameBufferEvents(struct notifier_block *psNotif,
+                             unsigned long event, void *data)
+{
+       OMAPLFB_DEVINFO *psDevInfo;
+       struct fb_event *psFBEvent = (struct fb_event *)data;
+       struct fb_info *psFBInfo = psFBEvent->info;
+       OMAPLFB_BOOL bBlanked;
+
+       
+       if (event != FB_EVENT_BLANK)
+       {
+               return 0;
+       }
+
+       bBlanked = (*(IMG_INT *)psFBEvent->data != 0) ? OMAPLFB_TRUE: OMAPLFB_FALSE;
+
+       psDevInfo = OMAPLFBGetDevInfoPtr(psFBInfo->node);
+
+#if 0
+       if (psDevInfo != NULL)
+       {
+               if (bBlanked)
+               {
+                       DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX ": %s: Device %u: Blank event received\n", __FUNCTION__, psDevInfo->uiFBDevID));
+               }
+               else
+               {
+                       DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX ": %s: Device %u: Unblank event received\n", __FUNCTION__, psDevInfo->uiFBDevID));
+               }
+       }
+       else
+       {
+               DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX ": %s: Device %u: Blank/Unblank event for unknown framebuffer\n", __FUNCTION__, psFBInfo->node));
+       }
+#endif
+
+       if (psDevInfo != NULL)
+       {
+               OMAPLFBAtomicBoolSet(&psDevInfo->sBlanked, bBlanked);
+               OMAPLFBAtomicIntInc(&psDevInfo->sBlankEvents);
+       }
+
+       return 0;
+}
+
+OMAPLFB_ERROR OMAPLFBUnblankDisplay(OMAPLFB_DEVINFO *psDevInfo)
+{
+       int res;
+
+       acquire_console_sem();
+       res = fb_blank(psDevInfo->psLINFBInfo, 0);
+       release_console_sem();
+       if (res != 0 && res != -EINVAL)
+       {
+               printk(KERN_WARNING DRIVER_PREFIX
+                       ": %s: Device %u: fb_blank failed (%d)\n", __FUNCTION__, psDevInfo->uiFBDevID, res);
+               return (OMAPLFB_ERROR_GENERIC);
+       }
+
+       return (OMAPLFB_OK);
+}
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+
+static void OMAPLFBBlankDisplay(OMAPLFB_DEVINFO *psDevInfo)
+{
+       acquire_console_sem();
+       fb_blank(psDevInfo->psLINFBInfo, 1);
+       release_console_sem();
+}
+
+static void OMAPLFBEarlySuspendHandler(struct early_suspend *h)
+{
+       unsigned uiMaxFBDevIDPlusOne = OMAPLFBMaxFBDevIDPlusOne();
+       unsigned i;
+
+       for (i=0; i < uiMaxFBDevIDPlusOne; i++)
+       {
+               OMAPLFB_DEVINFO *psDevInfo = OMAPLFBGetDevInfoPtr(i);
+
+               if (psDevInfo != NULL)
+               {
+                       OMAPLFBAtomicBoolSet(&psDevInfo->sEarlySuspendFlag, OMAPLFB_TRUE);
+                       OMAPLFBBlankDisplay(psDevInfo);
+               }
+       }
+}
+
+static void OMAPLFBEarlyResumeHandler(struct early_suspend *h)
+{
+       unsigned uiMaxFBDevIDPlusOne = OMAPLFBMaxFBDevIDPlusOne();
+       unsigned i;
+
+       for (i=0; i < uiMaxFBDevIDPlusOne; i++)
+       {
+               OMAPLFB_DEVINFO *psDevInfo = OMAPLFBGetDevInfoPtr(i);
+
+               if (psDevInfo != NULL)
+               {
+                       OMAPLFBUnblankDisplay(psDevInfo);
+                       OMAPLFBAtomicBoolSet(&psDevInfo->sEarlySuspendFlag, OMAPLFB_FALSE);
+               }
+       }
+}
+
+#endif 
+
+OMAPLFB_ERROR OMAPLFBEnableLFBEventNotification(OMAPLFB_DEVINFO *psDevInfo)
+{
+       int                res;
+       OMAPLFB_ERROR         eError;
+
+       
+       memset(&psDevInfo->sLINNotifBlock, 0, sizeof(psDevInfo->sLINNotifBlock));
+
+       psDevInfo->sLINNotifBlock.notifier_call = OMAPLFBFrameBufferEvents;
+
+       OMAPLFBAtomicBoolSet(&psDevInfo->sBlanked, OMAPLFB_FALSE);
+       OMAPLFBAtomicIntSet(&psDevInfo->sBlankEvents, 0);
+
+       res = fb_register_client(&psDevInfo->sLINNotifBlock);
+       if (res != 0)
+       {
+               printk(KERN_WARNING DRIVER_PREFIX
+                       ": %s: Device %u: fb_register_client failed (%d)\n", __FUNCTION__, psDevInfo->uiFBDevID, res);
+
+               return (OMAPLFB_ERROR_GENERIC);
+       }
+
+       eError = OMAPLFBUnblankDisplay(psDevInfo);
+       if (eError != OMAPLFB_OK)
+       {
+               printk(KERN_WARNING DRIVER_PREFIX
+                       ": %s: Device %u: UnblankDisplay failed (%d)\n", __FUNCTION__, psDevInfo->uiFBDevID, eError);
+               return eError;
+       }
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+       psDevInfo->sEarlySuspend.suspend = OMAPLFBEarlySuspendHandler;
+       psDevInfo->sEarlySuspend.resume = OMAPLFBEarlyResumeHandler;
+       psDevInfo->sEarlySuspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN;
+       register_early_suspend(&psDevInfo->sEarlySuspend);
+#endif
+
+       return (OMAPLFB_OK);
+}
+
+OMAPLFB_ERROR OMAPLFBDisableLFBEventNotification(OMAPLFB_DEVINFO *psDevInfo)
+{
+       int res;
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+       unregister_early_suspend(&psDevInfo->sEarlySuspend);
+#endif
+
+       
+       res = fb_unregister_client(&psDevInfo->sLINNotifBlock);
+       if (res != 0)
+       {
+               printk(KERN_WARNING DRIVER_PREFIX
+                       ": %s: Device %u: fb_unregister_client failed (%d)\n", __FUNCTION__, psDevInfo->uiFBDevID, res);
+               return (OMAPLFB_ERROR_GENERIC);
+       }
+
+       OMAPLFBAtomicBoolSet(&psDevInfo->sBlanked, OMAPLFB_FALSE);
+
+       return (OMAPLFB_OK);
+}
+
+#if defined(SUPPORT_DRI_DRM) && defined(PVR_DISPLAY_CONTROLLER_DRM_IOCTL)
+static OMAPLFB_DEVINFO *OMAPLFBPVRDevIDToDevInfo(unsigned uiPVRDevID)
+{
+       unsigned uiMaxFBDevIDPlusOne = OMAPLFBMaxFBDevIDPlusOne();
+       unsigned i;
+
+       for (i=0; i < uiMaxFBDevIDPlusOne; i++)
+       {
+               OMAPLFB_DEVINFO *psDevInfo = OMAPLFBGetDevInfoPtr(i);
+
+               if (psDevInfo->uiPVRDevID == uiPVRDevID)
+               {
+                       return psDevInfo;
+               }
+       }
+
+       printk(KERN_WARNING DRIVER_PREFIX
+               ": %s: PVR Device %u: Couldn't find device\n", __FUNCTION__, uiPVRDevID);
+
+       return NULL;
+}
+
+int PVR_DRM_MAKENAME(omaplfb, _Ioctl)(struct drm_device unref__ *dev, void *arg, struct drm_file unref__ *pFile)
+{
+       uint32_t *puiArgs;
+       uint32_t uiCmd;
+       unsigned uiPVRDevID;
+       int ret = 0;
+       OMAPLFB_DEVINFO *psDevInfo;
+
+       if (arg == NULL)
+       {
+               return -EFAULT;
+       }
+
+       puiArgs = (uint32_t *)arg;
+       uiCmd = puiArgs[PVR_DRM_DISP_ARG_CMD];
+       uiPVRDevID = puiArgs[PVR_DRM_DISP_ARG_DEV];
+
+       psDevInfo = OMAPLFBPVRDevIDToDevInfo(uiPVRDevID);
+       if (psDevInfo == NULL)
+       {
+               return -EINVAL;
+       }
+
+
+       switch (uiCmd)
+       {
+               case PVR_DRM_DISP_CMD_LEAVE_VT:
+               case PVR_DRM_DISP_CMD_ENTER_VT:
+               {
+                       OMAPLFB_BOOL bLeaveVT = (uiCmd == PVR_DRM_DISP_CMD_LEAVE_VT);
+                       DEBUG_PRINTK((KERN_WARNING DRIVER_PREFIX ": %s: PVR Device %u: %s\n",
+                               __FUNCTION__, uiPVRDevID,
+                               bLeaveVT ? "Leave VT" : "Enter VT"));
+
+                       OMAPLFBCreateSwapChainLock(psDevInfo);
+                       
+                       OMAPLFBAtomicBoolSet(&psDevInfo->sLeaveVT, bLeaveVT);
+                       if (psDevInfo->psSwapChain != NULL)
+                       {
+                               flush_workqueue(psDevInfo->psSwapChain->psWorkQueue);
+
+                               if (bLeaveVT)
+                               {
+                                       OMAPLFBFlip(psDevInfo, &psDevInfo->sSystemBuffer);
+                                       (void) OMAPLFBCheckModeAndSync(psDevInfo);
+                               }
+                       }
+
+                       OMAPLFBCreateSwapChainUnLock(psDevInfo);
+                       (void) OMAPLFBUnblankDisplay(psDevInfo);
+                       break;
+               }
+               case PVR_DRM_DISP_CMD_ON:
+               case PVR_DRM_DISP_CMD_STANDBY:
+               case PVR_DRM_DISP_CMD_SUSPEND:
+               case PVR_DRM_DISP_CMD_OFF:
+               {
+                       int iFBMode;
+#if defined(DEBUG)
+                       {
+                               const char *pszMode;
+                               switch(uiCmd)
+                               {
+                                       case PVR_DRM_DISP_CMD_ON:
+                                               pszMode = "On";
+                                               break;
+                                       case PVR_DRM_DISP_CMD_STANDBY:
+                                               pszMode = "Standby";
+                                               break;
+                                       case PVR_DRM_DISP_CMD_SUSPEND:
+                                               pszMode = "Suspend";
+                                               break;
+                                       case PVR_DRM_DISP_CMD_OFF:
+                                               pszMode = "Off";
+                                               break;
+                                       default:
+                                               pszMode = "(Unknown Mode)";
+                                               break;
+                               }
+                               printk (KERN_WARNING DRIVER_PREFIX ": %s: PVR Device %u: Display %s\n",
+                               __FUNCTION__, uiPVRDevID, pszMode);
+                       }
+#endif
+                       switch(uiCmd)
+                       {
+                               case PVR_DRM_DISP_CMD_ON:
+                                       iFBMode = FB_BLANK_UNBLANK;
+                                       break;
+                               case PVR_DRM_DISP_CMD_STANDBY:
+                                       iFBMode = FB_BLANK_HSYNC_SUSPEND;
+                                       break;
+                               case PVR_DRM_DISP_CMD_SUSPEND:
+                                       iFBMode = FB_BLANK_VSYNC_SUSPEND;
+                                       break;
+                               case PVR_DRM_DISP_CMD_OFF:
+                                       iFBMode = FB_BLANK_POWERDOWN;
+                                       break;
+                               default:
+                                       return -EINVAL;
+                       }
+
+                       OMAPLFBCreateSwapChainLock(psDevInfo);
+
+                       if (psDevInfo->psSwapChain != NULL)
+                       {
+                               flush_workqueue(psDevInfo->psSwapChain->psWorkQueue);
+                       }
+
+                       acquire_console_sem();
+                       ret = fb_blank(psDevInfo->psLINFBInfo, iFBMode);
+                       release_console_sem();
+
+                       OMAPLFBCreateSwapChainUnLock(psDevInfo);
+
+                       break;
+               }
+               default:
+               {
+                       ret = -EINVAL;
+                       break;
+               }
+       }
+
+       return ret;
+}
+#endif
+
+#if defined(SUPPORT_DRI_DRM)
+int PVR_DRM_MAKENAME(omaplfb, _Init)(struct drm_device unref__ *dev)
+#else
+static int __init OMAPLFB_Init(void)
+#endif
+{
+
+       if(OMAPLFBInit() != OMAPLFB_OK)
+       {
+               printk(KERN_WARNING DRIVER_PREFIX ": %s: OMAPLFBInit failed\n", __FUNCTION__);
+               return -ENODEV;
+       }
+
+       return 0;
+
+}
+
+#if defined(SUPPORT_DRI_DRM)
+void PVR_DRM_MAKENAME(omaplfb, _Cleanup)(struct drm_device unref__ *dev)
+#else
+static void __exit OMAPLFB_Cleanup(void)
+#endif
+{    
+       if(OMAPLFBDeInit() != OMAPLFB_OK)
+       {
+               printk(KERN_WARNING DRIVER_PREFIX ": %s: OMAPLFBDeInit failed\n", __FUNCTION__);
+       }
+}
+
+#if !defined(SUPPORT_DRI_DRM)
+late_initcall(OMAPLFB_Init);
+module_exit(OMAPLFB_Cleanup);
+#endif
diff --git a/services4/3rdparty/dc_ti8168_linux/Kbuild b/services4/3rdparty/dc_ti8168_linux/Kbuild
deleted file mode 100644 (file)
index aba88c9..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-SYS_USING_INTERRUPTS = 1
-SUPPORT_TI_DSS_FW = 0
-
-SYS_CFLAGS.$(SYS_USING_INTERRUPTS)                      += -DSYS_USING_INTERRUPTS
-SYS_CFLAGS.$(SUPPORT_TI_DSS_FW)                         += -DSUPPORT_TI_DSS_FW
-
-
-EXTRA_CFLAGS = -DLINUX \
-               -DCONFIG_OMAP2_DSS \
-               -I$(PVR_BUILD_DIR)/include4 \
-               -I$(PVR_BUILD_DIR)/services4/include \
-               -I$(PVR_BUILD_DIR)/services4/system/$(PVR_SYSTEM) \
-               -I$(PVR_BUILD_DIR)/services4/system/include \
-               $(SYS_CFLAGS.1) \
-
-obj-m := omaplfb.o
-omaplfb-y := omaplfb_displayclass.o omaplfb_linux.o
diff --git a/services4/3rdparty/dc_ti8168_linux/omaplfb.h b/services4/3rdparty/dc_ti8168_linux/omaplfb.h
deleted file mode 100644 (file)
index 66ff328..0000000
+++ /dev/null
@@ -1,282 +0,0 @@
-/**********************************************************************
- *
- * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
- * 
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- * 
- * This program is distributed in the hope it will be useful but, except 
- * as otherwise stated in writing, without any warranty; without even the 
- * implied warranty of merchantability or fitness for a particular purpose. 
- * See the GNU General Public License for more details.
- * 
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- * 
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Imagination Technologies Ltd. <gpl-support@imgtec.com>
- * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
- *
- ******************************************************************************/
-
-#ifndef __OMAPLFB_H__
-#define __OMAPLFB_H__
-
-extern IMG_BOOL PVRGetDisplayClassJTable(PVRSRV_DC_DISP2SRV_KMJTABLE *psJTable);
-
-#define OMAPLCD_IRQ                    25
-
-#define OMAPLCD_SYSCONFIG           0x0410
-#define OMAPLCD_CONFIG              0x0444
-#define OMAPLCD_DEFAULT_COLOR0      0x044C
-#define OMAPLCD_TIMING_H            0x0464
-#define OMAPLCD_TIMING_V            0x0468
-#define OMAPLCD_POL_FREQ            0x046C
-#define OMAPLCD_DIVISOR             0x0470
-#define OMAPLCD_SIZE_DIG            0x0478
-#define OMAPLCD_SIZE_LCD            0x047C
-#define OMAPLCD_GFX_POSITION        0x0488
-#define OMAPLCD_GFX_SIZE            0x048C
-#define OMAPLCD_GFX_ATTRIBUTES      0x04a0
-#define OMAPLCD_GFX_FIFO_THRESHOLD  0x04a4
-#define OMAPLCD_GFX_WINDOW_SKIP     0x04b4
-
-#define OMAPLCD_IRQSTATUS       0x0418
-#define OMAPLCD_IRQENABLE       0x041c
-#define OMAPLCD_CONTROL         0x0440
-#define OMAPLCD_GFX_BA0         0x0480
-#define OMAPLCD_GFX_BA1         0x0484
-#define OMAPLCD_GFX_ROW_INC     0x04ac
-#define OMAPLCD_GFX_PIX_INC     0x04b0
-#define OMAPLCD_VID1_BA0        0x04bc
-#define OMAPLCD_VID1_BA1        0x04c0
-#define OMAPLCD_VID1_ROW_INC    0x04d8
-#define OMAPLCD_VID1_PIX_INC    0x04dc
-
-#define        OMAP_CONTROL_GODIGITAL      (1 << 6)
-#define        OMAP_CONTROL_GOLCD          (1 << 5)
-#define        OMAP_CONTROL_DIGITALENABLE  (1 << 1)
-#define        OMAP_CONTROL_LCDENABLE      (1 << 0)
-
-#define OMAPLCD_INTMASK_VSYNC       (1 << 1)
-#define OMAPLCD_INTMASK_OFF            0
-
-typedef void *       OMAP_HANDLE;
-
-typedef enum tag_omap_bool
-{
-       OMAP_FALSE = 0,
-       OMAP_TRUE  = 1,
-} OMAP_BOOL, *OMAP_PBOOL;
-
-typedef struct OMAPLFB_BUFFER_TAG
-{
-       unsigned long                ulBufferSize;
-
-       
-       
-
-       IMG_SYS_PHYADDR              sSysAddr;
-       IMG_CPU_VIRTADDR             sCPUVAddr;
-       PVRSRV_SYNC_DATA            *psSyncData;
-
-       struct OMAPLFB_BUFFER_TAG       *psNext;
-} OMAPLFB_BUFFER;
-
-typedef struct OMAPLFB_VSYNC_FLIP_ITEM_TAG
-{
-       
-
-
-       OMAP_HANDLE      hCmdComplete;
-       
-       unsigned long    ulSwapInterval;
-       
-       OMAP_BOOL        bValid;
-       
-       OMAP_BOOL        bFlipped;
-       
-       OMAP_BOOL        bCmdCompleted;
-
-       
-       
-
-       
-       IMG_SYS_PHYADDR* sSysAddr;
-} OMAPLFB_VSYNC_FLIP_ITEM;
-
-typedef struct PVRPDP_SWAPCHAIN_TAG
-{
-       
-       unsigned long       ulBufferCount;
-       
-       OMAPLFB_BUFFER     *psBuffer;
-       
-       OMAPLFB_VSYNC_FLIP_ITEM *psVSyncFlips;
-
-       
-       unsigned long       ulInsertIndex;
-       
-       
-       unsigned long       ulRemoveIndex;
-
-       
-       void *pvRegs;
-
-       
-       PVRSRV_DC_DISP2SRV_KMJTABLE     *psPVRJTable;
-
-       
-       OMAP_BOOL           bFlushCommands;
-
-       
-       unsigned long       ulSetFlushStateRefCount;
-
-       
-       OMAP_BOOL           bBlanked;
-
-       
-       spinlock_t         *psSwapChainLock;
-} OMAPLFB_SWAPCHAIN;
-
-typedef struct OMAPLFB_FBINFO_TAG
-{
-       unsigned long       ulFBSize;
-       unsigned long       ulBufferSize;
-       unsigned long       ulRoundedBufferSize;
-       unsigned long       ulWidth;
-       unsigned long       ulHeight;
-       unsigned long       ulByteStride;
-
-       
-       
-       IMG_SYS_PHYADDR     sSysAddr;
-       IMG_CPU_VIRTADDR    sCPUVAddr;
-
-       
-       PVRSRV_PIXEL_FORMAT ePixelFormat;
-}OMAPLFB_FBINFO;
-
-typedef struct OMAPLFB_DEVINFO_TAG
-{
-       unsigned long           ulDeviceID;
-
-       
-       OMAPLFB_BUFFER          sSystemBuffer;
-
-       
-       PVRSRV_DC_DISP2SRV_KMJTABLE     sPVRJTable;
-       
-       
-       PVRSRV_DC_SRV2DISP_KMJTABLE     sDCJTable;
-
-       
-       OMAPLFB_FBINFO          sFBInfo;
-
-       
-       unsigned long           ulRefCount;
-
-       
-       OMAPLFB_SWAPCHAIN      *psSwapChain;
-
-       
-       OMAP_BOOL               bFlushCommands;
-
-       
-       struct fb_info         *psLINFBInfo;
-
-       
-       struct notifier_block   sLINNotifBlock;
-
-       
-       OMAP_BOOL               bDeviceSuspended;
-
-       
-       spinlock_t             sSwapChainLock;
-
-       
-       
-
-       
-       IMG_DEV_VIRTADDR                sDisplayDevVAddr;
-
-       DISPLAY_INFO            sDisplayInfo;
-
-       
-       DISPLAY_FORMAT          sDisplayFormat;
-       
-       
-       DISPLAY_DIMS            sDisplayDim;
-
-}  OMAPLFB_DEVINFO;
-
-#define        OMAPLFB_PAGE_SIZE 4096
-#define        OMAPLFB_PAGE_MASK (OMAPLFB_PAGE_SIZE - 1)
-#define        OMAPLFB_PAGE_TRUNC (~OMAPLFB_PAGE_MASK)
-
-#define        OMAPLFB_PAGE_ROUNDUP(x) (((x) + OMAPLFB_PAGE_MASK) & OMAPLFB_PAGE_TRUNC)
-
-//#define DEBUG
-
-#ifdef DEBUG
-#define        DEBUG_PRINTK(x) printk x
-#else
-#define        DEBUG_PRINTK(x)
-#endif
-
-#define DISPLAY_DEVICE_NAME "PowerVR OMAP Linux Display Driver"
-#define        DRVNAME "omaplfb"
-#define        DEVNAME DRVNAME
-#define        DRIVER_PREFIX DRVNAME
-
-typedef enum _OMAP_ERROR_
-{
-       OMAP_OK                             =  0,
-       OMAP_ERROR_GENERIC                  =  1,
-       OMAP_ERROR_OUT_OF_MEMORY            =  2,
-       OMAP_ERROR_TOO_FEW_BUFFERS          =  3,
-       OMAP_ERROR_INVALID_PARAMS           =  4,
-       OMAP_ERROR_INIT_FAILURE             =  5,
-       OMAP_ERROR_CANT_REGISTER_CALLBACK   =  6,
-       OMAP_ERROR_INVALID_DEVICE           =  7,
-       OMAP_ERROR_DEVICE_REGISTER_FAILED   =  8
-} OMAP_ERROR;
-
-
-#ifndef UNREFERENCED_PARAMETER
-#define        UNREFERENCED_PARAMETER(param) (param) = (param)
-#endif
-
-OMAP_ERROR OMAPLFBInit(void);
-OMAP_ERROR OMAPLFBDeinit(void);
-
-#ifdef LDM_PLATFORM
-void OMAPLFBDriverSuspend(void);
-void OMAPLFBDriverResume(void);
-#endif
-
-void *OMAPLFBAllocKernelMem(unsigned long ulSize);
-void OMAPLFBFreeKernelMem(void *pvMem);
-OMAP_ERROR OMAPLFBGetLibFuncAddr(char *szFunctionName, PFN_DC_GET_PVRJTABLE *ppfnFuncTable);
-OMAP_ERROR OMAPLFBInstallVSyncISR (OMAPLFB_SWAPCHAIN *psSwapChain);
-OMAP_ERROR OMAPLFBUninstallVSyncISR(OMAPLFB_SWAPCHAIN *psSwapChain);
-OMAP_BOOL OMAPLFBVSyncIHandler(OMAPLFB_SWAPCHAIN *psSwapChain);
-void OMAPLFBEnableVSyncInterrupt(OMAPLFB_SWAPCHAIN *psSwapChain);
-void OMAPLFBDisableVSyncInterrupt(OMAPLFB_SWAPCHAIN *psSwapChain);
-#if defined (SUPPORT_TI_DSS_FW)
-void OMAPLFBEnableDisplayRegisterAccess(void);
-void OMAPLFBDisableDisplayRegisterAccess(void);
-#endif
-#if defined (CONFIG_OMAP2_DSS)
-IMG_VOID OMAPLFBFlipDSS2(OMAPLFB_SWAPCHAIN *psSwapChain,
-                                                 IMG_UINT32 aPhyAddr);
-#endif
-void OMAPLFBFlip(OMAPLFB_SWAPCHAIN *psSwapChain, unsigned long aPhyAddr);
-
-#endif 
-
diff --git a/services4/3rdparty/dc_ti8168_linux/omaplfb_displayclass.c b/services4/3rdparty/dc_ti8168_linux/omaplfb_displayclass.c
deleted file mode 100644 (file)
index 8282c0b..0000000
+++ /dev/null
@@ -1,1545 +0,0 @@
-/**********************************************************************
- *
- * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
- * 
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- * 
- * This program is distributed in the hope it will be useful but, except 
- * as otherwise stated in writing, without any warranty; without even the 
- * implied warranty of merchantability or fitness for a particular purpose. 
- * See the GNU General Public License for more details.
- * 
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- * 
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Imagination Technologies Ltd. <gpl-support@imgtec.com>
- * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
- *
- ******************************************************************************/
-
-#include <linux/version.h>
-#include <linux/kernel.h>
-#include <linux/console.h>
-#include <linux/fb.h>
-#include <linux/module.h>
-#include <linux/string.h>
-#include <linux/notifier.h>
-#include <linux/spinlock.h>
-
-#include "img_defs.h"
-#include "servicesext.h"
-#include "kerneldisplay.h"
-#include "omaplfb.h"
-
-static void *gpvAnchor;
-
-static int fb_idx = 0;
-
-#define OMAPLFB_COMMAND_COUNT          1
-
-static PFN_DC_GET_PVRJTABLE pfnGetPVRJTable = 0;
-
-static OMAPLFB_DEVINFO * GetAnchorPtr(void)
-{
-       return (OMAPLFB_DEVINFO *)gpvAnchor;
-}
-
-static void SetAnchorPtr(OMAPLFB_DEVINFO *psDevInfo)
-{
-       gpvAnchor = (void*)psDevInfo;
-}
-
-       
-static void FlushInternalVSyncQueue(OMAPLFB_SWAPCHAIN *psSwapChain)
-{
-       OMAPLFB_VSYNC_FLIP_ITEM *psFlipItem;
-       unsigned long            ulMaxIndex;
-       unsigned long            i;
-
-       
-       psFlipItem = &psSwapChain->psVSyncFlips[psSwapChain->ulRemoveIndex];
-       ulMaxIndex = psSwapChain->ulBufferCount - 1;
-
-       for(i = 0; i < psSwapChain->ulBufferCount; i++)
-       {
-               if (psFlipItem->bValid == OMAP_FALSE)
-               {
-                       continue;
-               }
-
-               DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX ": FlushInternalVSyncQueue: Flushing swap buffer (index %lu)\n", psSwapChain->ulRemoveIndex));
-
-               if(psFlipItem->bFlipped == OMAP_FALSE)
-               {
-                       
-                       OMAPLFBFlip(psSwapChain, (unsigned long)psFlipItem->sSysAddr);
-               }
-               
-               if(psFlipItem->bCmdCompleted == OMAP_FALSE)
-               {
-                       DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX ": FlushInternalVSyncQueue: Calling command complete for swap buffer (index %lu)\n", psSwapChain->ulRemoveIndex));
-
-                       psSwapChain->psPVRJTable->pfnPVRSRVCmdComplete((IMG_HANDLE)psFlipItem->hCmdComplete, IMG_TRUE);
-               }
-
-               
-               psSwapChain->ulRemoveIndex++;
-               
-               if(psSwapChain->ulRemoveIndex > ulMaxIndex)
-               {
-                       psSwapChain->ulRemoveIndex = 0;
-               }
-
-               
-               psFlipItem->bFlipped = OMAP_FALSE;
-               psFlipItem->bCmdCompleted = OMAP_FALSE;
-               psFlipItem->bValid = OMAP_FALSE;
-               
-               
-               psFlipItem = &psSwapChain->psVSyncFlips[psSwapChain->ulRemoveIndex];
-       }
-
-       psSwapChain->ulInsertIndex = 0;
-       psSwapChain->ulRemoveIndex = 0;
-}
-
-static void SetFlushStateInternalNoLock(OMAPLFB_DEVINFO* psDevInfo,
-                                        OMAP_BOOL bFlushState)
-{
-       OMAPLFB_SWAPCHAIN *psSwapChain = psDevInfo->psSwapChain;
-
-       if (psSwapChain == NULL)
-       {
-               return;
-       }
-
-       if (bFlushState)
-       {
-               if (psSwapChain->ulSetFlushStateRefCount == 0)
-               {
-                       OMAPLFBDisableVSyncInterrupt(psSwapChain);
-                       psSwapChain->bFlushCommands = OMAP_TRUE;
-                       FlushInternalVSyncQueue(psSwapChain);
-               }
-               psSwapChain->ulSetFlushStateRefCount++;
-       }
-       else
-       {
-               if (psSwapChain->ulSetFlushStateRefCount != 0)
-               {
-                       psSwapChain->ulSetFlushStateRefCount--;
-                       if (psSwapChain->ulSetFlushStateRefCount == 0)
-                       {
-                               psSwapChain->bFlushCommands = OMAP_FALSE;
-                               OMAPLFBEnableVSyncInterrupt(psSwapChain);
-                       }
-               }
-       }
-}
-
-static IMG_VOID SetFlushStateInternal(OMAPLFB_DEVINFO* psDevInfo,
-                                      OMAP_BOOL bFlushState)
-{
-       unsigned long ulLockFlags;
-
-       spin_lock_irqsave(&psDevInfo->sSwapChainLock, ulLockFlags);
-
-       SetFlushStateInternalNoLock(psDevInfo, bFlushState);
-
-       spin_unlock_irqrestore(&psDevInfo->sSwapChainLock, ulLockFlags);
-}
-
-static void SetFlushStateExternal(OMAPLFB_DEVINFO* psDevInfo,
-                                  OMAP_BOOL bFlushState)
-{
-       unsigned long ulLockFlags;
-
-       spin_lock_irqsave(&psDevInfo->sSwapChainLock, ulLockFlags);
-
-       
-       if (psDevInfo->bFlushCommands != bFlushState)
-       {
-               psDevInfo->bFlushCommands = bFlushState;
-               SetFlushStateInternalNoLock(psDevInfo, bFlushState);
-       }
-
-       spin_unlock_irqrestore(&psDevInfo->sSwapChainLock, ulLockFlags);
-}
-
-static IMG_VOID SetDCState(IMG_HANDLE hDevice, IMG_UINT32 ui32State)
-{
-       OMAPLFB_DEVINFO *psDevInfo = (OMAPLFB_DEVINFO *)hDevice;
-
-       switch (ui32State)
-       {
-               case DC_STATE_FLUSH_COMMANDS:
-                       SetFlushStateExternal(psDevInfo, OMAP_TRUE);
-                       break;
-               case DC_STATE_NO_FLUSH_COMMANDS:
-                       SetFlushStateExternal(psDevInfo, OMAP_FALSE);
-                       break;
-               default:
-                       break;
-       }
-
-       return;
-}
-
-static int FrameBufferEvents(struct notifier_block *psNotif,
-                             unsigned long event, void *data)
-{
-       OMAPLFB_DEVINFO *psDevInfo;
-       OMAPLFB_SWAPCHAIN *psSwapChain;
-       struct fb_event *psFBEvent = (struct fb_event *)data;
-       OMAP_BOOL bBlanked;
-
-       
-       if (event != FB_EVENT_BLANK)
-       {
-               return 0;
-       }
-
-       psDevInfo = GetAnchorPtr();
-       psSwapChain = psDevInfo->psSwapChain;
-
-       bBlanked = (*(IMG_INT *)psFBEvent->data != 0) ? OMAP_TRUE: OMAP_FALSE;
-
-       if (bBlanked != psSwapChain->bBlanked)
-       {
-               psSwapChain->bBlanked = bBlanked;
-
-               if (bBlanked)
-               {
-                       
-                       SetFlushStateInternal(psDevInfo, OMAP_TRUE);
-               }
-               else
-               {
-                       
-                       SetFlushStateInternal(psDevInfo, OMAP_FALSE);
-               }
-       }
-
-       return 0;
-}
-
-
-static OMAP_ERROR UnblankDisplay(OMAPLFB_DEVINFO *psDevInfo)
-{
-       int res;
-
-       acquire_console_sem();
-       res = fb_blank(psDevInfo->psLINFBInfo, 0);
-       release_console_sem();
-#if !defined (CONFIG_OMAP2_DSS)
-       /* DSS2 returns error if unblank from a non-suspend state */
-       if (res != 0)
-       {
-               printk(KERN_WARNING DRIVER_PREFIX
-                       ": fb_blank failed (%d)", res);
-               return (OMAP_ERROR_GENERIC);
-       }
-#endif
-
-       return (OMAP_OK);
-}
-
#if defined (CONFIG_OMAP2_DSS)
#include <linux/workqueue.h>

/* Deferred-flip state for DSS2: fb_pan_display may sleep, so the pan is
 * pushed to a workqueue rather than done in the flip path. */
struct wq_flip {
	struct fb_var_screeninfo var;	/* snapshot of var info with the new yoffset */
	struct fb_info *psLINFBInfo;	/* framebuffer to pan */
	struct work_struct work;	/* deferred work item */
};

/* Fix: single shared work item was a non-static global polluting the
 * kernel namespace; it is only referenced from this file, so give it
 * internal linkage.
 * NOTE(review): a single reused item means a second flip scheduled
 * before the work runs overwrites the first - confirm this is an
 * acceptable "latest wins" policy for the caller. */
static struct wq_flip wq_flipdss2;

/* Work handler: performs the actual pan on the framebuffer, if the
 * fbdev driver provides fb_pan_display. */
static void dss2_pan_display(struct work_struct *work)
{
	struct wq_flip *ptrwq_flip =
		container_of(work, struct wq_flip, work);

	if (ptrwq_flip->psLINFBInfo->fbops->fb_pan_display != NULL) {
		ptrwq_flip->psLINFBInfo->fbops->fb_pan_display(&ptrwq_flip->var,
						ptrwq_flip->psLINFBInfo);
	}
}

/*
	Flip implementation for DSS2 using fb_pan_display
*/
IMG_VOID OMAPLFBFlipDSS2(OMAPLFB_SWAPCHAIN *psSwapChain,
						 IMG_UINT32 aPhyAddr)
{
	OMAPLFB_DEVINFO *psDevInfo = GetAnchorPtr ();
	struct fb_info *psLINFBInfo = psDevInfo->psLINFBInfo;

	/* Convert the buffer physical address into a line offset from the
	 * start of framebuffer memory and schedule the deferred pan. */
	memcpy ( &wq_flipdss2.var, &psLINFBInfo->var, sizeof(struct fb_var_screeninfo));
	wq_flipdss2.var.yoffset = (aPhyAddr-psLINFBInfo->fix.smem_start)/psLINFBInfo->fix.line_length;
	wq_flipdss2.psLINFBInfo = psLINFBInfo;
	schedule_work (&wq_flipdss2.work);
}
#endif
-
-static OMAP_ERROR EnableLFBEventNotification(OMAPLFB_DEVINFO *psDevInfo)
-{
-       int                res;
-       OMAPLFB_SWAPCHAIN *psSwapChain = psDevInfo->psSwapChain;
-       OMAP_ERROR         eError;
-
-       
-       memset(&psDevInfo->sLINNotifBlock, 0, sizeof(psDevInfo->sLINNotifBlock));
-
-       psDevInfo->sLINNotifBlock.notifier_call = FrameBufferEvents;
-
-       psSwapChain->bBlanked = OMAP_FALSE;
-
-       res = fb_register_client(&psDevInfo->sLINNotifBlock);
-       if (res != 0)
-       {
-               printk(KERN_WARNING DRIVER_PREFIX
-                       ": fb_register_client failed (%d)", res);
-
-               return (OMAP_ERROR_GENERIC);
-       }
-
-       eError = UnblankDisplay(psDevInfo);
-       if (eError != OMAP_OK)
-       {
-               DEBUG_PRINTK((KERN_WARNING DRIVER_PREFIX
-                       ": UnblankDisplay failed (%d)", eError));
-               return eError;
-       }
-
-       return (OMAP_OK);
-}
-
-static OMAP_ERROR DisableLFBEventNotification(OMAPLFB_DEVINFO *psDevInfo)
-{
-       int res;
-
-       
-       res = fb_unregister_client(&psDevInfo->sLINNotifBlock);
-       if (res != 0)
-       {
-               printk(KERN_WARNING DRIVER_PREFIX
-                       ": fb_unregister_client failed (%d)", res);
-               return (OMAP_ERROR_GENERIC);
-       }
-
-       return (OMAP_OK);
-}
-
-static PVRSRV_ERROR OpenDCDevice(IMG_UINT32 ui32DeviceID,
-                                 IMG_HANDLE *phDevice,
-                                 PVRSRV_SYNC_DATA* psSystemBufferSyncData)
-{
-       OMAPLFB_DEVINFO *psDevInfo;
-       OMAP_ERROR eError;
-
-       UNREFERENCED_PARAMETER(ui32DeviceID);
-
-       psDevInfo = GetAnchorPtr();
-
-       
-       psDevInfo->sSystemBuffer.psSyncData = psSystemBufferSyncData;
-       
-       eError = UnblankDisplay(psDevInfo);
-       if (eError != OMAP_OK)
-       {
-               DEBUG_PRINTK((KERN_WARNING DRIVER_PREFIX
-                       ": UnblankDisplay failed (%d)", eError));
-               return (PVRSRV_ERROR_GENERIC);
-       }
-
-       
-       *phDevice = (IMG_HANDLE)psDevInfo;
-       
-       return (PVRSRV_OK);
-}
-
-static PVRSRV_ERROR CloseDCDevice(IMG_HANDLE hDevice)
-{
-       UNREFERENCED_PARAMETER(hDevice);
-
-       return (PVRSRV_OK);
-}
-
-static PVRSRV_ERROR EnumDCFormats(IMG_HANDLE hDevice,
-                                  IMG_UINT32 *pui32NumFormats,
-                                  DISPLAY_FORMAT *psFormat)
-{
-       OMAPLFB_DEVINFO *psDevInfo;
-       
-       if(!hDevice || !pui32NumFormats)
-       {
-               return (PVRSRV_ERROR_INVALID_PARAMS);
-       }
-
-       psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
-       
-       *pui32NumFormats = 1;
-       
-       if(psFormat)
-       {
-               psFormat[0] = psDevInfo->sDisplayFormat;
-       }
-
-       return (PVRSRV_OK);
-}
-
-static PVRSRV_ERROR EnumDCDims(IMG_HANDLE hDevice, 
-                               DISPLAY_FORMAT *psFormat,
-                               IMG_UINT32 *pui32NumDims,
-                               DISPLAY_DIMS *psDim)
-{
-       OMAPLFB_DEVINFO *psDevInfo;
-
-       if(!hDevice || !psFormat || !pui32NumDims)
-       {
-               return (PVRSRV_ERROR_INVALID_PARAMS);
-       }
-
-       psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
-
-       *pui32NumDims = 1;
-
-       
-       if(psDim)
-       {
-               psDim[0] = psDevInfo->sDisplayDim;
-       }
-       
-       return (PVRSRV_OK);
-}
-
-
-static PVRSRV_ERROR GetDCSystemBuffer(IMG_HANDLE hDevice, IMG_HANDLE *phBuffer)
-{
-       OMAPLFB_DEVINFO *psDevInfo;
-       
-       if(!hDevice || !phBuffer)
-       {
-               return (PVRSRV_ERROR_INVALID_PARAMS);
-       }
-
-       psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
-
-       *phBuffer = (IMG_HANDLE)&psDevInfo->sSystemBuffer;
-
-       return (PVRSRV_OK);
-}
-
-
-static PVRSRV_ERROR GetDCInfo(IMG_HANDLE hDevice, DISPLAY_INFO *psDCInfo)
-{
-       OMAPLFB_DEVINFO *psDevInfo;
-       
-       if(!hDevice || !psDCInfo)
-       {
-               return (PVRSRV_ERROR_INVALID_PARAMS);
-       }
-
-       psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
-
-       *psDCInfo = psDevInfo->sDisplayInfo;
-
-       return (PVRSRV_OK);
-}
-
-static PVRSRV_ERROR GetDCBufferAddr(IMG_HANDLE        hDevice,
-                                    IMG_HANDLE        hBuffer, 
-                                    IMG_SYS_PHYADDR   **ppsSysAddr,
-                                    IMG_UINT32        *pui32ByteSize,
-                                    IMG_VOID          **ppvCpuVAddr,
-                                    IMG_HANDLE        *phOSMapInfo,
-                                    IMG_BOOL          *pbIsContiguous)
-{
-       OMAPLFB_DEVINFO *psDevInfo;
-       OMAPLFB_BUFFER *psSystemBuffer;
-
-       if(!hDevice)
-       {
-               return (PVRSRV_ERROR_INVALID_PARAMS);
-       }
-       psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
-       
-       if(!hBuffer)
-       {
-               return (PVRSRV_ERROR_INVALID_PARAMS);
-       }
-       psSystemBuffer = (OMAPLFB_BUFFER *)hBuffer;
-
-       if (!ppsSysAddr)
-       {
-               return (PVRSRV_ERROR_INVALID_PARAMS);
-       }
-
-       *ppsSysAddr = &psSystemBuffer->sSysAddr;
-
-       if (!pui32ByteSize)
-       {
-               return (PVRSRV_ERROR_INVALID_PARAMS);
-       }
-
-       *pui32ByteSize = (IMG_UINT32)psDevInfo->sFBInfo.ulBufferSize;
-
-       if (ppvCpuVAddr)
-       {
-               *ppvCpuVAddr = psSystemBuffer->sCPUVAddr;
-       }
-
-       if (phOSMapInfo)
-       {
-               *phOSMapInfo = (IMG_HANDLE)0;
-       }
-
-       if (pbIsContiguous)
-       {
-               *pbIsContiguous = IMG_TRUE;
-       }
-
-       return (PVRSRV_OK);
-}
-
-static PVRSRV_ERROR CreateDCSwapChain(IMG_HANDLE hDevice,
-                                      IMG_UINT32 ui32Flags,
-                                      DISPLAY_SURF_ATTRIBUTES *psDstSurfAttrib,
-                                      DISPLAY_SURF_ATTRIBUTES *psSrcSurfAttrib,
-                                      IMG_UINT32 ui32BufferCount,
-                                      PVRSRV_SYNC_DATA **ppsSyncData,
-                                      IMG_UINT32 ui32OEMFlags,
-                                      IMG_HANDLE *phSwapChain,
-                                      IMG_UINT32 *pui32SwapChainID)
-{
-       OMAPLFB_DEVINFO *psDevInfo;
-       OMAPLFB_SWAPCHAIN *psSwapChain;
-       OMAPLFB_BUFFER *psBuffer;
-       OMAPLFB_VSYNC_FLIP_ITEM *psVSyncFlips;
-       IMG_UINT32 i;
-       PVRSRV_ERROR eError = PVRSRV_ERROR_GENERIC;
-       unsigned long ulLockFlags;
-       IMG_UINT32 ui32BuffersToSkip;
-
-       UNREFERENCED_PARAMETER(ui32OEMFlags);
-       UNREFERENCED_PARAMETER(pui32SwapChainID);
-       
-       
-       if(!hDevice
-       || !psDstSurfAttrib
-       || !psSrcSurfAttrib
-       || !ppsSyncData
-       || !phSwapChain)
-       {
-               return (PVRSRV_ERROR_INVALID_PARAMS);
-       }
-
-       psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
-       
-       
-       if (psDevInfo->sDisplayInfo.ui32MaxSwapChains == 0)
-       {
-               return (PVRSRV_ERROR_NOT_SUPPORTED);
-       }
-
-       
-       if(psDevInfo->psSwapChain != NULL)
-       {
-               return (PVRSRV_ERROR_FLIP_CHAIN_EXISTS);
-       }
-       
-       
-       if(ui32BufferCount > psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers)
-       {
-               return (PVRSRV_ERROR_TOOMANYBUFFERS);
-       }
-       
-       if ((psDevInfo->sFBInfo.ulRoundedBufferSize * (unsigned long)ui32BufferCount) > psDevInfo->sFBInfo.ulFBSize)
-       {
-               return (PVRSRV_ERROR_TOOMANYBUFFERS);
-       }
-
-       
-       ui32BuffersToSkip = psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers - ui32BufferCount;
-
-       
-       if(psDstSurfAttrib->pixelformat != psDevInfo->sDisplayFormat.pixelformat
-       || psDstSurfAttrib->sDims.ui32ByteStride != psDevInfo->sDisplayDim.ui32ByteStride
-       || psDstSurfAttrib->sDims.ui32Width != psDevInfo->sDisplayDim.ui32Width
-       || psDstSurfAttrib->sDims.ui32Height != psDevInfo->sDisplayDim.ui32Height)
-       {
-               
-               return (PVRSRV_ERROR_INVALID_PARAMS);
-       }               
-
-       if(psDstSurfAttrib->pixelformat != psSrcSurfAttrib->pixelformat
-       || psDstSurfAttrib->sDims.ui32ByteStride != psSrcSurfAttrib->sDims.ui32ByteStride
-       || psDstSurfAttrib->sDims.ui32Width != psSrcSurfAttrib->sDims.ui32Width
-       || psDstSurfAttrib->sDims.ui32Height != psSrcSurfAttrib->sDims.ui32Height)
-       {
-               
-               return (PVRSRV_ERROR_INVALID_PARAMS);
-       }               
-
-       
-       UNREFERENCED_PARAMETER(ui32Flags);
-       
-       
-       psSwapChain = (OMAPLFB_SWAPCHAIN*)OMAPLFBAllocKernelMem(sizeof(OMAPLFB_SWAPCHAIN));
-       if(!psSwapChain)
-       {
-               return (PVRSRV_ERROR_OUT_OF_MEMORY);
-       }
-
-       psBuffer = (OMAPLFB_BUFFER*)OMAPLFBAllocKernelMem(sizeof(OMAPLFB_BUFFER) * ui32BufferCount);
-       if(!psBuffer)
-       {
-               eError = PVRSRV_ERROR_OUT_OF_MEMORY;
-               goto ErrorFreeSwapChain;
-       }
-
-       psVSyncFlips = (OMAPLFB_VSYNC_FLIP_ITEM *)OMAPLFBAllocKernelMem(sizeof(OMAPLFB_VSYNC_FLIP_ITEM) * ui32BufferCount);
-       if (!psVSyncFlips)
-       {
-               eError = PVRSRV_ERROR_OUT_OF_MEMORY;
-               goto ErrorFreeBuffers;
-       }
-
-       psSwapChain->ulBufferCount = (unsigned long)ui32BufferCount;
-       psSwapChain->psBuffer = psBuffer;
-       psSwapChain->psVSyncFlips = psVSyncFlips;
-       psSwapChain->ulInsertIndex = 0;
-       psSwapChain->ulRemoveIndex = 0;
-       psSwapChain->psPVRJTable = &psDevInfo->sPVRJTable;
-       psSwapChain->psSwapChainLock = &psDevInfo->sSwapChainLock;
-
-       
-       for(i=0; i<ui32BufferCount-1; i++)
-       {
-               psBuffer[i].psNext = &psBuffer[i+1];
-       }
-       
-       psBuffer[i].psNext = &psBuffer[0];
-
-       
-       for(i=0; i<ui32BufferCount; i++)
-       {
-               IMG_UINT32 ui32SwapBuffer = i + ui32BuffersToSkip;
-               IMG_UINT32 ui32BufferOffset = ui32SwapBuffer * (IMG_UINT32)psDevInfo->sFBInfo.ulRoundedBufferSize;
-
-               psBuffer[i].psSyncData = ppsSyncData[i];
-
-               psBuffer[i].sSysAddr.uiAddr = psDevInfo->sFBInfo.sSysAddr.uiAddr + ui32BufferOffset;
-               psBuffer[i].sCPUVAddr = psDevInfo->sFBInfo.sCPUVAddr + ui32BufferOffset;
-       }
-
-       
-       for(i=0; i<ui32BufferCount; i++)
-       {
-               psVSyncFlips[i].bValid = OMAP_FALSE;
-               psVSyncFlips[i].bFlipped = OMAP_FALSE;
-               psVSyncFlips[i].bCmdCompleted = OMAP_FALSE;
-       }
-#if defined (SUPPORT_TI_DSS_FW)
-
-       OMAPLFBEnableDisplayRegisterAccess();
-
-       
-       psSwapChain->pvRegs = ioremap(psDevInfo->psLINFBInfo->fix.mmio_start, psDevInfo->psLINFBInfo->fix.mmio_len);
-       if (psSwapChain->pvRegs == NULL)
-       {
-               printk(KERN_WARNING DRIVER_PREFIX ": Couldn't map registers needed for flipping\n");
-               goto ErrorDisableDisplayRegisters;
-       }
-#endif
-       if (OMAPLFBInstallVSyncISR(psSwapChain) != OMAP_OK)
-       {
-               printk(KERN_WARNING DRIVER_PREFIX ": ISR handler failed to register\n");
-               goto ErrorUnmapRegisters;
-       }
-
-       spin_lock_irqsave(&psDevInfo->sSwapChainLock, ulLockFlags);
-
-       
-       psDevInfo->psSwapChain = psSwapChain;
-
-       
-       psSwapChain->bFlushCommands = psDevInfo->bFlushCommands;
-
-       if (psSwapChain->bFlushCommands)
-       {
-               psSwapChain->ulSetFlushStateRefCount = 1;
-       }
-       else
-       {
-               psSwapChain->ulSetFlushStateRefCount = 0;
-               OMAPLFBEnableVSyncInterrupt(psSwapChain);
-       }
-               
-       spin_unlock_irqrestore(&psDevInfo->sSwapChainLock, ulLockFlags);
-
-       if (EnableLFBEventNotification(psDevInfo)!= OMAP_OK)
-       {
-               printk(KERN_WARNING DRIVER_PREFIX ": Couldn't enable framebuffer event notification\n");
-               goto ErrorUninstallVSyncInterrupt;
-       }
-
-       
-       *phSwapChain = (IMG_HANDLE)psSwapChain;
-
-       return (PVRSRV_OK);
-
-ErrorUninstallVSyncInterrupt:
-       if(OMAPLFBUninstallVSyncISR(psSwapChain) != OMAP_OK)
-       {
-               printk(KERN_WARNING DRIVER_PREFIX ": Couldn't uninstall VSync ISR\n");
-       }
-ErrorUnmapRegisters:
-#if defined (SUPPORT_TI_DSS_FW)
-       iounmap(psSwapChain->pvRegs);
-ErrorDisableDisplayRegisters:
-       OMAPLFBDisableDisplayRegisterAccess();
-#endif
-       OMAPLFBFreeKernelMem(psVSyncFlips);
-ErrorFreeBuffers:
-       OMAPLFBFreeKernelMem(psBuffer);
-ErrorFreeSwapChain:
-       OMAPLFBFreeKernelMem(psSwapChain);
-
-       return eError;
-}
-
/*
 * DestroyDCSwapChain
 * Tears down the swap chain created by CreateDCSwapChain: stops blank
 * notifications, drains the VSync flip queue, restores scan-out to the
 * system buffer, unpublishes the chain, then releases the ISR, register
 * mapping and memory.
 */
static PVRSRV_ERROR DestroyDCSwapChain(IMG_HANDLE hDevice,
	IMG_HANDLE hSwapChain)
{
	OMAPLFB_DEVINFO *psDevInfo;
	OMAPLFB_SWAPCHAIN *psSwapChain;
	unsigned long ulLockFlags;
	OMAP_ERROR eError;

	/* Validate parameters. */
	if(!hDevice || !hSwapChain)
	{
		return (PVRSRV_ERROR_INVALID_PARAMS);
	}

	psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
	psSwapChain = (OMAPLFB_SWAPCHAIN*)hSwapChain;
	/* Only the single chain owned by this device can be destroyed. */
	if (psSwapChain != psDevInfo->psSwapChain)
	{
		return (PVRSRV_ERROR_INVALID_PARAMS);
	}

	/* Stop blank/unblank events first so no new flush transitions occur
	 * during teardown; failure is logged but not fatal. */
	eError = DisableLFBEventNotification(psDevInfo);
	if (eError != OMAP_OK)
	{
		printk(KERN_WARNING DRIVER_PREFIX ": Couldn't disable framebuffer event notification\n");
	}

	spin_lock_irqsave(&psDevInfo->sSwapChainLock, ulLockFlags);

	OMAPLFBDisableVSyncInterrupt(psSwapChain);

	/* Complete any flips still queued for the VSync ISR. */
	FlushInternalVSyncQueue(psSwapChain);

	/* Restore scan-out to the base (system) buffer. */
	OMAPLFBFlip(psSwapChain, (unsigned long)psDevInfo->sFBInfo.sSysAddr.uiAddr);

	/* Unpublish the chain so the ISR/notifier paths see it as gone. */
	psDevInfo->psSwapChain = NULL;

	spin_unlock_irqrestore(&psDevInfo->sSwapChainLock, ulLockFlags);

	if(OMAPLFBUninstallVSyncISR(psSwapChain) != OMAP_OK)
	{
		/* NOTE(review): returning here leaks psSwapChain, its buffers
		 * and flip items (and the register mapping under
		 * SUPPORT_TI_DSS_FW) - confirm whether that is intentional. */
		printk(KERN_WARNING DRIVER_PREFIX ": Couldn't uninstall VSync ISR\n");
		return (PVRSRV_ERROR_GENERIC);
	}

#if defined (SUPPORT_TI_DSS_FW)
	/* Release the register mapping taken at chain creation. */
	iounmap(psSwapChain->pvRegs);

	OMAPLFBDisableDisplayRegisterAccess();
#endif

	OMAPLFBFreeKernelMem(psSwapChain->psVSyncFlips);
	OMAPLFBFreeKernelMem(psSwapChain->psBuffer);
	OMAPLFBFreeKernelMem(psSwapChain);

	return (PVRSRV_OK);
}
-
-static PVRSRV_ERROR SetDCDstRect(IMG_HANDLE hDevice,
-       IMG_HANDLE hSwapChain,
-       IMG_RECT *psRect)
-{
-       UNREFERENCED_PARAMETER(hDevice);
-       UNREFERENCED_PARAMETER(hSwapChain);
-       UNREFERENCED_PARAMETER(psRect);
-
-       
-       
-       return (PVRSRV_ERROR_NOT_SUPPORTED);
-}
-
-static PVRSRV_ERROR SetDCSrcRect(IMG_HANDLE hDevice,
-                                 IMG_HANDLE hSwapChain,
-                                 IMG_RECT *psRect)
-{
-       UNREFERENCED_PARAMETER(hDevice);
-       UNREFERENCED_PARAMETER(hSwapChain);
-       UNREFERENCED_PARAMETER(psRect);
-
-       
-
-       return (PVRSRV_ERROR_NOT_SUPPORTED);
-}
-
-static PVRSRV_ERROR SetDCDstColourKey(IMG_HANDLE hDevice,
-                                      IMG_HANDLE hSwapChain,
-                                      IMG_UINT32 ui32CKColour)
-{
-       UNREFERENCED_PARAMETER(hDevice);
-       UNREFERENCED_PARAMETER(hSwapChain);
-       UNREFERENCED_PARAMETER(ui32CKColour);
-
-       
-
-       return (PVRSRV_ERROR_NOT_SUPPORTED);
-}
-
-static PVRSRV_ERROR SetDCSrcColourKey(IMG_HANDLE hDevice,
-                                      IMG_HANDLE hSwapChain,
-                                      IMG_UINT32 ui32CKColour)
-{
-       UNREFERENCED_PARAMETER(hDevice);
-       UNREFERENCED_PARAMETER(hSwapChain);
-       UNREFERENCED_PARAMETER(ui32CKColour);
-
-       
-
-       return (PVRSRV_ERROR_NOT_SUPPORTED);
-}
-
-static PVRSRV_ERROR GetDCBuffers(IMG_HANDLE hDevice,
-                                 IMG_HANDLE hSwapChain,
-                                 IMG_UINT32 *pui32BufferCount,
-                                 IMG_HANDLE *phBuffer)
-{
-       OMAPLFB_DEVINFO   *psDevInfo;
-       OMAPLFB_SWAPCHAIN *psSwapChain;
-       unsigned long      i;
-       
-       
-       if(!hDevice 
-       || !hSwapChain
-       || !pui32BufferCount
-       || !phBuffer)
-       {
-               return (PVRSRV_ERROR_INVALID_PARAMS);
-       }
-       
-       psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
-       psSwapChain = (OMAPLFB_SWAPCHAIN*)hSwapChain;
-       if (psSwapChain != psDevInfo->psSwapChain)
-       {
-               return (PVRSRV_ERROR_INVALID_PARAMS);
-       }
-       
-       
-       *pui32BufferCount = (IMG_UINT32)psSwapChain->ulBufferCount;
-       
-       
-       for(i=0; i<psSwapChain->ulBufferCount; i++)
-       {
-               phBuffer[i] = (IMG_HANDLE)&psSwapChain->psBuffer[i];
-       }
-       
-       return (PVRSRV_OK);
-}
-
-static PVRSRV_ERROR SwapToDCBuffer(IMG_HANDLE hDevice,
-                                   IMG_HANDLE hBuffer,
-                                   IMG_UINT32 ui32SwapInterval,
-                                   IMG_HANDLE hPrivateTag,
-                                   IMG_UINT32 ui32ClipRectCount,
-                                   IMG_RECT *psClipRect)
-{
-       OMAPLFB_DEVINFO *psDevInfo;
-
-       UNREFERENCED_PARAMETER(ui32SwapInterval);
-       UNREFERENCED_PARAMETER(hPrivateTag);
-       UNREFERENCED_PARAMETER(psClipRect);
-       
-       if(!hDevice 
-       || !hBuffer
-       || (ui32ClipRectCount != 0))
-       {
-               return (PVRSRV_ERROR_INVALID_PARAMS);
-       }
-
-       psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
-
-       
-       return (PVRSRV_OK);
-}
-
-static PVRSRV_ERROR SwapToDCSystem(IMG_HANDLE hDevice,
-                                   IMG_HANDLE hSwapChain)
-{
-       OMAPLFB_DEVINFO   *psDevInfo;
-       OMAPLFB_SWAPCHAIN *psSwapChain;
-       unsigned long      ulLockFlags;
-
-       if(!hDevice || !hSwapChain)
-       {
-               return (PVRSRV_ERROR_INVALID_PARAMS);
-       }
-
-       psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
-       psSwapChain = (OMAPLFB_SWAPCHAIN*)hSwapChain;
-       if (psSwapChain != psDevInfo->psSwapChain)
-       {
-               return (PVRSRV_ERROR_INVALID_PARAMS);
-       }
-       
-       spin_lock_irqsave(&psDevInfo->sSwapChainLock, ulLockFlags);
-
-       
-       FlushInternalVSyncQueue(psSwapChain);
-
-       
-       OMAPLFBFlip(psSwapChain, (unsigned long)psDevInfo->sFBInfo.sSysAddr.uiAddr);
-
-       spin_unlock_irqrestore(&psDevInfo->sSwapChainLock, ulLockFlags);
-
-       return (PVRSRV_OK);
-}
-
/*
 * OMAPLFBVSyncIHandler
 * Per-VSync service routine for the flip queue.  Walks valid items from
 * the remove index: completes the services command for an item once it
 * has been displayed, counts down its swap interval one VSync at a
 * time, and retires it when the interval reaches zero; flips the next
 * undisplayed item when it reaches the head.
 * NOTE(review): bStatus is initialised to OMAP_FALSE and never set, so
 * this always returns OMAP_FALSE - confirm callers ignore the result.
 */
OMAP_BOOL OMAPLFBVSyncIHandler(OMAPLFB_SWAPCHAIN *psSwapChain)
{
	OMAP_BOOL bStatus = OMAP_FALSE;
	OMAPLFB_VSYNC_FLIP_ITEM *psFlipItem;
	unsigned long ulMaxIndex;
	unsigned long ulLockFlags;

	psFlipItem = &psSwapChain->psVSyncFlips[psSwapChain->ulRemoveIndex];
	ulMaxIndex = psSwapChain->ulBufferCount - 1;

	spin_lock_irqsave(psSwapChain->psSwapChainLock, ulLockFlags);

	/* Nothing to do while commands are being flushed. */
	if (psSwapChain->bFlushCommands)
	{
		goto ExitUnlock;
	}

	while(psFlipItem->bValid)
	{
		/* Item already displayed: pace it by its swap interval. */
		if(psFlipItem->bFlipped)
		{
			if(!psFlipItem->bCmdCompleted)
			{
				/* Tell services the flip command has been processed. */
				psSwapChain->psPVRJTable->pfnPVRSRVCmdComplete((IMG_HANDLE)psFlipItem->hCmdComplete, IMG_TRUE);

				psFlipItem->bCmdCompleted = OMAP_TRUE;
			}

			/* One more VSync has elapsed for this item. */
			psFlipItem->ulSwapInterval--;

			if(psFlipItem->ulSwapInterval == 0)
			{
				/* Interval served: retire the item and advance the
				 * (circular) remove index. */
				psSwapChain->ulRemoveIndex++;

				if(psSwapChain->ulRemoveIndex > ulMaxIndex)
				{
					psSwapChain->ulRemoveIndex = 0;
				}

				/* Reset the slot for reuse by ProcessFlip. */
				psFlipItem->bCmdCompleted = OMAP_FALSE;
				psFlipItem->bFlipped = OMAP_FALSE;

				psFlipItem->bValid = OMAP_FALSE;
			}
			else
			{
				/* Still pacing this item - done for this VSync. */
				break;
			}
		}
		else
		{
			/* Head item not yet displayed: flip it now.
			 * NOTE(review): this casts the sSysAddr POINTER to
			 * unsigned long, while other call sites pass
			 * sSysAddr.uiAddr - confirm which form OMAPLFBFlip
			 * expects here. */
			OMAPLFBFlip(psSwapChain, (unsigned long)psFlipItem->sSysAddr);

			psFlipItem->bFlipped = OMAP_TRUE;

			break;
		}

		/* Move on to the next queued item. */
		psFlipItem = &psSwapChain->psVSyncFlips[psSwapChain->ulRemoveIndex];
	}

ExitUnlock:
	spin_unlock_irqrestore(psSwapChain->psSwapChainLock, ulLockFlags);

	return bStatus;
}
-
/*
 * ProcessFlip
 * Services command-processing callback for queued flip commands.
 * Validates the command payload, then either flips immediately (device
 * suspended, zero swap interval, or flush in progress) or, when
 * SYS_USING_INTERRUPTS is defined, inserts the flip into the VSync
 * queue for deferred processing by OMAPLFBVSyncIHandler.
 * Returns IMG_TRUE when the command was accepted, IMG_FALSE on bad
 * parameters or a full queue (services will retry).
 */
static IMG_BOOL ProcessFlip(IMG_HANDLE  hCmdCookie,
                            IMG_UINT32  ui32DataSize,
                            IMG_VOID   *pvData)
{
	DISPLAYCLASS_FLIP_COMMAND *psFlipCmd;
	OMAPLFB_DEVINFO *psDevInfo;
	OMAPLFB_BUFFER *psBuffer;
	OMAPLFB_SWAPCHAIN *psSwapChain;
#if defined(SYS_USING_INTERRUPTS)
	OMAPLFB_VSYNC_FLIP_ITEM* psFlipItem;
#endif
	unsigned long ulLockFlags;

	/* Validate the command cookie and payload pointer. */
	if(!hCmdCookie || !pvData)
	{
		return IMG_FALSE;
	}

	psFlipCmd = (DISPLAYCLASS_FLIP_COMMAND*)pvData;

	/* Payload must be exactly one flip command. */
	if (psFlipCmd == IMG_NULL || sizeof(DISPLAYCLASS_FLIP_COMMAND) != ui32DataSize)
	{
		return IMG_FALSE;
	}

	psDevInfo = (OMAPLFB_DEVINFO*)psFlipCmd->hExtDevice;

	psBuffer = (OMAPLFB_BUFFER*)psFlipCmd->hExtBuffer;
	psSwapChain = (OMAPLFB_SWAPCHAIN*) psFlipCmd->hExtSwapChain;

	spin_lock_irqsave(&psDevInfo->sSwapChainLock, ulLockFlags);

	/* While suspended, just complete the command without flipping. */
	if (psDevInfo->bDeviceSuspended)
	{
		psSwapChain->psPVRJTable->pfnPVRSRVCmdComplete(hCmdCookie, IMG_TRUE);
		goto ExitTrueUnlock;
	}

#if defined(SYS_USING_INTERRUPTS)
	/* Immediate path: no pacing requested, or commands being flushed. */
	if(psFlipCmd->ui32SwapInterval == 0 || psSwapChain->bFlushCommands == OMAP_TRUE)
	{
#endif
		/* Flip now and complete the command synchronously. */
		OMAPLFBFlip(psSwapChain, (unsigned long)psBuffer->sSysAddr.uiAddr);

		psSwapChain->psPVRJTable->pfnPVRSRVCmdComplete(hCmdCookie, IMG_TRUE);

#if defined(SYS_USING_INTERRUPTS)
		goto ExitTrueUnlock;
	}

	/* Deferred path: queue the flip for the VSync ISR. */
	psFlipItem = &psSwapChain->psVSyncFlips[psSwapChain->ulInsertIndex];

	/* Insert slot free? */
	if(psFlipItem->bValid == OMAP_FALSE)
	{
		unsigned long ulMaxIndex = psSwapChain->ulBufferCount - 1;

		if(psSwapChain->ulInsertIndex == psSwapChain->ulRemoveIndex)
		{
			/* Queue was empty: flip immediately; the ISR still paces
			 * command completion by the swap interval. */
			OMAPLFBFlip(psSwapChain, (unsigned long)psBuffer->sSysAddr.uiAddr);

			psFlipItem->bFlipped = OMAP_TRUE;
		}
		else
		{
			psFlipItem->bFlipped = OMAP_FALSE;
		}

		psFlipItem->hCmdComplete = (OMAP_HANDLE)hCmdCookie;
		psFlipItem->ulSwapInterval = (unsigned long)psFlipCmd->ui32SwapInterval;
		psFlipItem->sSysAddr = &psBuffer->sSysAddr;
		psFlipItem->bValid = OMAP_TRUE;

		/* Advance the (circular) insert index. */
		psSwapChain->ulInsertIndex++;
		if(psSwapChain->ulInsertIndex > ulMaxIndex)
		{
			psSwapChain->ulInsertIndex = 0;
		}

		goto ExitTrueUnlock;
	}

	/* Queue full: report failure so services will retry the command. */
	spin_unlock_irqrestore(&psDevInfo->sSwapChainLock, ulLockFlags);
	return IMG_FALSE;
#endif

ExitTrueUnlock:
	spin_unlock_irqrestore(&psDevInfo->sSwapChainLock, ulLockFlags);
	return IMG_TRUE;
}
-
-
/*
 * InitDev
 * Binds the display-class device to the Linux framebuffer selected by
 * fb_idx: pins the fbdev driver module, opens the framebuffer, and
 * fills psDevInfo->sFBInfo (addresses, geometry, pixel format) from the
 * fbdev fix/var screen info.  Runs under the console semaphore so the
 * fbdev state stays stable while being read.
 * Returns OMAP_OK on success, OMAP_ERROR_INVALID_DEVICE for a bad
 * fb_idx, otherwise OMAP_ERROR_GENERIC.
 */
static OMAP_ERROR InitDev(OMAPLFB_DEVINFO *psDevInfo)
{
	struct fb_info *psLINFBInfo;
	struct module *psLINFBOwner;
	OMAPLFB_FBINFO *psPVRFBInfo = &psDevInfo->sFBInfo;
	OMAP_ERROR eError = OMAP_ERROR_GENERIC;
	unsigned long FBSize;

	acquire_console_sem();

	if (fb_idx < 0 || fb_idx >= num_registered_fb)
	{
		eError = OMAP_ERROR_INVALID_DEVICE;
		goto errRelSem;
	}

	psLINFBInfo = registered_fb[fb_idx];

	/* Pin the fbdev driver module while we hold pointers into it. */
	psLINFBOwner = psLINFBInfo->fbops->owner;
	if (!try_module_get(psLINFBOwner))
	{
		printk(KERN_INFO DRIVER_PREFIX
			": Couldn't get framebuffer module\n");

		goto errRelSem;
	}

	if (psLINFBInfo->fbops->fb_open != NULL)
	{
		int res;

		res = psLINFBInfo->fbops->fb_open(psLINFBInfo, 0);
		if (res != 0)
		{
			printk(KERN_INFO DRIVER_PREFIX
				": Couldn't open framebuffer: %d\n", res);

			goto errModPut;
		}
	}

	psDevInfo->psLINFBInfo = psLINFBInfo;

	/* NOTE(review): smem_len is not an unsigned long on all kernels -
	 * confirm the %lx specifier below matches its type. */
	DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
			": psLINFBInfo->screen_size: 0x%lx\n",
			psLINFBInfo->screen_size));
	DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
			": psLINFBInfo->fix.smem_len : 0x%lx\n",
			psLINFBInfo->fix.smem_len));

	/* Prefer screen_size when the driver sets it; otherwise fall back
	 * to the length of the video memory aperture. */
	FBSize = (psLINFBInfo->screen_size) != 0 ?
					psLINFBInfo->screen_size :
					psLINFBInfo->fix.smem_len;

	DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
			": Framebuffer physical address: 0x%lx\n",
			psLINFBInfo->fix.smem_start));
	DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
			": Framebuffer virtual address: 0x%lx\n",
			(unsigned long)psLINFBInfo->screen_base));
	DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
			": Framebuffer size: %lu\n",
			FBSize));
	DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
			": Framebuffer virtual width: %u\n",
			psLINFBInfo->var.xres_virtual));
	DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
			": Framebuffer virtual height: %u\n",
			psLINFBInfo->var.yres_virtual));
	DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
			": Framebuffer width: %u\n",
			psLINFBInfo->var.xres));
	DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
			": Framebuffer height: %u\n",
			psLINFBInfo->var.yres));
	DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
			": Framebuffer stride: %u\n",
			psLINFBInfo->fix.line_length));

	/* Physical and CPU-virtual base of framebuffer memory. */
	psPVRFBInfo->sSysAddr.uiAddr = psLINFBInfo->fix.smem_start;
	psPVRFBInfo->sCPUVAddr = psLINFBInfo->screen_base;
	/* Swap reported width/height when the console is rotated 90/270. */
	if ((psLINFBInfo->var.rotate == FB_ROTATE_CW)
		|| (psLINFBInfo->var.rotate == FB_ROTATE_CCW) ) {
		psPVRFBInfo->ulWidth = psLINFBInfo->var.yres;
		psPVRFBInfo->ulHeight = psLINFBInfo->var.xres;
	} else {
		psPVRFBInfo->ulWidth = psLINFBInfo->var.xres;
		psPVRFBInfo->ulHeight = psLINFBInfo->var.yres;
	}

	psPVRFBInfo->ulByteStride =  psLINFBInfo->fix.line_length;
	psPVRFBInfo->ulFBSize = FBSize;
	psPVRFBInfo->ulBufferSize = psPVRFBInfo->ulHeight * psPVRFBInfo->ulByteStride;

#ifdef CONFIG_OMAP2_DSS
	/* DSS2 pans in whole lines, so no page rounding is applied. */
	psPVRFBInfo->ulRoundedBufferSize = psPVRFBInfo->ulBufferSize;
#else
	psPVRFBInfo->ulRoundedBufferSize = OMAPLFB_PAGE_ROUNDUP(psPVRFBInfo->ulBufferSize);
#endif
	/* Derive the PVR pixel format from the fbdev bitfield layout; only
	 * RGB565 and 32-bit [A]RGB8888 layouts are recognised. */
	if(psLINFBInfo->var.bits_per_pixel == 16)
	{
		if((psLINFBInfo->var.red.length == 5) &&
			(psLINFBInfo->var.green.length == 6) && 
			(psLINFBInfo->var.blue.length == 5) && 
			(psLINFBInfo->var.red.offset == 11) &&
			(psLINFBInfo->var.green.offset == 5) && 
			(psLINFBInfo->var.blue.offset == 0) && 
			(psLINFBInfo->var.red.msb_right == 0))
		{
			psPVRFBInfo->ePixelFormat = PVRSRV_PIXEL_FORMAT_RGB565;
		}
		else
		{
			printk("Unknown FB format\n");
		}
	}
	else if(psLINFBInfo->var.bits_per_pixel == 32)
	{
		if((psLINFBInfo->var.red.length == 8) &&
			(psLINFBInfo->var.green.length == 8) && 
			(psLINFBInfo->var.blue.length == 8) && 
			(psLINFBInfo->var.red.offset == 16) &&
			(psLINFBInfo->var.green.offset == 8) && 
			(psLINFBInfo->var.blue.offset == 0) && 
			(psLINFBInfo->var.red.msb_right == 0))
		{
			printk ("PVRSRV_PIXEL_FORMAT_ARGB8888\n");
			psPVRFBInfo->ePixelFormat = PVRSRV_PIXEL_FORMAT_ARGB8888;
		}
		else
		{
			printk("Unknown FB format\n");
		}
	}	
	else
	{
		printk("Unknown FB format\n");
	}

	/* Mirror the base addresses into the device info copy. */
	psDevInfo->sFBInfo.sSysAddr.uiAddr = psPVRFBInfo->sSysAddr.uiAddr;
	psDevInfo->sFBInfo.sCPUVAddr = psPVRFBInfo->sCPUVAddr;

#ifdef CONFIG_OMAP2_DSS
	/* Prepare the deferred-pan work item used by OMAPLFBFlipDSS2. */
	INIT_WORK (&wq_flipdss2.work, dss2_pan_display);
#endif

	eError = OMAP_OK;
	goto errRelSem;

errModPut:
	module_put(psLINFBOwner);
errRelSem:
	release_console_sem();
	return eError;
}
-
-static void DeInitDev(OMAPLFB_DEVINFO *psDevInfo)
-{
-       struct fb_info *psLINFBInfo = psDevInfo->psLINFBInfo;
-       struct module *psLINFBOwner;
-
-       acquire_console_sem();
-
-       psLINFBOwner = psLINFBInfo->fbops->owner;
-
-       if (psLINFBInfo->fbops->fb_release != NULL) 
-       {
-               (void) psLINFBInfo->fbops->fb_release(psLINFBInfo, 0);
-       }
-
-       module_put(psLINFBOwner);
-
-       release_console_sem();
-}
-
-OMAP_ERROR OMAPLFBInit(void)
-{
-       OMAPLFB_DEVINFO         *psDevInfo;
-
-       psDevInfo = GetAnchorPtr();
-       
-       if (psDevInfo == NULL)
-       {
-               PFN_CMD_PROC                    pfnCmdProcList[OMAPLFB_COMMAND_COUNT];
-               IMG_UINT32                              aui32SyncCountList[OMAPLFB_COMMAND_COUNT][2];
-               
-               psDevInfo = (OMAPLFB_DEVINFO *)OMAPLFBAllocKernelMem(sizeof(OMAPLFB_DEVINFO));
-
-               if(!psDevInfo)
-               {
-                       return (OMAP_ERROR_OUT_OF_MEMORY);
-               }
-
-               
-               memset(psDevInfo, 0, sizeof(OMAPLFB_DEVINFO));
-
-               
-               SetAnchorPtr((void*)psDevInfo);
-
-               
-               psDevInfo->ulRefCount = 0;
-
-               
-               if(InitDev(psDevInfo) != OMAP_OK)
-               {
-                       return (OMAP_ERROR_INIT_FAILURE);
-               }
-
-               if(OMAPLFBGetLibFuncAddr ("PVRGetDisplayClassJTable", &pfnGetPVRJTable) != OMAP_OK)
-               {
-                       return (OMAP_ERROR_INIT_FAILURE);
-               }
-
-               
-               if(!(*pfnGetPVRJTable)(&psDevInfo->sPVRJTable))
-               {
-                       return (OMAP_ERROR_INIT_FAILURE);
-               }
-
-                               
-               spin_lock_init(&psDevInfo->sSwapChainLock);
-
-               psDevInfo->psSwapChain = 0;
-               psDevInfo->bFlushCommands = OMAP_FALSE;
-               psDevInfo->bDeviceSuspended = OMAP_FALSE;
-
-               psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers = (IMG_UINT32)(psDevInfo->sFBInfo.ulFBSize / psDevInfo->sFBInfo.ulRoundedBufferSize);
-#if !defined (SUPPORT_TI_DSS_FW)
-               /*  Limiting the ui32MaxSwapChainBuffers to 3 */
-               if (psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers > 3)
-                       psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers = 3;
-#endif
-#if 1 
-                /* for fb_pan_display to work, yres_virtual should be set to number of buffers multiplied yres */  
-                psDevInfo->psLINFBInfo->var.yres_virtual = psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers * psDevInfo->psLINFBInfo->var.yres;
-                if (fb_set_var(psDevInfo->psLINFBInfo, &psDevInfo->psLINFBInfo->var) != 0)
-                {
-                   printk(KERN_INFO DRIVER_PREFIX ": Couldn't set framebuffer paramter: ");
-
-                }
-#endif
-               if (psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers == 0)
-               {
-                       psDevInfo->sDisplayInfo.ui32MaxSwapChains = 0;
-                       psDevInfo->sDisplayInfo.ui32MaxSwapInterval = 0;
-               }
-               else
-               {
-                       psDevInfo->sDisplayInfo.ui32MaxSwapChains = 1;
-                       psDevInfo->sDisplayInfo.ui32MaxSwapInterval = 3;
-               }
-               psDevInfo->sDisplayInfo.ui32MinSwapInterval = 0;
-
-               strncpy(psDevInfo->sDisplayInfo.szDisplayName, DISPLAY_DEVICE_NAME, MAX_DISPLAY_NAME_SIZE);
-       
-               psDevInfo->sDisplayFormat.pixelformat = psDevInfo->sFBInfo.ePixelFormat;
-               psDevInfo->sDisplayDim.ui32Width      = (IMG_UINT32)psDevInfo->sFBInfo.ulWidth;
-               psDevInfo->sDisplayDim.ui32Height     = (IMG_UINT32)psDevInfo->sFBInfo.ulHeight;
-               psDevInfo->sDisplayDim.ui32ByteStride = (IMG_UINT32)psDevInfo->sFBInfo.ulByteStride;
-
-               DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
-                       ": Maximum number of swap chain buffers: %lu\n",
-                       psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers));
-
-               
-               psDevInfo->sSystemBuffer.sSysAddr = psDevInfo->sFBInfo.sSysAddr;
-               psDevInfo->sSystemBuffer.sCPUVAddr = psDevInfo->sFBInfo.sCPUVAddr;
-               psDevInfo->sSystemBuffer.ulBufferSize = psDevInfo->sFBInfo.ulRoundedBufferSize;
-
-               
-
-               psDevInfo->sDCJTable.ui32TableSize = sizeof(PVRSRV_DC_SRV2DISP_KMJTABLE);
-               psDevInfo->sDCJTable.pfnOpenDCDevice = OpenDCDevice;
-               psDevInfo->sDCJTable.pfnCloseDCDevice = CloseDCDevice;
-               psDevInfo->sDCJTable.pfnEnumDCFormats = EnumDCFormats;
-               psDevInfo->sDCJTable.pfnEnumDCDims = EnumDCDims;
-               psDevInfo->sDCJTable.pfnGetDCSystemBuffer = GetDCSystemBuffer;
-               psDevInfo->sDCJTable.pfnGetDCInfo = GetDCInfo;
-               psDevInfo->sDCJTable.pfnGetBufferAddr = GetDCBufferAddr;
-               psDevInfo->sDCJTable.pfnCreateDCSwapChain = CreateDCSwapChain;
-               psDevInfo->sDCJTable.pfnDestroyDCSwapChain = DestroyDCSwapChain;
-               psDevInfo->sDCJTable.pfnSetDCDstRect = SetDCDstRect;
-               psDevInfo->sDCJTable.pfnSetDCSrcRect = SetDCSrcRect;
-               psDevInfo->sDCJTable.pfnSetDCDstColourKey = SetDCDstColourKey;
-               psDevInfo->sDCJTable.pfnSetDCSrcColourKey = SetDCSrcColourKey;
-               psDevInfo->sDCJTable.pfnGetDCBuffers = GetDCBuffers;
-               psDevInfo->sDCJTable.pfnSwapToDCBuffer = SwapToDCBuffer;
-               psDevInfo->sDCJTable.pfnSwapToDCSystem = SwapToDCSystem;
-               psDevInfo->sDCJTable.pfnSetDCState = SetDCState;
-
-               
-               if(psDevInfo->sPVRJTable.pfnPVRSRVRegisterDCDevice (
-                       &psDevInfo->sDCJTable,
-                       &psDevInfo->ulDeviceID ) != PVRSRV_OK)
-               {
-                       return (OMAP_ERROR_DEVICE_REGISTER_FAILED);
-               }
-               
-               
-               pfnCmdProcList[DC_FLIP_COMMAND] = ProcessFlip;
-
-               
-               aui32SyncCountList[DC_FLIP_COMMAND][0] = 0; 
-               aui32SyncCountList[DC_FLIP_COMMAND][1] = 2; 
-
-               
-
-
-
-               if (psDevInfo->sPVRJTable.pfnPVRSRVRegisterCmdProcList (psDevInfo->ulDeviceID,
-                                                                                                                               &pfnCmdProcList[0],
-                                                                                                                               aui32SyncCountList,
-                                                                                                                               OMAPLFB_COMMAND_COUNT) != PVRSRV_OK)
-               {
-                       printk(KERN_WARNING DRIVER_PREFIX ": Can't register callback\n");
-                       return (OMAP_ERROR_CANT_REGISTER_CALLBACK);
-               }
-
-       }
-
-       
-       psDevInfo->ulRefCount++;
-
-       
-       return (OMAP_OK);
-       
-       }
-
-OMAP_ERROR OMAPLFBDeinit(void)
-{
-       OMAPLFB_DEVINFO *psDevInfo, *psDevFirst;
-
-       psDevFirst = GetAnchorPtr();
-       psDevInfo = psDevFirst;
-
-       
-       if (psDevInfo == NULL)
-       {
-               return (OMAP_ERROR_GENERIC);
-       }
-
-       
-       psDevInfo->ulRefCount--;
-
-       if (psDevInfo->ulRefCount == 0)
-       {
-               
-               PVRSRV_DC_DISP2SRV_KMJTABLE     *psJTable = &psDevInfo->sPVRJTable;
-
-               if (psDevInfo->sPVRJTable.pfnPVRSRVRemoveCmdProcList (psDevInfo->ulDeviceID, OMAPLFB_COMMAND_COUNT) != PVRSRV_OK)
-               {
-                       return (OMAP_ERROR_GENERIC);
-               }
-
-               
-               if (psJTable->pfnPVRSRVRemoveDCDevice(psDevInfo->ulDeviceID) != PVRSRV_OK)
-               {
-                       return (OMAP_ERROR_GENERIC);
-               }
-               
-               DeInitDev(psDevInfo);
-
-               
-               OMAPLFBFreeKernelMem(psDevInfo);
-       }
-       
-       
-       SetAnchorPtr(NULL);
-
-       
-       return (OMAP_OK);
-}
-
-
-#if defined(LDM_PLATFORM)
-void OMAPLFBDriverSuspend(void)
-{
-       OMAPLFB_DEVINFO *psDevInfo = GetAnchorPtr();
-       unsigned long    ulLockFlags;
-
-       spin_lock_irqsave(&psDevInfo->sSwapChainLock, ulLockFlags);
-
-       if (psDevInfo->bDeviceSuspended)
-       {
-               goto ExitUnlock;
-       }
-       psDevInfo->bDeviceSuspended = OMAP_TRUE;
-
-       
-       SetFlushStateInternalNoLock(psDevInfo, OMAP_TRUE);
-
-       spin_unlock_irqrestore(&psDevInfo->sSwapChainLock, ulLockFlags);
-
-       
-#if defined (SUPPORT_TI_DSS_FW)                
-       if (psDevInfo->psSwapChain != NULL)
-       {
-               OMAPLFBDisableDisplayRegisterAccess();
-       }
-#endif
-       return;
-
-ExitUnlock:
-       spin_unlock_irqrestore(&psDevInfo->sSwapChainLock, ulLockFlags);
-}
-
-void OMAPLFBDriverResume(void)
-{
-       OMAPLFB_DEVINFO *psDevInfo = GetAnchorPtr();
-       unsigned long    ulLockFlags;
-
-       if (psDevInfo->bDeviceSuspended == OMAP_FALSE)
-       {
-               return;
-       }
-#if defined (SUPPORT_TI_DSS_FW)
-       if (psDevInfo->psSwapChain != NULL)
-       {
-               OMAPLFBEnableDisplayRegisterAccess();
-       }
-#endif
-       spin_lock_irqsave(&psDevInfo->sSwapChainLock, ulLockFlags);
-
-       
-       SetFlushStateInternalNoLock(psDevInfo, OMAP_FALSE);
-
-       psDevInfo->bDeviceSuspended = OMAP_FALSE;
-
-       spin_unlock_irqrestore(&psDevInfo->sSwapChainLock, ulLockFlags);
-}
-#endif
-
diff --git a/services4/3rdparty/dc_ti8168_linux/omaplfb_linux.c b/services4/3rdparty/dc_ti8168_linux/omaplfb_linux.c
deleted file mode 100644 (file)
index 9002cba..0000000
+++ /dev/null
@@ -1,405 +0,0 @@
-/**********************************************************************
- *
- * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
- * 
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- * 
- * This program is distributed in the hope it will be useful but, except 
- * as otherwise stated in writing, without any warranty; without even the 
- * implied warranty of merchantability or fitness for a particular purpose. 
- * See the GNU General Public License for more details.
- * 
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- * 
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Imagination Technologies Ltd. <gpl-support@imgtec.com>
- * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
- *
- ******************************************************************************/
-
-#ifndef AUTOCONF_INCLUDED
-#include <linux/config.h>
-#endif
-
-#include <linux/version.h>
-#include <linux/module.h>
-
-#include <linux/pci.h>
-#include <asm/uaccess.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/interrupt.h>
-
-
-#include "img_defs.h"
-#include "servicesext.h"
-#include "kerneldisplay.h"
-#include "omaplfb.h"
-#include "pvrmodule.h"
-
-#include <plat/ti81xx-vpss.h>
-
-#if defined(LDM_PLATFORM)
-#include <linux/platform_device.h>
-#endif 
-
-#if defined (SUPPORT_TI_DSS_FW)
-#include <asm/io.h>
-
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26))
-#include <plat/display.h>
-#else 
-#include <asm/arch-omap/display.h>
-#endif 
-#else
-#if !defined (CONFIG_OMAP2_DSS)
-#define DISPC_IRQ_VSYNC 0x0002
-int omap_dispc_request_irq(unsigned long irq_mask, void (*callback)(void *data),
-                           void *data)
-{
-    printk(" omap_dispc_request_irq NOT Implemented \n");
-    return (0);
-}
-void omap_dispc_free_irq(unsigned long irq_mask, void (*callback)(void *data),
-                         void *data)
-{
-    printk("omap_dispc_free_irq NOT Implemented \n");
-}
-
-void omap_dispc_set_plane_base(int plane, IMG_UINT32 phys_addr)
-{
-    printk (" omap_dispc_set_plane_base NOT Implemented \n");
-}
-
-#else
-#include <plat/display.h>
-#include <linux/console.h>
-#include <linux/fb.h>
-static int pOMAPLFBVSyncISRHandle = 0;
-#endif
-#endif
-
-MODULE_SUPPORTED_DEVICE(DEVNAME);
-
-#define unref__ __attribute__ ((unused))
-
-void *OMAPLFBAllocKernelMem(unsigned long ulSize)
-{
-       return kmalloc(ulSize, GFP_KERNEL);
-}
-
-void OMAPLFBFreeKernelMem(void *pvMem)
-{
-       kfree(pvMem);
-}
-
-
-OMAP_ERROR OMAPLFBGetLibFuncAddr (char *szFunctionName, PFN_DC_GET_PVRJTABLE *ppfnFuncTable)
-{
-       if(strcmp("PVRGetDisplayClassJTable", szFunctionName) != 0)
-       {
-               return (OMAP_ERROR_INVALID_PARAMS);
-       }
-
-       
-       *ppfnFuncTable = PVRGetDisplayClassJTable;
-
-       return (OMAP_OK);
-}
-#if !defined (SUPPORT_TI_DSS_FW) 
-IMG_VOID OMAPLFBEnableVSyncInterrupt(OMAPLFB_SWAPCHAIN *psSwapChain)
-{
-#if defined (CONFIG_OMAP2_DSS)
-       if (pOMAPLFBVSyncISRHandle == 0)
-#endif
-               OMAPLFBInstallVSyncISR (psSwapChain);
-}
-
-IMG_VOID OMAPLFBDisableVSyncInterrupt(OMAPLFB_SWAPCHAIN *psSwapChain)
-{
-#if defined (CONFIG_OMAP2_DSS)
-       if (pOMAPLFBVSyncISRHandle != 0)
-#endif
-               OMAPLFBUninstallVSyncISR (psSwapChain);
-}
-#else
-static void OMAPLFBVSyncWriteReg(OMAPLFB_SWAPCHAIN *psSwapChain, unsigned long ulOffset, unsigned long ulValue)
-{
-       void *pvRegAddr = (void *)((char *)psSwapChain->pvRegs + ulOffset);
-
-       
-       writel(ulValue, pvRegAddr);
-}
-
-static unsigned long OMAPLFBVSyncReadReg(OMAPLFB_SWAPCHAIN *psSwapChain, unsigned long ulOffset)
-{
-       return readl((char *)psSwapChain->pvRegs + ulOffset);
-}
-
-void OMAPLFBEnableVSyncInterrupt(OMAPLFB_SWAPCHAIN *psSwapChain)
-{
-#if defined(SYS_USING_INTERRUPTS)
-       
-       unsigned long ulInterruptEnable  = OMAPLFBVSyncReadReg(psSwapChain, OMAPLCD_IRQENABLE);
-       ulInterruptEnable |= OMAPLCD_INTMASK_VSYNC;
-       OMAPLFBVSyncWriteReg(psSwapChain, OMAPLCD_IRQENABLE, ulInterruptEnable );
-#endif
-}
-
-void OMAPLFBDisableVSyncInterrupt(OMAPLFB_SWAPCHAIN *psSwapChain)
-{
-#if defined(SYS_USING_INTERRUPTS)
-       
-       unsigned long ulInterruptEnable = OMAPLFBVSyncReadReg(psSwapChain, OMAPLCD_IRQENABLE);
-       ulInterruptEnable &= ~(OMAPLCD_INTMASK_VSYNC);
-       OMAPLFBVSyncWriteReg(psSwapChain, OMAPLCD_IRQENABLE, ulInterruptEnable);
-#endif
-}
-#endif
-#if defined(SYS_USING_INTERRUPTS)
-static void
-#if defined (SUPPORT_TI_DSS_FW)
-OMAPLFBVSyncISR(void *arg, struct pt_regs unref__ *regs)
-#else
-#if defined (CONFIG_OMAP2_DSS)
-OMAPLFBVSyncISR(void *arg)
-#else
-OMAPLFBVSyncISR(void *arg)
-#endif
-#endif
-{
-       OMAPLFB_SWAPCHAIN *psSwapChain= (OMAPLFB_SWAPCHAIN *)arg;
-       (void) OMAPLFBVSyncIHandler(psSwapChain);
-}
-#endif
-#if !defined (SUPPORT_TI_DSS_FW)
-OMAP_ERROR OMAPLFBInstallVSyncISR(OMAPLFB_SWAPCHAIN *psSwapChain)
-{
-#if !defined (CONFIG_OMAP2_DSS)
-       if (omap_dispc_request_irq(DISPC_IRQ_VSYNC, OMAPLFBVSyncISR, psSwapChain) != 0)
-#else
-        int ret;
-       ret = vps_grpx_register_isr ((vsync_callback_t)OMAPLFBVSyncISR, psSwapChain, 0); // fb_idx = 0
-        if ( ret == 0) 
-             pOMAPLFBVSyncISRHandle  = 1;
-        else 
-            pOMAPLFBVSyncISRHandle = 0;
-       
-        if (pOMAPLFBVSyncISRHandle == 0)
-#endif
-               return PVRSRV_ERROR_OUT_OF_MEMORY; /* not worth a proper mapping */
-       return OMAP_OK;
-}
-
-
-OMAP_ERROR OMAPLFBUninstallVSyncISR (OMAPLFB_SWAPCHAIN *psSwapChain)
-{
-#if !defined (CONFIG_OMAP2_DSS)
-       omap_dispc_free_irq(DISPC_IRQ_VSYNC, OMAPLFBVSyncISR, psSwapChain);
-#else
-        int ret;
-        ret = vps_grpx_unregister_isr((vsync_callback_t) OMAPLFBVSyncISR, (void *)psSwapChain, 0); // fb_idx = 0
-
-#endif
-       return OMAP_OK;         
-} 
-
-
-IMG_VOID OMAPLFBFlip(OMAPLFB_SWAPCHAIN *psSwapChain,
-                                                 IMG_UINT32 aPhyAddr)
-{
-#if !defined (CONFIG_OMAP2_DSS)
-       omap_dispc_set_plane_base(0, aPhyAddr);
-#else
-       OMAPLFBFlipDSS2 (psSwapChain, aPhyAddr);
-#endif
-}
-#else
-
-OMAP_ERROR OMAPLFBInstallVSyncISR(OMAPLFB_SWAPCHAIN *psSwapChain)
-{
-#if defined(SYS_USING_INTERRUPTS)
-       OMAPLFBDisableVSyncInterrupt(psSwapChain);
-
-       if (omap2_disp_register_isr(OMAPLFBVSyncISR, psSwapChain,
-                                   DISPC_IRQSTATUS_VSYNC))
-       {
-               printk(KERN_INFO DRIVER_PREFIX ": OMAPLFBInstallVSyncISR: Request OMAPLCD IRQ failed\n");
-               return (OMAP_ERROR_INIT_FAILURE);
-       }
-
-#endif
-       return (OMAP_OK);
-}
-
-
-OMAP_ERROR OMAPLFBUninstallVSyncISR (OMAPLFB_SWAPCHAIN *psSwapChain)
-{
-#if defined(SYS_USING_INTERRUPTS)
-       OMAPLFBDisableVSyncInterrupt(psSwapChain);
-
-       omap2_disp_unregister_isr(OMAPLFBVSyncISR);
-
-#endif
-       return (OMAP_OK);
-}
-
-void OMAPLFBEnableDisplayRegisterAccess(void)
-{
-       omap2_disp_get_dss();
-}
-
-void OMAPLFBDisableDisplayRegisterAccess(void)
-{
-       omap2_disp_put_dss();
-}
-
-void OMAPLFBFlip(OMAPLFB_SWAPCHAIN *psSwapChain, unsigned long aPhyAddr)
-{
-       unsigned long control;
-
-       
-       OMAPLFBVSyncWriteReg(psSwapChain, OMAPLCD_GFX_BA0, aPhyAddr);
-       OMAPLFBVSyncWriteReg(psSwapChain, OMAPLCD_GFX_BA1, aPhyAddr);
-
-       control = OMAPLFBVSyncReadReg(psSwapChain, OMAPLCD_CONTROL);
-       control |= OMAP_CONTROL_GOLCD;
-       OMAPLFBVSyncWriteReg(psSwapChain, OMAPLCD_CONTROL, control);
-}
-#endif
-
-#if defined(LDM_PLATFORM)
-
-static OMAP_BOOL bDeviceSuspended;
-
-static void OMAPLFBCommonSuspend(void)
-{
-       if (bDeviceSuspended)
-       {
-               return;
-       }
-
-       OMAPLFBDriverSuspend();
-
-       bDeviceSuspended = OMAP_TRUE;
-}
-
-static int OMAPLFBDriverSuspend_Entry(struct platform_device unref__ *pDevice, pm_message_t unref__ state)
-{
-       DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX ": OMAPLFBDriverSuspend_Entry\n"));
-
-       OMAPLFBCommonSuspend();
-
-       return 0;
-}
-
-static int OMAPLFBDriverResume_Entry(struct platform_device unref__ *pDevice)
-{
-       DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX ": OMAPLFBDriverResume_Entry\n"));
-
-       OMAPLFBDriverResume();
-
-       bDeviceSuspended = OMAP_FALSE;
-
-       return 0;
-}
-
-static IMG_VOID OMAPLFBDriverShutdown_Entry(struct platform_device unref__ *pDevice)
-{
-       DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX ": OMAPLFBDriverShutdown_Entry\n"));
-
-       OMAPLFBCommonSuspend();
-}
-
-static void OMAPLFBDeviceRelease_Entry(struct device unref__ *pDevice)
-{
-       DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX ": OMAPLFBDriverRelease_Entry\n"));
-
-       OMAPLFBCommonSuspend();
-}
-
-static struct platform_driver omaplfb_driver = {
-       .driver = {
-               .name           = DRVNAME,
-       },
-       .suspend        = OMAPLFBDriverSuspend_Entry,
-       .resume         = OMAPLFBDriverResume_Entry,
-       .shutdown       = OMAPLFBDriverShutdown_Entry,
-};
-
-static struct platform_device omaplfb_device = {
-       .name                   = DEVNAME,
-       .id                             = -1,
-       .dev                    = {
-               .release                = OMAPLFBDeviceRelease_Entry
-       }
-};
-#endif 
-
-static int __init OMAPLFB_Init(void)
-{
-#if defined(LDM_PLATFORM)
-       int error;
-#endif
-
-       if(OMAPLFBInit() != OMAP_OK)
-       {
-               printk(KERN_WARNING DRIVER_PREFIX ": OMAPLFB_Init: OMAPLFBInit failed\n");
-               return -ENODEV;
-       }
-
-#if defined(LDM_PLATFORM)
-       if ((error = platform_driver_register(&omaplfb_driver)) != 0)
-       {
-               printk(KERN_WARNING DRIVER_PREFIX ": OMAPLFB_Init: Unable to register platform driver (%d)\n", error);
-
-               goto ExitDeinit;
-       }
-
-       if ((error = platform_device_register(&omaplfb_device)) != 0)
-       {
-               printk(KERN_WARNING DRIVER_PREFIX ": OMAPLFB_Init:  Unable to register platform device (%d)\n", error);
-
-               goto ExitDriverUnregister;
-       }
-#endif 
-
-       return 0;
-
-#if defined(LDM_PLATFORM)
-ExitDriverUnregister:
-       platform_driver_unregister(&omaplfb_driver);
-
-ExitDeinit:
-       if(OMAPLFBDeinit() != OMAP_OK)
-       {
-               printk(KERN_WARNING DRIVER_PREFIX ": OMAPLFB_Init: OMAPLFBDeinit failed\n");
-       }
-
-       return -ENODEV;
-#endif 
-}
-
-static IMG_VOID __exit OMAPLFB_Cleanup(IMG_VOID)
-{    
-#if defined (LDM_PLATFORM)
-       platform_device_unregister(&omaplfb_device);
-       platform_driver_unregister(&omaplfb_driver);
-#endif
-
-       if(OMAPLFBDeinit() != OMAP_OK)
-       {
-               printk(KERN_WARNING DRIVER_PREFIX ": OMAPLFB_Cleanup: OMAPLFBDeinit failed\n");
-       }
-}
-
-module_init(OMAPLFB_Init);
-module_exit(OMAPLFB_Cleanup);
-
diff --git a/services4/3rdparty/dc_ti81xx_linux/3rdparty_dc_drm_shared.h b/services4/3rdparty/dc_ti81xx_linux/3rdparty_dc_drm_shared.h
new file mode 100644 (file)
index 0000000..9b6d240
--- /dev/null
@@ -0,0 +1,45 @@
+/**********************************************************************
+ *
+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ * 
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ * 
+ * This program is distributed in the hope it will be useful but, except 
+ * as otherwise stated in writing, without any warranty; without even the 
+ * implied warranty of merchantability or fitness for a particular purpose. 
+ * See the GNU General Public License for more details.
+ * 
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * 
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
+ *
+ ******************************************************************************/
+
+#ifndef __3RDPARTY_DC_DRM_SHARED_H__
+#define __3RDPARTY_DC_DRM_SHARED_H__
+#if defined(SUPPORT_DRI_DRM)
+
+#define        PVR_DRM_DISP_CMD_ENTER_VT       1
+#define        PVR_DRM_DISP_CMD_LEAVE_VT       2
+
+#define        PVR_DRM_DISP_CMD_ON             3
+#define        PVR_DRM_DISP_CMD_STANDBY        4
+#define        PVR_DRM_DISP_CMD_SUSPEND        5
+#define        PVR_DRM_DISP_CMD_OFF            6
+
+#define        PVR_DRM_DISP_ARG_CMD            0
+#define        PVR_DRM_DISP_ARG_DEV            1
+#define        PVR_DRM_DISP_NUM_ARGS           2
+
+#endif 
+#endif 
+
index 8c3bad3..921e403 100644 (file)
@@ -1,10 +1,14 @@
 SYS_USING_INTERRUPTS = 1
 SUPPORT_OMAP3430_OMAPFB3 =1
 SUPPORT_TI_DSS_FW = 0
+PVR_LINUX_USING_WORKQUEUES = 1
 
 SYS_CFLAGS.$(SYS_USING_INTERRUPTS)                      += -DSYS_USING_INTERRUPTS
 SYS_CFLAGS.$(SUPPORT_OMAP3430_OMAPFB3)                         += -DSUPPORT_OMAP3430_OMAPFB3
 SYS_CFLAGS.$(SUPPORT_TI_DSS_FW)                         += -DSUPPORT_TI_DSS_FW
+SYS_CFLAGS.$(PVR_LINUX_USING_WORKQUEUES)             += -DPVR_LINUX_USING_WORKQUEUES
+SYS_CFLAGS += -DDISPLAY_CONTROLLER=omaplfb
+
 
 EXTRA_CFLAGS = -DLINUX \
                -DCONFIG_OMAP2_DSS \
@@ -18,5 +22,11 @@ ifneq ($(FBDEV),no)
 EXTRA_CFLAGS += -DFBDEV_PRESENT
 endif
 
+ifeq ($(SUPPORT_XORG),1)
+EXTRA_CFLAGS += -DSUPPORT_DRI_DRM
+EXTRA_CFLAGS += -DPVR_DISPLAY_CONTROLLER_DRM_IOCTL
+endif
+
+
 obj-m := omaplfb.o
 omaplfb-y := omaplfb_displayclass.o omaplfb_linux.o
index 0b37436..f52617a 100644 (file)
 # Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
 # 
 #
-#
 
-MODULE         = omaplfb
+MODULE         = $(DISPLAY_CONTROLLER)
 
-INCLUDES =     -I$(EURASIAROOT)/include4 \
-               -I$(EURASIAROOT)/services4/include \
-               -I$(EURASIAROOT)/services4/system/$(PVR_SYSTEM) \
-               -I$(EURASIAROOT)/services4/system/include \
+INCLUDES =
 
-SOURCES        =       ../omaplfb_displayclass.c \
-                       ../omaplfb_linux.c
+SOURCES        =
 
 SYM_VERS_DEPS = $(EURASIAROOT)/services4/srvkm/env/linux
 
+include $(EURASIAROOT)/services4/$(DISPLAY_CONTROLLER_DIR)/makefile.linux.common
+
 include $(EURASIAROOT)/eurasiacon/build/linux/kbuild/Makefile.kbuild_subdir_common
diff --git a/services4/3rdparty/dc_ti81xx_linux/makefile.linux.common b/services4/3rdparty/dc_ti81xx_linux/makefile.linux.common
new file mode 100644 (file)
index 0000000..d5b4a30
--- /dev/null
@@ -0,0 +1,42 @@
+#
+# Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+# 
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+# 
+# This program is distributed in the hope it will be useful but, except 
+# as otherwise stated in writing, without any warranty; without even the 
+# implied warranty of merchantability or fitness for a particular purpose. 
+# See the GNU General Public License for more details.
+# 
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+# 
+# The full GNU General Public License is included in this distribution in
+# the file called "COPYING".
+#
+# Contact Information:
+# Imagination Technologies Ltd. <gpl-support@imgtec.com>
+# Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
+# 
+#
+#
+
+ifeq ($(SUPPORT_DRI_DRM),1)
+DISPLAY_CONTROLLER_SOURCES_ROOT = $(KBUILDROOT)/$(DISPLAY_CONTROLLER_DIR)
+MODULE_CFLAGS += -DPVR_DISPLAY_CONTROLLER_DRM_IOCTL
+else
+DISPLAY_CONTROLLER_SOURCES_ROOT = ..
+endif
+
+INCLUDES +=    -I$(EURASIAROOT)/include4 \
+               -I$(EURASIAROOT)/services4/include \
+               -I$(EURASIAROOT)/services4/system/$(PVR_SYSTEM) \
+               -I$(EURASIAROOT)/services4/system/include \
+               -I$(KERNELDIR)/drivers/video/omap2 \
+               -I$(KERNELDIR)/arch/arm/plat-omap/include
+
+SOURCES        +=      $(DISPLAY_CONTROLLER_SOURCES_ROOT)/omaplfb_displayclass.c \
+                       $(DISPLAY_CONTROLLER_SOURCES_ROOT)/omaplfb_linux.c
diff --git a/services4/3rdparty/dc_ti81xx_linux/modules.order b/services4/3rdparty/dc_ti81xx_linux/modules.order
deleted file mode 100755 (executable)
index 22c0747..0000000
+++ /dev/null
@@ -1 +0,0 @@
-kernel//proj/SGX_Graphics/mahesh/sgxsdk/Graphics_SDK_4_03_00_02/GFX_Linux_KM/services4/3rdparty/dc_ti81xx_linux/omaplfb.ko
index d3ae4bc..881a49d 100644 (file)
 #ifndef __OMAPLFB_H__
 #define __OMAPLFB_H__
 
-#define OMAPLCD_IRQ                    25
-
-#define OMAPLCD_SYSCONFIG           0x0410
-#define OMAPLCD_CONFIG              0x0444
-#define OMAPLCD_DEFAULT_COLOR0      0x044C
-#define OMAPLCD_TIMING_H            0x0464
-#define OMAPLCD_TIMING_V            0x0468
-#define OMAPLCD_POL_FREQ            0x046C
-#define OMAPLCD_DIVISOR             0x0470
-#define OMAPLCD_SIZE_DIG            0x0478
-#define OMAPLCD_SIZE_LCD            0x047C
-#define OMAPLCD_GFX_POSITION        0x0488
-#define OMAPLCD_GFX_SIZE            0x048C
-#define OMAPLCD_GFX_ATTRIBUTES      0x04a0
-#define OMAPLCD_GFX_FIFO_THRESHOLD  0x04a4
-#define OMAPLCD_GFX_WINDOW_SKIP     0x04b4
-
-#define OMAPLCD_IRQSTATUS       0x0418
-#define OMAPLCD_IRQENABLE       0x041c
-#define OMAPLCD_CONTROL         0x0440
-#define OMAPLCD_GFX_BA0         0x0480
-#define OMAPLCD_GFX_BA1         0x0484
-#define OMAPLCD_GFX_ROW_INC     0x04ac
-#define OMAPLCD_GFX_PIX_INC     0x04b0
-#define OMAPLCD_VID1_BA0        0x04bc
-#define OMAPLCD_VID1_BA1        0x04c0
-#define OMAPLCD_VID1_ROW_INC    0x04d8
-#define OMAPLCD_VID1_PIX_INC    0x04dc
-
-#define        OMAP_CONTROL_GODIGITAL      (1 << 6)
-#define        OMAP_CONTROL_GOLCD          (1 << 5)
-#define        OMAP_CONTROL_DIGITALENABLE  (1 << 1)
-#define        OMAP_CONTROL_LCDENABLE      (1 << 0)
-
-#define OMAPLCD_INTMASK_VSYNC       (1 << 1)
-#define OMAPLCD_INTMASK_OFF            0
-
-typedef void *       OMAP_HANDLE;
-
-typedef enum tag_omap_bool
-{
-       OMAP_FALSE = 0,
-       OMAP_TRUE  = 1,
-} OMAP_BOOL, *OMAP_PBOOL;
+#include <linux/version.h>
 
-typedef struct OMAPLFB_BUFFER_TAG
-{
-       unsigned long                ulBufferSize;
+#include <asm/atomic.h>
 
-       
-       
+#include <linux/kernel.h>
+#include <linux/console.h>
+#include <linux/fb.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/notifier.h>
+#include <linux/mutex.h>
 
-       IMG_SYS_PHYADDR              sSysAddr;
-       IMG_CPU_VIRTADDR             sCPUVAddr;
-       PVRSRV_SYNC_DATA            *psSyncData;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#endif
 
-       struct OMAPLFB_BUFFER_TAG       *psNext;
-} OMAPLFB_BUFFER;
+#define unref__ __attribute__ ((unused))
+
+typedef void *       OMAPLFB_HANDLE;
+
+typedef bool OMAPLFB_BOOL, *OMAPLFB_PBOOL;
+#define        OMAPLFB_FALSE false
+#define OMAPLFB_TRUE true
+
+typedef        atomic_t        OMAPLFB_ATOMIC_BOOL;
 
-typedef struct OMAPLFB_VSYNC_FLIP_ITEM_TAG
+typedef atomic_t       OMAPLFB_ATOMIC_INT;
+
+typedef struct OMAPLFB_BUFFER_TAG
 {
-       
+       struct OMAPLFB_BUFFER_TAG       *psNext;
+       struct OMAPLFB_DEVINFO_TAG      *psDevInfo;
 
+       struct work_struct sWork;
 
-       OMAP_HANDLE      hCmdComplete;
-       
-       unsigned long    ulSwapInterval;
-       
-       OMAP_BOOL        bValid;
-       
-       OMAP_BOOL        bFlipped;
        
-       OMAP_BOOL        bCmdCompleted;
+       unsigned long                   ulYOffset;
 
        
        
+       IMG_SYS_PHYADDR                 sSysAddr;
+       IMG_CPU_VIRTADDR                sCPUVAddr;
+       PVRSRV_SYNC_DATA                *psSyncData;
 
-       
-       IMG_SYS_PHYADDR* sSysAddr;
-} OMAPLFB_VSYNC_FLIP_ITEM;
+       OMAPLFB_HANDLE                  hCmdComplete;
+       unsigned long                   ulSwapInterval;
+} OMAPLFB_BUFFER;
 
-typedef struct PVRPDP_SWAPCHAIN_TAG
+typedef struct OMAPLFB_SWAPCHAIN_TAG
 {
        
-       unsigned long       ulBufferCount;
-       
-       OMAPLFB_BUFFER     *psBuffer;
-       
-       OMAPLFB_VSYNC_FLIP_ITEM *psVSyncFlips;
-
-       
-       unsigned long       ulInsertIndex;
-       
-       
-       unsigned long       ulRemoveIndex;
+       unsigned int                    uiSwapChainID;
 
        
-       void *pvRegs;
+       unsigned long                   ulBufferCount;
 
        
-       PVRSRV_DC_DISP2SRV_KMJTABLE     *psPVRJTable;
+       OMAPLFB_BUFFER                  *psBuffer;
 
        
-       OMAP_BOOL           bFlushCommands;
+       struct workqueue_struct         *psWorkQueue;
 
        
-       unsigned long       ulSetFlushStateRefCount;
+       OMAPLFB_BOOL                    bNotVSynced;
 
        
-       OMAP_BOOL           bBlanked;
+       int                             iBlankEvents;
 
        
-       spinlock_t         *psSwapChainLock;
+       unsigned int                    uiFBDevID;
 } OMAPLFB_SWAPCHAIN;
 
 typedef struct OMAPLFB_FBINFO_TAG
@@ -164,7 +121,14 @@ typedef struct OMAPLFB_FBINFO_TAG
 
 typedef struct OMAPLFB_DEVINFO_TAG
 {
-       unsigned int            uiDeviceID;
+       
+       unsigned int            uiFBDevID;
+
+       
+       unsigned int            uiPVRDevID;
+
+       
+       struct mutex            sCreateSwapChainMutex;
 
        
        OMAPLFB_BUFFER          sSystemBuffer;
@@ -179,13 +143,13 @@ typedef struct OMAPLFB_DEVINFO_TAG
        OMAPLFB_FBINFO          sFBInfo;
 
        
-       unsigned long           ulRefCount;
+       OMAPLFB_SWAPCHAIN      *psSwapChain;
 
        
-       OMAPLFB_SWAPCHAIN      *psSwapChain;
+       unsigned int            uiSwapChainID;
 
        
-       OMAP_BOOL               bFlushCommands;
+       OMAPLFB_ATOMIC_BOOL     sFlushCommands;
 
        
        struct fb_info         *psLINFBInfo;
@@ -194,16 +158,10 @@ typedef struct OMAPLFB_DEVINFO_TAG
        struct notifier_block   sLINNotifBlock;
 
        
-       OMAP_BOOL               bDeviceSuspended;
-
-       
-       spinlock_t             sSwapChainLock;
-
-       
        
 
        
-       IMG_DEV_VIRTADDR                sDisplayDevVAddr;
+       IMG_DEV_VIRTADDR        sDisplayDevVAddr;
 
        DISPLAY_INFO            sDisplayInfo;
 
@@ -213,13 +171,26 @@ typedef struct OMAPLFB_DEVINFO_TAG
        
        DISPLAY_DIMS            sDisplayDim;
 
+       
+       OMAPLFB_ATOMIC_BOOL     sBlanked;
+
+       
+       OMAPLFB_ATOMIC_INT      sBlankEvents;
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+       
+       OMAPLFB_ATOMIC_BOOL     sEarlySuspendFlag;
+
+       struct early_suspend    sEarlySuspend;
+#endif
+
+#if defined(SUPPORT_DRI_DRM)
+       OMAPLFB_ATOMIC_BOOL     sLeaveVT;
+#endif
+
 }  OMAPLFB_DEVINFO;
 
 #define        OMAPLFB_PAGE_SIZE 4096
-#define        OMAPLFB_PAGE_MASK (OMAPLFB_PAGE_SIZE - 1)
-#define        OMAPLFB_PAGE_TRUNC (~OMAPLFB_PAGE_MASK)
-
-#define        OMAPLFB_PAGE_ROUNDUP(x) (((x) + OMAPLFB_PAGE_MASK) & OMAPLFB_PAGE_TRUNC)
 
 #ifdef DEBUG
 #define        DEBUG_PRINTK(x) printk x
@@ -232,51 +203,67 @@ typedef struct OMAPLFB_DEVINFO_TAG
 #define        DEVNAME DRVNAME
 #define        DRIVER_PREFIX DRVNAME
 
-typedef enum _OMAP_ERROR_
+typedef enum _OMAPLFB_ERROR_
 {
-       OMAP_OK                             =  0,
-       OMAP_ERROR_GENERIC                  =  1,
-       OMAP_ERROR_OUT_OF_MEMORY            =  2,
-       OMAP_ERROR_TOO_FEW_BUFFERS          =  3,
-       OMAP_ERROR_INVALID_PARAMS           =  4,
-       OMAP_ERROR_INIT_FAILURE             =  5,
-       OMAP_ERROR_CANT_REGISTER_CALLBACK   =  6,
-       OMAP_ERROR_INVALID_DEVICE           =  7,
-       OMAP_ERROR_DEVICE_REGISTER_FAILED   =  8
-} OMAP_ERROR;
-
+       OMAPLFB_OK                             =  0,
+       OMAPLFB_ERROR_GENERIC                  =  1,
+       OMAPLFB_ERROR_OUT_OF_MEMORY            =  2,
+       OMAPLFB_ERROR_TOO_FEW_BUFFERS          =  3,
+       OMAPLFB_ERROR_INVALID_PARAMS           =  4,
+       OMAPLFB_ERROR_INIT_FAILURE             =  5,
+       OMAPLFB_ERROR_CANT_REGISTER_CALLBACK   =  6,
+       OMAPLFB_ERROR_INVALID_DEVICE           =  7,
+       OMAPLFB_ERROR_DEVICE_REGISTER_FAILED   =  8,
+       OMAPLFB_ERROR_SET_UPDATE_MODE_FAILED   =  9
+} OMAPLFB_ERROR;
+
+typedef enum _OMAPLFB_UPDATE_MODE_
+{
+       OMAPLFB_UPDATE_MODE_UNDEFINED                   = 0,
+       OMAPLFB_UPDATE_MODE_MANUAL                      = 1,
+       OMAPLFB_UPDATE_MODE_AUTO                        = 2,
+       OMAPLFB_UPDATE_MODE_DISABLED                    = 3
+} OMAPLFB_UPDATE_MODE;
 
 #ifndef UNREFERENCED_PARAMETER
 #define        UNREFERENCED_PARAMETER(param) (param) = (param)
 #endif
 
-OMAP_ERROR OMAPLFBInit(void);
-OMAP_ERROR OMAPLFBDeinit(void);
-
-#ifdef LDM_PLATFORM
-void OMAPLFBDriverSuspend(void);
-void OMAPLFBDriverResume(void);
-#endif
+OMAPLFB_ERROR OMAPLFBInit(void);
+OMAPLFB_ERROR OMAPLFBDeInit(void);
 
+OMAPLFB_DEVINFO *OMAPLFBGetDevInfoPtr(unsigned uiFBDevID);
+unsigned OMAPLFBMaxFBDevIDPlusOne(void);
 void *OMAPLFBAllocKernelMem(unsigned long ulSize);
 void OMAPLFBFreeKernelMem(void *pvMem);
-OMAP_ERROR OMAPLFBGetLibFuncAddr(char *szFunctionName, PFN_DC_GET_PVRJTABLE *ppfnFuncTable);
-OMAP_ERROR OMAPLFBInstallVSyncISR (OMAPLFB_SWAPCHAIN *psSwapChain);
-OMAP_ERROR OMAPLFBUninstallVSyncISR(OMAPLFB_SWAPCHAIN *psSwapChain);
-OMAP_BOOL OMAPLFBVSyncIHandler(OMAPLFB_SWAPCHAIN *psSwapChain);
-void OMAPLFBEnableVSyncInterrupt(OMAPLFB_SWAPCHAIN *psSwapChain);
-void OMAPLFBDisableVSyncInterrupt(OMAPLFB_SWAPCHAIN *psSwapChain);
-#if defined (SUPPORT_TI_DSS_FW)
-void OMAPLFBEnableDisplayRegisterAccess(void);
-void OMAPLFBDisableDisplayRegisterAccess(void);
-void OMAPLFBFlip(OMAPLFB_SWAPCHAIN *psSwapChain, unsigned long aPhyAddr);
-#endif
-#if defined (CONFIG_OMAP2_DSS)
-IMG_VOID OMAPLFBFlipDSS2(OMAPLFB_SWAPCHAIN *psSwapChain,
-                                                  IMG_UINT32 aPhyAddr);
-#endif
+OMAPLFB_ERROR OMAPLFBGetLibFuncAddr(char *szFunctionName, PFN_DC_GET_PVRJTABLE *ppfnFuncTable);
+OMAPLFB_ERROR OMAPLFBCreateSwapQueue (OMAPLFB_SWAPCHAIN *psSwapChain);
+void OMAPLFBDestroySwapQueue(OMAPLFB_SWAPCHAIN *psSwapChain);
+void OMAPLFBInitBufferForSwap(OMAPLFB_BUFFER *psBuffer);
+void OMAPLFBSwapHandler(OMAPLFB_BUFFER *psBuffer);
+void OMAPLFBQueueBufferForSwap(OMAPLFB_SWAPCHAIN *psSwapChain, OMAPLFB_BUFFER *psBuffer);
+void OMAPLFBFlip(OMAPLFB_DEVINFO *psDevInfo, OMAPLFB_BUFFER *psBuffer);
+OMAPLFB_UPDATE_MODE OMAPLFBGetUpdateMode(OMAPLFB_DEVINFO *psDevInfo);
+OMAPLFB_BOOL OMAPLFBSetUpdateMode(OMAPLFB_DEVINFO *psDevInfo, OMAPLFB_UPDATE_MODE eMode);
+OMAPLFB_BOOL OMAPLFBWaitForVSync(OMAPLFB_DEVINFO *psDevInfo);
+OMAPLFB_BOOL OMAPLFBManualSync(OMAPLFB_DEVINFO *psDevInfo);
+OMAPLFB_BOOL OMAPLFBCheckModeAndSync(OMAPLFB_DEVINFO *psDevInfo);
+OMAPLFB_ERROR OMAPLFBUnblankDisplay(OMAPLFB_DEVINFO *psDevInfo);
+OMAPLFB_ERROR OMAPLFBEnableLFBEventNotification(OMAPLFB_DEVINFO *psDevInfo);
+OMAPLFB_ERROR OMAPLFBDisableLFBEventNotification(OMAPLFB_DEVINFO *psDevInfo);
+void OMAPLFBCreateSwapChainLockInit(OMAPLFB_DEVINFO *psDevInfo);
+void OMAPLFBCreateSwapChainLockDeInit(OMAPLFB_DEVINFO *psDevInfo);
+void OMAPLFBCreateSwapChainLock(OMAPLFB_DEVINFO *psDevInfo);
+void OMAPLFBCreateSwapChainUnLock(OMAPLFB_DEVINFO *psDevInfo);
+void OMAPLFBAtomicBoolInit(OMAPLFB_ATOMIC_BOOL *psAtomic, OMAPLFB_BOOL bVal);
+void OMAPLFBAtomicBoolDeInit(OMAPLFB_ATOMIC_BOOL *psAtomic);
+void OMAPLFBAtomicBoolSet(OMAPLFB_ATOMIC_BOOL *psAtomic, OMAPLFB_BOOL bVal);
+OMAPLFB_BOOL OMAPLFBAtomicBoolRead(OMAPLFB_ATOMIC_BOOL *psAtomic);
+void OMAPLFBAtomicIntInit(OMAPLFB_ATOMIC_INT *psAtomic, int iVal);
+void OMAPLFBAtomicIntDeInit(OMAPLFB_ATOMIC_INT *psAtomic);
+void OMAPLFBAtomicIntSet(OMAPLFB_ATOMIC_INT *psAtomic, int iVal);
+int OMAPLFBAtomicIntRead(OMAPLFB_ATOMIC_INT *psAtomic);
+void OMAPLFBAtomicIntInc(OMAPLFB_ATOMIC_INT *psAtomic);
 
-IMG_VOID OMAPLFBFlip(OMAPLFB_SWAPCHAIN *psSwapChain,
-                                                  IMG_UINT32 aPhyAddr);
 #endif 
 
index 62cef27..b5ba0d1 100644 (file)
 #include <linux/module.h>
 #include <linux/string.h>
 #include <linux/notifier.h>
-#include <linux/spinlock.h>
 
 #include "img_defs.h"
 #include "servicesext.h"
 #include "kerneldisplay.h"
 #include "omaplfb.h"
 
-static void *gpvAnchor;
+#define OMAPLFB_COMMAND_COUNT          1
 
-static int fb_idx = 0;
+#define        OMAPLFB_VSYNC_SETTLE_COUNT      5
 
-#define OMAPLFB_COMMAND_COUNT          1
+#define        OMAPLFB_MAX_NUM_DEVICES         1 // FB_MAX
+#if (OMAPLFB_MAX_NUM_DEVICES > FB_MAX)
+#error "OMAPLFB_MAX_NUM_DEVICES must not be greater than FB_MAX"
+#endif
 
-static PFN_DC_GET_PVRJTABLE pfnGetPVRJTable = 0;
+static OMAPLFB_DEVINFO *gapsDevInfo[OMAPLFB_MAX_NUM_DEVICES];
 
-static OMAPLFB_DEVINFO * GetAnchorPtr(void)
-{
-       return (OMAPLFB_DEVINFO *)gpvAnchor;
-}
+static PFN_DC_GET_PVRJTABLE gpfnGetPVRJTable = NULL;
 
-static void SetAnchorPtr(OMAPLFB_DEVINFO *psDevInfo)
+static inline unsigned long RoundUpToMultiple(unsigned long x, unsigned long y)
 {
-       gpvAnchor = (void*)psDevInfo;
+       unsigned long div = x / y;
+       unsigned long rem = x % y;
+
+       return (div + ((rem == 0) ? 0 : 1)) * y;
 }
 
-       
-static void FlushInternalVSyncQueue(OMAPLFB_SWAPCHAIN *psSwapChain)
+static unsigned long GCD(unsigned long x, unsigned long y)
 {
-       OMAPLFB_VSYNC_FLIP_ITEM *psFlipItem;
-       unsigned long            ulMaxIndex;
-       unsigned long            i;
-
-       
-       psFlipItem = &psSwapChain->psVSyncFlips[psSwapChain->ulRemoveIndex];
-       ulMaxIndex = psSwapChain->ulBufferCount - 1;
-
-       for(i = 0; i < psSwapChain->ulBufferCount; i++)
+       while (y != 0)
        {
-               if (psFlipItem->bValid == OMAP_FALSE)
-               {
-                       continue;
-               }
-
-               DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX ": FlushInternalVSyncQueue: Flushing swap buffer (index %lu)\n", psSwapChain->ulRemoveIndex));
-
-               if(psFlipItem->bFlipped == OMAP_FALSE)
-               {
-                       
-                       OMAPLFBFlip(psSwapChain, (unsigned long)psFlipItem->sSysAddr);
-               }
-               
-               if(psFlipItem->bCmdCompleted == OMAP_FALSE)
-               {
-                       DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX ": FlushInternalVSyncQueue: Calling command complete for swap buffer (index %lu)\n", psSwapChain->ulRemoveIndex));
+               unsigned long r = x % y;
+               x = y;
+               y = r;
+       }
 
-                       psSwapChain->psPVRJTable->pfnPVRSRVCmdComplete((IMG_HANDLE)psFlipItem->hCmdComplete, IMG_TRUE);
-               }
+       return x;
+}
 
-               
-               psSwapChain->ulRemoveIndex++;
-               
-               if(psSwapChain->ulRemoveIndex > ulMaxIndex)
-               {
-                       psSwapChain->ulRemoveIndex = 0;
-               }
+static unsigned long LCM(unsigned long x, unsigned long y)
+{
+       unsigned long gcd = GCD(x, y);
 
-               
-               psFlipItem->bFlipped = OMAP_FALSE;
-               psFlipItem->bCmdCompleted = OMAP_FALSE;
-               psFlipItem->bValid = OMAP_FALSE;
-               
-               
-               psFlipItem = &psSwapChain->psVSyncFlips[psSwapChain->ulRemoveIndex];
-       }
+       return (gcd == 0) ? 0 : ((x / gcd) * y);
+}
 
-       psSwapChain->ulInsertIndex = 0;
-       psSwapChain->ulRemoveIndex = 0;
+unsigned OMAPLFBMaxFBDevIDPlusOne(void)
+{
+       return OMAPLFB_MAX_NUM_DEVICES;
 }
 
-static void SetFlushStateInternalNoLock(OMAPLFB_DEVINFO* psDevInfo,
-                                        OMAP_BOOL bFlushState)
+OMAPLFB_DEVINFO *OMAPLFBGetDevInfoPtr(unsigned uiFBDevID)
 {
-       OMAPLFB_SWAPCHAIN *psSwapChain = psDevInfo->psSwapChain;
+       WARN_ON(uiFBDevID >= OMAPLFBMaxFBDevIDPlusOne());
 
-       if (psSwapChain == NULL)
+       if (uiFBDevID >= OMAPLFB_MAX_NUM_DEVICES)
        {
-               return;
+               return NULL;
        }
 
-       if (bFlushState)
-       {
-               if (psSwapChain->ulSetFlushStateRefCount == 0)
-               {
-                       OMAPLFBDisableVSyncInterrupt(psSwapChain);
-                       psSwapChain->bFlushCommands = OMAP_TRUE;
-                       FlushInternalVSyncQueue(psSwapChain);
-               }
-               psSwapChain->ulSetFlushStateRefCount++;
-       }
-       else
-       {
-               if (psSwapChain->ulSetFlushStateRefCount != 0)
-               {
-                       psSwapChain->ulSetFlushStateRefCount--;
-                       if (psSwapChain->ulSetFlushStateRefCount == 0)
-                       {
-                               psSwapChain->bFlushCommands = OMAP_FALSE;
-                               OMAPLFBEnableVSyncInterrupt(psSwapChain);
-                       }
-               }
-       }
+       return gapsDevInfo[uiFBDevID];
 }
 
-static IMG_VOID SetFlushStateInternal(OMAPLFB_DEVINFO* psDevInfo,
-                                      OMAP_BOOL bFlushState)
+static inline void OMAPLFBSetDevInfoPtr(unsigned uiFBDevID, OMAPLFB_DEVINFO *psDevInfo)
 {
-       unsigned long ulLockFlags;
-
-       spin_lock_irqsave(&psDevInfo->sSwapChainLock, ulLockFlags);
+       WARN_ON(uiFBDevID >= OMAPLFB_MAX_NUM_DEVICES);
 
-       SetFlushStateInternalNoLock(psDevInfo, bFlushState);
-
-       spin_unlock_irqrestore(&psDevInfo->sSwapChainLock, ulLockFlags);
+       if (uiFBDevID < OMAPLFB_MAX_NUM_DEVICES)
+       {
+               gapsDevInfo[uiFBDevID] = psDevInfo;
+       }
 }
 
-static void SetFlushStateExternal(OMAPLFB_DEVINFO* psDevInfo,
-                                  OMAP_BOOL bFlushState)
+static inline OMAPLFB_BOOL SwapChainHasChanged(OMAPLFB_DEVINFO *psDevInfo, OMAPLFB_SWAPCHAIN *psSwapChain)
 {
-       unsigned long ulLockFlags;
+       return (psDevInfo->psSwapChain != psSwapChain) ||
+               (psDevInfo->uiSwapChainID != psSwapChain->uiSwapChainID);
+}
 
-       spin_lock_irqsave(&psDevInfo->sSwapChainLock, ulLockFlags);
+static inline OMAPLFB_BOOL DontWaitForVSync(OMAPLFB_DEVINFO *psDevInfo)
+{
+       OMAPLFB_BOOL bDontWait;
 
-       
-       if (psDevInfo->bFlushCommands != bFlushState)
-       {
-               psDevInfo->bFlushCommands = bFlushState;
-               SetFlushStateInternalNoLock(psDevInfo, bFlushState);
-       }
+       bDontWait = OMAPLFBAtomicBoolRead(&psDevInfo->sBlanked) ||
+                       OMAPLFBAtomicBoolRead(&psDevInfo->sFlushCommands);
 
-       spin_unlock_irqrestore(&psDevInfo->sSwapChainLock, ulLockFlags);
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+       bDontWait = bDontWait || OMAPLFBAtomicBoolRead(&psDevInfo->sEarlySuspendFlag);
+#endif
+#if defined(SUPPORT_DRI_DRM)
+       bDontWait = bDontWait || OMAPLFBAtomicBoolRead(&psDevInfo->sLeaveVT);
+#endif
+       return bDontWait;
 }
 
 static IMG_VOID SetDCState(IMG_HANDLE hDevice, IMG_UINT32 ui32State)
@@ -180,196 +133,68 @@ static IMG_VOID SetDCState(IMG_HANDLE hDevice, IMG_UINT32 ui32State)
        switch (ui32State)
        {
                case DC_STATE_FLUSH_COMMANDS:
-                       SetFlushStateExternal(psDevInfo, OMAP_TRUE);
+                       OMAPLFBAtomicBoolSet(&psDevInfo->sFlushCommands, OMAPLFB_TRUE);
                        break;
                case DC_STATE_NO_FLUSH_COMMANDS:
-                       SetFlushStateExternal(psDevInfo, OMAP_FALSE);
+                       OMAPLFBAtomicBoolSet(&psDevInfo->sFlushCommands, OMAPLFB_FALSE);
                        break;
                default:
                        break;
        }
-
-       return;
 }
 
-static int FrameBufferEvents(struct notifier_block *psNotif,
-                             unsigned long event, void *data)
+static PVRSRV_ERROR OpenDCDevice(IMG_UINT32 uiPVRDevID,
+                                 IMG_HANDLE *phDevice,
+                                 PVRSRV_SYNC_DATA* psSystemBufferSyncData)
 {
        OMAPLFB_DEVINFO *psDevInfo;
-       OMAPLFB_SWAPCHAIN *psSwapChain;
-       struct fb_event *psFBEvent = (struct fb_event *)data;
-       OMAP_BOOL bBlanked;
-
-       
-       if (event != FB_EVENT_BLANK)
-       {
-               return 0;
-       }
-
-       psDevInfo = GetAnchorPtr();
-       psSwapChain = psDevInfo->psSwapChain;
-
-       bBlanked = (*(IMG_INT *)psFBEvent->data != 0) ? OMAP_TRUE: OMAP_FALSE;
+       OMAPLFB_ERROR eError;
+       unsigned uiMaxFBDevIDPlusOne = OMAPLFBMaxFBDevIDPlusOne();
+       unsigned i;
 
-       if (bBlanked != psSwapChain->bBlanked)
+       for (i = 0; i < uiMaxFBDevIDPlusOne; i++)
        {
-               psSwapChain->bBlanked = bBlanked;
-
-               if (bBlanked)
+               psDevInfo = OMAPLFBGetDevInfoPtr(i);
+               if (psDevInfo != NULL && psDevInfo->uiPVRDevID == uiPVRDevID)
                {
-                       
-                       SetFlushStateInternal(psDevInfo, OMAP_TRUE);
-               }
-               else
-               {
-                       
-                       SetFlushStateInternal(psDevInfo, OMAP_FALSE);
+                       break;
                }
        }
-
-       return 0;
-}
-
-
-static OMAP_ERROR UnblankDisplay(OMAPLFB_DEVINFO *psDevInfo)
-{
-       int res;
-#ifdef FBDEV_PRESENT
-
-       acquire_console_sem();
-       res = fb_blank(psDevInfo->psLINFBInfo, 0);
-       release_console_sem();
-#if !defined (CONFIG_OMAP2_DSS)
-       if (res != 0 && res != -EINVAL)
-       {
-               printk(KERN_WARNING DRIVER_PREFIX
-                       ": fb_blank failed (%d)", res);
-               return (OMAP_ERROR_GENERIC);
-       }
-#endif
-#endif
-       return (OMAP_OK);
-}
-
-#if defined (CONFIG_OMAP2_DSS)
-#include <linux/workqueue.h>
-struct wq_flip {
-        struct fb_var_screeninfo var;
-            struct fb_info *psLINFBInfo;
-        struct work_struct work;
-};
-struct wq_flip wq_flipdss2;
-
-static void dss2_pan_display (struct work_struct *work)
-{
-    struct wq_flip *ptrwq_flip =
-            container_of(work, struct wq_flip, work);
-    if (ptrwq_flip->psLINFBInfo->fbops->fb_pan_display != NULL) {
-        ptrwq_flip->psLINFBInfo->fbops->fb_pan_display (&ptrwq_flip->var, ptrwq_flip->psLINFBInfo);
-
-    }
-
-}
-
-/*
- *          Flip implementation for DSS2 using fb_pan_display
- *          */
-IMG_VOID OMAPLFBFlipDSS2(OMAPLFB_SWAPCHAIN *psSwapChain,
-                                                  IMG_UINT32 aPhyAddr)
-{
-        OMAPLFB_DEVINFO *psDevInfo = GetAnchorPtr ();
-        struct fb_info *psLINFBInfo = psDevInfo->psLINFBInfo;
-        memcpy ( &wq_flipdss2.var, &psLINFBInfo->var, sizeof(struct fb_var_screeninfo));
-    wq_flipdss2.var.yoffset = (aPhyAddr-psLINFBInfo->fix.smem_start)/psLINFBInfo->fix.line_length;
-        wq_flipdss2.psLINFBInfo = psLINFBInfo;
-        schedule_work (&wq_flipdss2.work);
-}
-#endif
-
-
-
-static OMAP_ERROR EnableLFBEventNotification(OMAPLFB_DEVINFO *psDevInfo)
-{
-       int                res;
-       OMAPLFB_SWAPCHAIN *psSwapChain = psDevInfo->psSwapChain;
-       OMAP_ERROR         eError;
-
-       
-       memset(&psDevInfo->sLINNotifBlock, 0, sizeof(psDevInfo->sLINNotifBlock));
-
-       psDevInfo->sLINNotifBlock.notifier_call = FrameBufferEvents;
-
-       psSwapChain->bBlanked = OMAP_FALSE;
-
-       res = fb_register_client(&psDevInfo->sLINNotifBlock);
-       if (res != 0)
-       {
-               printk(KERN_WARNING DRIVER_PREFIX
-                       ": fb_register_client failed (%d)", res);
-
-               return (OMAP_ERROR_GENERIC);
-       }
-
-       eError = UnblankDisplay(psDevInfo);
-       if (eError != OMAP_OK)
+       if (i == uiMaxFBDevIDPlusOne)
        {
                DEBUG_PRINTK((KERN_WARNING DRIVER_PREFIX
-                       ": UnblankDisplay failed (%d)", eError));
-               return eError;
-       }
-
-       return (OMAP_OK);
-}
-
-static OMAP_ERROR DisableLFBEventNotification(OMAPLFB_DEVINFO *psDevInfo)
-{
-       int res;
-
-       
-       res = fb_unregister_client(&psDevInfo->sLINNotifBlock);
-       if (res != 0)
-       {
-               printk(KERN_WARNING DRIVER_PREFIX
-                       ": fb_unregister_client failed (%d)", res);
-               return (OMAP_ERROR_GENERIC);
+                       ": %s: PVR Device %u not found\n", __FUNCTION__, uiPVRDevID));
+               return PVRSRV_ERROR_INVALID_DEVICE;
        }
 
-       return (OMAP_OK);
-}
-
-static PVRSRV_ERROR OpenDCDevice(IMG_UINT32 ui32DeviceID,
-                                 IMG_HANDLE *phDevice,
-                                 PVRSRV_SYNC_DATA* psSystemBufferSyncData)
-{
-       OMAPLFB_DEVINFO *psDevInfo;
-       OMAP_ERROR eError;
-
-       UNREFERENCED_PARAMETER(ui32DeviceID);
-
-       psDevInfo = GetAnchorPtr();
-
        
        psDevInfo->sSystemBuffer.psSyncData = psSystemBufferSyncData;
        
-       eError = UnblankDisplay(psDevInfo);
-       if (eError != OMAP_OK)
+       eError = OMAPLFBUnblankDisplay(psDevInfo);
+       if (eError != OMAPLFB_OK)
        {
                DEBUG_PRINTK((KERN_WARNING DRIVER_PREFIX
-                       ": UnblankDisplay failed (%d)", eError));
-               return (PVRSRV_ERROR_UNBLANK_DISPLAY_FAILED);
+                       ": %s: Device %u: OMAPLFBUnblankDisplay failed (%d)\n", __FUNCTION__, psDevInfo->uiFBDevID, eError));
+               return PVRSRV_ERROR_UNBLANK_DISPLAY_FAILED;
        }
 
        
        *phDevice = (IMG_HANDLE)psDevInfo;
        
-       return (PVRSRV_OK);
+       return PVRSRV_OK;
 }
 
 static PVRSRV_ERROR CloseDCDevice(IMG_HANDLE hDevice)
 {
-       UNREFERENCED_PARAMETER(hDevice);
+#if defined(SUPPORT_DRI_DRM)
+       OMAPLFB_DEVINFO *psDevInfo = (OMAPLFB_DEVINFO *)hDevice;
 
-       return (PVRSRV_OK);
+       OMAPLFBAtomicBoolSet(&psDevInfo->sLeaveVT, OMAPLFB_FALSE);
+       (void) OMAPLFBUnblankDisplay(psDevInfo);
+#else
+       UNREFERENCED_PARAMETER(hDevice);
+#endif
+       return PVRSRV_OK;
 }
 
 static PVRSRV_ERROR EnumDCFormats(IMG_HANDLE hDevice,
@@ -380,7 +205,7 @@ static PVRSRV_ERROR EnumDCFormats(IMG_HANDLE hDevice,
        
        if(!hDevice || !pui32NumFormats)
        {
-               return (PVRSRV_ERROR_INVALID_PARAMS);
+               return PVRSRV_ERROR_INVALID_PARAMS;
        }
 
        psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
@@ -392,7 +217,7 @@ static PVRSRV_ERROR EnumDCFormats(IMG_HANDLE hDevice,
                psFormat[0] = psDevInfo->sDisplayFormat;
        }
 
-       return (PVRSRV_OK);
+       return PVRSRV_OK;
 }
 
 static PVRSRV_ERROR EnumDCDims(IMG_HANDLE hDevice, 
@@ -404,7 +229,7 @@ static PVRSRV_ERROR EnumDCDims(IMG_HANDLE hDevice,
 
        if(!hDevice || !psFormat || !pui32NumDims)
        {
-               return (PVRSRV_ERROR_INVALID_PARAMS);
+               return PVRSRV_ERROR_INVALID_PARAMS;
        }
 
        psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
@@ -417,7 +242,7 @@ static PVRSRV_ERROR EnumDCDims(IMG_HANDLE hDevice,
                psDim[0] = psDevInfo->sDisplayDim;
        }
        
-       return (PVRSRV_OK);
+       return PVRSRV_OK;
 }
 
 
@@ -427,14 +252,14 @@ static PVRSRV_ERROR GetDCSystemBuffer(IMG_HANDLE hDevice, IMG_HANDLE *phBuffer)
        
        if(!hDevice || !phBuffer)
        {
-               return (PVRSRV_ERROR_INVALID_PARAMS);
+               return PVRSRV_ERROR_INVALID_PARAMS;
        }
 
        psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
 
        *phBuffer = (IMG_HANDLE)&psDevInfo->sSystemBuffer;
 
-       return (PVRSRV_OK);
+       return PVRSRV_OK;
 }
 
 
@@ -444,14 +269,14 @@ static PVRSRV_ERROR GetDCInfo(IMG_HANDLE hDevice, DISPLAY_INFO *psDCInfo)
        
        if(!hDevice || !psDCInfo)
        {
-               return (PVRSRV_ERROR_INVALID_PARAMS);
+               return PVRSRV_ERROR_INVALID_PARAMS;
        }
 
        psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
 
        *psDCInfo = psDevInfo->sDisplayInfo;
 
-       return (PVRSRV_OK);
+       return PVRSRV_OK;
 }
 
 static PVRSRV_ERROR GetDCBufferAddr(IMG_HANDLE        hDevice,
@@ -470,28 +295,30 @@ static PVRSRV_ERROR GetDCBufferAddr(IMG_HANDLE        hDevice,
 
        if(!hDevice)
        {
-               return (PVRSRV_ERROR_INVALID_PARAMS);
+               return PVRSRV_ERROR_INVALID_PARAMS;
        }
-       psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
-       
+
        if(!hBuffer)
        {
-               return (PVRSRV_ERROR_INVALID_PARAMS);
+               return PVRSRV_ERROR_INVALID_PARAMS;
        }
-       psSystemBuffer = (OMAPLFB_BUFFER *)hBuffer;
 
        if (!ppsSysAddr)
        {
-               return (PVRSRV_ERROR_INVALID_PARAMS);
+               return PVRSRV_ERROR_INVALID_PARAMS;
        }
 
-       *ppsSysAddr = &psSystemBuffer->sSysAddr;
-
        if (!pui32ByteSize)
        {
-               return (PVRSRV_ERROR_INVALID_PARAMS);
+               return PVRSRV_ERROR_INVALID_PARAMS;
        }
 
+       psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
+
+       psSystemBuffer = (OMAPLFB_BUFFER *)hBuffer;
+
+       *ppsSysAddr = &psSystemBuffer->sSysAddr;
+
        *pui32ByteSize = (IMG_UINT32)psDevInfo->sFBInfo.ulBufferSize;
 
        if (ppvCpuVAddr)
@@ -509,7 +336,7 @@ static PVRSRV_ERROR GetDCBufferAddr(IMG_HANDLE        hDevice,
                *pbIsContiguous = IMG_TRUE;
        }
 
-       return (PVRSRV_OK);
+       return PVRSRV_OK;
 }
 
 static PVRSRV_ERROR CreateDCSwapChain(IMG_HANDLE hDevice,
@@ -525,14 +352,11 @@ static PVRSRV_ERROR CreateDCSwapChain(IMG_HANDLE hDevice,
        OMAPLFB_DEVINFO *psDevInfo;
        OMAPLFB_SWAPCHAIN *psSwapChain;
        OMAPLFB_BUFFER *psBuffer;
-       OMAPLFB_VSYNC_FLIP_ITEM *psVSyncFlips;
        IMG_UINT32 i;
-       PVRSRV_ERROR eError = PVRSRV_ERROR_NOT_SUPPORTED;
-       unsigned long ulLockFlags;
+       PVRSRV_ERROR eError;
        IMG_UINT32 ui32BuffersToSkip;
 
        UNREFERENCED_PARAMETER(ui32OEMFlags);
-       UNREFERENCED_PARAMETER(pui32SwapChainID);
        
        
        if(!hDevice
@@ -541,7 +365,7 @@ static PVRSRV_ERROR CreateDCSwapChain(IMG_HANDLE hDevice,
        || !ppsSyncData
        || !phSwapChain)
        {
-               return (PVRSRV_ERROR_INVALID_PARAMS);
+               return PVRSRV_ERROR_INVALID_PARAMS;
        }
 
        psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
@@ -549,24 +373,29 @@ static PVRSRV_ERROR CreateDCSwapChain(IMG_HANDLE hDevice,
        
        if (psDevInfo->sDisplayInfo.ui32MaxSwapChains == 0)
        {
-               return (PVRSRV_ERROR_NOT_SUPPORTED);
+               return PVRSRV_ERROR_NOT_SUPPORTED;
        }
 
+       OMAPLFBCreateSwapChainLock(psDevInfo);
+
        
        if(psDevInfo->psSwapChain != NULL)
        {
-               return (PVRSRV_ERROR_FLIP_CHAIN_EXISTS);
+               eError = PVRSRV_ERROR_FLIP_CHAIN_EXISTS;
+               goto ExitUnLock;
        }
        
        
        if(ui32BufferCount > psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers)
        {
-               return (PVRSRV_ERROR_TOOMANYBUFFERS);
+               eError = PVRSRV_ERROR_TOOMANYBUFFERS;
+               goto ExitUnLock;
        }
        
        if ((psDevInfo->sFBInfo.ulRoundedBufferSize * (unsigned long)ui32BufferCount) > psDevInfo->sFBInfo.ulFBSize)
        {
-               return (PVRSRV_ERROR_TOOMANYBUFFERS);
+               eError = PVRSRV_ERROR_TOOMANYBUFFERS;
+               goto ExitUnLock;
        }
 
        
@@ -579,7 +408,8 @@ static PVRSRV_ERROR CreateDCSwapChain(IMG_HANDLE hDevice,
        || psDstSurfAttrib->sDims.ui32Height != psDevInfo->sDisplayDim.ui32Height)
        {
                
-               return (PVRSRV_ERROR_INVALID_PARAMS);
+               eError = PVRSRV_ERROR_INVALID_PARAMS;
+               goto ExitUnLock;
        }               
 
        if(psDstSurfAttrib->pixelformat != psSrcSurfAttrib->pixelformat
@@ -588,17 +418,25 @@ static PVRSRV_ERROR CreateDCSwapChain(IMG_HANDLE hDevice,
        || psDstSurfAttrib->sDims.ui32Height != psSrcSurfAttrib->sDims.ui32Height)
        {
                
-               return (PVRSRV_ERROR_INVALID_PARAMS);
+               eError = PVRSRV_ERROR_INVALID_PARAMS;
+               goto ExitUnLock;
        }               
 
        
        UNREFERENCED_PARAMETER(ui32Flags);
        
+#if defined(PVR_OMAPFB3_UPDATE_MODE)
+       if (!OMAPLFBSetUpdateMode(psDevInfo, PVR_OMAPFB3_UPDATE_MODE))
+       {
+               printk(KERN_WARNING DRIVER_PREFIX ": %s: Device %u: Couldn't set frame buffer update mode %d\n", __FUNCTION__, psDevInfo->uiFBDevID, PVR_OMAPFB3_UPDATE_MODE);
+       }
+#endif
        
        psSwapChain = (OMAPLFB_SWAPCHAIN*)OMAPLFBAllocKernelMem(sizeof(OMAPLFB_SWAPCHAIN));
        if(!psSwapChain)
        {
-               return (PVRSRV_ERROR_OUT_OF_MEMORY);
+               eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+               goto ExitUnLock;
        }
 
        psBuffer = (OMAPLFB_BUFFER*)OMAPLFBAllocKernelMem(sizeof(OMAPLFB_BUFFER) * ui32BufferCount);
@@ -608,20 +446,10 @@ static PVRSRV_ERROR CreateDCSwapChain(IMG_HANDLE hDevice,
                goto ErrorFreeSwapChain;
        }
 
-       psVSyncFlips = (OMAPLFB_VSYNC_FLIP_ITEM *)OMAPLFBAllocKernelMem(sizeof(OMAPLFB_VSYNC_FLIP_ITEM) * ui32BufferCount);
-       if (!psVSyncFlips)
-       {
-               eError = PVRSRV_ERROR_OUT_OF_MEMORY;
-               goto ErrorFreeBuffers;
-       }
-
        psSwapChain->ulBufferCount = (unsigned long)ui32BufferCount;
        psSwapChain->psBuffer = psBuffer;
-       psSwapChain->psVSyncFlips = psVSyncFlips;
-       psSwapChain->ulInsertIndex = 0;
-       psSwapChain->ulRemoveIndex = 0;
-       psSwapChain->psPVRJTable = &psDevInfo->sPVRJTable;
-       psSwapChain->psSwapChainLock = &psDevInfo->sSwapChainLock;
+       psSwapChain->bNotVSynced = OMAPLFB_TRUE;
+       psSwapChain->uiFBDevID = psDevInfo->uiFBDevID;
 
        
        for(i=0; i<ui32BufferCount-1; i++)
@@ -641,84 +469,51 @@ static PVRSRV_ERROR CreateDCSwapChain(IMG_HANDLE hDevice,
 
                psBuffer[i].sSysAddr.uiAddr = psDevInfo->sFBInfo.sSysAddr.uiAddr + ui32BufferOffset;
                psBuffer[i].sCPUVAddr = psDevInfo->sFBInfo.sCPUVAddr + ui32BufferOffset;
-       }
+               psBuffer[i].ulYOffset = ui32BufferOffset / psDevInfo->sFBInfo.ulByteStride;
+               psBuffer[i].psDevInfo = psDevInfo;
 
-       
-       for(i=0; i<ui32BufferCount; i++)
-       {
-               psVSyncFlips[i].bValid = OMAP_FALSE;
-               psVSyncFlips[i].bFlipped = OMAP_FALSE;
-               psVSyncFlips[i].bCmdCompleted = OMAP_FALSE;
+               OMAPLFBInitBufferForSwap(&psBuffer[i]);
        }
 
-#if defined (SUPPORT_TI_DSS_FW)
-       OMAPLFBEnableDisplayRegisterAccess();
-
-       
-       psSwapChain->pvRegs = ioremap(psDevInfo->psLINFBInfo->fix.mmio_start, psDevInfo->psLINFBInfo->fix.mmio_len);
-       if (psSwapChain->pvRegs == NULL)
-       {
-               eError = PVRSRV_ERROR_BAD_MAPPING;
-               printk(KERN_WARNING DRIVER_PREFIX ": Couldn't map registers needed for flipping\n");
-               goto ErrorDisableDisplayRegisters;
-       }
-#endif
-       if (OMAPLFBInstallVSyncISR(psSwapChain) != OMAP_OK)
+       if (OMAPLFBCreateSwapQueue(psSwapChain) != OMAPLFB_OK)
        { 
+               printk(KERN_WARNING DRIVER_PREFIX ": %s: Device %u: Failed to create workqueue\n", __FUNCTION__, psDevInfo->uiFBDevID);
                eError = PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR;
-               printk(KERN_WARNING DRIVER_PREFIX ": ISR handler failed to register\n");
-               goto ErrorUnmapRegisters;
+               goto ErrorFreeBuffers;
        }
 
-       spin_lock_irqsave(&psDevInfo->sSwapChainLock, ulLockFlags);
-
-       
-       psDevInfo->psSwapChain = psSwapChain;
-
-       
-       psSwapChain->bFlushCommands = psDevInfo->bFlushCommands;
-
-       if (psSwapChain->bFlushCommands)
+       if (OMAPLFBEnableLFBEventNotification(psDevInfo)!= OMAPLFB_OK)
        {
-               psSwapChain->ulSetFlushStateRefCount = 1;
-       }
-       else
-       {
-               psSwapChain->ulSetFlushStateRefCount = 0;
-               OMAPLFBEnableVSyncInterrupt(psSwapChain);
+               eError = PVRSRV_ERROR_UNABLE_TO_ENABLE_EVENT;
+               printk(KERN_WARNING DRIVER_PREFIX ": %s: Device %u: Couldn't enable framebuffer event notification\n", __FUNCTION__, psDevInfo->uiFBDevID);
+               goto ErrorDestroySwapQueue;
        }
-               
-       spin_unlock_irqrestore(&psDevInfo->sSwapChainLock, ulLockFlags);
 
-       if (EnableLFBEventNotification(psDevInfo)!= OMAP_OK)
+       psDevInfo->uiSwapChainID++;
+       if (psDevInfo->uiSwapChainID == 0)
        {
-               eError = PVRSRV_ERROR_UNABLE_TO_ENABLE_EVENT;
-               printk(KERN_WARNING DRIVER_PREFIX ": Couldn't enable framebuffer event notification\n");
-               goto ErrorUninstallVSyncInterrupt;
+               psDevInfo->uiSwapChainID++;
        }
 
-       
+       psSwapChain->uiSwapChainID = psDevInfo->uiSwapChainID;
+
+       psDevInfo->psSwapChain = psSwapChain;
+
+       *pui32SwapChainID = psDevInfo->uiSwapChainID;
+
        *phSwapChain = (IMG_HANDLE)psSwapChain;
 
-       return (PVRSRV_OK);
+       eError = PVRSRV_OK;
+       goto ExitUnLock;
 
-ErrorUninstallVSyncInterrupt:
-       if(OMAPLFBUninstallVSyncISR(psSwapChain) != OMAP_OK)
-       {
-               printk(KERN_WARNING DRIVER_PREFIX ": Couldn't uninstall VSync ISR\n");
-       }
-ErrorUnmapRegisters:
-#if defined (SUPPORT_TI_DSS_FW)
-       iounmap(psSwapChain->pvRegs);
-ErrorDisableDisplayRegisters:
-       OMAPLFBDisableDisplayRegisterAccess();
-#endif
-       OMAPLFBFreeKernelMem(psVSyncFlips);
+ErrorDestroySwapQueue:
+       OMAPLFBDestroySwapQueue(psSwapChain);
 ErrorFreeBuffers:
        OMAPLFBFreeKernelMem(psBuffer);
 ErrorFreeSwapChain:
        OMAPLFBFreeKernelMem(psSwapChain);
-
+ExitUnLock:
+       OMAPLFBCreateSwapChainUnLock(psDevInfo);
        return eError;
 }
 
@@ -727,59 +522,52 @@ static PVRSRV_ERROR DestroyDCSwapChain(IMG_HANDLE hDevice,
 {
        OMAPLFB_DEVINFO *psDevInfo;
        OMAPLFB_SWAPCHAIN *psSwapChain;
-       unsigned long ulLockFlags;
-       OMAP_ERROR eError;
+       OMAPLFB_ERROR eError;
 
+       
        if(!hDevice || !hSwapChain)
        {
-               return (PVRSRV_ERROR_INVALID_PARAMS);
+               return PVRSRV_ERROR_INVALID_PARAMS;
        }
        
        psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
        psSwapChain = (OMAPLFB_SWAPCHAIN*)hSwapChain;
-       if (psSwapChain != psDevInfo->psSwapChain)
-       {
-               return (PVRSRV_ERROR_INVALID_PARAMS);
-       }
-
-       eError = DisableLFBEventNotification(psDevInfo);
-       if (eError != OMAP_OK)
-       {
-               printk(KERN_WARNING DRIVER_PREFIX ": Couldn't disable framebuffer event notification\n");
-       }
 
-       spin_lock_irqsave(&psDevInfo->sSwapChainLock, ulLockFlags);
+       OMAPLFBCreateSwapChainLock(psDevInfo);
 
-       OMAPLFBDisableVSyncInterrupt(psSwapChain);
-
-       
-       FlushInternalVSyncQueue(psSwapChain);
+       if (SwapChainHasChanged(psDevInfo, psSwapChain))
+       {
+               printk(KERN_WARNING DRIVER_PREFIX
+                       ": %s: Device %u: Swap chain mismatch\n", __FUNCTION__, psDevInfo->uiFBDevID);
 
-       
-       OMAPLFBFlip(psSwapChain, (unsigned long)psDevInfo->sFBInfo.sSysAddr.uiAddr);
+               eError = PVRSRV_ERROR_INVALID_PARAMS;
+               goto ExitUnLock;
+       }
 
        
-       psDevInfo->psSwapChain = NULL;
+       OMAPLFBDestroySwapQueue(psSwapChain);
 
-       spin_unlock_irqrestore(&psDevInfo->sSwapChainLock, ulLockFlags);
-
-       if(OMAPLFBUninstallVSyncISR(psSwapChain) != OMAP_OK)
+       eError = OMAPLFBDisableLFBEventNotification(psDevInfo);
+       if (eError != OMAPLFB_OK)
        {
-               printk(KERN_WARNING DRIVER_PREFIX ": Couldn't uninstall VSync ISR\n");
-               return (PVRSRV_ERROR_UNABLE_TO_UNINSTALL_ISR);
+               printk(KERN_WARNING DRIVER_PREFIX ": %s: Device %u: Couldn't disable framebuffer event notification\n", __FUNCTION__, psDevInfo->uiFBDevID);
        }
 
-#if defined (SUPPORT_TI_DSS_FW)        
-       iounmap(psSwapChain->pvRegs);
-
-       OMAPLFBDisableDisplayRegisterAccess();
-#endif
        
-       OMAPLFBFreeKernelMem(psSwapChain->psVSyncFlips);
        OMAPLFBFreeKernelMem(psSwapChain->psBuffer);
        OMAPLFBFreeKernelMem(psSwapChain);
 
-       return (PVRSRV_OK);
+       psDevInfo->psSwapChain = NULL;
+
+       OMAPLFBFlip(psDevInfo, &psDevInfo->sSystemBuffer);
+       (void) OMAPLFBCheckModeAndSync(psDevInfo);
+
+       eError = PVRSRV_OK;
+
+ExitUnLock:
+       OMAPLFBCreateSwapChainUnLock(psDevInfo);
+
+       return eError;
 }
 
 static PVRSRV_ERROR SetDCDstRect(IMG_HANDLE hDevice,
@@ -792,7 +580,7 @@ static PVRSRV_ERROR SetDCDstRect(IMG_HANDLE hDevice,
 
        
        
-       return (PVRSRV_ERROR_NOT_SUPPORTED);
+       return PVRSRV_ERROR_NOT_SUPPORTED;
 }
 
 static PVRSRV_ERROR SetDCSrcRect(IMG_HANDLE hDevice,
@@ -805,7 +593,7 @@ static PVRSRV_ERROR SetDCSrcRect(IMG_HANDLE hDevice,
 
        
 
-       return (PVRSRV_ERROR_NOT_SUPPORTED);
+       return PVRSRV_ERROR_NOT_SUPPORTED;
 }
 
 static PVRSRV_ERROR SetDCDstColourKey(IMG_HANDLE hDevice,
@@ -818,7 +606,7 @@ static PVRSRV_ERROR SetDCDstColourKey(IMG_HANDLE hDevice,
 
        
 
-       return (PVRSRV_ERROR_NOT_SUPPORTED);
+       return PVRSRV_ERROR_NOT_SUPPORTED;
 }
 
 static PVRSRV_ERROR SetDCSrcColourKey(IMG_HANDLE hDevice,
@@ -831,7 +619,7 @@ static PVRSRV_ERROR SetDCSrcColourKey(IMG_HANDLE hDevice,
 
        
 
-       return (PVRSRV_ERROR_NOT_SUPPORTED);
+       return PVRSRV_ERROR_NOT_SUPPORTED;
 }
 
 static PVRSRV_ERROR GetDCBuffers(IMG_HANDLE hDevice,
@@ -841,7 +629,8 @@ static PVRSRV_ERROR GetDCBuffers(IMG_HANDLE hDevice,
 {
        OMAPLFB_DEVINFO   *psDevInfo;
        OMAPLFB_SWAPCHAIN *psSwapChain;
-       unsigned long      i;
+       PVRSRV_ERROR eError;
+       unsigned i;
        
        
        if(!hDevice 
@@ -849,14 +638,21 @@ static PVRSRV_ERROR GetDCBuffers(IMG_HANDLE hDevice,
        || !pui32BufferCount
        || !phBuffer)
        {
-               return (PVRSRV_ERROR_INVALID_PARAMS);
+               return PVRSRV_ERROR_INVALID_PARAMS;
        }
        
        psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
        psSwapChain = (OMAPLFB_SWAPCHAIN*)hSwapChain;
-       if (psSwapChain != psDevInfo->psSwapChain)
+
+       OMAPLFBCreateSwapChainLock(psDevInfo);
+
+       if (SwapChainHasChanged(psDevInfo, psSwapChain))
        {
-               return (PVRSRV_ERROR_INVALID_PARAMS);
+               printk(KERN_WARNING DRIVER_PREFIX
+                       ": %s: Device %u: Swap chain mismatch\n", __FUNCTION__, psDevInfo->uiFBDevID);
+
+               eError = PVRSRV_ERROR_INVALID_PARAMS;
+               goto Exit;
        }
        
        
@@ -868,7 +664,12 @@ static PVRSRV_ERROR GetDCBuffers(IMG_HANDLE hDevice,
                phBuffer[i] = (IMG_HANDLE)&psSwapChain->psBuffer[i];
        }
        
-       return (PVRSRV_OK);
+       eError = PVRSRV_OK;
+
+Exit:
+       OMAPLFBCreateSwapChainUnLock(psDevInfo);
+
+       return eError;
 }
 
 static PVRSRV_ERROR SwapToDCBuffer(IMG_HANDLE hDevice,
@@ -878,137 +679,92 @@ static PVRSRV_ERROR SwapToDCBuffer(IMG_HANDLE hDevice,
                                    IMG_UINT32 ui32ClipRectCount,
                                    IMG_RECT *psClipRect)
 {
-       OMAPLFB_DEVINFO *psDevInfo;
-
+       UNREFERENCED_PARAMETER(hDevice);
+       UNREFERENCED_PARAMETER(hBuffer);
        UNREFERENCED_PARAMETER(ui32SwapInterval);
        UNREFERENCED_PARAMETER(hPrivateTag);
+       UNREFERENCED_PARAMETER(ui32ClipRectCount);
        UNREFERENCED_PARAMETER(psClipRect);
        
-       if(!hDevice 
-       || !hBuffer
-       || (ui32ClipRectCount != 0))
-       {
-               return (PVRSRV_ERROR_INVALID_PARAMS);
-       }
-
-       psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
-
        
-       return (PVRSRV_OK);
+
+       return PVRSRV_OK;
 }
 
 static PVRSRV_ERROR SwapToDCSystem(IMG_HANDLE hDevice,
                                    IMG_HANDLE hSwapChain)
 {
-       OMAPLFB_DEVINFO   *psDevInfo;
-       OMAPLFB_SWAPCHAIN *psSwapChain;
-       unsigned long      ulLockFlags;
-
-       if(!hDevice || !hSwapChain)
-       {
-               return (PVRSRV_ERROR_INVALID_PARAMS);
-       }
-
-       psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
-       psSwapChain = (OMAPLFB_SWAPCHAIN*)hSwapChain;
-       if (psSwapChain != psDevInfo->psSwapChain)
-       {
-               return (PVRSRV_ERROR_INVALID_PARAMS);
-       }
+       UNREFERENCED_PARAMETER(hDevice);
+       UNREFERENCED_PARAMETER(hSwapChain);
        
-       spin_lock_irqsave(&psDevInfo->sSwapChainLock, ulLockFlags);
-
        
-       FlushInternalVSyncQueue(psSwapChain);
-
-       
-       OMAPLFBFlip(psSwapChain, (unsigned long)psDevInfo->sFBInfo.sSysAddr.uiAddr);
-
-       spin_unlock_irqrestore(&psDevInfo->sSwapChainLock, ulLockFlags);
-
-       return (PVRSRV_OK);
+       return PVRSRV_OK;
 }
 
-OMAP_BOOL OMAPLFBVSyncIHandler(OMAPLFB_SWAPCHAIN *psSwapChain)
+static OMAPLFB_BOOL WaitForVSyncSettle(OMAPLFB_DEVINFO *psDevInfo)
 {
-       OMAP_BOOL bStatus = OMAP_FALSE;
-       OMAPLFB_VSYNC_FLIP_ITEM *psFlipItem;
-       unsigned long ulMaxIndex;
-       unsigned long ulLockFlags;
+               unsigned i;
+               for(i = 0; i < OMAPLFB_VSYNC_SETTLE_COUNT; i++)
+               {
+                       if (DontWaitForVSync(psDevInfo) || !OMAPLFBWaitForVSync(psDevInfo))
+                       {
+                               return OMAPLFB_FALSE;
+                       }
+               }
 
-       psFlipItem = &psSwapChain->psVSyncFlips[psSwapChain->ulRemoveIndex];
-       ulMaxIndex = psSwapChain->ulBufferCount - 1;
+               return OMAPLFB_TRUE;
+}
 
-       spin_lock_irqsave(psSwapChain->psSwapChainLock, ulLockFlags);
+void OMAPLFBSwapHandler(OMAPLFB_BUFFER *psBuffer)
+{
+       OMAPLFB_DEVINFO *psDevInfo = psBuffer->psDevInfo;
+       OMAPLFB_SWAPCHAIN *psSwapChain = psDevInfo->psSwapChain;
+       OMAPLFB_BOOL bPreviouslyNotVSynced;
 
-       
-       if (psSwapChain->bFlushCommands)
+#if defined(SUPPORT_DRI_DRM)
+       if (!OMAPLFBAtomicBoolRead(&psDevInfo->sLeaveVT))
+#endif
        {
-               goto ExitUnlock;
+               OMAPLFBFlip(psDevInfo, psBuffer);
        }
 
-       while(psFlipItem->bValid)
-       {       
-               
-               if(psFlipItem->bFlipped)
-               {
-                       
-                       if(!psFlipItem->bCmdCompleted)
-                       {
-                               
-                               psSwapChain->psPVRJTable->pfnPVRSRVCmdComplete((IMG_HANDLE)psFlipItem->hCmdComplete, IMG_TRUE);
+       bPreviouslyNotVSynced = psSwapChain->bNotVSynced;
+       psSwapChain->bNotVSynced = OMAPLFB_TRUE;
 
-                               
-                               psFlipItem->bCmdCompleted = OMAP_TRUE;
-                       }
 
-                       
-                       psFlipItem->ulSwapInterval--;
+       if (!DontWaitForVSync(psDevInfo))
+       {
+               OMAPLFB_UPDATE_MODE eMode = OMAPLFBGetUpdateMode(psDevInfo);
+               int iBlankEvents = OMAPLFBAtomicIntRead(&psDevInfo->sBlankEvents);
 
-                       
-                       if(psFlipItem->ulSwapInterval == 0)
-                       {       
-                               
-                               psSwapChain->ulRemoveIndex++;
-                               
-                               if(psSwapChain->ulRemoveIndex > ulMaxIndex)
+               switch(eMode)
+               {
+                       case OMAPLFB_UPDATE_MODE_AUTO:
+                               psSwapChain->bNotVSynced = OMAPLFB_FALSE;
+
+                               if (bPreviouslyNotVSynced || psSwapChain->iBlankEvents != iBlankEvents)
+                               {
+                                       psSwapChain->iBlankEvents = iBlankEvents;
+                                       psSwapChain->bNotVSynced = !WaitForVSyncSettle(psDevInfo);
+                               } else if (psBuffer->ulSwapInterval != 0)
                                {
-                                       psSwapChain->ulRemoveIndex = 0;
+                                       psSwapChain->bNotVSynced = !OMAPLFBWaitForVSync(psDevInfo);
                                }
-                               
-                               
-                               psFlipItem->bCmdCompleted = OMAP_FALSE;
-                               psFlipItem->bFlipped = OMAP_FALSE;
-       
-                               
-                               psFlipItem->bValid = OMAP_FALSE;
-                       }
-                       else
-                       {
-                               
                                break;
-                       }
-               }
-               else
-               {
-                       
-                       OMAPLFBFlip(psSwapChain, (unsigned long)psFlipItem->sSysAddr);
-                       
-                       
-                       psFlipItem->bFlipped = OMAP_TRUE;
-                       
-                       
-                       break;
+#if defined(PVR_OMAPFB3_MANUAL_UPDATE_SYNC_IN_SWAP)
+                       case OMAPLFB_UPDATE_MODE_MANUAL:
+                               if (psBuffer->ulSwapInterval != 0)
+                               {
+                                       (void) OMAPLFBManualSync(psDevInfo);
+                               }
+                               break;
+#endif
+                       default:
+                               break;
                }
-               
-               
-               psFlipItem = &psSwapChain->psVSyncFlips[psSwapChain->ulRemoveIndex];
        }
-               
-ExitUnlock:
-       spin_unlock_irqrestore(psSwapChain->psSwapChainLock, ulLockFlags);
 
-       return bStatus;
+       psDevInfo->sPVRJTable.pfnPVRSRVCmdComplete((IMG_HANDLE)psBuffer->hCmdComplete, IMG_TRUE);
 }
 
 static IMG_BOOL ProcessFlip(IMG_HANDLE  hCmdCookie,
@@ -1019,10 +775,6 @@ static IMG_BOOL ProcessFlip(IMG_HANDLE  hCmdCookie,
        OMAPLFB_DEVINFO *psDevInfo;
        OMAPLFB_BUFFER *psBuffer;
        OMAPLFB_SWAPCHAIN *psSwapChain;
-#if defined(SYS_USING_INTERRUPTS)
-       OMAPLFB_VSYNC_FLIP_ITEM* psFlipItem;
-#endif
-       unsigned long ulLockFlags;
 
        
        if(!hCmdCookie || !pvData)
@@ -1040,102 +792,69 @@ static IMG_BOOL ProcessFlip(IMG_HANDLE  hCmdCookie,
 
        
        psDevInfo = (OMAPLFB_DEVINFO*)psFlipCmd->hExtDevice;
-       
        psBuffer = (OMAPLFB_BUFFER*)psFlipCmd->hExtBuffer;
        psSwapChain = (OMAPLFB_SWAPCHAIN*) psFlipCmd->hExtSwapChain;
 
-       spin_lock_irqsave(&psDevInfo->sSwapChainLock, ulLockFlags);
+       OMAPLFBCreateSwapChainLock(psDevInfo);
 
-       
-       if (psDevInfo->bDeviceSuspended)
-       {
-               psSwapChain->psPVRJTable->pfnPVRSRVCmdComplete(hCmdCookie, IMG_TRUE);
-               goto ExitTrueUnlock;
-       }
-
-#if defined(SYS_USING_INTERRUPTS)
-       
-       if(psFlipCmd->ui32SwapInterval == 0 || psSwapChain->bFlushCommands == OMAP_TRUE)
+       if (SwapChainHasChanged(psDevInfo, psSwapChain))
        {
-#endif
-               
-               OMAPLFBFlip(psSwapChain, (unsigned long)psBuffer->sSysAddr.uiAddr);
-
                
-               psSwapChain->psPVRJTable->pfnPVRSRVCmdComplete(hCmdCookie, IMG_TRUE);
-
-#if defined(SYS_USING_INTERRUPTS)
-               goto ExitTrueUnlock;
+               DEBUG_PRINTK((KERN_WARNING DRIVER_PREFIX
+                       ": %s: Device %u (PVR Device ID %u): The swap chain has been destroyed\n",
+                       __FUNCTION__, psDevInfo->uiFBDevID, psDevInfo->uiPVRDevID));
        }
-
-       psFlipItem = &psSwapChain->psVSyncFlips[psSwapChain->ulInsertIndex];
-
-       
-       if(psFlipItem->bValid == OMAP_FALSE)
+       else
        {
-               unsigned long ulMaxIndex = psSwapChain->ulBufferCount - 1;
-               
-               if(psSwapChain->ulInsertIndex == psSwapChain->ulRemoveIndex)
-               {
-                       
-                       OMAPLFBFlip(psSwapChain, (unsigned long)psBuffer->sSysAddr.uiAddr);
-
-                       psFlipItem->bFlipped = OMAP_TRUE;
-               }
-               else
-               {
-                       psFlipItem->bFlipped = OMAP_FALSE;
-               }
+               psBuffer->hCmdComplete = (OMAPLFB_HANDLE)hCmdCookie;
+               psBuffer->ulSwapInterval = (unsigned long)psFlipCmd->ui32SwapInterval;
 
-               psFlipItem->hCmdComplete = (OMAP_HANDLE)hCmdCookie;
-               psFlipItem->ulSwapInterval = (unsigned long)psFlipCmd->ui32SwapInterval;
-               psFlipItem->sSysAddr = &psBuffer->sSysAddr;
-               psFlipItem->bValid = OMAP_TRUE;
-
-               psSwapChain->ulInsertIndex++;
-               if(psSwapChain->ulInsertIndex > ulMaxIndex)
-               {
-                       psSwapChain->ulInsertIndex = 0;
-               }
-
-               goto ExitTrueUnlock;
+               OMAPLFBQueueBufferForSwap(psSwapChain, psBuffer);
        }
-       
-       spin_unlock_irqrestore(&psDevInfo->sSwapChainLock, ulLockFlags);
-       return IMG_FALSE;
-#endif
 
-ExitTrueUnlock:
-       spin_unlock_irqrestore(&psDevInfo->sSwapChainLock, ulLockFlags);
+       OMAPLFBCreateSwapChainUnLock(psDevInfo);
+
        return IMG_TRUE;
 }
 
 
-static OMAP_ERROR InitDev(OMAPLFB_DEVINFO *psDevInfo)
+static OMAPLFB_ERROR OMAPLFBInitFBDev(OMAPLFB_DEVINFO *psDevInfo)
 {
        struct fb_info *psLINFBInfo;
        struct module *psLINFBOwner;
        OMAPLFB_FBINFO *psPVRFBInfo = &psDevInfo->sFBInfo;
-       OMAP_ERROR eError = OMAP_ERROR_GENERIC;
+       OMAPLFB_ERROR eError = OMAPLFB_ERROR_GENERIC;
        unsigned long FBSize;
+       unsigned long ulLCM;
+       unsigned uiFBDevID = psDevInfo->uiFBDevID;
 
        acquire_console_sem();
 
-       if (fb_idx < 0 || fb_idx >= num_registered_fb)
+       psLINFBInfo = registered_fb[uiFBDevID];
+       if (psLINFBInfo == NULL)
        {
-               eError = OMAP_ERROR_INVALID_DEVICE;
-               goto errRelSem;
+               eError = OMAPLFB_ERROR_INVALID_DEVICE;
+               goto ErrorRelSem;
        }
 
-       psLINFBInfo = registered_fb[fb_idx];
+       FBSize = (psLINFBInfo->screen_size) != 0 ?
+                                       psLINFBInfo->screen_size :
+                                       psLINFBInfo->fix.smem_len;
+
+       
+       if (FBSize == 0 || psLINFBInfo->fix.line_length == 0)
+       {
+               eError = OMAPLFB_ERROR_INVALID_DEVICE;
+               goto ErrorRelSem;
+       }
 
        psLINFBOwner = psLINFBInfo->fbops->owner;
        if (!try_module_get(psLINFBOwner))
        {
                printk(KERN_INFO DRIVER_PREFIX
-                       ": Couldn't get framebuffer module\n");
+                       ": %s: Device %u: Couldn't get framebuffer module\n", __FUNCTION__, uiFBDevID);
 
-               goto errRelSem;
+               goto ErrorRelSem;
        }
 
        if (psLINFBInfo->fbops->fb_open != NULL)
@@ -1146,41 +865,43 @@ static OMAP_ERROR InitDev(OMAPLFB_DEVINFO *psDevInfo)
                if (res != 0)
                {
                        printk(KERN_INFO DRIVER_PREFIX
-                               ": Couldn't open framebuffer: %d\n", res);
+                               ": %s: Device %u: Couldn't open framebuffer(%d)\n", __FUNCTION__, uiFBDevID, res);
 
-                       goto errModPut;
+                       goto ErrorModPut;
                }
        }
 
        psDevInfo->psLINFBInfo = psLINFBInfo;
 
-       FBSize = (psLINFBInfo->screen_size) != 0 ?
-                                       psLINFBInfo->screen_size :
-                                       psLINFBInfo->fix.smem_len;
+       ulLCM = LCM(psLINFBInfo->fix.line_length, OMAPLFB_PAGE_SIZE);
+
        DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
-                       ": Framebuffer physical address: 0x%lx\n",
-                       psLINFBInfo->fix.smem_start));
+                       ": Device %u: Framebuffer physical address: 0x%lx\n",
+                       psDevInfo->uiFBDevID, psLINFBInfo->fix.smem_start));
        DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
-                       ": Framebuffer virtual address: 0x%lx\n",
-                       (unsigned long)psLINFBInfo->screen_base));
+                       ": Device %u: Framebuffer virtual address: 0x%lx\n",
+                       psDevInfo->uiFBDevID, (unsigned long)psLINFBInfo->screen_base));
        DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
-                       ": Framebuffer size: %lu\n",
-                       FBSize));
+                       ": Device %u: Framebuffer size: %lu\n",
+                       psDevInfo->uiFBDevID, FBSize));
        DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
-                       ": Framebuffer virtual width: %u\n",
-                       psLINFBInfo->var.xres_virtual));
+                       ": Device %u: Framebuffer virtual width: %u\n",
+                       psDevInfo->uiFBDevID, psLINFBInfo->var.xres_virtual));
        DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
-                       ": Framebuffer virtual height: %u\n",
-                       psLINFBInfo->var.yres_virtual));
+                       ": Device %u: Framebuffer virtual height: %u\n",
+                       psDevInfo->uiFBDevID, psLINFBInfo->var.yres_virtual));
        DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
-                       ": Framebuffer width: %u\n",
-                       psLINFBInfo->var.xres));
+                       ": Device %u: Framebuffer width: %u\n",
+                       psDevInfo->uiFBDevID, psLINFBInfo->var.xres));
        DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
-                       ": Framebuffer height: %u\n",
-                       psLINFBInfo->var.yres));
+                       ": Device %u: Framebuffer height: %u\n",
+                       psDevInfo->uiFBDevID, psLINFBInfo->var.yres));
        DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
-                       ": Framebuffer stride: %u\n",
-                       psLINFBInfo->fix.line_length));
+                       ": Device %u: Framebuffer stride: %u\n",
+                       psDevInfo->uiFBDevID, psLINFBInfo->fix.line_length));
+       DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
+                       ": Device %u: LCM of stride and page size: %lu\n",
+                       psDevInfo->uiFBDevID, ulLCM));
 
        
        psPVRFBInfo->sSysAddr.uiAddr = psLINFBInfo->fix.smem_start;
@@ -1191,13 +912,9 @@ static OMAP_ERROR InitDev(OMAPLFB_DEVINFO *psDevInfo)
        psPVRFBInfo->ulByteStride =  psLINFBInfo->fix.line_length;
        psPVRFBInfo->ulFBSize = FBSize;
        psPVRFBInfo->ulBufferSize = psPVRFBInfo->ulHeight * psPVRFBInfo->ulByteStride;
-
-#ifdef CONFIG_OMAP2_DSS
-    psPVRFBInfo->ulRoundedBufferSize = psPVRFBInfo->ulBufferSize;
-#else
        
-       psPVRFBInfo->ulRoundedBufferSize = OMAPLFB_PAGE_ROUNDUP(psPVRFBInfo->ulBufferSize);
-#endif
+       psPVRFBInfo->ulRoundedBufferSize = RoundUpToMultiple(psPVRFBInfo->ulBufferSize, ulLCM);
+
        if(psLINFBInfo->var.bits_per_pixel == 16)
        {
                if((psLINFBInfo->var.red.length == 5) &&
@@ -1212,7 +929,7 @@ static OMAP_ERROR InitDev(OMAPLFB_DEVINFO *psDevInfo)
                }
                else
                {
-                       printk("Unknown FB format\n");
+                       printk(KERN_INFO DRIVER_PREFIX ": %s: Device %u: Unknown FB format\n", __FUNCTION__, uiFBDevID);
                }
        }
        else if(psLINFBInfo->var.bits_per_pixel == 32)
@@ -1229,12 +946,12 @@ static OMAP_ERROR InitDev(OMAPLFB_DEVINFO *psDevInfo)
                }
                else
                {
-                       printk("Unknown FB format\n");
+                       printk(KERN_INFO DRIVER_PREFIX ": %s: Device %u: Unknown FB format\n", __FUNCTION__, uiFBDevID);
                }
        }       
        else
        {
-               printk("Unknown FB format\n");
+               printk(KERN_INFO DRIVER_PREFIX ": %s: Device %u: Unknown FB format\n", __FUNCTION__, uiFBDevID);
        }
 
        psDevInfo->sFBInfo.ulPhysicalWidthmm =
@@ -1246,20 +963,19 @@ static OMAP_ERROR InitDev(OMAPLFB_DEVINFO *psDevInfo)
        
        psDevInfo->sFBInfo.sSysAddr.uiAddr = psPVRFBInfo->sSysAddr.uiAddr;
        psDevInfo->sFBInfo.sCPUVAddr = psPVRFBInfo->sCPUVAddr;
-#ifdef CONFIG_OMAP2_DSS
-        INIT_WORK (&wq_flipdss2.work, dss2_pan_display);
-#endif
-       eError = OMAP_OK;
-       goto errRelSem;
 
-errModPut:
+       eError = OMAPLFB_OK;
+       goto ErrorRelSem;
+
+ErrorModPut:
        module_put(psLINFBOwner);
-errRelSem:
+ErrorRelSem:
        release_console_sem();
+
        return eError;
 }
 
-static void DeInitDev(OMAPLFB_DEVINFO *psDevInfo)
+static void OMAPLFBDeInitFBDev(OMAPLFB_DEVINFO *psDevInfo)
 {
        struct fb_info *psLINFBInfo = psDevInfo->psLINFBInfo;
        struct module *psLINFBOwner;
@@ -1278,109 +994,71 @@ static void DeInitDev(OMAPLFB_DEVINFO *psDevInfo)
        release_console_sem();
 }
 
-OMAP_ERROR OMAPLFBInit(void)
+static OMAPLFB_DEVINFO *OMAPLFBInitDev(unsigned uiFBDevID)
 {
-       OMAPLFB_DEVINFO         *psDevInfo;
+       PFN_CMD_PROC            pfnCmdProcList[OMAPLFB_COMMAND_COUNT];
+       IMG_UINT32              aui32SyncCountList[OMAPLFB_COMMAND_COUNT][2];
+       OMAPLFB_DEVINFO         *psDevInfo = NULL;
 
-       psDevInfo = GetAnchorPtr();
        
-       if (psDevInfo == NULL)
-       {
-               PFN_CMD_PROC                    pfnCmdProcList[OMAPLFB_COMMAND_COUNT];
-               IMG_UINT32                              aui32SyncCountList[OMAPLFB_COMMAND_COUNT][2];
-               
-               psDevInfo = (OMAPLFB_DEVINFO *)OMAPLFBAllocKernelMem(sizeof(OMAPLFB_DEVINFO));
-
-               if(!psDevInfo)
-               {
-                       return (OMAP_ERROR_OUT_OF_MEMORY);
-               }
+       psDevInfo = (OMAPLFB_DEVINFO *)OMAPLFBAllocKernelMem(sizeof(OMAPLFB_DEVINFO));
 
-               
-               memset(psDevInfo, 0, sizeof(OMAPLFB_DEVINFO));
+       if(psDevInfo == NULL)
+       {
+               printk(KERN_ERR DRIVER_PREFIX
+                       ": %s: Device %u: Couldn't allocate device information structure\n", __FUNCTION__, uiFBDevID);
 
-               
-               SetAnchorPtr((void*)psDevInfo);
+               goto ErrorExit;
+       }
 
-               
-               psDevInfo->ulRefCount = 0;
+       
+       memset(psDevInfo, 0, sizeof(OMAPLFB_DEVINFO));
 
-#ifdef FBDEV_PRESENT           
-               if(InitDev(psDevInfo) != OMAP_OK)
-               {
-                       return (OMAP_ERROR_INIT_FAILURE);
-               }
-#endif
-               if(OMAPLFBGetLibFuncAddr ("PVRGetDisplayClassJTable", &pfnGetPVRJTable) != OMAP_OK)
-               {
-                       return (OMAP_ERROR_INIT_FAILURE);
-               }
+       psDevInfo->uiFBDevID = uiFBDevID;
 
+       
+       if(!(*gpfnGetPVRJTable)(&psDevInfo->sPVRJTable))
+       {
+               goto ErrorFreeDevInfo;
+       }
+#ifdef FBDEV_PRESENT
+       
+       if(OMAPLFBInitFBDev(psDevInfo) != OMAPLFB_OK)
+       {
                
-               if(!(*pfnGetPVRJTable)(&psDevInfo->sPVRJTable))
-               {
-                       return (OMAP_ERROR_INIT_FAILURE);
-               }
+               goto ErrorFreeDevInfo;
+       }
 
-                               
-               spin_lock_init(&psDevInfo->sSwapChainLock);
+       psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers = (IMG_UINT32)(psDevInfo->sFBInfo.ulFBSize / psDevInfo->sFBInfo.ulRoundedBufferSize);
+       if (psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers != 0)
+       {
+               psDevInfo->sDisplayInfo.ui32MaxSwapChains = 1;
+               psDevInfo->sDisplayInfo.ui32MaxSwapInterval = 1;
+       }
 
-               psDevInfo->psSwapChain = 0;
-               psDevInfo->bFlushCommands = OMAP_FALSE;
-               psDevInfo->bDeviceSuspended = OMAP_FALSE;
+       psDevInfo->sDisplayInfo.ui32PhysicalWidthmm = psDevInfo->sFBInfo.ulPhysicalWidthmm;
+       psDevInfo->sDisplayInfo.ui32PhysicalHeightmm = psDevInfo->sFBInfo.ulPhysicalHeightmm;
 
-#ifdef FBDEV_PRESENT
-               psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers = (IMG_UINT32)(psDevInfo->sFBInfo.ulFBSize / psDevInfo->sFBInfo.ulRoundedBufferSize);
-#if !defined (SUPPORT_TI_DSS_FW)
-                /* DSS2 have trouble with ui32MaxSwapChainBuffers > 3 */
-                if (psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers > 3)
-                        psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers = 3;
-#endif
-#if 1
-                /* for fb_pan_display to work, yres_virtual should be set to number of buffers multiplied yres */  
-                psDevInfo->psLINFBInfo->var.yres_virtual = psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers * psDevInfo->psLINFBInfo->var.yres;
-                if (fb_set_var(psDevInfo->psLINFBInfo, &psDevInfo->psLINFBInfo->var) != 0)
-                {
-                   printk(KERN_INFO DRIVER_PREFIX ": Couldn't set framebuffer paramter: ");
-
-                }
-#endif
+       strncpy(psDevInfo->sDisplayInfo.szDisplayName, DISPLAY_DEVICE_NAME, MAX_DISPLAY_NAME_SIZE);
 
+       psDevInfo->sDisplayFormat.pixelformat = psDevInfo->sFBInfo.ePixelFormat;
+       psDevInfo->sDisplayDim.ui32Width      = (IMG_UINT32)psDevInfo->sFBInfo.ulWidth;
+       psDevInfo->sDisplayDim.ui32Height     = (IMG_UINT32)psDevInfo->sFBInfo.ulHeight;
+       psDevInfo->sDisplayDim.ui32ByteStride = (IMG_UINT32)psDevInfo->sFBInfo.ulByteStride;
 
-               if (psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers == 0)
-               {
-                       psDevInfo->sDisplayInfo.ui32MaxSwapChains = 0;
-                       psDevInfo->sDisplayInfo.ui32MaxSwapInterval = 0;
-               }
-               else
-               {
-                       psDevInfo->sDisplayInfo.ui32MaxSwapChains = 1;
-                       psDevInfo->sDisplayInfo.ui32MaxSwapInterval = 3;
-               }
-               psDevInfo->sDisplayInfo.ui32MinSwapInterval = 0;
-
-               psDevInfo->sDisplayInfo.ui32PhysicalWidthmm = psDevInfo->sFBInfo.ulPhysicalWidthmm;
-               psDevInfo->sDisplayInfo.ui32PhysicalHeightmm = psDevInfo->sFBInfo.ulPhysicalHeightmm;
+       DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
+               ": Device %u: Maximum number of swap chain buffers: %u\n",
+               psDevInfo->uiFBDevID, psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers));
 
-               strncpy(psDevInfo->sDisplayInfo.szDisplayName, DISPLAY_DEVICE_NAME, MAX_DISPLAY_NAME_SIZE);
        
-               psDevInfo->sDisplayFormat.pixelformat = psDevInfo->sFBInfo.ePixelFormat;
-               psDevInfo->sDisplayDim.ui32Width      = (IMG_UINT32)psDevInfo->sFBInfo.ulWidth;
-               psDevInfo->sDisplayDim.ui32Height     = (IMG_UINT32)psDevInfo->sFBInfo.ulHeight;
-               psDevInfo->sDisplayDim.ui32ByteStride = (IMG_UINT32)psDevInfo->sFBInfo.ulByteStride;
-
-               DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
-                       ": Maximum number of swap chain buffers: %u\n",
-                       psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers));
-
-               
-               psDevInfo->sSystemBuffer.sSysAddr = psDevInfo->sFBInfo.sSysAddr;
-               psDevInfo->sSystemBuffer.sCPUVAddr = psDevInfo->sFBInfo.sCPUVAddr;
-               psDevInfo->sSystemBuffer.ulBufferSize = psDevInfo->sFBInfo.ulRoundedBufferSize;
+       psDevInfo->sSystemBuffer.sSysAddr = psDevInfo->sFBInfo.sSysAddr;
+       psDevInfo->sSystemBuffer.sCPUVAddr = psDevInfo->sFBInfo.sCPUVAddr;
+       psDevInfo->sSystemBuffer.psDevInfo = psDevInfo;
 
+       OMAPLFBInitBufferForSwap(&psDevInfo->sSystemBuffer);
 #else
                 psDevInfo->sSystemBuffer.sCPUVAddr = 0x100;
-                psDevInfo->sSystemBuffer.ulBufferSize = 600*3200;
+//                psDevInfo->sSystemBuffer.ulBufferSize = 600*3200;
 
                 psDevInfo->sDisplayFormat.pixelformat = 20;
                 psDevInfo->sFBInfo.ulWidth      =  800;
@@ -1388,166 +1066,171 @@ OMAP_ERROR OMAPLFBInit(void)
                 psDevInfo->sFBInfo.ulByteStride =  3200;
                 psDevInfo->sFBInfo.ulFBSize     =  8388608;
                 psDevInfo->sFBInfo.ulBufferSize = 600*3200;
-
 #endif
 
-               psDevInfo->sDCJTable.ui32TableSize = sizeof(PVRSRV_DC_SRV2DISP_KMJTABLE);
-               psDevInfo->sDCJTable.pfnOpenDCDevice = OpenDCDevice;
-               psDevInfo->sDCJTable.pfnCloseDCDevice = CloseDCDevice;
-               psDevInfo->sDCJTable.pfnEnumDCFormats = EnumDCFormats;
-               psDevInfo->sDCJTable.pfnEnumDCDims = EnumDCDims;
-               psDevInfo->sDCJTable.pfnGetDCSystemBuffer = GetDCSystemBuffer;
-               psDevInfo->sDCJTable.pfnGetDCInfo = GetDCInfo;
-               psDevInfo->sDCJTable.pfnGetBufferAddr = GetDCBufferAddr;
-               psDevInfo->sDCJTable.pfnCreateDCSwapChain = CreateDCSwapChain;
-               psDevInfo->sDCJTable.pfnDestroyDCSwapChain = DestroyDCSwapChain;
-               psDevInfo->sDCJTable.pfnSetDCDstRect = SetDCDstRect;
-               psDevInfo->sDCJTable.pfnSetDCSrcRect = SetDCSrcRect;
-               psDevInfo->sDCJTable.pfnSetDCDstColourKey = SetDCDstColourKey;
-               psDevInfo->sDCJTable.pfnSetDCSrcColourKey = SetDCSrcColourKey;
-               psDevInfo->sDCJTable.pfnGetDCBuffers = GetDCBuffers;
-               psDevInfo->sDCJTable.pfnSwapToDCBuffer = SwapToDCBuffer;
-               psDevInfo->sDCJTable.pfnSwapToDCSystem = SwapToDCSystem;
-               psDevInfo->sDCJTable.pfnSetDCState = SetDCState;
 
-               
-               if(psDevInfo->sPVRJTable.pfnPVRSRVRegisterDCDevice (
-                       &psDevInfo->sDCJTable,
-                       &psDevInfo->uiDeviceID ) != PVRSRV_OK)
-               {
-                       return (OMAP_ERROR_DEVICE_REGISTER_FAILED);
-               }
-               
-               
-               pfnCmdProcList[DC_FLIP_COMMAND] = ProcessFlip;
+       psDevInfo->sDCJTable.ui32TableSize = sizeof(PVRSRV_DC_SRV2DISP_KMJTABLE);
+       psDevInfo->sDCJTable.pfnOpenDCDevice = OpenDCDevice;
+       psDevInfo->sDCJTable.pfnCloseDCDevice = CloseDCDevice;
+       psDevInfo->sDCJTable.pfnEnumDCFormats = EnumDCFormats;
+       psDevInfo->sDCJTable.pfnEnumDCDims = EnumDCDims;
+       psDevInfo->sDCJTable.pfnGetDCSystemBuffer = GetDCSystemBuffer;
+       psDevInfo->sDCJTable.pfnGetDCInfo = GetDCInfo;
+       psDevInfo->sDCJTable.pfnGetBufferAddr = GetDCBufferAddr;
+       psDevInfo->sDCJTable.pfnCreateDCSwapChain = CreateDCSwapChain;
+       psDevInfo->sDCJTable.pfnDestroyDCSwapChain = DestroyDCSwapChain;
+       psDevInfo->sDCJTable.pfnSetDCDstRect = SetDCDstRect;
+       psDevInfo->sDCJTable.pfnSetDCSrcRect = SetDCSrcRect;
+       psDevInfo->sDCJTable.pfnSetDCDstColourKey = SetDCDstColourKey;
+       psDevInfo->sDCJTable.pfnSetDCSrcColourKey = SetDCSrcColourKey;
+       psDevInfo->sDCJTable.pfnGetDCBuffers = GetDCBuffers;
+       psDevInfo->sDCJTable.pfnSwapToDCBuffer = SwapToDCBuffer;
+       psDevInfo->sDCJTable.pfnSwapToDCSystem = SwapToDCSystem;
+       psDevInfo->sDCJTable.pfnSetDCState = SetDCState;
 
-               
-               aui32SyncCountList[DC_FLIP_COMMAND][0] = 0; 
-               aui32SyncCountList[DC_FLIP_COMMAND][1] = 2; 
+       
+       if(psDevInfo->sPVRJTable.pfnPVRSRVRegisterDCDevice(
+               &psDevInfo->sDCJTable,
+               &psDevInfo->uiPVRDevID) != PVRSRV_OK)
+       {
+               printk(KERN_ERR DRIVER_PREFIX
+                       ": %s: Device %u: PVR Services device registration failed\n", __FUNCTION__, uiFBDevID);
 
-               
+               goto ErrorDeInitFBDev;
+       }
+       DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
+               ": Device %u: PVR Device ID: %u\n",
+               psDevInfo->uiFBDevID, psDevInfo->uiPVRDevID));
+       
+       
+       pfnCmdProcList[DC_FLIP_COMMAND] = ProcessFlip;
 
+       
+       aui32SyncCountList[DC_FLIP_COMMAND][0] = 0; 
+       aui32SyncCountList[DC_FLIP_COMMAND][1] = 2; 
+
+       
 
 
-               if (psDevInfo->sPVRJTable.pfnPVRSRVRegisterCmdProcList (psDevInfo->uiDeviceID,
-                                                                                                                               &pfnCmdProcList[0],
-                                                                                                                               aui32SyncCountList,
-                                                                                                                               OMAPLFB_COMMAND_COUNT) != PVRSRV_OK)
-               {
-                       printk(KERN_WARNING DRIVER_PREFIX ": Can't register callback\n");
-                       return (OMAP_ERROR_CANT_REGISTER_CALLBACK);
-               }
 
+       if (psDevInfo->sPVRJTable.pfnPVRSRVRegisterCmdProcList(psDevInfo->uiPVRDevID,
+                                                                                                                       &pfnCmdProcList[0],
+                                                                                                                       aui32SyncCountList,
+                                                                                                                       OMAPLFB_COMMAND_COUNT) != PVRSRV_OK)
+       {
+               printk(KERN_ERR DRIVER_PREFIX
+                       ": %s: Device %u: Couldn't register command processing functions with PVR Services\n", __FUNCTION__, uiFBDevID);
+               goto ErrorUnregisterDevice;
        }
 
-       
-       psDevInfo->ulRefCount++;
+       OMAPLFBCreateSwapChainLockInit(psDevInfo);
 
-       
-       return (OMAP_OK);
-       
-       }
+       OMAPLFBAtomicBoolInit(&psDevInfo->sBlanked, OMAPLFB_FALSE);
+       OMAPLFBAtomicIntInit(&psDevInfo->sBlankEvents, 0);
+       OMAPLFBAtomicBoolInit(&psDevInfo->sFlushCommands, OMAPLFB_FALSE);
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+       OMAPLFBAtomicBoolInit(&psDevInfo->sEarlySuspendFlag, OMAPLFB_FALSE);
+#endif
+#if defined(SUPPORT_DRI_DRM)
+       OMAPLFBAtomicBoolInit(&psDevInfo->sLeaveVT, OMAPLFB_FALSE);
+#endif
+       return psDevInfo;
+
+ErrorUnregisterDevice:
+       (void)psDevInfo->sPVRJTable.pfnPVRSRVRemoveDCDevice(psDevInfo->uiPVRDevID);
+ErrorDeInitFBDev:
+       OMAPLFBDeInitFBDev(psDevInfo);
+ErrorFreeDevInfo:
+       OMAPLFBFreeKernelMem(psDevInfo);
+ErrorExit:
+       return NULL;
+}
 
-OMAP_ERROR OMAPLFBDeinit(void)
+OMAPLFB_ERROR OMAPLFBInit(void)
 {
-       OMAPLFB_DEVINFO *psDevInfo, *psDevFirst;
-
-       psDevFirst = GetAnchorPtr();
-       psDevInfo = psDevFirst;
+       unsigned uiMaxFBDevIDPlusOne = OMAPLFBMaxFBDevIDPlusOne();
+       unsigned i;
+       unsigned uiDevicesFound = 0;
 
-       
-       if (psDevInfo == NULL)
+       if(OMAPLFBGetLibFuncAddr ("PVRGetDisplayClassJTable", &gpfnGetPVRJTable) != OMAPLFB_OK)
        {
-               return (OMAP_ERROR_GENERIC);
+               return OMAPLFB_ERROR_INIT_FAILURE;
        }
 
        
-       psDevInfo->ulRefCount--;
-
-       if (psDevInfo->ulRefCount == 0)
+       for(i = uiMaxFBDevIDPlusOne; i-- != 0;)
        {
-               
-               PVRSRV_DC_DISP2SRV_KMJTABLE     *psJTable = &psDevInfo->sPVRJTable;
-
-               if (psDevInfo->sPVRJTable.pfnPVRSRVRemoveCmdProcList (psDevInfo->uiDeviceID, OMAPLFB_COMMAND_COUNT) != PVRSRV_OK)
-               {
-                       return (OMAP_ERROR_GENERIC);
-               }
+               OMAPLFB_DEVINFO *psDevInfo = OMAPLFBInitDev(i);
 
-               
-               if (psJTable->pfnPVRSRVRemoveDCDevice(psDevInfo->uiDeviceID) != PVRSRV_OK)
+               if (psDevInfo != NULL)
                {
-                       return (OMAP_ERROR_GENERIC);
+                       
+                       OMAPLFBSetDevInfoPtr(psDevInfo->uiFBDevID, psDevInfo);
+                       uiDevicesFound++;
                }
-#ifdef FBDEV_PRESENT           
-               DeInitDev(psDevInfo);
-#endif
-               
-               OMAPLFBFreeKernelMem(psDevInfo);
        }
-       
-       
-       SetAnchorPtr(NULL);
 
-       
-       return (OMAP_OK);
+       return (uiDevicesFound != 0) ? OMAPLFB_OK : OMAPLFB_ERROR_INIT_FAILURE;
 }
 
-
-#if defined(LDM_PLATFORM)
-void OMAPLFBDriverSuspend(void)
+static OMAPLFB_BOOL OMAPLFBDeInitDev(OMAPLFB_DEVINFO *psDevInfo)
 {
-       OMAPLFB_DEVINFO *psDevInfo = GetAnchorPtr();
-       unsigned long    ulLockFlags;
+       PVRSRV_DC_DISP2SRV_KMJTABLE *psPVRJTable = &psDevInfo->sPVRJTable;
 
-       spin_lock_irqsave(&psDevInfo->sSwapChainLock, ulLockFlags);
+       OMAPLFBCreateSwapChainLockDeInit(psDevInfo);
 
-       if (psDevInfo->bDeviceSuspended)
+       OMAPLFBAtomicBoolDeInit(&psDevInfo->sBlanked);
+       OMAPLFBAtomicIntDeInit(&psDevInfo->sBlankEvents);
+       OMAPLFBAtomicBoolDeInit(&psDevInfo->sFlushCommands);
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+       OMAPLFBAtomicBoolDeInit(&psDevInfo->sEarlySuspendFlag);
+#endif
+#if defined(SUPPORT_DRI_DRM)
+       OMAPLFBAtomicBoolDeInit(&psDevInfo->sLeaveVT);
+#endif
+       psPVRJTable = &psDevInfo->sPVRJTable;
+
+       if (psPVRJTable->pfnPVRSRVRemoveCmdProcList (psDevInfo->uiPVRDevID, OMAPLFB_COMMAND_COUNT) != PVRSRV_OK)
        {
-               goto ExitUnlock;
+               printk(KERN_ERR DRIVER_PREFIX
+                       ": %s: Device %u: PVR Device %u: Couldn't unregister command processing functions\n", __FUNCTION__, psDevInfo->uiFBDevID, psDevInfo->uiPVRDevID);
+               return OMAPLFB_FALSE;
        }
-       psDevInfo->bDeviceSuspended = OMAP_TRUE;
 
        
-       SetFlushStateInternalNoLock(psDevInfo, OMAP_TRUE);
-
-       spin_unlock_irqrestore(&psDevInfo->sSwapChainLock, ulLockFlags);
-
-#if defined (SUPPORT_TI_DSS_FW)        
-       if (psDevInfo->psSwapChain != NULL)
+       if (psPVRJTable->pfnPVRSRVRemoveDCDevice(psDevInfo->uiPVRDevID) != PVRSRV_OK)
        {
-               OMAPLFBDisableDisplayRegisterAccess();
+               printk(KERN_ERR DRIVER_PREFIX
+                       ": %s: Device %u: PVR Device %u: Couldn't remove device from PVR Services\n", __FUNCTION__, psDevInfo->uiFBDevID, psDevInfo->uiPVRDevID);
+               return OMAPLFB_FALSE;
        }
+#ifdef FBDEV_PRESENT   
+       OMAPLFBDeInitFBDev(psDevInfo);
 #endif
-       return;
+       OMAPLFBSetDevInfoPtr(psDevInfo->uiFBDevID, NULL);
+
+       
+       OMAPLFBFreeKernelMem(psDevInfo);
 
-ExitUnlock:
-       spin_unlock_irqrestore(&psDevInfo->sSwapChainLock, ulLockFlags);
+       return OMAPLFB_TRUE;
 }
 
-void OMAPLFBDriverResume(void)
+OMAPLFB_ERROR OMAPLFBDeInit(void)
 {
-       OMAPLFB_DEVINFO *psDevInfo = GetAnchorPtr();
-       unsigned long    ulLockFlags;
+       unsigned uiMaxFBDevIDPlusOne = OMAPLFBMaxFBDevIDPlusOne();
+       unsigned i;
+       OMAPLFB_BOOL bError = OMAPLFB_FALSE;
 
-       if (psDevInfo->bDeviceSuspended == OMAP_FALSE)
+       for(i = 0; i < uiMaxFBDevIDPlusOne; i++)
        {
-               return;
-       }
-#if defined (SUPPORT_TI_DSS_FW)
-       if (psDevInfo->psSwapChain != NULL)
-       {
-               OMAPLFBEnableDisplayRegisterAccess();
-       }
-#endif
-       spin_lock_irqsave(&psDevInfo->sSwapChainLock, ulLockFlags);
+               OMAPLFB_DEVINFO *psDevInfo = OMAPLFBGetDevInfoPtr(i);
 
-       
-       SetFlushStateInternalNoLock(psDevInfo, OMAP_FALSE);
-
-       psDevInfo->bDeviceSuspended = OMAP_FALSE;
+               if (psDevInfo != NULL)
+               {
+                       bError |= !OMAPLFBDeInitDev(psDevInfo);
+               }
+       }
 
-       spin_unlock_irqrestore(&psDevInfo->sSwapChainLock, ulLockFlags);
+       return (bError) ? OMAPLFB_ERROR_INIT_FAILURE : OMAPLFB_OK;
 }
-#endif
 
index dc219cd..27cb1db 100644 (file)
 #endif
 
 #include <linux/version.h>
-#include <linux/module.h>
-
-#include <linux/pci.h>
-#include <asm/uaccess.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/interrupt.h>
 
-#include <plat/ti81xx-vpss.h>
+#include <asm/atomic.h>
 
-#if defined(LDM_PLATFORM)
-#include <linux/platform_device.h>
-#endif 
+#if defined(SUPPORT_DRI_DRM)
+#include <drm/drmP.h>
+#else
+#include <linux/module.h>
+#endif
 
-#if defined (SUPPORT_TI_DSS_FW)
-#include <asm/io.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/hardirq.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/fb.h>
+#include <linux/console.h>
+//#include <linux/omapfb.h>
+#include <linux/ti81xxfb.h>
+#include <linux/mutex.h>
 
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26))
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,31))
-#include <plat/display.h>
-#else 
-#include <mach/display.h>
-#endif 
-#else 
-#include <asm/arch-omap/display.h>
-#endif 
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34))
+#define PVR_OMAPFB3_NEEDS_PLAT_VRFB_H
+#endif
 
+#if defined(PVR_OMAPFB3_NEEDS_PLAT_VRFB_H)
+# include <plat/vrfb.h>
 #else
-#if !defined (CONFIG_OMAP2_DSS)
-#define DISPC_IRQ_VSYNC 0x0002
-extern int omap_dispc_request_irq(unsigned long, void (*)(void *), void *);
-extern void omap_dispc_free_irq(unsigned long, void (*)(void *), void *);
-extern void omap_dispc_set_plane_base(int plane, IMG_UINT32 phys_addr);
-#else
-#include <plat/display.h>
-#include <linux/console.h>
-#include <linux/fb.h>
-static omap_dispc_isr_t *pOMAPLFBVSyncISRHandle = NULL;
+# if defined(PVR_OMAPFB3_NEEDS_MACH_VRFB_H)
+#  include <mach/vrfb.h>
+# endif
 #endif
-#endif
-
 
+#if defined(DEBUG)
+#define        PVR_DEBUG DEBUG
+#undef DEBUG
+#endif
+//#include <omapfb/omapfb.h>
+#include <../drivers/video/ti81xx/ti81xxfb/fbpriv.h>
+#if defined(DEBUG)
+#undef DEBUG
+#endif
+#if defined(PVR_DEBUG)
+#define        DEBUG PVR_DEBUG
+#undef PVR_DEBUG
+#endif
 
 #include "img_defs.h"
 #include "servicesext.h"
 #include "kerneldisplay.h"
 #include "omaplfb.h"
 #include "pvrmodule.h"
+#if defined(SUPPORT_DRI_DRM)
+#include "pvr_drm.h"
+#include "3rdparty_dc_drm_shared.h"
+#endif
+
+#if 0
+#if !defined(PVR_LINUX_USING_WORKQUEUES)
+#error "PVR_LINUX_USING_WORKQUEUES must be defined"
+#endif
+#endif
 
 MODULE_SUPPORTED_DEVICE(DEVNAME);
 
-#define unref__ __attribute__ ((unused))
+#if 0
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34))
+#define OMAP_DSS_DRIVER(drv, dev) struct omap_dss_driver *drv = (dev) != NULL ? (dev)->driver : NULL
+#define OMAP_DSS_MANAGER(man, dev) struct omap_overlay_manager *man = (dev) != NULL ? (dev)->manager : NULL
+#define        WAIT_FOR_VSYNC(man)     ((man)->wait_for_vsync)
+#else
+#define OMAP_DSS_DRIVER(drv, dev) struct ti81xxfb_device  *drv = (dev)
+#define OMAP_DSS_MANAGER(man, dev) struct ti81xxfb_device *man = (dev)
+#define        WAIT_FOR_VSYNC(man)     ((man)->wait_vsync)
+#endif
+#endif
+
+
+#define OMAP_DSS_DRIVER(drv, dev) struct ti81xxfb_device  *drv = (dev)
+#define OMAP_DSS_MANAGER(man, dev) struct ti81xxfb_device *man = (dev)
+#define WAIT_FOR_VSYNC(man)     ((man)->wait_vsync)
+
+
+
 
 void *OMAPLFBAllocKernelMem(unsigned long ulSize)
 {
@@ -92,366 +123,673 @@ void OMAPLFBFreeKernelMem(void *pvMem)
        kfree(pvMem);
 }
 
-
-OMAP_ERROR OMAPLFBGetLibFuncAddr (char *szFunctionName, PFN_DC_GET_PVRJTABLE *ppfnFuncTable)
+void OMAPLFBCreateSwapChainLockInit(OMAPLFB_DEVINFO *psDevInfo)
 {
-       if(strcmp("PVRGetDisplayClassJTable", szFunctionName) != 0)
-       {
-               return (OMAP_ERROR_INVALID_PARAMS);
-       }
+       mutex_init(&psDevInfo->sCreateSwapChainMutex);
+}
 
-       
-       *ppfnFuncTable = PVRGetDisplayClassJTable;
+void OMAPLFBCreateSwapChainLockDeInit(OMAPLFB_DEVINFO *psDevInfo)
+{
+       mutex_destroy(&psDevInfo->sCreateSwapChainMutex);
+}
 
-       return (OMAP_OK);
+void OMAPLFBCreateSwapChainLock(OMAPLFB_DEVINFO *psDevInfo)
+{
+       mutex_lock(&psDevInfo->sCreateSwapChainMutex);
 }
 
+void OMAPLFBCreateSwapChainUnLock(OMAPLFB_DEVINFO *psDevInfo)
+{
+       mutex_unlock(&psDevInfo->sCreateSwapChainMutex);
+}
 
-#if defined(SYS_USING_INTERRUPTS)
+void OMAPLFBAtomicBoolInit(OMAPLFB_ATOMIC_BOOL *psAtomic, OMAPLFB_BOOL bVal)
+{
+       atomic_set(psAtomic, (int)bVal);
+}
 
-#if defined(SUPPORT_OMAP3430_OMAPFB3)
+void OMAPLFBAtomicBoolDeInit(OMAPLFB_ATOMIC_BOOL *psAtomic)
+{
+}
 
-static void OMAPLFBVSyncISR(void *arg, u32 mask)
+void OMAPLFBAtomicBoolSet(OMAPLFB_ATOMIC_BOOL *psAtomic, OMAPLFB_BOOL bVal)
 {
-       OMAPLFB_SWAPCHAIN *psSwapChain= (OMAPLFB_SWAPCHAIN *)arg;
-       (void) OMAPLFBVSyncIHandler(psSwapChain);
-     //   printk (" VSync ISR \n");
+       atomic_set(psAtomic, (int)bVal);
 }
 
-static inline int OMAPLFBRegisterVSyncISR(OMAPLFB_SWAPCHAIN *psSwapChain)
+OMAPLFB_BOOL OMAPLFBAtomicBoolRead(OMAPLFB_ATOMIC_BOOL *psAtomic)
 {
-       return omap_dispc_register_isr(OMAPLFBVSyncISR, psSwapChain,
-                                                                  DISPC_IRQ_VSYNC);
+       return (OMAPLFB_BOOL)atomic_read(psAtomic);
 }
 
-static inline int OMAPLFBUnregisterVSyncISR(OMAPLFB_SWAPCHAIN *psSwapChain)
+void OMAPLFBAtomicIntInit(OMAPLFB_ATOMIC_INT *psAtomic, int iVal)
 {
-       return omap_dispc_unregister_isr(OMAPLFBVSyncISR, psSwapChain,
-                                                                        DISPC_IRQ_VSYNC);
+       atomic_set(psAtomic, iVal);
 }
 
-#else 
+void OMAPLFBAtomicIntDeInit(OMAPLFB_ATOMIC_INT *psAtomic)
+{
+}
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29))
-static void OMAPLFBVSyncISR(void *arg)
-#else
-static void OMAPLFBVSyncISR(void *arg, struct pt_regs unref__ *regs)
-#endif
+void OMAPLFBAtomicIntSet(OMAPLFB_ATOMIC_INT *psAtomic, int iVal)
 {
-       OMAPLFB_SWAPCHAIN *psSwapChain= (OMAPLFB_SWAPCHAIN *)arg;
-       (void) OMAPLFBVSyncIHandler(psSwapChain);
+       atomic_set(psAtomic, iVal);
 }
 
-static inline int OMAPLFBRegisterVSyncISR(OMAPLFB_SWAPCHAIN *psSwapChain)
+int OMAPLFBAtomicIntRead(OMAPLFB_ATOMIC_INT *psAtomic)
 {
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29))
-       return omap_dispc_request_irq(DISPC_IRQ_VSYNC, OMAPLFBVSyncISR, psSwapChain);
-#else
-       return omap2_disp_register_isr(OMAPLFBVSyncISR, psSwapChain,
-                                                                  DISPC_IRQSTATUS_VSYNC);
-#endif
+       return atomic_read(psAtomic);
 }
 
-static inline int OMAPLFBUnregisterVSyncISR(OMAPLFB_SWAPCHAIN *psSwapChain)
+void OMAPLFBAtomicIntInc(OMAPLFB_ATOMIC_INT *psAtomic)
 {
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29))
-       omap_dispc_free_irq(DISPC_IRQ_VSYNC, OMAPLFBVSyncISR, psSwapChain);
-       return 0;
-#else
-       return omap2_disp_unregister_isr(OMAPLFBVSyncISR);
-#endif
+       atomic_inc(psAtomic);
 }
 
-#endif 
+OMAPLFB_ERROR OMAPLFBGetLibFuncAddr (char *szFunctionName, PFN_DC_GET_PVRJTABLE *ppfnFuncTable)
+{
+       if(strcmp("PVRGetDisplayClassJTable", szFunctionName) != 0)
+       {
+               return (OMAPLFB_ERROR_INVALID_PARAMS);
+       }
 
-#endif 
+       
+       *ppfnFuncTable = PVRGetDisplayClassJTable;
 
-#if !defined (SUPPORT_TI_DSS_FW)
+       return (OMAPLFB_OK);
+}
 
-IMG_VOID OMAPLFBEnableVSyncInterrupt(OMAPLFB_SWAPCHAIN *psSwapChain)
+void OMAPLFBQueueBufferForSwap(OMAPLFB_SWAPCHAIN *psSwapChain, OMAPLFB_BUFFER *psBuffer)
 {
-        if (pOMAPLFBVSyncISRHandle == NULL)
-                OMAPLFBInstallVSyncISR (psSwapChain);
+       int res = queue_work(psSwapChain->psWorkQueue, &psBuffer->sWork);
+
+       if (res == 0)
+       {
+               printk(KERN_WARNING DRIVER_PREFIX ": %s: Device %u: Buffer already on work queue\n", __FUNCTION__, psSwapChain->uiFBDevID);
+       }
 }
 
-IMG_VOID OMAPLFBDisableVSyncInterrupt(OMAPLFB_SWAPCHAIN *psSwapChain)
+static void WorkQueueHandler(struct work_struct *psWork)
 {
-        if (pOMAPLFBVSyncISRHandle != NULL)
-                OMAPLFBUninstallVSyncISR (psSwapChain);
+       OMAPLFB_BUFFER *psBuffer = container_of(psWork, OMAPLFB_BUFFER, sWork);
+
+       OMAPLFBSwapHandler(psBuffer);
 }
+
+OMAPLFB_ERROR OMAPLFBCreateSwapQueue(OMAPLFB_SWAPCHAIN *psSwapChain)
+{      
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,34))
+        psSwapChain->psWorkQueue = __create_workqueue(DEVNAME, 1, 1, 1);
 #else
+psSwapChain->psWorkQueue = create_workqueue(DEVNAME);
+#endif
+       if (psSwapChain->psWorkQueue == NULL)
+       {
+               printk(KERN_WARNING DRIVER_PREFIX ": %s: Device %u: create_singlethreaded_workqueue failed\n", __FUNCTION__, psSwapChain->uiFBDevID);
 
-static void OMAPLFBVSyncWriteReg(OMAPLFB_SWAPCHAIN *psSwapChain, unsigned long ulOffset, unsigned long ulValue)
-{
-       void *pvRegAddr = (void *)((char *)psSwapChain->pvRegs + ulOffset);
+               return (OMAPLFB_ERROR_INIT_FAILURE);
+       }
 
-       
-       writel(ulValue, pvRegAddr);
+       return (OMAPLFB_OK);
 }
 
-static unsigned long OMAPLFBVSyncReadReg(OMAPLFB_SWAPCHAIN *psSwapChain, unsigned long ulOffset)
+void OMAPLFBInitBufferForSwap(OMAPLFB_BUFFER *psBuffer)
 {
-       return readl((char *)psSwapChain->pvRegs + ulOffset);
+       INIT_WORK(&psBuffer->sWork, WorkQueueHandler);
 }
 
-void OMAPLFBEnableVSyncInterrupt(OMAPLFB_SWAPCHAIN *psSwapChain)
+void OMAPLFBDestroySwapQueue(OMAPLFB_SWAPCHAIN *psSwapChain)
 {
-#if defined(SYS_USING_INTERRUPTS)
-       
-       unsigned long ulInterruptEnable  = OMAPLFBVSyncReadReg(psSwapChain, OMAPLCD_IRQENABLE);
-       ulInterruptEnable |= OMAPLCD_INTMASK_VSYNC;
-       OMAPLFBVSyncWriteReg(psSwapChain, OMAPLCD_IRQENABLE, ulInterruptEnable );
-#endif
+       destroy_workqueue(psSwapChain->psWorkQueue);
 }
 
-void OMAPLFBDisableVSyncInterrupt(OMAPLFB_SWAPCHAIN *psSwapChain)
+void OMAPLFBFlip(OMAPLFB_DEVINFO *psDevInfo, OMAPLFB_BUFFER *psBuffer)
 {
-#if defined(SYS_USING_INTERRUPTS)
+       struct fb_var_screeninfo sFBVar;
+       int res;
+       unsigned long ulYResVirtual;
+
+       acquire_console_sem();
+
+       sFBVar = psDevInfo->psLINFBInfo->var;
+
+       sFBVar.xoffset = 0;
+       sFBVar.yoffset = psBuffer->ulYOffset;
+
+       ulYResVirtual = psBuffer->ulYOffset + sFBVar.yres;
+
        
-       unsigned long ulInterruptEnable = OMAPLFBVSyncReadReg(psSwapChain, OMAPLCD_IRQENABLE);
-       ulInterruptEnable &= ~(OMAPLCD_INTMASK_VSYNC);
-       OMAPLFBVSyncWriteReg(psSwapChain, OMAPLCD_IRQENABLE, ulInterruptEnable);
-#endif
+       if (sFBVar.xres_virtual != sFBVar.xres || sFBVar.yres_virtual < ulYResVirtual)
+       {
+               sFBVar.xres_virtual = sFBVar.xres;
+               sFBVar.yres_virtual = ulYResVirtual;
+
+               sFBVar.activate = FB_ACTIVATE_NOW | FB_ACTIVATE_FORCE;
+
+               res = fb_set_var(psDevInfo->psLINFBInfo, &sFBVar);
+               if (res != 0)
+               {
+                       printk(KERN_INFO DRIVER_PREFIX ": %s: Device %u: fb_set_var failed (Y Offset: %lu, Error: %d)\n", __FUNCTION__, psDevInfo->uiFBDevID, psBuffer->ulYOffset, res);
+               }
+       }
+       else
+       {
+               res = fb_pan_display(psDevInfo->psLINFBInfo, &sFBVar);
+               if (res != 0)
+               {
+                       printk(KERN_INFO DRIVER_PREFIX ": %s: Device %u: fb_pan_display failed (Y Offset: %lu, Error: %d)\n", __FUNCTION__, psDevInfo->uiFBDevID, psBuffer->ulYOffset, res);
+               }
+       }
+
+       release_console_sem();
 }
-#endif
 
-#if !defined (SUPPORT_TI_DSS_FW)
-OMAP_ERROR OMAPLFBInstallVSyncISR(OMAPLFB_SWAPCHAIN *psSwapChain)
+OMAPLFB_UPDATE_MODE OMAPLFBGetUpdateMode(OMAPLFB_DEVINFO *psDevInfo)
 {
-#if !defined (CONFIG_OMAP2_DSS)
-        if (omap_dispc_request_irq(DISPC_IRQ_VSYNC, OMAPLFBVSyncISR, psSwapChain) != 0)
-#else
-        int ret;
-#ifdef FBDEV_PRESENT
-       ret = vps_grpx_register_isr ((vsync_callback_t)OMAPLFBVSyncISR, psSwapChain, 0); // fb_idx = 0
-#endif
-//        if (ret == 0) 
-             pOMAPLFBVSyncISRHandle  = (omap_dispc_isr_t *)NULL;
-  //      else 
-    //        pOMAPLFBVSyncISRHandle = NULL;
 
-        if (pOMAPLFBVSyncISRHandle != NULL)
-#endif
-                return PVRSRV_ERROR_OUT_OF_MEMORY; /* not worth a proper mapping */
-        return OMAP_OK;
-}
+#if 0
 
+ //    struct omap_dss_device *psDSSDev = fb2display(psDevInfo->psLINFBInfo);
+       
+        struct ti81xxfb_device *psDSSDev = FB2TFB(psDevInfo->psLINFBInfo);
 
-OMAP_ERROR OMAPLFBUninstallVSyncISR (OMAPLFB_SWAPCHAIN *psSwapChain)
-{
-#if !defined (CONFIG_OMAP2_DSS)
-        omap_dispc_free_irq(DISPC_IRQ_VSYNC, OMAPLFBVSyncISR, psSwapChain);
-#else
-        int ret;
-#ifdef FBDEV_PRESENT
-        ret = vps_grpx_unregister_isr((vsync_callback_t) OMAPLFBVSyncISR, (void *)psSwapChain, 0); // fb_idx = 0
-#endif
-#endif
-        return OMAP_OK;
-}
+       OMAP_DSS_DRIVER(psDSSDrv, psDSSDev);
 
+       enum omap_dss_update_mode eMode;
 
-IMG_VOID OMAPLFBFlip(OMAPLFB_SWAPCHAIN *psSwapChain,
-                                                  IMG_UINT32 aPhyAddr)
-{
-#if !defined (CONFIG_OMAP2_DSS)
-        omap_dispc_set_plane_base(0, aPhyAddr);
-#else
-        OMAPLFBFlipDSS2 (psSwapChain, aPhyAddr);
+       if (psDSSDrv == NULL || psDSSDrv->get_update_mode == NULL)
+       {
+               DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX ": %s: Device %u: Can't get update mode\n", __FUNCTION__, psDevInfo->uiFBDevID));
+               return OMAPLFB_UPDATE_MODE_UNDEFINED;
+       }
+
+       eMode = psDSSDrv->get_update_mode(psDSSDev);
+       switch(eMode)
+       {
+               case OMAP_DSS_UPDATE_AUTO:
+                       return OMAPLFB_UPDATE_MODE_AUTO;
+               case OMAP_DSS_UPDATE_MANUAL:
+                       return OMAPLFB_UPDATE_MODE_MANUAL;
+               case OMAP_DSS_UPDATE_DISABLED:
+                       return OMAPLFB_UPDATE_MODE_DISABLED;
+               default:
+                       DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX ": %s: Device %u: Unknown update mode (%d)\n", __FUNCTION__, psDevInfo->uiFBDevID, eMode));
+                       break;
+       }
+
+       return OMAPLFB_UPDATE_MODE_UNDEFINED;
 #endif
+
+return OMAPLFB_UPDATE_MODE_AUTO;
 }
-#else
 
-OMAP_ERROR OMAPLFBInstallVSyncISR(OMAPLFB_SWAPCHAIN *psSwapChain)
+OMAPLFB_BOOL OMAPLFBSetUpdateMode(OMAPLFB_DEVINFO *psDevInfo, OMAPLFB_UPDATE_MODE eMode)
 {
-#if defined(SYS_USING_INTERRUPTS)
-       OMAPLFBDisableVSyncInterrupt(psSwapChain);
 
-       if (OMAPLFBRegisterVSyncISR(psSwapChain))
+#if 0
+//     struct omap_dss_device *psDSSDev = fb2display(psDevInfo->psLINFBInfo);
+
+        struct ti81xxfb_device *psDSSDev = FB2TFB(psDevInfo->psLINFBInfo);
+       
+        OMAP_DSS_DRIVER(psDSSDrv, psDSSDev);
+       enum omap_dss_update_mode eDSSMode;
+       int res;
+
+       if (psDSSDrv == NULL || psDSSDrv->set_update_mode == NULL)
+       {
+               DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX ": %s: Device %u: Can't set update mode\n", __FUNCTION__, psDevInfo->uiFBDevID));
+               return OMAPLFB_FALSE;
+       }
+
+       switch(eMode)
+       {
+               case OMAPLFB_UPDATE_MODE_AUTO:
+                       eDSSMode = OMAP_DSS_UPDATE_AUTO;
+                       break;
+               case OMAPLFB_UPDATE_MODE_MANUAL:
+                       eDSSMode = OMAP_DSS_UPDATE_MANUAL;
+                       break;
+               case OMAPLFB_UPDATE_MODE_DISABLED:
+                       eDSSMode = OMAP_DSS_UPDATE_DISABLED;
+                       break;
+               default:
+                       DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX ": %s: Device %u: Unknown update mode (%d)\n", __FUNCTION__, psDevInfo->uiFBDevID, eMode));
+                       return OMAPLFB_FALSE;
+       }
+
+       res = psDSSDrv->set_update_mode(psDSSDev, eDSSMode);
+       if (res != 0)
        {
-               printk(KERN_INFO DRIVER_PREFIX ": OMAPLFBInstallVSyncISR: Request OMAPLCD IRQ failed\n");
-               return (OMAP_ERROR_INIT_FAILURE);
+               DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX ": %s: Device %u: set_update_mode failed (%d)\n", __FUNCTION__, psDevInfo->uiFBDevID, res));
        }
 
+       return (res == 0);
 #endif
-       return (OMAP_OK);
+return 1;
 }
 
-
-OMAP_ERROR OMAPLFBUninstallVSyncISR (OMAPLFB_SWAPCHAIN *psSwapChain)
+OMAPLFB_BOOL OMAPLFBWaitForVSync(OMAPLFB_DEVINFO *psDevInfo)
 {
-#if defined(SYS_USING_INTERRUPTS)
-       OMAPLFBDisableVSyncInterrupt(psSwapChain);
+#if 0
+
+//     struct omap_dss_device *psDSSDev = fb2display(psDevInfo->psLINFBInfo);
 
-       OMAPLFBUnregisterVSyncISR(psSwapChain);
+        struct ti81xxfb_device *psDSSDev = FB2TFB(psDevInfo->psLINFBInfo);
+
+       OMAP_DSS_MANAGER(psDSSMan, psDSSDev);
+
+       if (psDSSMan != NULL && WAIT_FOR_VSYNC(psDSSMan) != NULL)
+       {
+               int res = WAIT_FOR_VSYNC(psDSSMan)(psDSSMan);
+               if (res != 0)
+               {
+                       DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX ": %s: Device %u: Wait for vsync failed (%d)\n", __FUNCTION__, psDevInfo->uiFBDevID, res));
+                       return OMAPLFB_FALSE;
+               }
+       }
 
 #endif
-       return (OMAP_OK);
+
+        struct vps_grpx_ctrl *gctrl;
+        int r = 0;
+        struct ti81xxfb_info *ofbi;
+
+        ofbi = FB2TFB(psDevInfo->psLINFBInfo);
+
+        gctrl = ofbi->gctrl;
+        r = gctrl->wait_for_vsync(gctrl); 
+
+       return OMAPLFB_TRUE;
 }
 
-void OMAPLFBEnableDisplayRegisterAccess(void)
+OMAPLFB_BOOL OMAPLFBManualSync(OMAPLFB_DEVINFO *psDevInfo)
 {
-#if !defined(SUPPORT_OMAP3430_OMAPFB3)
-       omap2_disp_get_dss();
+#if 0
+//     struct omap_dss_device *psDSSDev = fb2display(psDevInfo->psLINFBInfo);
+        struct ti81xxfb_device *psDSSDev = FB2TFB(psDevInfo->psLINFBInfo);
+       OMAP_DSS_DRIVER(psDSSDrv, psDSSDev);
+
+       if (psDSSDrv != NULL && psDSSDrv->sync != NULL)
+       {
+               int res = psDSSDrv->sync(psDSSDev);
+               if (res != 0)
+               {
+                       printk(KERN_INFO DRIVER_PREFIX ": %s: Device %u: Sync failed (%d)\n", __FUNCTION__, psDevInfo->uiFBDevID, res);
+                       return OMAPLFB_FALSE;
+               }
+       }
+printk (" OMAPLFBManualSync Not Supported \n");
 #endif
+
+       return OMAPLFB_TRUE;
 }
 
-void OMAPLFBDisableDisplayRegisterAccess(void)
+OMAPLFB_BOOL OMAPLFBCheckModeAndSync(OMAPLFB_DEVINFO *psDevInfo)
 {
-#if !defined(SUPPORT_OMAP3430_OMAPFB3)
-       omap2_disp_put_dss();
-#endif
-}
+       OMAPLFB_UPDATE_MODE eMode = OMAPLFBGetUpdateMode(psDevInfo);
 
+       switch(eMode)
+       {
+               case OMAPLFB_UPDATE_MODE_AUTO:
+               case OMAPLFB_UPDATE_MODE_MANUAL:
+                       return OMAPLFBManualSync(psDevInfo);
+               default:
+                       break;
+       }
 
-void OMAPLFBFlip(OMAPLFB_SWAPCHAIN *psSwapChain, unsigned long aPhyAddr)
+       return OMAPLFB_TRUE;
+}
+
+static int OMAPLFBFrameBufferEvents(struct notifier_block *psNotif,
+                             unsigned long event, void *data)
 {
-       unsigned long control;
+       OMAPLFB_DEVINFO *psDevInfo;
+       struct fb_event *psFBEvent = (struct fb_event *)data;
+       struct fb_info *psFBInfo = psFBEvent->info;
+       OMAPLFB_BOOL bBlanked;
 
        
-       OMAPLFBVSyncWriteReg(psSwapChain, OMAPLCD_GFX_BA0, aPhyAddr);
-       OMAPLFBVSyncWriteReg(psSwapChain, OMAPLCD_GFX_BA1, aPhyAddr);
+       if (event != FB_EVENT_BLANK)
+       {
+               return 0;
+       }
 
-       control = OMAPLFBVSyncReadReg(psSwapChain, OMAPLCD_CONTROL);
-       control |= OMAP_CONTROL_GOLCD;
-       OMAPLFBVSyncWriteReg(psSwapChain, OMAPLCD_CONTROL, control);
-}
+       bBlanked = (*(IMG_INT *)psFBEvent->data != 0) ? OMAPLFB_TRUE: OMAPLFB_FALSE;
+
+       psDevInfo = OMAPLFBGetDevInfoPtr(psFBInfo->node);
+
+#if 0
+       if (psDevInfo != NULL)
+       {
+               if (bBlanked)
+               {
+                       DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX ": %s: Device %u: Blank event received\n", __FUNCTION__, psDevInfo->uiFBDevID));
+               }
+               else
+               {
+                       DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX ": %s: Device %u: Unblank event received\n", __FUNCTION__, psDevInfo->uiFBDevID));
+               }
+       }
+       else
+       {
+               DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX ": %s: Device %u: Blank/Unblank event for unknown framebuffer\n", __FUNCTION__, psFBInfo->node));
+       }
 #endif
 
-#if defined(LDM_PLATFORM)
+       if (psDevInfo != NULL)
+       {
+               OMAPLFBAtomicBoolSet(&psDevInfo->sBlanked, bBlanked);
+               OMAPLFBAtomicIntInc(&psDevInfo->sBlankEvents);
+       }
 
-static OMAP_BOOL bDeviceSuspended;
+       return 0;
+}
 
-static void OMAPLFBCommonSuspend(void)
+OMAPLFB_ERROR OMAPLFBUnblankDisplay(OMAPLFB_DEVINFO *psDevInfo)
 {
-       if (bDeviceSuspended)
+       int res;
+#ifdef FBDEV_PRESENT
+
+       acquire_console_sem();
+       res = fb_blank(psDevInfo->psLINFBInfo, 0);
+       release_console_sem();
+       if (res != 0 && res != -EINVAL)
        {
-               return;
+               printk(KERN_WARNING DRIVER_PREFIX
+                       ": %s: Device %u: fb_blank failed (%d)\n", __FUNCTION__, psDevInfo->uiFBDevID, res);
+               return (OMAPLFB_ERROR_GENERIC);
        }
 
-       OMAPLFBDriverSuspend();
+#endif
+       return (OMAPLFB_OK);
+}
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
 
-       bDeviceSuspended = OMAP_TRUE;
+static void OMAPLFBBlankDisplay(OMAPLFB_DEVINFO *psDevInfo)
+{
+       acquire_console_sem();
+       fb_blank(psDevInfo->psLINFBInfo, 1);
+       release_console_sem();
 }
 
-static int OMAPLFBDriverSuspend_Entry(struct platform_device unref__ *pDevice, pm_message_t unref__ state)
+static void OMAPLFBEarlySuspendHandler(struct early_suspend *h)
 {
-       DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX ": OMAPLFBDriverSuspend_Entry\n"));
+       unsigned uiMaxFBDevIDPlusOne = OMAPLFBMaxFBDevIDPlusOne();
+       unsigned i;
 
-       OMAPLFBCommonSuspend();
+       for (i=0; i < uiMaxFBDevIDPlusOne; i++)
+       {
+               OMAPLFB_DEVINFO *psDevInfo = OMAPLFBGetDevInfoPtr(i);
 
-       return 0;
+               if (psDevInfo != NULL)
+               {
+                       OMAPLFBAtomicBoolSet(&psDevInfo->sEarlySuspendFlag, OMAPLFB_TRUE);
+                       OMAPLFBBlankDisplay(psDevInfo);
+               }
+       }
 }
 
-static int OMAPLFBDriverResume_Entry(struct platform_device unref__ *pDevice)
+static void OMAPLFBEarlyResumeHandler(struct early_suspend *h)
 {
-       DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX ": OMAPLFBDriverResume_Entry\n"));
-
-       OMAPLFBDriverResume();
+       unsigned uiMaxFBDevIDPlusOne = OMAPLFBMaxFBDevIDPlusOne();
+       unsigned i;
 
-       bDeviceSuspended = OMAP_FALSE;
+       for (i=0; i < uiMaxFBDevIDPlusOne; i++)
+       {
+               OMAPLFB_DEVINFO *psDevInfo = OMAPLFBGetDevInfoPtr(i);
 
-       return 0;
+               if (psDevInfo != NULL)
+               {
+                       OMAPLFBUnblankDisplay(psDevInfo);
+                       OMAPLFBAtomicBoolSet(&psDevInfo->sEarlySuspendFlag, OMAPLFB_FALSE);
+               }
+       }
 }
 
-static IMG_VOID OMAPLFBDriverShutdown_Entry(struct platform_device unref__ *pDevice)
+#endif 
+
+OMAPLFB_ERROR OMAPLFBEnableLFBEventNotification(OMAPLFB_DEVINFO *psDevInfo)
 {
-       DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX ": OMAPLFBDriverShutdown_Entry\n"));
+       int                res;
+       OMAPLFB_ERROR         eError;
 
-       OMAPLFBCommonSuspend();
-}
+       
+       memset(&psDevInfo->sLINNotifBlock, 0, sizeof(psDevInfo->sLINNotifBlock));
 
-static struct platform_driver omaplfb_driver = {
-       .driver = {
-               .name           = DRVNAME,
-       },
-       .suspend        = OMAPLFBDriverSuspend_Entry,
-       .resume         = OMAPLFBDriverResume_Entry,
-       .shutdown       = OMAPLFBDriverShutdown_Entry,
-};
+       psDevInfo->sLINNotifBlock.notifier_call = OMAPLFBFrameBufferEvents;
 
-#if defined(MODULE)
+       OMAPLFBAtomicBoolSet(&psDevInfo->sBlanked, OMAPLFB_FALSE);
+       OMAPLFBAtomicIntSet(&psDevInfo->sBlankEvents, 0);
 
-static void OMAPLFBDeviceRelease_Entry(struct device unref__ *pDevice)
-{
-       DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX ": OMAPLFBDriverRelease_Entry\n"));
+       res = fb_register_client(&psDevInfo->sLINNotifBlock);
+       if (res != 0)
+       {
+               printk(KERN_WARNING DRIVER_PREFIX
+                       ": %s: Device %u: fb_register_client failed (%d)\n", __FUNCTION__, psDevInfo->uiFBDevID, res);
 
-       OMAPLFBCommonSuspend();
-}
+               return (OMAPLFB_ERROR_GENERIC);
+       }
 
-static struct platform_device omaplfb_device = {
-       .name                   = DEVNAME,
-       .id                             = -1,
-       .dev                    = {
-               .release                = OMAPLFBDeviceRelease_Entry
+       eError = OMAPLFBUnblankDisplay(psDevInfo);
+       if (eError != OMAPLFB_OK)
+       {
+               printk(KERN_WARNING DRIVER_PREFIX
+                       ": %s: Device %u: UnblankDisplay failed (%d)\n", __FUNCTION__, psDevInfo->uiFBDevID, eError);
+               return eError;
        }
-};
 
-#endif  
+#ifdef CONFIG_HAS_EARLYSUSPEND
+       psDevInfo->sEarlySuspend.suspend = OMAPLFBEarlySuspendHandler;
+       psDevInfo->sEarlySuspend.resume = OMAPLFBEarlyResumeHandler;
+       psDevInfo->sEarlySuspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN;
+       register_early_suspend(&psDevInfo->sEarlySuspend);
+#endif
 
-#endif 
+       return (OMAPLFB_OK);
+}
 
-static int __init OMAPLFB_Init(void)
+OMAPLFB_ERROR OMAPLFBDisableLFBEventNotification(OMAPLFB_DEVINFO *psDevInfo)
 {
-#if defined(LDM_PLATFORM)
-       int error;
+       int res;
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+       unregister_early_suspend(&psDevInfo->sEarlySuspend);
 #endif
 
-       if(OMAPLFBInit() != OMAP_OK)
+       
+       res = fb_unregister_client(&psDevInfo->sLINNotifBlock);
+       if (res != 0)
        {
-               printk(KERN_WARNING DRIVER_PREFIX ": OMAPLFB_Init: OMAPLFBInit failed\n");
-               return -ENODEV;
+               printk(KERN_WARNING DRIVER_PREFIX
+                       ": %s: Device %u: fb_unregister_client failed (%d)\n", __FUNCTION__, psDevInfo->uiFBDevID, res);
+               return (OMAPLFB_ERROR_GENERIC);
        }
 
-#if defined(LDM_PLATFORM)
-       if ((error = platform_driver_register(&omaplfb_driver)) != 0)
+       OMAPLFBAtomicBoolSet(&psDevInfo->sBlanked, OMAPLFB_FALSE);
+
+       return (OMAPLFB_OK);
+}
+
+#if defined(SUPPORT_DRI_DRM) && defined(PVR_DISPLAY_CONTROLLER_DRM_IOCTL)
+static OMAPLFB_DEVINFO *OMAPLFBPVRDevIDToDevInfo(unsigned uiPVRDevID)
+{
+       unsigned uiMaxFBDevIDPlusOne = OMAPLFBMaxFBDevIDPlusOne();
+       unsigned i;
+
+       for (i=0; i < uiMaxFBDevIDPlusOne; i++)
        {
-               printk(KERN_WARNING DRIVER_PREFIX ": OMAPLFB_Init: Unable to register platform driver (%d)\n", error);
+               OMAPLFB_DEVINFO *psDevInfo = OMAPLFBGetDevInfoPtr(i);
 
-               goto ExitDeinit;
+               if (psDevInfo->uiPVRDevID == uiPVRDevID)
+               {
+                       return psDevInfo;
+               }
        }
 
-#if defined(MODULE)
-       if ((error = platform_device_register(&omaplfb_device)) != 0)
+       printk(KERN_WARNING DRIVER_PREFIX
+               ": %s: PVR Device %u: Couldn't find device\n", __FUNCTION__, uiPVRDevID);
+
+       return NULL;
+}
+
+int PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _Ioctl)(struct drm_device unref__ *dev, void *arg, struct drm_file unref__ *pFile)
+{
+       uint32_t *puiArgs;
+       uint32_t uiCmd;
+       unsigned uiPVRDevID;
+       int ret = 0;
+       OMAPLFB_DEVINFO *psDevInfo;
+
+       if (arg == NULL)
        {
-               platform_driver_unregister(&omaplfb_driver);
+               return -EFAULT;
+       }
 
-               printk(KERN_WARNING DRIVER_PREFIX ": OMAPLFB_Init: Unable to register platform device (%d)\n", error);
+       puiArgs = (uint32_t *)arg;
+       uiCmd = puiArgs[PVR_DRM_DISP_ARG_CMD];
+       uiPVRDevID = puiArgs[PVR_DRM_DISP_ARG_DEV];
 
-               goto ExitDeinit;
+       psDevInfo = OMAPLFBPVRDevIDToDevInfo(uiPVRDevID);
+       if (psDevInfo == NULL)
+       {
+               return -EINVAL;
        }
+
+
+       switch (uiCmd)
+       {
+               case PVR_DRM_DISP_CMD_LEAVE_VT:
+               case PVR_DRM_DISP_CMD_ENTER_VT:
+               {
+                       OMAPLFB_BOOL bLeaveVT = (uiCmd == PVR_DRM_DISP_CMD_LEAVE_VT);
+                       DEBUG_PRINTK((KERN_WARNING DRIVER_PREFIX ": %s: PVR Device %u: %s\n",
+                               __FUNCTION__, uiPVRDevID,
+                               bLeaveVT ? "Leave VT" : "Enter VT"));
+
+                       OMAPLFBCreateSwapChainLock(psDevInfo);
+                       
+                       OMAPLFBAtomicBoolSet(&psDevInfo->sLeaveVT, bLeaveVT);
+                       if (psDevInfo->psSwapChain != NULL)
+                       {
+                               flush_workqueue(psDevInfo->psSwapChain->psWorkQueue);
+
+                               if (bLeaveVT)
+                               {
+                                       OMAPLFBFlip(psDevInfo, &psDevInfo->sSystemBuffer);
+                                       (void) OMAPLFBCheckModeAndSync(psDevInfo);
+                               }
+                       }
+
+                       OMAPLFBCreateSwapChainUnLock(psDevInfo);
+                       (void) OMAPLFBUnblankDisplay(psDevInfo);
+                       break;
+               }
+               case PVR_DRM_DISP_CMD_ON:
+               case PVR_DRM_DISP_CMD_STANDBY:
+               case PVR_DRM_DISP_CMD_SUSPEND:
+               case PVR_DRM_DISP_CMD_OFF:
+               {
+                       int iFBMode;
+#if defined(DEBUG)
+                       {
+                               const char *pszMode;
+                               switch(uiCmd)
+                               {
+                                       case PVR_DRM_DISP_CMD_ON:
+                                               pszMode = "On";
+                                               break;
+                                       case PVR_DRM_DISP_CMD_STANDBY:
+                                               pszMode = "Standby";
+                                               break;
+                                       case PVR_DRM_DISP_CMD_SUSPEND:
+                                               pszMode = "Suspend";
+                                               break;
+                                       case PVR_DRM_DISP_CMD_OFF:
+                                               pszMode = "Off";
+                                               break;
+                                       default:
+                                               pszMode = "(Unknown Mode)";
+                                               break;
+                               }
+                               printk (KERN_WARNING DRIVER_PREFIX ": %s: PVR Device %u: Display %s\n",
+                               __FUNCTION__, uiPVRDevID, pszMode);
+                       }
 #endif
+                       switch(uiCmd)
+                       {
+                               case PVR_DRM_DISP_CMD_ON:
+                                       iFBMode = FB_BLANK_UNBLANK;
+                                       break;
+                               case PVR_DRM_DISP_CMD_STANDBY:
+                                       iFBMode = FB_BLANK_HSYNC_SUSPEND;
+                                       break;
+                               case PVR_DRM_DISP_CMD_SUSPEND:
+                                       iFBMode = FB_BLANK_VSYNC_SUSPEND;
+                                       break;
+                               case PVR_DRM_DISP_CMD_OFF:
+                                       iFBMode = FB_BLANK_POWERDOWN;
+                                       break;
+                               default:
+                                       return -EINVAL;
+                       }
+
+                       OMAPLFBCreateSwapChainLock(psDevInfo);
+
+                       if (psDevInfo->psSwapChain != NULL)
+                       {
+                               flush_workqueue(psDevInfo->psSwapChain->psWorkQueue);
+                       }
+
+                       acquire_console_sem();
+                       ret = fb_blank(psDevInfo->psLINFBInfo, iFBMode);
+                       release_console_sem();
+
+                       OMAPLFBCreateSwapChainUnLock(psDevInfo);
+
+                       break;
+               }
+               default:
+               {
+                       ret = -EINVAL;
+                       break;
+               }
+       }
 
-#endif 
+       return ret;
+}
+#endif
 
-       return 0;
+#if defined(SUPPORT_DRI_DRM)
+int PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _Init)(struct drm_device unref__ *dev)
+#else
+static int __init OMAPLFB_Init(void)
+#endif
+{
 
-#if defined(LDM_PLATFORM)
-ExitDeinit:
-       if(OMAPLFBDeinit() != OMAP_OK)
+       if(OMAPLFBInit() != OMAPLFB_OK)
        {
-               printk(KERN_WARNING DRIVER_PREFIX ": OMAPLFB_Init: OMAPLFBDeinit failed\n");
+               printk(KERN_WARNING DRIVER_PREFIX ": %s: OMAPLFBInit failed\n", __FUNCTION__);
+               return -ENODEV;
        }
 
-       return -ENODEV;
-#endif 
+       return 0;
+
 }
 
-static IMG_VOID __exit OMAPLFB_Cleanup(IMG_VOID)
-{    
-#if defined (LDM_PLATFORM)
-#if defined (MODULE)
-       platform_device_unregister(&omaplfb_device);
-#endif
-       platform_driver_unregister(&omaplfb_driver);
+#if defined(SUPPORT_DRI_DRM)
+void PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _Cleanup)(struct drm_device unref__ *dev)
+#else
+static void __exit OMAPLFB_Cleanup(void)
 #endif
-
-       if(OMAPLFBDeinit() != OMAP_OK)
+{    
+       if(OMAPLFBDeInit() != OMAPLFB_OK)
        {
-               printk(KERN_WARNING DRIVER_PREFIX ": OMAPLFB_Cleanup: OMAPLFBDeinit failed\n");
+               printk(KERN_WARNING DRIVER_PREFIX ": %s: OMAPLFBDeInit failed\n", __FUNCTION__);
        }
 }
 
-module_init(OMAPLFB_Init);
+#if !defined(SUPPORT_DRI_DRM)
+late_initcall(OMAPLFB_Init);
 module_exit(OMAPLFB_Cleanup);
-
+#endif
diff --git a/services4/3rdparty/linux_drm/Kbuild b/services4/3rdparty/linux_drm/Kbuild
new file mode 100644 (file)
index 0000000..aededed
--- /dev/null
@@ -0,0 +1,51 @@
+SYS_USING_INTERRUPTS = 1
+SUPPORT_OMAP3430_OMAPFB3 =1
+SUPPORT_TI_DSS_FW = 0
+PVR_LINUX_USING_WORKQUEUES = 1
+SUPPORT_DRI_DRM =1
+SYS_CFLAGS.$(SYS_USING_INTERRUPTS)                      += -DSYS_USING_INTERRUPTS
+SYS_CFLAGS.$(SUPPORT_OMAP3430_OMAPFB3)                         += -DSUPPORT_OMAP3430_OMAPFB3
+SYS_CFLAGS.$(SUPPORT_TI_DSS_FW)                         += -DSUPPORT_TI_DSS_FW
+SYS_CFLAGS.$(PVR_LINUX_USING_WORKQUEUES)             += -DPVR_LINUX_USING_WORKQUEUES
+
+EXT_SOURCE_DIR := $(KERNELDIR)/drivers/gpu/drm
+
+EXTRA_CFLAGS = -DLINUX \
+               -DCONFIG_PCI \
+               -Wno-error \
+               -I$(KERNELDIR)/include/drm \
+               -I$(KERNELDIR)/include/linux \
+               -I$(EXT_SOURCE_DIR) \
+               $(SYS_CFLAGS.1) \
+
+ifeq ($(SUPPORT_DRI_DRM),1)
+EXTRA_CFLAGS += -DPVR_DISPLAY_CONTROLLER_DRM_IOCTL
+ifeq ($(TI_PLATFORM),omap4)
+EXTRA_CFLAGS += -DCONFIG_SLOW_WORK
+endif
+endif
+
+obj-m := drm.o
+ifeq ($(TI_PLATFORM),omap4)
+drm-y := pvr_drm_stubs.o drm_auth.o drm_bufs.o drm_cache.o drm_context.o drm_dma.o drm_drawable.o drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
+       drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
+       drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
+       drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \
+       drm_crtc.o drm_modes.o drm_edid.o \
+       drm_info.o drm_debugfs.o drm_encoder_slave.o
+else
+drm-y := pvr_drm_stubs.o drm_auth.o drm_bufs.o drm_cache.o drm_context.o drm_dma.o drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
+       drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
+       drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
+       drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \
+       drm_crtc.o drm_modes.o drm_edid.o \
+       drm_info.o drm_debugfs.o drm_encoder_slave.o drm_global.o drm_platform.o drm_trace_points.o
+endif
+# For kernels older than 2.6.32, use the commented-out file list below instead:
+
+#drm-y := pvr_drm_stubs.o drm_auth.o drm_bufs.o drm_cache.o drm_context.o drm_dma.o drm_drawable.o drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
+#        drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
+#        drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
+#        drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \
+#        drm_crtc.o drm_modes.o drm_edid.o \
+#        drm_info.o drm_debugfs.o drm_encoder_slave.o
diff --git a/services4/3rdparty/linux_drm/Kbuild_org b/services4/3rdparty/linux_drm/Kbuild_org
new file mode 100644 (file)
index 0000000..c66ab7a
--- /dev/null
@@ -0,0 +1,31 @@
+obj-m  := drm.o
+
+EXT_SOURCE_DIR := $(KERNELDIR)/drivers/gpu/drm
+EXT_BUILD_DIR :=tmp_$(TI_PLATFORM)_$(BUILD)_drm
+
+FILES := \
+pvr_drm_stubs.c \
+$(EXT_BUILD_DIR)/drm_auth.c \
+
+
+EXT_SOURCE_LIST := \
+        drm_auth.c drm_bufs.c drm_cache.c \
+        drm_context.c drm_dma.c drm_drawable.c \
+        drm_drv.c drm_fops.c drm_gem.c drm_ioctl.c drm_irq.c \
+        drm_lock.c drm_memory.c drm_proc.c drm_stub.c drm_vm.c \
+        drm_agpsupport.c drm_scatter.c ati_pcigart.c drm_pci.c \
+        drm_sysfs.c drm_hashtab.c drm_sman.c drm_mm.c \
+        drm_crtc.c drm_modes.c drm_edid.c \
+        drm_info.c drm_debugfs.c drm_encoder_slave.c
+
+EXT_SOURCES := $(addprefix $(EXT_BUILD_DIR)/,$(EXT_SOURCE_LIST))
+FILES += $(EXT_SOURCES)
+
+EXTRA_CFLAGS += -I$(KERNELDIR)/include/drm
+EXTRA_CFLAGS += -I$(EXT_SOURCE_DIR)
+
+EXTRA_CFLAGS +=  -DCONFIG_PCI -Wno-error
+EXTRA_CFLAGS += $(ALL_CFLAGS)
+
+
+drm-y  := $(FILES:.c=.o)
diff --git a/services4/3rdparty/linux_drm/ati_pcigart.c b/services4/3rdparty/linux_drm/ati_pcigart.c
new file mode 100644 (file)
index 0000000..1c36492
--- /dev/null
@@ -0,0 +1,201 @@
+/**
+ * \file ati_pcigart.c
+ * ATI PCI GART support
+ *
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Wed Dec 13 21:52:19 2000 by gareth@valinux.com
+ *
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+
+# define ATI_PCIGART_PAGE_SIZE         4096    /**< PCI GART page size */
+
+static int drm_ati_alloc_pcigart_table(struct drm_device *dev,
+                                      struct drm_ati_pcigart_info *gart_info)
+{
+       gart_info->table_handle = drm_pci_alloc(dev, gart_info->table_size,
+                                               PAGE_SIZE);
+       if (gart_info->table_handle == NULL)
+               return -ENOMEM;
+
+       return 0;
+}
+
+static void drm_ati_free_pcigart_table(struct drm_device *dev,
+                                      struct drm_ati_pcigart_info *gart_info)
+{
+       drm_pci_free(dev, gart_info->table_handle);
+       gart_info->table_handle = NULL;
+}
+
+int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
+{
+       struct drm_sg_mem *entry = dev->sg;
+       unsigned long pages;
+       int i;
+       int max_pages;
+
+       /* we need to support large memory configurations */
+       if (!entry) {
+               DRM_ERROR("no scatter/gather memory!\n");
+               return 0;
+       }
+
+       if (gart_info->bus_addr) {
+
+               max_pages = (gart_info->table_size / sizeof(u32));
+               pages = (entry->pages <= max_pages)
+                 ? entry->pages : max_pages;
+
+               for (i = 0; i < pages; i++) {
+                       if (!entry->busaddr[i])
+                               break;
+                       pci_unmap_page(dev->pdev, entry->busaddr[i],
+                                        PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+               }
+
+               if (gart_info->gart_table_location == DRM_ATI_GART_MAIN)
+                       gart_info->bus_addr = 0;
+       }
+
+       if (gart_info->gart_table_location == DRM_ATI_GART_MAIN &&
+           gart_info->table_handle) {
+               drm_ati_free_pcigart_table(dev, gart_info);
+       }
+
+       return 1;
+}
+EXPORT_SYMBOL(drm_ati_pcigart_cleanup);
+
+int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
+{
+       struct drm_local_map *map = &gart_info->mapping;
+       struct drm_sg_mem *entry = dev->sg;
+       void *address = NULL;
+       unsigned long pages;
+       u32 *pci_gart = NULL, page_base, gart_idx;
+       dma_addr_t bus_address = 0;
+       int i, j, ret = 0;
+       int max_ati_pages, max_real_pages;
+
+       if (!entry) {
+               DRM_ERROR("no scatter/gather memory!\n");
+               goto done;
+       }
+
+       if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
+               DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n");
+
+               if (pci_set_dma_mask(dev->pdev, gart_info->table_mask)) {
+                       DRM_ERROR("fail to set dma mask to 0x%Lx\n",
+                                 (unsigned long long)gart_info->table_mask);
+                       ret = 1;
+                       goto done;
+               }
+
+               ret = drm_ati_alloc_pcigart_table(dev, gart_info);
+               if (ret) {
+                       DRM_ERROR("cannot allocate PCI GART page!\n");
+                       goto done;
+               }
+
+               pci_gart = gart_info->table_handle->vaddr;
+               address = gart_info->table_handle->vaddr;
+               bus_address = gart_info->table_handle->busaddr;
+       } else {
+               address = gart_info->addr;
+               bus_address = gart_info->bus_addr;
+               DRM_DEBUG("PCI: Gart Table: VRAM %08LX mapped at %08lX\n",
+                         (unsigned long long)bus_address,
+                         (unsigned long)address);
+       }
+
+
+       max_ati_pages = (gart_info->table_size / sizeof(u32));
+       max_real_pages = max_ati_pages / (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE);
+       pages = (entry->pages <= max_real_pages)
+           ? entry->pages : max_real_pages;
+
+       if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
+               memset(pci_gart, 0, max_ati_pages * sizeof(u32));
+       } else {
+               memset_io((void __iomem *)map->handle, 0, max_ati_pages * sizeof(u32));
+       }
+
+       gart_idx = 0;
+       for (i = 0; i < pages; i++) {
+               /* we need to support large memory configurations */
+               entry->busaddr[i] = pci_map_page(dev->pdev, entry->pagelist[i],
+                                                0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+               if (pci_dma_mapping_error(dev->pdev, entry->busaddr[i])) {
+                       DRM_ERROR("unable to map PCIGART pages!\n");
+                       drm_ati_pcigart_cleanup(dev, gart_info);
+                       address = NULL;
+                       bus_address = 0;
+                       goto done;
+               }
+               page_base = (u32) entry->busaddr[i];
+
+               for (j = 0; j < (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); j++) {
+                       u32 val;
+
+                       switch(gart_info->gart_reg_if) {
+                       case DRM_ATI_GART_IGP:
+                               val = page_base | 0xc;
+                               break;
+                       case DRM_ATI_GART_PCIE:
+                               val = (page_base >> 8) | 0xc;
+                               break;
+                       default:
+                       case DRM_ATI_GART_PCI:
+                               val = page_base;
+                               break;
+                       }
+                       if (gart_info->gart_table_location ==
+                           DRM_ATI_GART_MAIN)
+                               pci_gart[gart_idx] = cpu_to_le32(val);
+                       else
+                               DRM_WRITE32(map, gart_idx * sizeof(u32), val);
+                       gart_idx++;
+                       page_base += ATI_PCIGART_PAGE_SIZE;
+               }
+       }
+       ret = 1;
+
+#if defined(__i386__) || defined(__x86_64__)
+       wbinvd();
+#else
+       mb();
+#endif
+
+      done:
+       gart_info->addr = address;
+       gart_info->bus_addr = bus_address;
+       return ret;
+}
+EXPORT_SYMBOL(drm_ati_pcigart_init);
diff --git a/services4/3rdparty/linux_drm/drm_agpsupport.c b/services4/3rdparty/linux_drm/drm_agpsupport.c
new file mode 100644 (file)
index 0000000..252fdb9
--- /dev/null
@@ -0,0 +1,475 @@
+/**
+ * \file drm_agpsupport.c
+ * DRM support for AGP/GART backend
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#if __OS_HAS_AGP
+
+#include <asm/agp.h>
+
+/**
+ * Get AGP information.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a (output) drm_agp_info structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device has been initialized and acquired and fills in the
+ * drm_agp_info structure with the information in drm_agp_head::agp_info.
+ */
+int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info)
+{
+       DRM_AGP_KERN *kern;
+
+       if (!dev->agp || !dev->agp->acquired)
+               return -EINVAL;
+
+       kern = &dev->agp->agp_info;
+       info->agp_version_major = kern->version.major;
+       info->agp_version_minor = kern->version.minor;
+       info->mode = kern->mode;
+       info->aperture_base = kern->aper_base;
+       info->aperture_size = kern->aper_size * 1024 * 1024;
+       info->memory_allowed = kern->max_memory << PAGE_SHIFT;
+       info->memory_used = kern->current_memory << PAGE_SHIFT;
+       info->id_vendor = kern->device->vendor;
+       info->id_device = kern->device->device;
+
+       return 0;
+}
+
+EXPORT_SYMBOL(drm_agp_info);
+
+int drm_agp_info_ioctl(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv)
+{
+       struct drm_agp_info *info = data;
+       int err;
+
+       err = drm_agp_info(dev, info);
+       if (err)
+               return err;
+
+       return 0;
+}
+
+/**
+ * Acquire the AGP device.
+ *
+ * \param dev DRM device that is to acquire AGP.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device hasn't been acquired before and calls
+ * \c agp_backend_acquire.
+ */
+int drm_agp_acquire(struct drm_device * dev)
+{
+       if (!dev->agp)
+               return -ENODEV;
+       if (dev->agp->acquired)
+               return -EBUSY;
+       if (!(dev->agp->bridge = agp_backend_acquire(dev->pdev)))
+               return -ENODEV;
+       dev->agp->acquired = 1;
+       return 0;
+}
+
+EXPORT_SYMBOL(drm_agp_acquire);
+
+/**
+ * Acquire the AGP device (ioctl).
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device hasn't been acquired before and calls
+ * \c agp_backend_acquire.
+ */
+int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
+                         struct drm_file *file_priv)
+{
+       return drm_agp_acquire((struct drm_device *) file_priv->minor->dev);
+}
+
+/**
+ * Release the AGP device.
+ *
+ * \param dev DRM device that is to release AGP.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device has been acquired and calls \c agp_backend_release.
+ */
+int drm_agp_release(struct drm_device * dev)
+{
+       if (!dev->agp || !dev->agp->acquired)
+               return -EINVAL;
+       agp_backend_release(dev->agp->bridge);
+       dev->agp->acquired = 0;
+       return 0;
+}
+EXPORT_SYMBOL(drm_agp_release);
+
+int drm_agp_release_ioctl(struct drm_device *dev, void *data,
+                         struct drm_file *file_priv)
+{
+       return drm_agp_release(dev);
+}
+
+/**
+ * Enable the AGP bus.
+ *
+ * \param dev DRM device that has previously acquired AGP.
+ * \param mode Requested AGP mode.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device has been acquired but not enabled, and calls
+ * \c agp_enable.
+ */
+int drm_agp_enable(struct drm_device * dev, struct drm_agp_mode mode)
+{
+       if (!dev->agp || !dev->agp->acquired)
+               return -EINVAL;
+
+       dev->agp->mode = mode.mode;
+       agp_enable(dev->agp->bridge, mode.mode);
+       dev->agp->enabled = 1;
+       return 0;
+}
+
+EXPORT_SYMBOL(drm_agp_enable);
+
+int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv)
+{
+       struct drm_agp_mode *mode = data;
+
+       return drm_agp_enable(dev, *mode);
+}
+
+/**
+ * Allocate AGP memory.
+ *
+ * \param inode device inode.
+ * \param file_priv file private pointer.
+ * \param cmd command.
+ * \param arg pointer to a drm_agp_buffer structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device is present and has been acquired, allocates the
+ * memory via agp_allocate_memory() and creates a drm_agp_mem entry for it.
+ */
+int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
+{
+       struct drm_agp_mem *entry;
+       DRM_AGP_MEM *memory;
+       unsigned long pages;
+       u32 type;
+
+       if (!dev->agp || !dev->agp->acquired)
+               return -EINVAL;
+       if (!(entry = kmalloc(sizeof(*entry), GFP_KERNEL)))
+               return -ENOMEM;
+
+       memset(entry, 0, sizeof(*entry));
+
+       pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
+       type = (u32) request->type;
+       if (!(memory = agp_allocate_memory(dev->agp->bridge, pages, type))) {
+               kfree(entry);
+               return -ENOMEM;
+       }
+
+       entry->handle = (unsigned long)memory->key + 1;
+       entry->memory = memory;
+       entry->bound = 0;
+       entry->pages = pages;
+       list_add(&entry->head, &dev->agp->memory);
+
+       request->handle = entry->handle;
+       request->physical = memory->physical;
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_agp_alloc);
+
+
+int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv)
+{
+       struct drm_agp_buffer *request = data;
+
+       return drm_agp_alloc(dev, request);
+}
+
+/**
+ * Search for the AGP memory entry associated with a handle.
+ *
+ * \param dev DRM device structure.
+ * \param handle AGP memory handle.
+ * \return pointer to the drm_agp_mem structure associated with \p handle.
+ *
+ * Walks through drm_agp_head::memory until finding a matching handle.
+ */
+static struct drm_agp_mem *drm_agp_lookup_entry(struct drm_device * dev,
+                                          unsigned long handle)
+{
+       struct drm_agp_mem *entry;
+
+       list_for_each_entry(entry, &dev->agp->memory, head) {
+               if (entry->handle == handle)
+                       return entry;
+       }
+       return NULL;
+}
+
+/**
+ * Unbind AGP memory from the GATT (ioctl).
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_agp_binding structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device is present and acquired, looks-up the AGP memory
+ * entry and passes it to the unbind_agp() function.
+ */
+int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request)
+{
+       struct drm_agp_mem *entry;
+       int ret;
+
+       if (!dev->agp || !dev->agp->acquired)
+               return -EINVAL;
+       if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
+               return -EINVAL;
+       if (!entry->bound)
+               return -EINVAL;
+       ret = drm_unbind_agp(entry->memory);
+       if (ret == 0)
+               entry->bound = 0;
+       return ret;
+}
+EXPORT_SYMBOL(drm_agp_unbind);
+
+
+int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv)
+{
+       struct drm_agp_binding *request = data;
+
+       return drm_agp_unbind(dev, request);
+}
+
+/**
+ * Bind AGP memory into the GATT (ioctl)
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_agp_binding structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device is present and has been acquired and that no memory
+ * is currently bound into the GATT. Looks-up the AGP memory entry and passes
+ * it to bind_agp() function.
+ */
+int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request)
+{
+       struct drm_agp_mem *entry;
+       int retcode;
+       int page;
+
+       if (!dev->agp || !dev->agp->acquired)
+               return -EINVAL;
+       if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
+               return -EINVAL;
+       if (entry->bound)
+               return -EINVAL;
+       page = (request->offset + PAGE_SIZE - 1) / PAGE_SIZE;
+       if ((retcode = drm_bind_agp(entry->memory, page)))
+               return retcode;
+       entry->bound = dev->agp->base + (page << PAGE_SHIFT);
+       DRM_DEBUG("base = 0x%lx entry->bound = 0x%lx\n",
+                 dev->agp->base, entry->bound);
+       return 0;
+}
+EXPORT_SYMBOL(drm_agp_bind);
+
+
+int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv)
+{
+       struct drm_agp_binding *request = data;
+
+       return drm_agp_bind(dev, request);
+}
+
+/**
+ * Free AGP memory (ioctl).
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_agp_buffer structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device is present and has been acquired and looks up the
+ * AGP memory entry. If the memory it's currently bound, unbind it via
+ * unbind_agp(). Frees it via free_agp() as well as the entry itself
+ * and unlinks from the doubly linked list it's inserted in.
+ */
+int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request)
+{
+       struct drm_agp_mem *entry;
+
+       if (!dev->agp || !dev->agp->acquired)
+               return -EINVAL;
+       if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
+               return -EINVAL;
+       if (entry->bound)
+               drm_unbind_agp(entry->memory);
+
+       list_del(&entry->head);
+
+       drm_free_agp(entry->memory, entry->pages);
+       kfree(entry);
+       return 0;
+}
+EXPORT_SYMBOL(drm_agp_free);
+
+
+
+int drm_agp_free_ioctl(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv)
+{
+       struct drm_agp_buffer *request = data;
+
+       return drm_agp_free(dev, request);
+}
+
+/**
+ * Initialize the AGP resources.
+ *
+ * \return pointer to a drm_agp_head structure.
+ *
+ * Gets the drm_agp_t structure which is made available by the agpgart module
+ * via the inter_module_* functions. Creates and initializes a drm_agp_head
+ * structure.
+ */
+struct drm_agp_head *drm_agp_init(struct drm_device *dev)
+{
+       struct drm_agp_head *head = NULL;
+
+       if (!(head = kmalloc(sizeof(*head), GFP_KERNEL)))
+               return NULL;
+       memset((void *)head, 0, sizeof(*head));
+       head->bridge = agp_find_bridge(dev->pdev);
+       if (!head->bridge) {
+               if (!(head->bridge = agp_backend_acquire(dev->pdev))) {
+                       kfree(head);
+                       return NULL;
+               }
+               agp_copy_info(head->bridge, &head->agp_info);
+               agp_backend_release(head->bridge);
+       } else {
+               agp_copy_info(head->bridge, &head->agp_info);
+       }
+       if (head->agp_info.chipset == NOT_SUPPORTED) {
+               kfree(head);
+               return NULL;
+       }
+       INIT_LIST_HEAD(&head->memory);
+       head->cant_use_aperture = head->agp_info.cant_use_aperture;
+       head->page_mask = head->agp_info.page_mask;
+       head->base = head->agp_info.aper_base;
+       return head;
+}
+
+/**
+ * Binds a collection of pages into AGP memory at the given offset, returning
+ * the AGP memory structure containing them.
+ *
+ * No reference is held on the pages during this time -- it is up to the
+ * caller to handle that.
+ */
+DRM_AGP_MEM *
+drm_agp_bind_pages(struct drm_device *dev,
+                  struct page **pages,
+                  unsigned long num_pages,
+                  uint32_t gtt_offset,
+                  u32 type)
+{
+       DRM_AGP_MEM *mem;
+       int ret, i;
+
+       DRM_DEBUG("\n");
+
+       mem = agp_allocate_memory(dev->agp->bridge, num_pages,
+                                     type);
+       if (mem == NULL) {
+               DRM_ERROR("Failed to allocate memory for %ld pages\n",
+                         num_pages);
+               return NULL;
+       }
+
+       for (i = 0; i < num_pages; i++)
+               mem->pages[i] = pages[i];
+       mem->page_count = num_pages;
+
+       mem->is_flushed = true;
+       ret = agp_bind_memory(mem, gtt_offset / PAGE_SIZE);
+       if (ret != 0) {
+               DRM_ERROR("Failed to bind AGP memory: %d\n", ret);
+               agp_free_memory(mem);
+               return NULL;
+       }
+
+       return mem;
+}
+EXPORT_SYMBOL(drm_agp_bind_pages);
+
+void drm_agp_chipset_flush(struct drm_device *dev)
+{
+       agp_flush_chipset(dev->agp->bridge);
+}
+EXPORT_SYMBOL(drm_agp_chipset_flush);
+
+#endif /* __OS_HAS_AGP */
diff --git a/services4/3rdparty/linux_drm/drm_auth.c b/services4/3rdparty/linux_drm/drm_auth.c
new file mode 100644 (file)
index 0000000..3f46772
--- /dev/null
@@ -0,0 +1,190 @@
+/**
+ * \file drm_auth.c
+ * IOCTLs for authentication
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Tue Feb  2 08:37:54 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+
+/**
+ * Find the file with the given magic number.
+ *
+ * \param dev DRM device.
+ * \param magic magic number.
+ *
+ * Searches in drm_device::magiclist within all files with the same hash key
+ * the one with matching magic number, while holding the drm_device::struct_mutex
+ * lock.
+ */
+static struct drm_file *drm_find_file(struct drm_master *master, drm_magic_t magic)
+{
+       struct drm_file *retval = NULL;
+       struct drm_magic_entry *pt;
+       struct drm_hash_item *hash;
+       struct drm_device *dev = master->minor->dev;
+
+       mutex_lock(&dev->struct_mutex);
+       if (!drm_ht_find_item(&master->magiclist, (unsigned long)magic, &hash)) {
+               pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item);
+               retval = pt->priv;
+       }
+       mutex_unlock(&dev->struct_mutex);
+       return retval;
+}
+
+/**
+ * Adds a magic number.
+ *
+ * \param dev DRM device.
+ * \param priv file private data.
+ * \param magic magic number.
+ *
+ * Creates a drm_magic_entry structure and appends to the linked list
+ * associated the magic number hash key in drm_device::magiclist, while holding
+ * the drm_device::struct_mutex lock.
+ */
+static int drm_add_magic(struct drm_master *master, struct drm_file *priv,
+                        drm_magic_t magic)
+{
+       struct drm_magic_entry *entry;
+       struct drm_device *dev = master->minor->dev;
+       DRM_DEBUG("%d\n", magic);
+
+       entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+       if (!entry)
+               return -ENOMEM;
+       entry->priv = priv;
+       entry->hash_item.key = (unsigned long)magic;
+       mutex_lock(&dev->struct_mutex);
+       drm_ht_insert_item(&master->magiclist, &entry->hash_item);
+       list_add_tail(&entry->head, &master->magicfree);
+       mutex_unlock(&dev->struct_mutex);
+
+       return 0;
+}
+
+/**
+ * Remove a magic number.
+ *
+ * \param dev DRM device.
+ * \param magic magic number.
+ *
+ * Searches and unlinks the entry in drm_device::magiclist with the magic
+ * number hash key, while holding the drm_device::struct_mutex lock.
+ */
+static int drm_remove_magic(struct drm_master *master, drm_magic_t magic)
+{
+       struct drm_magic_entry *pt;
+       struct drm_hash_item *hash;
+       struct drm_device *dev = master->minor->dev;
+
+       DRM_DEBUG("%d\n", magic);
+
+       mutex_lock(&dev->struct_mutex);
+       if (drm_ht_find_item(&master->magiclist, (unsigned long)magic, &hash)) {
+               mutex_unlock(&dev->struct_mutex);
+               return -EINVAL;
+       }
+       pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item);
+       drm_ht_remove_item(&master->magiclist, hash);
+       list_del(&pt->head);
+       mutex_unlock(&dev->struct_mutex);
+
+       kfree(pt);
+
+       return 0;
+}
+
+/**
+ * Get a unique magic number (ioctl).
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a resulting drm_auth structure.
+ * \return zero on success, or a negative number on failure.
+ *
+ * If there is a magic number in drm_file::magic then use it, otherwise
+ * searches an unique non-zero magic number and add it associating it with \p
+ * file_priv.
+ */
+int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       static drm_magic_t sequence = 0;
+       static DEFINE_SPINLOCK(lock);
+       struct drm_auth *auth = data;
+
+       /* Find unique magic */
+       if (file_priv->magic) {
+               auth->magic = file_priv->magic;
+       } else {
+               do {
+                       spin_lock(&lock);
+                       if (!sequence)
+                               ++sequence;     /* reserve 0 */
+                       auth->magic = sequence++;
+                       spin_unlock(&lock);
+               } while (drm_find_file(file_priv->master, auth->magic));
+               file_priv->magic = auth->magic;
+               drm_add_magic(file_priv->master, file_priv, auth->magic);
+       }
+
+       DRM_DEBUG("%u\n", auth->magic);
+
+       return 0;
+}
+
+/**
+ * Authenticate with a magic.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_auth structure.
+ * \return zero if authentication successed, or a negative number otherwise.
+ *
+ * Checks if \p file_priv is associated with the magic number passed in \arg.
+ */
+int drm_authmagic(struct drm_device *dev, void *data,
+                 struct drm_file *file_priv)
+{
+       struct drm_auth *auth = data;
+       struct drm_file *file;
+
+       DRM_DEBUG("%u\n", auth->magic);
+       if ((file = drm_find_file(file_priv->master, auth->magic))) {
+               file->authenticated = 1;
+               drm_remove_magic(file_priv->master, auth->magic);
+               return 0;
+       }
+       return -EINVAL;
+}
diff --git a/services4/3rdparty/linux_drm/drm_buffer.c b/services4/3rdparty/linux_drm/drm_buffer.c
new file mode 100644 (file)
index 0000000..529a0db
--- /dev/null
@@ -0,0 +1,184 @@
+/**************************************************************************
+ *
+ * Copyright 2010 Pauli Nieminen.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ **************************************************************************/
+/*
+ * Multipart buffer for coping data which is larger than the page size.
+ *
+ * Authors:
+ * Pauli Nieminen <suokkos-at-gmail-dot-com>
+ */
+
+#include "drm_buffer.h"
+
+/**
+ * Allocate the drm buffer object.
+ *
+ *   buf: Pointer to a pointer where the object is stored.
+ *   size: The number of bytes to allocate.
+ */
+int drm_buffer_alloc(struct drm_buffer **buf, int size)
+{
+       int nr_pages = size / PAGE_SIZE + 1;
+       int idx;
+
+       /* Allocating pointer table to end of structure makes drm_buffer
+        * variable sized */
+       *buf = kzalloc(sizeof(struct drm_buffer) + nr_pages*sizeof(char *),
+                       GFP_KERNEL);
+
+       if (*buf == NULL) {
+               DRM_ERROR("Failed to allocate drm buffer object to hold"
+                               " %d bytes in %d pages.\n",
+                               size, nr_pages);
+               return -ENOMEM;
+       }
+
+       (*buf)->size = size;
+
+       for (idx = 0; idx < nr_pages; ++idx) {
+
+               (*buf)->data[idx] =
+                       kmalloc(min(PAGE_SIZE, size - idx * PAGE_SIZE),
+                               GFP_KERNEL);
+
+
+               if ((*buf)->data[idx] == NULL) {
+                       DRM_ERROR("Failed to allocate %dth page for drm"
+                                       " buffer with %d bytes and %d pages.\n",
+                                       idx + 1, size, nr_pages);
+                       goto error_out;
+               }
+
+       }
+
+       return 0;
+
+error_out:
+
+       /* Only last element can be null pointer so check for it first. */
+       if ((*buf)->data[idx])
+               kfree((*buf)->data[idx]);
+
+       for (--idx; idx >= 0; --idx)
+               kfree((*buf)->data[idx]);
+
+       kfree(*buf);
+       return -ENOMEM;
+}
+EXPORT_SYMBOL(drm_buffer_alloc);
+
+/**
+ * Copy the user data to the begin of the buffer and reset the processing
+ * iterator.
+ *
+ *   user_data: A pointer the data that is copied to the buffer.
+ *   size: The Number of bytes to copy.
+ */
+int drm_buffer_copy_from_user(struct drm_buffer *buf,
+                             void __user *user_data, int size)
+{
+       int nr_pages = size / PAGE_SIZE + 1;
+       int idx;
+
+       if (size > buf->size) {
+               DRM_ERROR("Requesting to copy %d bytes to a drm buffer with"
+                               " %d bytes space\n",
+                               size, buf->size);
+               return -EFAULT;
+       }
+
+       for (idx = 0; idx < nr_pages; ++idx) {
+
+               if (DRM_COPY_FROM_USER(buf->data[idx],
+                       user_data + idx * PAGE_SIZE,
+                       min(PAGE_SIZE, size - idx * PAGE_SIZE))) {
+                       DRM_ERROR("Failed to copy user data (%p) to drm buffer"
+                                       " (%p) %dth page.\n",
+                                       user_data, buf, idx);
+                       return -EFAULT;
+
+               }
+       }
+       buf->iterator = 0;
+       return 0;
+}
+EXPORT_SYMBOL(drm_buffer_copy_from_user);
+
+/**
+ * Free the drm buffer object
+ */
+void drm_buffer_free(struct drm_buffer *buf)
+{
+
+       if (buf != NULL) {
+
+               int nr_pages = buf->size / PAGE_SIZE + 1;
+               int idx;
+               for (idx = 0; idx < nr_pages; ++idx)
+                       kfree(buf->data[idx]);
+
+               kfree(buf);
+       }
+}
+EXPORT_SYMBOL(drm_buffer_free);
+
+/**
+ * Read an object from buffer that may be split to multiple parts. If object
+ * is not split function just returns the pointer to object in buffer. But in
+ * case of split object data is copied to given stack object that is suplied
+ * by caller.
+ *
+ * The processing location of the buffer is also advanced to the next byte
+ * after the object.
+ *
+ *   objsize: The size of the objet in bytes.
+ *   stack_obj: A pointer to a memory location where object can be copied.
+ */
+void *drm_buffer_read_object(struct drm_buffer *buf,
+               int objsize, void *stack_obj)
+{
+       int idx = drm_buffer_index(buf);
+       int page = drm_buffer_page(buf);
+       void *obj = NULL;
+
+       if (idx + objsize <= PAGE_SIZE) {
+               obj = &buf->data[page][idx];
+       } else {
+               /* The object is split which forces copy to temporary object.*/
+               int beginsz = PAGE_SIZE - idx;
+               memcpy(stack_obj, &buf->data[page][idx], beginsz);
+
+               memcpy(stack_obj + beginsz, &buf->data[page + 1][0],
+                               objsize - beginsz);
+
+               obj = stack_obj;
+       }
+
+       drm_buffer_advance(buf, objsize);
+       return obj;
+}
+EXPORT_SYMBOL(drm_buffer_read_object);
diff --git a/services4/3rdparty/linux_drm/drm_bufs.c b/services4/3rdparty/linux_drm/drm_bufs.c
new file mode 100644 (file)
index 0000000..3e257a5
--- /dev/null
@@ -0,0 +1,1628 @@
+/**
+ * \file drm_bufs.c
+ * Generic buffer template
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/log2.h>
+#include <asm/shmparam.h>
+#include "drmP.h"
+
+static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
+                                                 struct drm_local_map *map)
+{
+       struct drm_map_list *entry;
+       list_for_each_entry(entry, &dev->maplist, head) {
+               /*
+                * Because the kernel-userspace ABI is fixed at a 32-bit offset
+                * while PCI resources may live above that, we ignore the map
+                * offset for maps of type _DRM_FRAMEBUFFER or _DRM_REGISTERS.
+                * It is assumed that each driver will have only one resource of
+                * each type.
+                */
+               if (!entry->map ||
+                   map->type != entry->map->type ||
+                   entry->master != dev->primary->master)
+                       continue;
+               switch (map->type) {
+               case _DRM_SHM:
+                       if (map->flags != _DRM_CONTAINS_LOCK)
+                               break;
+               case _DRM_REGISTERS:
+               case _DRM_FRAME_BUFFER:
+                       return entry;
+               default: /* Make gcc happy */
+                       ;
+               }
+               if (entry->map->offset == map->offset)
+                       return entry;
+       }
+
+       return NULL;
+}
+
+static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
+                         unsigned long user_token, int hashed_handle, int shm)
+{
+       int use_hashed_handle, shift;
+       unsigned long add;
+
+#if (BITS_PER_LONG == 64)
+       use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
+#elif (BITS_PER_LONG == 32)
+       use_hashed_handle = hashed_handle;
+#else
+#error Unsupported long size. Neither 64 nor 32 bits.
+#endif
+
+       if (!use_hashed_handle) {
+               int ret;
+               hash->key = user_token >> PAGE_SHIFT;
+               ret = drm_ht_insert_item(&dev->map_hash, hash);
+               if (ret != -EINVAL)
+                       return ret;
+       }
+
+       shift = 0;
+       add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
+       if (shm && (SHMLBA > PAGE_SIZE)) {
+               int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;
+
+               /* For shared memory, we have to preserve the SHMLBA
+                * bits of the eventual vma->vm_pgoff value during
+                * mmap().  Otherwise we run into cache aliasing problems
+                * on some platforms.  On these platforms, the pgoff of
+                * a mmap() request is used to pick a suitable virtual
+                * address for the mmap() region such that it will not
+                * cause cache aliasing problems.
+                *
+                * Therefore, make sure the SHMLBA relevant bits of the
+                * hash value we use are equal to those in the original
+                * kernel virtual address.
+                */
+               shift = bits;
+               add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
+       }
+
+       return drm_ht_just_insert_please(&dev->map_hash, hash,
+                                        user_token, 32 - PAGE_SHIFT - 3,
+                                        shift, add);
+}
+
+/**
+ * Core function to create a range of memory available for mapping by a
+ * non-root process.
+ *
+ * Adjusts the memory offset to its absolute value according to the mapping
+ * type.  Adds the map to the map list drm_device::maplist. Adds MTRR's where
+ * applicable and if supported by the kernel.
+ */
+static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
+                          unsigned int size, enum drm_map_type type,
+                          enum drm_map_flags flags,
+                          struct drm_map_list ** maplist)
+{
+       struct drm_local_map *map;
+       struct drm_map_list *list;
+       drm_dma_handle_t *dmah;
+       unsigned long user_token;
+       int ret;
+
+       map = kmalloc(sizeof(*map), GFP_KERNEL);
+       if (!map)
+               return -ENOMEM;
+
+       map->offset = offset;
+       map->size = size;
+       map->flags = flags;
+       map->type = type;
+
+       /* Only allow shared memory to be removable since we only keep enough
+        * book keeping information about shared memory to allow for removal
+        * when processes fork.
+        */
+       if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
+               kfree(map);
+               return -EINVAL;
+       }
+       DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
+                 (unsigned long long)map->offset, map->size, map->type);
+
+       /* page-align _DRM_SHM maps. They are allocated here so there is no security
+        * hole created by that and it works around various broken drivers that use
+        * a non-aligned quantity to map the SAREA. --BenH
+        */
+       if (map->type == _DRM_SHM)
+               map->size = PAGE_ALIGN(map->size);
+
+       if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
+               kfree(map);
+               return -EINVAL;
+       }
+       map->mtrr = -1;
+       map->handle = NULL;
+
+       switch (map->type) {
+       case _DRM_REGISTERS:
+       case _DRM_FRAME_BUFFER:
+#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__)
+               if (map->offset + (map->size-1) < map->offset ||
+                   map->offset < virt_to_phys(high_memory)) {
+                       kfree(map);
+                       return -EINVAL;
+               }
+#endif
+#ifdef __alpha__
+               map->offset += dev->hose->mem_space->start;
+#endif
+               /* Some drivers preinitialize some maps, without the X Server
+                * needing to be aware of it.  Therefore, we just return success
+                * when the server tries to create a duplicate map.
+                */
+               list = drm_find_matching_map(dev, map);
+               if (list != NULL) {
+                       if (list->map->size != map->size) {
+                               DRM_DEBUG("Matching maps of type %d with "
+                                         "mismatched sizes, (%ld vs %ld)\n",
+                                         map->type, map->size,
+                                         list->map->size);
+                               list->map->size = map->size;
+                       }
+
+                       kfree(map);
+                       *maplist = list;
+                       return 0;
+               }
+
+               if (drm_core_has_MTRR(dev)) {
+                       if (map->type == _DRM_FRAME_BUFFER ||
+                           (map->flags & _DRM_WRITE_COMBINING)) {
+                               map->mtrr = mtrr_add(map->offset, map->size,
+                                                    MTRR_TYPE_WRCOMB, 1);
+                       }
+               }
+               if (map->type == _DRM_REGISTERS) {
+                       map->handle = ioremap(map->offset, map->size);
+                       if (!map->handle) {
+                               kfree(map);
+                               return -ENOMEM;
+                       }
+               }
+
+               break;
+       case _DRM_SHM:
+               list = drm_find_matching_map(dev, map);
+               if (list != NULL) {
+                       if(list->map->size != map->size) {
+                               DRM_DEBUG("Matching maps of type %d with "
+                                         "mismatched sizes, (%ld vs %ld)\n",
+                                         map->type, map->size, list->map->size);
+                               list->map->size = map->size;
+                       }
+
+                       kfree(map);
+                       *maplist = list;
+                       return 0;
+               }
+               map->handle = vmalloc_user(map->size);
+               DRM_DEBUG("%lu %d %p\n",
+                         map->size, drm_order(map->size), map->handle);
+               if (!map->handle) {
+                       kfree(map);
+                       return -ENOMEM;
+               }
+               map->offset = (unsigned long)map->handle;
+               if (map->flags & _DRM_CONTAINS_LOCK) {
+                       /* Prevent a 2nd X Server from creating a 2nd lock */
+                       if (dev->primary->master->lock.hw_lock != NULL) {
+                               vfree(map->handle);
+                               kfree(map);
+                               return -EBUSY;
+                       }
+                       dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle;   /* Pointer to lock */
+               }
+               break;
+       case _DRM_AGP: {
+               struct drm_agp_mem *entry;
+               int valid = 0;
+
+               if (!drm_core_has_AGP(dev)) {
+                       kfree(map);
+                       return -EINVAL;
+               }
+#ifdef __alpha__
+               map->offset += dev->hose->mem_space->start;
+#endif
+               /* In some cases (i810 driver), user space may have already
+                * added the AGP base itself, because dev->agp->base previously
+                * only got set during AGP enable.  So, only add the base
+                * address if the map's offset isn't already within the
+                * aperture.
+                */
+               if (map->offset < dev->agp->base ||
+                   map->offset > dev->agp->base +
+                   dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
+                       map->offset += dev->agp->base;
+               }
+               map->mtrr = dev->agp->agp_mtrr; /* for getmap */
+
+               /* This assumes the DRM is in total control of AGP space.
+                * It's not always the case as AGP can be in the control
+                * of user space (i.e. i810 driver). So this loop will get
+                * skipped and we double check that dev->agp->memory is
+                * actually set as well as being invalid before EPERM'ing
+                */
+               list_for_each_entry(entry, &dev->agp->memory, head) {
+                       if ((map->offset >= entry->bound) &&
+                           (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
+                               valid = 1;
+                               break;
+                       }
+               }
+               if (!list_empty(&dev->agp->memory) && !valid) {
+                       kfree(map);
+                       return -EPERM;
+               }
+               DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
+                         (unsigned long long)map->offset, map->size);
+
+               break;
+       }
+       case _DRM_GEM:
+               DRM_ERROR("tried to addmap GEM object\n");
+               break;
+       case _DRM_SCATTER_GATHER:
+               if (!dev->sg) {
+                       kfree(map);
+                       return -EINVAL;
+               }
+               map->offset += (unsigned long)dev->sg->virtual;
+               break;
+       case _DRM_CONSISTENT:
+               /* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G,
+                * As we're limiting the address to 2^32-1 (or less),
+                * casting it down to 32 bits is no problem, but we
+                * need to point to a 64bit variable first. */
+               dmah = drm_pci_alloc(dev, map->size, map->size);
+               if (!dmah) {
+                       kfree(map);
+                       return -ENOMEM;
+               }
+               map->handle = dmah->vaddr;
+               map->offset = (unsigned long)dmah->busaddr;
+               kfree(dmah);
+               break;
+       default:
+               kfree(map);
+               return -EINVAL;
+       }
+
+       list = kzalloc(sizeof(*list), GFP_KERNEL);
+       if (!list) {
+               if (map->type == _DRM_REGISTERS)
+                       iounmap(map->handle);
+               kfree(map);
+               return -EINVAL;
+       }
+       list->map = map;
+
+       mutex_lock(&dev->struct_mutex);
+       list_add(&list->head, &dev->maplist);
+
+       /* Assign a 32-bit handle */
+       /* We do it here so that dev->struct_mutex protects the increment */
+       user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
+               map->offset;
+       ret = drm_map_handle(dev, &list->hash, user_token, 0,
+                            (map->type == _DRM_SHM));
+       if (ret) {
+               if (map->type == _DRM_REGISTERS)
+                       iounmap(map->handle);
+               kfree(map);
+               kfree(list);
+               mutex_unlock(&dev->struct_mutex);
+               return ret;
+       }
+
+       list->user_token = list->hash.key << PAGE_SHIFT;
+       mutex_unlock(&dev->struct_mutex);
+
+       if (!(map->flags & _DRM_DRIVER))
+               list->master = dev->primary->master;
+       *maplist = list;
+       return 0;
+       }
+
+/*
+ * Kernel-internal wrapper around drm_addmap_core(): creates the mapping
+ * and returns the drm_local_map itself instead of the maplist entry.
+ * Returns 0 on success or the negative errno from drm_addmap_core().
+ */
+int drm_addmap(struct drm_device * dev, resource_size_t offset,
+              unsigned int size, enum drm_map_type type,
+              enum drm_map_flags flags, struct drm_local_map ** map_ptr)
+{
+       struct drm_map_list *list;
+       int rc;
+
+       rc = drm_addmap_core(dev, offset, size, type, flags, &list);
+       if (!rc)
+               *map_ptr = list->map;
+       return rc;
+}
+
+EXPORT_SYMBOL(drm_addmap);
+
+/**
+ * Ioctl to specify a range of memory that is available for mapping by a
+ * non-root process.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_map structure.
+ * \return zero on success or a negative value on error.
+ *
+ */
+int drm_addmap_ioctl(struct drm_device *dev, void *data,
+                    struct drm_file *file_priv)
+{
+       struct drm_map *map = data;
+       struct drm_map_list *maplist;
+       int err;
+
+       /* Unprivileged callers may only create AGP and SHM maps; any other
+        * map type (registers, framebuffer, ...) requires CAP_SYS_ADMIN. */
+       if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
+               return -EPERM;
+
+       err = drm_addmap_core(dev, map->offset, map->size, map->type,
+                             map->flags, &maplist);
+
+       if (err)
+               return err;
+
+       /* avoid a warning on 64-bit, this casting isn't very nice, but the API is set so too late */
+       map->handle = (void *)(unsigned long)maplist->user_token;
+       return 0;
+}
+
+/**
+ * Remove a map private from list and deallocate resources if the mapping
+ * isn't in use.
+ *
+ * Searches the map on drm_device::maplist, removes it from the list, see if
+ * its being used, and free any associate resource (such as MTRR's) if it's not
+ * being on use.
+ *
+ * \sa drm_addmap
+ */
+/*
+ * Core of drm_rmmap(); the caller must already hold dev->struct_mutex
+ * (see drm_rmmap() below, which takes it around this call).
+ * Returns -EINVAL if the map is not on dev->maplist.
+ */
+int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
+{
+       struct drm_map_list *r_list = NULL, *list_t;
+       drm_dma_handle_t dmah;
+       int found = 0;
+       struct drm_master *master;
+
+       /* Find the list entry for the map and remove it */
+       list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
+               if (r_list->map == map) {
+                       master = r_list->master;
+                       list_del(&r_list->head);
+                       drm_ht_remove_key(&dev->map_hash,
+                                         r_list->user_token >> PAGE_SHIFT);
+                       kfree(r_list);
+                       found = 1;
+                       break;
+               }
+       }
+
+       if (!found)
+               return -EINVAL;
+
+       /* Type-specific teardown.  'master' is only read in the _DRM_SHM
+        * case, which is safe: found == 1 guarantees it was assigned in
+        * the loop above. */
+       switch (map->type) {
+       case _DRM_REGISTERS:
+               iounmap(map->handle);
+               /* FALLTHROUGH */
+       case _DRM_FRAME_BUFFER:
+               if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
+                       int retcode;
+                       retcode = mtrr_del(map->mtrr, map->offset, map->size);
+                       DRM_DEBUG("mtrr_del=%d\n", retcode);
+               }
+               break;
+       case _DRM_SHM:
+               vfree(map->handle);
+               if (master) {
+                       if (dev->sigdata.lock == master->lock.hw_lock)
+                               dev->sigdata.lock = NULL;
+                       master->lock.hw_lock = NULL;   /* SHM removed */
+                       master->lock.file_priv = NULL;
+                       wake_up_interruptible_all(&master->lock.lock_queue);
+               }
+               break;
+       case _DRM_AGP:
+       case _DRM_SCATTER_GATHER:
+               break;
+       case _DRM_CONSISTENT:
+               /* Rebuild a dma handle on the stack so __drm_pci_free()
+                * can release the coherent allocation made at addmap time. */
+               dmah.vaddr = map->handle;
+               dmah.busaddr = map->offset;
+               dmah.size = map->size;
+               __drm_pci_free(dev, &dmah);
+               break;
+       case _DRM_GEM:
+               DRM_ERROR("tried to rmmap GEM object\n");
+               break;
+       }
+       kfree(map);
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_rmmap_locked);
+
+/*
+ * Locking wrapper: takes dev->struct_mutex around drm_rmmap_locked().
+ */
+int drm_rmmap(struct drm_device *dev, struct drm_local_map *map)
+{
+       int ret;
+
+       mutex_lock(&dev->struct_mutex);
+       ret = drm_rmmap_locked(dev, map);
+       mutex_unlock(&dev->struct_mutex);
+
+       return ret;
+}
+EXPORT_SYMBOL(drm_rmmap);
+
+/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
+ * the last close of the device, and this is necessary for cleanup when things
+ * exit uncleanly.  Therefore, having userland manually remove mappings seems
+ * like a pointless exercise since they're going away anyway.
+ *
+ * One use case might be after addmap is allowed for normal users for SHM and
+ * gets used by drivers that the server doesn't need to care about.  This seems
+ * unlikely.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a struct drm_map structure.
+ * \return zero on success or a negative value on error.
+ */
+int drm_rmmap_ioctl(struct drm_device *dev, void *data,
+                   struct drm_file *file_priv)
+{
+       struct drm_map *request = data;
+       struct drm_local_map *map = NULL;
+       struct drm_map_list *r_list;
+       int ret;
+
+       /* Look the map up by the user token handed out at addmap time;
+        * only maps flagged _DRM_REMOVABLE may be removed from userspace. */
+       mutex_lock(&dev->struct_mutex);
+       list_for_each_entry(r_list, &dev->maplist, head) {
+               if (r_list->map &&
+                   r_list->user_token == (unsigned long)request->handle &&
+                   r_list->map->flags & _DRM_REMOVABLE) {
+                       map = r_list->map;
+                       break;
+               }
+       }
+
+       /* List has wrapped around to the head pointer, or its empty we didn't
+        * find anything.
+        */
+       if (list_empty(&dev->maplist) || !map) {
+               mutex_unlock(&dev->struct_mutex);
+               return -EINVAL;
+       }
+
+       /* Register and framebuffer maps are permanent */
+       if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
+               mutex_unlock(&dev->struct_mutex);
+               return 0;
+       }
+
+       ret = drm_rmmap_locked(dev, map);
+
+       mutex_unlock(&dev->struct_mutex);
+
+       return ret;
+}
+
+/**
+ * Cleanup after an error on one of the addbufs() functions.
+ *
+ * \param dev DRM device.
+ * \param entry buffer entry where the error occurred.
+ *
+ * Frees any pages and buffers associated with the given entry.
+ */
+static void drm_cleanup_buf_error(struct drm_device * dev,
+                                 struct drm_buf_entry * entry)
+{
+       int i;
+
+       /* Release any PCI-coherent segments allocated for this entry,
+        * then reset seg_count so a retry starts from a clean state. */
+       if (entry->seg_count) {
+               for (i = 0; i < entry->seg_count; i++) {
+                       if (entry->seglist[i]) {
+                               drm_pci_free(dev, entry->seglist[i]);
+                       }
+               }
+               kfree(entry->seglist);
+
+               entry->seg_count = 0;
+       }
+
+       /* Free per-buffer driver-private data and the buffer list itself. */
+       if (entry->buf_count) {
+               for (i = 0; i < entry->buf_count; i++) {
+                       kfree(entry->buflist[i].dev_private);
+               }
+               kfree(entry->buflist);
+
+               entry->buf_count = 0;
+       }
+}
+
+#if __OS_HAS_AGP
+/**
+ * Add AGP buffers for DMA transfers.
+ *
+ * \param dev struct drm_device to which the buffers are to be added.
+ * \param request pointer to a struct drm_buf_desc describing the request.
+ * \return zero on success or a negative number on failure.
+ *
+ * After some sanity checks creates a drm_buf structure for each buffer and
+ * reallocates the buffer list of the same size order to accommodate the new
+ * buffers.
+ */
+int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
+{
+       struct drm_device_dma *dma = dev->dma;
+       struct drm_buf_entry *entry;
+       struct drm_agp_mem *agp_entry;
+       struct drm_buf *buf;
+       unsigned long offset;
+       unsigned long agp_offset;
+       int count;
+       int order;
+       int size;
+       int alignment;
+       int page_order;
+       int total;
+       int byte_count;
+       int i, valid;
+       struct drm_buf **temp_buflist;
+
+       if (!dma)
+               return -EINVAL;
+
+       /* Round the requested size up to a power of two; all buffers of a
+        * given order share one drm_buf_entry slot in dma->bufs[]. */
+       count = request->count;
+       order = drm_order(request->size);
+       size = 1 << order;
+
+       alignment = (request->flags & _DRM_PAGE_ALIGN)
+           ? PAGE_ALIGN(size) : size;
+       page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+       total = PAGE_SIZE << page_order;
+
+       byte_count = 0;
+       agp_offset = dev->agp->base + request->agp_start;
+
+       DRM_DEBUG("count:      %d\n", count);
+       DRM_DEBUG("order:      %d\n", order);
+       DRM_DEBUG("size:       %d\n", size);
+       DRM_DEBUG("agp_offset: %lx\n", agp_offset);
+       DRM_DEBUG("alignment:  %d\n", alignment);
+       DRM_DEBUG("page_order: %d\n", page_order);
+       DRM_DEBUG("total:      %d\n", total);
+
+       if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
+               return -EINVAL;
+       if (dev->queue_count)
+               return -EBUSY;  /* Not while in use */
+
+       /* Make sure buffers are located in AGP memory that we own */
+       valid = 0;
+       list_for_each_entry(agp_entry, &dev->agp->memory, head) {
+               if ((agp_offset >= agp_entry->bound) &&
+                   (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
+                       valid = 1;
+                       break;
+               }
+       }
+       if (!list_empty(&dev->agp->memory) && !valid) {
+               DRM_DEBUG("zone invalid\n");
+               return -EINVAL;
+       }
+       /* buf_alloc blocks concurrent buffer use while we allocate;
+        * count_lock protects the buf_use check against racing mmap. */
+       spin_lock(&dev->count_lock);
+       if (dev->buf_use) {
+               spin_unlock(&dev->count_lock);
+               return -EBUSY;
+       }
+       atomic_inc(&dev->buf_alloc);
+       spin_unlock(&dev->count_lock);
+
+       mutex_lock(&dev->struct_mutex);
+       entry = &dma->bufs[order];
+       if (entry->buf_count) {
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -ENOMEM; /* May only call once for each order */
+       }
+
+       if (count < 0 || count > 4096) {
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -EINVAL;
+       }
+
+       entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
+       if (!entry->buflist) {
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -ENOMEM;
+       }
+
+       entry->buf_size = size;
+       entry->page_order = page_order;
+
+       offset = 0;
+
+       /* Lay the buffers out back-to-back in the AGP aperture, 'alignment'
+        * bytes apart, initializing each drm_buf as we go. */
+       while (entry->buf_count < count) {
+               buf = &entry->buflist[entry->buf_count];
+               buf->idx = dma->buf_count + entry->buf_count;
+               buf->total = alignment;
+               buf->order = order;
+               buf->used = 0;
+
+               buf->offset = (dma->byte_count + offset);
+               buf->bus_address = agp_offset + offset;
+               buf->address = (void *)(agp_offset + offset);
+               buf->next = NULL;
+               buf->waiting = 0;
+               buf->pending = 0;
+               init_waitqueue_head(&buf->dma_wait);
+               buf->file_priv = NULL;
+
+               buf->dev_priv_size = dev->driver->dev_priv_size;
+               buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
+               if (!buf->dev_private) {
+                       /* Set count correctly so we free the proper amount. */
+                       entry->buf_count = count;
+                       drm_cleanup_buf_error(dev, entry);
+                       mutex_unlock(&dev->struct_mutex);
+                       atomic_dec(&dev->buf_alloc);
+                       return -ENOMEM;
+               }
+
+               DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
+
+               offset += alignment;
+               entry->buf_count++;
+               byte_count += PAGE_SIZE << page_order;
+       }
+
+       DRM_DEBUG("byte_count: %d\n", byte_count);
+
+       /* Grow the device-wide buffer list to include the new buffers. */
+       temp_buflist = krealloc(dma->buflist,
+                               (dma->buf_count + entry->buf_count) *
+                               sizeof(*dma->buflist), GFP_KERNEL);
+       if (!temp_buflist) {
+               /* Free the entry because it isn't valid */
+               drm_cleanup_buf_error(dev, entry);
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -ENOMEM;
+       }
+       dma->buflist = temp_buflist;
+
+       for (i = 0; i < entry->buf_count; i++) {
+               dma->buflist[i + dma->buf_count] = &entry->buflist[i];
+       }
+
+       dma->buf_count += entry->buf_count;
+       dma->seg_count += entry->seg_count;
+       dma->page_count += byte_count >> PAGE_SHIFT;
+       dma->byte_count += byte_count;
+
+       DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
+       DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
+
+       mutex_unlock(&dev->struct_mutex);
+
+       request->count = entry->buf_count;
+       request->size = size;
+
+       dma->flags = _DRM_DMA_USE_AGP;
+
+       atomic_dec(&dev->buf_alloc);
+       return 0;
+}
+EXPORT_SYMBOL(drm_addbufs_agp);
+#endif                         /* __OS_HAS_AGP */
+
+/*
+ * Add PCI-coherent DMA buffers (for drivers with DRIVER_PCI_DMA).
+ * Allocates 'count' buffers of the requested order via drm_pci_alloc(),
+ * records every backing page in dma->pagelist, and appends the new
+ * buffers to dma->buflist.  Requires CAP_SYS_ADMIN.
+ */
+int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
+{
+       struct drm_device_dma *dma = dev->dma;
+       int count;
+       int order;
+       int size;
+       int total;
+       int page_order;
+       struct drm_buf_entry *entry;
+       drm_dma_handle_t *dmah;
+       struct drm_buf *buf;
+       int alignment;
+       unsigned long offset;
+       int i;
+       int byte_count;
+       int page_count;
+       unsigned long *temp_pagelist;
+       struct drm_buf **temp_buflist;
+
+       if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
+               return -EINVAL;
+
+       if (!dma)
+               return -EINVAL;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       count = request->count;
+       order = drm_order(request->size);
+       size = 1 << order;
+
+       DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n",
+                 request->count, request->size, size, order, dev->queue_count);
+
+       if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
+               return -EINVAL;
+       if (dev->queue_count)
+               return -EBUSY;  /* Not while in use */
+
+       alignment = (request->flags & _DRM_PAGE_ALIGN)
+           ? PAGE_ALIGN(size) : size;
+       page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+       total = PAGE_SIZE << page_order;
+
+       /* buf_alloc blocks concurrent buffer use while we allocate;
+        * count_lock protects the buf_use check against racing mmap. */
+       spin_lock(&dev->count_lock);
+       if (dev->buf_use) {
+               spin_unlock(&dev->count_lock);
+               return -EBUSY;
+       }
+       atomic_inc(&dev->buf_alloc);
+       spin_unlock(&dev->count_lock);
+
+       mutex_lock(&dev->struct_mutex);
+       entry = &dma->bufs[order];
+       if (entry->buf_count) {
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -ENOMEM; /* May only call once for each order */
+       }
+
+       if (count < 0 || count > 4096) {
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -EINVAL;
+       }
+
+       entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
+       if (!entry->buflist) {
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -ENOMEM;
+       }
+
+       entry->seglist = kzalloc(count * sizeof(*entry->seglist), GFP_KERNEL);
+       if (!entry->seglist) {
+               kfree(entry->buflist);
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -ENOMEM;
+       }
+
+       /* Keep the original pagelist until we know all the allocations
+        * have succeeded
+        */
+       temp_pagelist = kmalloc((dma->page_count + (count << page_order)) *
+                              sizeof(*dma->pagelist), GFP_KERNEL);
+       if (!temp_pagelist) {
+               kfree(entry->buflist);
+               kfree(entry->seglist);
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -ENOMEM;
+       }
+       memcpy(temp_pagelist,
+              dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
+       DRM_DEBUG("pagelist: %d entries\n",
+                 dma->page_count + (count << page_order));
+
+       entry->buf_size = size;
+       entry->page_order = page_order;
+       byte_count = 0;
+       page_count = 0;
+
+       /* Allocate one coherent segment per iteration; each segment can
+        * hold one or more buffers, 'alignment' bytes apart. */
+       while (entry->buf_count < count) {
+
+               dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000);
+
+               if (!dmah) {
+                       /* Set count correctly so we free the proper amount. */
+                       entry->buf_count = count;
+                       entry->seg_count = count;
+                       drm_cleanup_buf_error(dev, entry);
+                       kfree(temp_pagelist);
+                       mutex_unlock(&dev->struct_mutex);
+                       atomic_dec(&dev->buf_alloc);
+                       return -ENOMEM;
+               }
+               entry->seglist[entry->seg_count++] = dmah;
+               for (i = 0; i < (1 << page_order); i++) {
+                       DRM_DEBUG("page %d @ 0x%08lx\n",
+                                 dma->page_count + page_count,
+                                 (unsigned long)dmah->vaddr + PAGE_SIZE * i);
+                       temp_pagelist[dma->page_count + page_count++]
+                               = (unsigned long)dmah->vaddr + PAGE_SIZE * i;
+               }
+               for (offset = 0;
+                    offset + size <= total && entry->buf_count < count;
+                    offset += alignment, ++entry->buf_count) {
+                       buf = &entry->buflist[entry->buf_count];
+                       buf->idx = dma->buf_count + entry->buf_count;
+                       buf->total = alignment;
+                       buf->order = order;
+                       buf->used = 0;
+                       buf->offset = (dma->byte_count + byte_count + offset);
+                       buf->address = (void *)(dmah->vaddr + offset);
+                       buf->bus_address = dmah->busaddr + offset;
+                       buf->next = NULL;
+                       buf->waiting = 0;
+                       buf->pending = 0;
+                       init_waitqueue_head(&buf->dma_wait);
+                       buf->file_priv = NULL;
+
+                       buf->dev_priv_size = dev->driver->dev_priv_size;
+                       buf->dev_private = kzalloc(buf->dev_priv_size,
+                                               GFP_KERNEL);
+                       if (!buf->dev_private) {
+                               /* Set count correctly so we free the proper amount. */
+                               entry->buf_count = count;
+                               entry->seg_count = count;
+                               drm_cleanup_buf_error(dev, entry);
+                               kfree(temp_pagelist);
+                               mutex_unlock(&dev->struct_mutex);
+                               atomic_dec(&dev->buf_alloc);
+                               return -ENOMEM;
+                       }
+
+                       DRM_DEBUG("buffer %d @ %p\n",
+                                 entry->buf_count, buf->address);
+               }
+               byte_count += PAGE_SIZE << page_order;
+       }
+
+       temp_buflist = krealloc(dma->buflist,
+                               (dma->buf_count + entry->buf_count) *
+                               sizeof(*dma->buflist), GFP_KERNEL);
+       if (!temp_buflist) {
+               /* Free the entry because it isn't valid */
+               drm_cleanup_buf_error(dev, entry);
+               kfree(temp_pagelist);
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -ENOMEM;
+       }
+       dma->buflist = temp_buflist;
+
+       for (i = 0; i < entry->buf_count; i++) {
+               dma->buflist[i + dma->buf_count] = &entry->buflist[i];
+       }
+
+       /* No allocations failed, so now we can replace the original pagelist
+        * with the new one.
+        */
+       if (dma->page_count) {
+               kfree(dma->pagelist);
+       }
+       dma->pagelist = temp_pagelist;
+
+       dma->buf_count += entry->buf_count;
+       dma->seg_count += entry->seg_count;
+       dma->page_count += entry->seg_count << page_order;
+       dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
+
+       mutex_unlock(&dev->struct_mutex);
+
+       request->count = entry->buf_count;
+       request->size = size;
+
+       if (request->flags & _DRM_PCI_BUFFER_RO)
+               dma->flags = _DRM_DMA_USE_PCI_RO;
+
+       atomic_dec(&dev->buf_alloc);
+       return 0;
+
+}
+EXPORT_SYMBOL(drm_addbufs_pci);
+
+/*
+ * Add buffers backed by the device's scatter/gather area (DRIVER_SG).
+ * Buffer addresses are offsets into dev->sg->virtual rather than fresh
+ * allocations.  Requires CAP_SYS_ADMIN.
+ */
+static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request)
+{
+       struct drm_device_dma *dma = dev->dma;
+       struct drm_buf_entry *entry;
+       struct drm_buf *buf;
+       unsigned long offset;
+       unsigned long agp_offset;
+       int count;
+       int order;
+       int size;
+       int alignment;
+       int page_order;
+       int total;
+       int byte_count;
+       int i;
+       struct drm_buf **temp_buflist;
+
+       if (!drm_core_check_feature(dev, DRIVER_SG))
+               return -EINVAL;
+
+       if (!dma)
+               return -EINVAL;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       count = request->count;
+       order = drm_order(request->size);
+       size = 1 << order;
+
+       alignment = (request->flags & _DRM_PAGE_ALIGN)
+           ? PAGE_ALIGN(size) : size;
+       page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+       total = PAGE_SIZE << page_order;
+
+       byte_count = 0;
+       agp_offset = request->agp_start;
+
+       DRM_DEBUG("count:      %d\n", count);
+       DRM_DEBUG("order:      %d\n", order);
+       DRM_DEBUG("size:       %d\n", size);
+       DRM_DEBUG("agp_offset: %lu\n", agp_offset);
+       DRM_DEBUG("alignment:  %d\n", alignment);
+       DRM_DEBUG("page_order: %d\n", page_order);
+       DRM_DEBUG("total:      %d\n", total);
+
+       if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
+               return -EINVAL;
+       if (dev->queue_count)
+               return -EBUSY;  /* Not while in use */
+
+       /* buf_alloc blocks concurrent buffer use while we allocate;
+        * count_lock protects the buf_use check against racing mmap. */
+       spin_lock(&dev->count_lock);
+       if (dev->buf_use) {
+               spin_unlock(&dev->count_lock);
+               return -EBUSY;
+       }
+       atomic_inc(&dev->buf_alloc);
+       spin_unlock(&dev->count_lock);
+
+       mutex_lock(&dev->struct_mutex);
+       entry = &dma->bufs[order];
+       if (entry->buf_count) {
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -ENOMEM; /* May only call once for each order */
+       }
+
+       if (count < 0 || count > 4096) {
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -EINVAL;
+       }
+
+       entry->buflist = kzalloc(count * sizeof(*entry->buflist),
+                               GFP_KERNEL);
+       if (!entry->buflist) {
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -ENOMEM;
+       }
+
+       entry->buf_size = size;
+       entry->page_order = page_order;
+
+       offset = 0;
+
+       while (entry->buf_count < count) {
+               buf = &entry->buflist[entry->buf_count];
+               buf->idx = dma->buf_count + entry->buf_count;
+               buf->total = alignment;
+               buf->order = order;
+               buf->used = 0;
+
+               buf->offset = (dma->byte_count + offset);
+               buf->bus_address = agp_offset + offset;
+               /* CPU address: offset into the kernel mapping of the SG area. */
+               buf->address = (void *)(agp_offset + offset
+                                       + (unsigned long)dev->sg->virtual);
+               buf->next = NULL;
+               buf->waiting = 0;
+               buf->pending = 0;
+               init_waitqueue_head(&buf->dma_wait);
+               buf->file_priv = NULL;
+
+               buf->dev_priv_size = dev->driver->dev_priv_size;
+               buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
+               if (!buf->dev_private) {
+                       /* Set count correctly so we free the proper amount. */
+                       entry->buf_count = count;
+                       drm_cleanup_buf_error(dev, entry);
+                       mutex_unlock(&dev->struct_mutex);
+                       atomic_dec(&dev->buf_alloc);
+                       return -ENOMEM;
+               }
+
+               DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
+
+               offset += alignment;
+               entry->buf_count++;
+               byte_count += PAGE_SIZE << page_order;
+       }
+
+       DRM_DEBUG("byte_count: %d\n", byte_count);
+
+       temp_buflist = krealloc(dma->buflist,
+                               (dma->buf_count + entry->buf_count) *
+                               sizeof(*dma->buflist), GFP_KERNEL);
+       if (!temp_buflist) {
+               /* Free the entry because it isn't valid */
+               drm_cleanup_buf_error(dev, entry);
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -ENOMEM;
+       }
+       dma->buflist = temp_buflist;
+
+       for (i = 0; i < entry->buf_count; i++) {
+               dma->buflist[i + dma->buf_count] = &entry->buflist[i];
+       }
+
+       dma->buf_count += entry->buf_count;
+       dma->seg_count += entry->seg_count;
+       dma->page_count += byte_count >> PAGE_SHIFT;
+       dma->byte_count += byte_count;
+
+       DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
+       DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
+
+       mutex_unlock(&dev->struct_mutex);
+
+       request->count = entry->buf_count;
+       request->size = size;
+
+       dma->flags = _DRM_DMA_USE_SG;
+
+       atomic_dec(&dev->buf_alloc);
+       return 0;
+}
+
/*
 * drm_addbufs_fb - add DMA buffers backed by framebuffer memory (ioctl helper).
 *
 * No backing pages are allocated here: each buffer's bus address is derived
 * from request->agp_start, so only bookkeeping (buflist slots and per-buffer
 * driver-private data) is set up.  Requires DRIVER_FB_DMA and CAP_SYS_ADMIN.
 *
 * Returns 0 on success or a negative errno.
 */
static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	/* Only privileged clients may publish FB-backed DMA buffers. */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	/* Round the per-buffer size up to a power of two. */
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	/* Base offset of the buffers inside the framebuffer aperture. */
	agp_offset = request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;  /* Not while in use */

	/*
	 * buf_use is bumped by infobufs/mapbufs to forbid further allocation;
	 * buf_alloc (raised here, dropped on every exit path) blocks them in
	 * turn while this allocation is in flight.
	 */
	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM; /* May only call once for each order */
	}

	/* Arbitrary sanity cap on the userspace-supplied count. */
	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kzalloc(count * sizeof(*entry->buflist),
				GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		/* Bus address stored as a pointer for bookkeeping only; it is
		 * not a dereferenceable kernel virtual address. */
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	/* Grow the device-wide buflist to append the new entries. */
	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	/* NOTE(review): entry->seg_count is never assigned on this path
	 * (stays 0) — presumably intentional since FB buffers have no page
	 * segments; confirm against the AGP/SG variants. */
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_FB;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
+
+
+/**
+ * Add buffers for DMA transfers (ioctl).
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a struct drm_buf_desc request.
+ * \return zero on success or a negative number on failure.
+ *
+ * According with the memory type specified in drm_buf_desc::flags and the
+ * build options, it dispatches the call either to addbufs_agp(),
+ * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
+ * PCI memory respectively.
+ */
+int drm_addbufs(struct drm_device *dev, void *data,
+               struct drm_file *file_priv)
+{
+       struct drm_buf_desc *request = data;
+       int ret;
+
+       if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+               return -EINVAL;
+
+#if __OS_HAS_AGP
+       if (request->flags & _DRM_AGP_BUFFER)
+               ret = drm_addbufs_agp(dev, request);
+       else
+#endif
+       if (request->flags & _DRM_SG_BUFFER)
+               ret = drm_addbufs_sg(dev, request);
+       else if (request->flags & _DRM_FB_BUFFER)
+               ret = drm_addbufs_fb(dev, request);
+       else
+               ret = drm_addbufs_pci(dev, request);
+
+       return ret;
+}
+
/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or by a sophisticated
 * client library to determine how best to use the available buffers (e.g.,
 * large buffers can be used for image transfer).
 *
 * \param dev DRM device.
 * \param data pointer to a drm_buf_info structure.
 * \param file_priv DRM file private.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::count_lock
 * lock, preventing allocation of more buffers after this call. Information
 * about each requested buffer is then copied into user space.
 */
int drm_infobufs(struct drm_device *dev, void *data,
		 struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_info *request = data;
	int i;
	int count;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	/* Refuse while an addbufs call is in flight, then latch buf_use so
	 * no further allocations can start. */
	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	++dev->buf_use;         /* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);

	/* First pass: count how many orders actually have buffers. */
	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)
			++count;
	}

	DRM_DEBUG("count = %d\n", count);

	/* Only copy descriptors out if the caller's array is big enough;
	 * otherwise just report the required count below. */
	if (request->count >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			if (dma->bufs[i].buf_count) {
				struct drm_buf_desc __user *to =
				    &request->list[count];
				struct drm_buf_entry *from = &dma->bufs[i];
				struct drm_freelist *list = &dma->bufs[i].freelist;
				/* Field-by-field copy: the kernel and user
				 * structs are not guaranteed layout-identical. */
				if (copy_to_user(&to->count,
						 &from->buf_count,
						 sizeof(from->buf_count)) ||
				    copy_to_user(&to->size,
						 &from->buf_size,
						 sizeof(from->buf_size)) ||
				    copy_to_user(&to->low_mark,
						 &list->low_mark,
						 sizeof(list->low_mark)) ||
				    copy_to_user(&to->high_mark,
						 &list->high_mark,
						 sizeof(list->high_mark)))
					return -EFAULT;

				DRM_DEBUG("%d %d %d %d %d\n",
					  i,
					  dma->bufs[i].buf_count,
					  dma->bufs[i].buf_size,
					  dma->bufs[i].freelist.low_mark,
					  dma->bufs[i].freelist.high_mark);
				++count;
			}
		}
	}
	request->count = count;

	return 0;
}
+
+/**
+ * Specifies a low and high water mark for buffer allocation
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg a pointer to a drm_buf_desc structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies that the size order is bounded between the admissible orders and
+ * updates the respective drm_device_dma::bufs entry low and high water mark.
+ *
+ * \note This ioctl is deprecated and mostly never used.
+ */
+int drm_markbufs(struct drm_device *dev, void *data,
+                struct drm_file *file_priv)
+{
+       struct drm_device_dma *dma = dev->dma;
+       struct drm_buf_desc *request = data;
+       int order;
+       struct drm_buf_entry *entry;
+
+       if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+               return -EINVAL;
+
+       if (!dma)
+               return -EINVAL;
+
+       DRM_DEBUG("%d, %d, %d\n",
+                 request->size, request->low_mark, request->high_mark);
+       order = drm_order(request->size);
+       if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
+               return -EINVAL;
+       entry = &dma->bufs[order];
+
+       if (request->low_mark < 0 || request->low_mark > entry->buf_count)
+               return -EINVAL;
+       if (request->high_mark < 0 || request->high_mark > entry->buf_count)
+               return -EINVAL;
+
+       entry->freelist.low_mark = request->low_mark;
+       entry->freelist.high_mark = request->high_mark;
+
+       return 0;
+}
+
/**
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param dev DRM device.
 * \param data pointer to a drm_buf_free structure.
 * \param file_priv DRM file private.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int drm_freebufs(struct drm_device *dev, void *data,
		 struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_free *request = data;
	int i;
	int idx;
	struct drm_buf *buf;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	DRM_DEBUG("%d\n", request->count);
	for (i = 0; i < request->count; i++) {
		/* The index list lives in userspace; fetch one entry at a time. */
		if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
			return -EFAULT;
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
				  idx, dma->buf_count - 1);
			return -EINVAL;
		}
		buf = dma->buflist[idx];
		/* A client may only free buffers it owns. */
		if (buf->file_priv != file_priv) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
				  task_pid_nr(current));
			return -EINVAL;
		}
		drm_free_buffer(dev, buf);
	}

	return 0;
}
+
/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param dev DRM device.
 * \param data pointer to a drm_buf_map structure.
 * \param file_priv DRM file private.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP, SG or PCI buffer region with do_mmap(), and copies information
 * about each buffer into user space. For PCI buffers, it calls do_mmap() with
 * offset equal to 0, which drm_mmap() interpretes as PCI buffers and calls
 * drm_mmap_dma().
 */
int drm_mapbufs(struct drm_device *dev, void *data,
		struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	int retcode = 0;
	const int zero = 0;
	unsigned long virtual;
	unsigned long address;
	struct drm_buf_map *request = data;
	int i;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	/* Refuse while an addbufs call is in flight, then latch buf_use so
	 * no further allocations can start. */
	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	dev->buf_use++;         /* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);

	/* Only map if the caller's array can hold every buffer; otherwise
	 * fall through and just report the count. */
	if (request->count >= dma->buf_count) {
		if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
		    || (drm_core_check_feature(dev, DRIVER_SG)
			&& (dma->flags & _DRM_DMA_USE_SG))
		    || (drm_core_check_feature(dev, DRIVER_FB_DMA)
			&& (dma->flags & _DRM_DMA_USE_FB))) {
			/* AGP/SG/FB buffers are mapped through the aperture
			 * map registered at agp_buffer_token. */
			struct drm_local_map *map = dev->agp_buffer_map;
			unsigned long token = dev->agp_buffer_token;

			if (!map) {
				retcode = -EINVAL;
				goto done;
			}
			down_write(&current->mm->mmap_sem);
			virtual = do_mmap(file_priv->filp, 0, map->size,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED,
					  token);
			up_write(&current->mm->mmap_sem);
		} else {
			/* PCI case: offset 0 makes drm_mmap() take the
			 * drm_mmap_dma() path. */
			down_write(&current->mm->mmap_sem);
			virtual = do_mmap(file_priv->filp, 0, dma->byte_count,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED, 0);
			up_write(&current->mm->mmap_sem);
		}
		/* do_mmap() encodes errors as values in the top page of the
		 * address space (the -MAX_ERRNO..-1 range). */
		if (virtual > -1024UL) {
			/* Real error */
			retcode = (signed long)virtual;
			goto done;
		}
		request->virtual = (void __user *)virtual;

		/* Copy per-buffer bookkeeping out field by field. */
		for (i = 0; i < dma->buf_count; i++) {
			if (copy_to_user(&request->list[i].idx,
					 &dma->buflist[i]->idx,
					 sizeof(request->list[0].idx))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request->list[i].total,
					 &dma->buflist[i]->total,
					 sizeof(request->list[0].total))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request->list[i].used,
					 &zero, sizeof(zero))) {
				retcode = -EFAULT;
				goto done;
			}
			address = virtual + dma->buflist[i]->offset;    /* *** */
			if (copy_to_user(&request->list[i].address,
					 &address, sizeof(address))) {
				retcode = -EFAULT;
				goto done;
			}
		}
	}
      done:
	/* The buffer count is reported even on failure. */
	request->count = dma->buf_count;
	DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);

	return retcode;
}
+
/**
 * Compute size order: the exponent of the smallest power of two that is
 * greater than or equal to the given number.
 *
 * drm_order(0) and drm_order(1) are both 0; non-powers-of-two round up.
 *
 * \param size size.
 * \return order.
 *
 * \todo Can be made faster.
 */
int drm_order(unsigned long size)
{
	unsigned long rest = size >> 1;
	int order = 0;

	while (rest != 0) {
		rest >>= 1;
		order++;
	}

	/* Round up when size is not an exact power of two. */
	if ((size & (size - 1)) != 0)
		order++;

	return order;
}
+EXPORT_SYMBOL(drm_order);
diff --git a/services4/3rdparty/linux_drm/drm_cache.c b/services4/3rdparty/linux_drm/drm_cache.c
new file mode 100644 (file)
index 0000000..0e3bd5b
--- /dev/null
@@ -0,0 +1,99 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include "drmP.h"
+
+#if defined(CONFIG_X86)
/* Flush one page from the CPU caches with CLFLUSH, one cache line at a
 * time.  Uses the legacy kmap_atomic(page, KM_USER0) API of this kernel
 * generation to get a temporary kernel mapping. */
static void
drm_clflush_page(struct page *page)
{
	uint8_t *page_virtual;
	unsigned int i;

	if (unlikely(page == NULL))
		return;

	page_virtual = kmap_atomic(page, KM_USER0);
	for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
		clflush(page_virtual + i);
	kunmap_atomic(page_virtual, KM_USER0);
}
+
/* Flush an array of pages with CLFLUSH.  The mb() fences order the
 * unserialized CLFLUSH instructions against surrounding memory accesses. */
static void drm_cache_flush_clflush(struct page *pages[],
				    unsigned long num_pages)
{
	unsigned long i;

	mb();
	for (i = 0; i < num_pages; i++)
		drm_clflush_page(*pages++);
	mb();
}
+
/* IPI callback: write back and invalidate the whole cache of the CPU it
 * runs on.  Fallback used when the CPU lacks CLFLUSH. */
static void
drm_clflush_ipi_handler(void *null)
{
	wbinvd();
}
+#endif
+
/*
 * drm_clflush_pages - flush an array of pages from the CPU caches.
 *
 * On x86 this prefers per-line CLFLUSH and falls back to a WBINVD IPI on
 * every CPU; on PowerPC it flushes the data cache range of each page.
 * Other architectures are unsupported and only log an error.
 */
void
drm_clflush_pages(struct page *pages[], unsigned long num_pages)
{

#if defined(CONFIG_X86)
	if (cpu_has_clflush) {
		drm_cache_flush_clflush(pages, num_pages);
		return;
	}

	/* No CLFLUSH: nuke the caches of all CPUs via IPI. */
	if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
		printk(KERN_ERR "Timed out waiting for cache flush.\n");

#elif defined(__powerpc__)
	unsigned long i;
	for (i = 0; i < num_pages; i++) {
		struct page *page = pages[i];
		void *page_virtual;

		if (unlikely(page == NULL))
			continue;

		page_virtual = kmap_atomic(page, KM_USER0);
		flush_dcache_range((unsigned long)page_virtual,
				   (unsigned long)page_virtual + PAGE_SIZE);
		kunmap_atomic(page_virtual, KM_USER0);
	}
#else
	printk(KERN_ERR "Architecture has no drm_cache.c support\n");
	WARN_ON_ONCE(1);
#endif
}
EXPORT_SYMBOL(drm_clflush_pages);
diff --git a/services4/3rdparty/linux_drm/drm_context.c b/services4/3rdparty/linux_drm/drm_context.c
new file mode 100644 (file)
index 0000000..6d440fb
--- /dev/null
@@ -0,0 +1,461 @@
+/**
+ * \file drm_context.c
+ * IOCTLs for generic contexts
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Fri Nov 24 18:31:37 2000 by gareth@valinux.com
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * ChangeLog:
+ *  2001-11-16 Torsten Duwe <duwe@caldera.de>
+ *             added context constructor/destructor hooks,
+ *             needed by SiS driver's memory management.
+ */
+
+#include "drmP.h"
+
+/******************************************************************/
+/** \name Context bitmap support */
+/*@{*/
+
/**
 * Free a handle from the context bitmap.
 *
 * \param dev DRM device.
 * \param ctx_handle context handle.
 *
 * Removes the entry for \p ctx_handle from drm_device::ctx_idr while
 * holding the drm_device::struct_mutex lock.
 */
void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
{
	mutex_lock(&dev->struct_mutex);
	idr_remove(&dev->ctx_idr, ctx_handle);
	mutex_unlock(&dev->struct_mutex);
}
+
+/**
+ * Context bitmap allocation.
+ *
+ * \param dev DRM device.
+ * \return (non-negative) context handle on success or a negative number on failure.
+ *
+ * Allocate a new idr from drm_device::ctx_idr while holding the
+ * drm_device::struct_mutex lock.
+ */
+static int drm_ctxbitmap_next(struct drm_device * dev)
+{
+       int new_id;
+       int ret;
+
+again:
+       if (idr_pre_get(&dev->ctx_idr, GFP_KERNEL) == 0) {
+               DRM_ERROR("Out of memory expanding drawable idr\n");
+               return -ENOMEM;
+       }
+       mutex_lock(&dev->struct_mutex);
+       ret = idr_get_new_above(&dev->ctx_idr, NULL,
+                               DRM_RESERVED_CONTEXTS, &new_id);
+       if (ret == -EAGAIN) {
+               mutex_unlock(&dev->struct_mutex);
+               goto again;
+       }
+       mutex_unlock(&dev->struct_mutex);
+       return new_id;
+}
+
/**
 * Context bitmap initialization.
 *
 * \param dev DRM device.
 * \return always zero.
 *
 * Initialise the drm_device::ctx_idr (idr_init() cannot fail).
 */
int drm_ctxbitmap_init(struct drm_device * dev)
{
	idr_init(&dev->ctx_idr);
	return 0;
}
+
/**
 * Context bitmap cleanup.
 *
 * \param dev DRM device.
 *
 * Removes all idr entries while holding the drm_device::struct_mutex lock.
 */
void drm_ctxbitmap_cleanup(struct drm_device * dev)
{
	mutex_lock(&dev->struct_mutex);
	idr_remove_all(&dev->ctx_idr);
	mutex_unlock(&dev->struct_mutex);
}
+
+/*@}*/
+
+/******************************************************************/
+/** \name Per Context SAREA Support */
+/*@{*/
+
+/**
+ * Get per-context SAREA.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx_priv_map structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Gets the map from drm_device::ctx_idr with the handle specified and
+ * returns its handle.
+ */
+int drm_getsareactx(struct drm_device *dev, void *data,
+                   struct drm_file *file_priv)
+{
+       struct drm_ctx_priv_map *request = data;
+       struct drm_local_map *map;
+       struct drm_map_list *_entry;
+
+       mutex_lock(&dev->struct_mutex);
+
+       map = idr_find(&dev->ctx_idr, request->ctx_id);
+       if (!map) {
+               mutex_unlock(&dev->struct_mutex);
+               return -EINVAL;
+       }
+
+       mutex_unlock(&dev->struct_mutex);
+
+       request->handle = NULL;
+       list_for_each_entry(_entry, &dev->maplist, head) {
+               if (_entry->map == map) {
+                       request->handle =
+                           (void *)(unsigned long)_entry->user_token;
+                       break;
+               }
+       }
+       if (request->handle == NULL)
+               return -EINVAL;
+
+       return 0;
+}
+
/**
 * Set per-context SAREA.
 *
 * \param dev DRM device.
 * \param data user argument pointing to a drm_ctx_priv_map structure.
 * \param file_priv DRM file private.
 * \return zero on success or a negative number on failure.
 *
 * Searches the maplist for the mapping whose user token matches
 * request::handle and stores it in drm_device::ctx_idr under the context
 * id.  The struct_mutex is held from the list walk through the idr update;
 * both exit labels (bad/found) release it exactly once.
 */
int drm_setsareactx(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_ctx_priv_map *request = data;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list = NULL;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(r_list, &dev->maplist, head) {
		if (r_list->map
		    && r_list->user_token == (unsigned long) request->handle)
			goto found;
	}
      bad:
	mutex_unlock(&dev->struct_mutex);
	return -EINVAL;

      found:
	map = r_list->map;
	if (!map)
		goto bad;

	/* Replace the idr entry in place; fails if ctx_id was never
	 * allocated. */
	if (IS_ERR(idr_replace(&dev->ctx_idr, map, request->ctx_id)))
		goto bad;

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
+
+/*@}*/
+
+/******************************************************************/
+/** \name The actual DRM context handling routines */
+/*@{*/
+
+/**
+ * Switch context.
+ *
+ * \param dev DRM device.
+ * \param old old context handle.
+ * \param new new context handle.
+ * \return zero on success or a negative number on failure.
+ *
+ * Attempt to set drm_device::context_flag.
+ */
+static int drm_context_switch(struct drm_device * dev, int old, int new)
+{
+       if (test_and_set_bit(0, &dev->context_flag)) {
+               DRM_ERROR("Reentering -- FIXME\n");
+               return -EBUSY;
+       }
+
+       DRM_DEBUG("Context switch from %d to %d\n", old, new);
+
+       if (new == dev->last_context) {
+               clear_bit(0, &dev->context_flag);
+               return 0;
+       }
+
+       return 0;
+}
+
/**
 * Complete context switch.
 *
 * \param dev DRM device.
 * \param file_priv DRM file private.
 * \param new new context handle.
 * \return always zero.
 *
 * Updates drm_device::last_context and drm_device::last_switch. Verifies the
 * hardware lock is held, clears the drm_device::context_flag and wakes up
 * drm_device::context_wait.
 */
static int drm_context_switch_complete(struct drm_device *dev,
				       struct drm_file *file_priv, int new)
{
	dev->last_context = new;        /* PRE/POST: This is the _only_ writer. */
	dev->last_switch = jiffies;

	/* Diagnostic only — the switch proceeds even if the lock check
	 * fails. */
	if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) {
		DRM_ERROR("Lock isn't held after context switch\n");
	}

	/* If a context switch is ever initiated
	   when the kernel holds the lock, release
	   that lock here. */
	clear_bit(0, &dev->context_flag);
	wake_up(&dev->context_wait);

	return 0;
}
+
+/**
+ * Reserve contexts.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx_res structure.
+ * \return zero on success or a negative number on failure.
+ */
+int drm_resctx(struct drm_device *dev, void *data,
+              struct drm_file *file_priv)
+{
+       struct drm_ctx_res *res = data;
+       struct drm_ctx ctx;
+       int i;
+
+       if (res->count >= DRM_RESERVED_CONTEXTS) {
+               memset(&ctx, 0, sizeof(ctx));
+               for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
+                       ctx.handle = i;
+                       if (copy_to_user(&res->contexts[i], &ctx, sizeof(ctx)))
+                               return -EFAULT;
+               }
+       }
+       res->count = DRM_RESERVED_CONTEXTS;
+
+       return 0;
+}
+
+/**
+ * Add context.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Get a new handle for the context and copy to userspace.
+ */
+int drm_addctx(struct drm_device *dev, void *data,
+              struct drm_file *file_priv)
+{
+       struct drm_ctx_list *ctx_entry;
+       struct drm_ctx *ctx = data;
+
+       ctx->handle = drm_ctxbitmap_next(dev);
+       if (ctx->handle == DRM_KERNEL_CONTEXT) {
+               /* Skip kernel's context and get a new one. */
+               ctx->handle = drm_ctxbitmap_next(dev);
+       }
+       DRM_DEBUG("%d\n", ctx->handle);
+       if (ctx->handle == -1) {
+               DRM_DEBUG("Not enough free contexts.\n");
+               /* Should this return -EBUSY instead? */
+               return -ENOMEM;
+       }
+
+       ctx_entry = kmalloc(sizeof(*ctx_entry), GFP_KERNEL);
+       if (!ctx_entry) {
+               DRM_DEBUG("out of memory\n");
+               return -ENOMEM;
+       }
+
+       INIT_LIST_HEAD(&ctx_entry->head);
+       ctx_entry->handle = ctx->handle;
+       ctx_entry->tag = file_priv;
+
+       mutex_lock(&dev->ctxlist_mutex);
+       list_add(&ctx_entry->head, &dev->ctxlist);
+       ++dev->ctx_count;
+       mutex_unlock(&dev->ctxlist_mutex);
+
+       return 0;
+}
+
/* Legacy DRM_IOCTL_MOD_CTX: context flags are not supported, so this is
 * intentionally a no-op that reports success. */
int drm_modctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	/* This does nothing */
	return 0;
}
+
/**
 * Get context.
 *
 * \param dev DRM device.
 * \param data user argument pointing to a drm_ctx structure.
 * \param file_priv DRM file private.
 * \return always zero.
 *
 * Context flags are not supported, so the only thing reported back is a
 * zero flags field.
 */
int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_ctx *ctx = data;

	/* This is 0, because we don't handle any context flags */
	ctx->flags = 0;

	return 0;
}
+
/**
 * Switch context.
 *
 * \param dev DRM device.
 * \param data user argument pointing to a drm_ctx structure.
 * \param file_priv DRM file private.
 * \return zero on success or a negative number on failure.
 *
 * Thin ioctl wrapper: delegates to drm_context_switch() with the current
 * last_context as the old context.
 */
int drm_switchctx(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	struct drm_ctx *ctx = data;

	DRM_DEBUG("%d\n", ctx->handle);
	return drm_context_switch(dev, dev->last_context, ctx->handle);
}
+
/**
 * New context.
 *
 * \param dev DRM device.
 * \param data user argument pointing to a drm_ctx structure.
 * \param file_priv DRM file private.
 * \return always zero (the completion result is ignored).
 *
 * Calls drm_context_switch_complete() to finish a context switch.
 */
int drm_newctx(struct drm_device *dev, void *data,
	       struct drm_file *file_priv)
{
	struct drm_ctx *ctx = data;

	DRM_DEBUG("%d\n", ctx->handle);
	drm_context_switch_complete(dev, file_priv, ctx->handle);

	return 0;
}
+
/**
 * Remove context.
 *
 * \param dev DRM device.
 * \param data user argument pointing to a drm_ctx structure.
 * \param file_priv DRM file private.
 * \return always zero.
 *
 * If not the special kernel context, calls the driver's context destructor
 * (when provided) and frees the handle via drm_ctxbitmap_free(); then
 * removes any matching entries from the device context list.
 */
int drm_rmctx(struct drm_device *dev, void *data,
	      struct drm_file *file_priv)
{
	struct drm_ctx *ctx = data;

	DRM_DEBUG("%d\n", ctx->handle);
	if (ctx->handle != DRM_KERNEL_CONTEXT) {
		if (dev->driver->context_dtor)
			dev->driver->context_dtor(dev, ctx->handle);
		drm_ctxbitmap_free(dev, ctx->handle);
	}

	mutex_lock(&dev->ctxlist_mutex);
	if (!list_empty(&dev->ctxlist)) {
		struct drm_ctx_list *pos, *n;

		/* _safe variant: entries are deleted while iterating. */
		list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
			if (pos->handle == ctx->handle) {
				list_del(&pos->head);
				kfree(pos);
				--dev->ctx_count;
			}
		}
	}
	mutex_unlock(&dev->ctxlist_mutex);

	return 0;
}
+
+/*@}*/
diff --git a/services4/3rdparty/linux_drm/drm_crtc.c b/services4/3rdparty/linux_drm/drm_crtc.c
new file mode 100644 (file)
index 0000000..2baa670
--- /dev/null
@@ -0,0 +1,2676 @@
+/*
+ * Copyright (c) 2006-2008 Intel Corporation
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2008 Red Hat Inc.
+ *
+ * DRM core CRTC related functions
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ *
+ * Authors:
+ *      Keith Packard
+ *     Eric Anholt <eric@anholt.net>
+ *      Dave Airlie <airlied@linux.ie>
+ *      Jesse Barnes <jesse.barnes@intel.com>
+ */
+#include <linux/list.h>
+#include <linux/slab.h>
+#include "drm.h"
+#include "drmP.h"
+#include "drm_crtc.h"
+#include "drm_edid.h"
+
+struct drm_prop_enum_list {
+       int type;
+       char *name;
+};
+
+/* Avoid boilerplate.  I'm tired of typing. */
+#define DRM_ENUM_NAME_FN(fnname, list)                         \
+       char *fnname(int val)                                   \
+       {                                                       \
+               int i;                                          \
+               for (i = 0; i < ARRAY_SIZE(list); i++) {        \
+                       if (list[i].type == val)                \
+                               return list[i].name;            \
+               }                                               \
+               return "(unknown)";                             \
+       }
+
+/*
+ * Global properties
+ */
+static struct drm_prop_enum_list drm_dpms_enum_list[] =
+{      { DRM_MODE_DPMS_ON, "On" },
+       { DRM_MODE_DPMS_STANDBY, "Standby" },
+       { DRM_MODE_DPMS_SUSPEND, "Suspend" },
+       { DRM_MODE_DPMS_OFF, "Off" }
+};
+
+DRM_ENUM_NAME_FN(drm_get_dpms_name, drm_dpms_enum_list)
+
+/*
+ * Optional properties
+ */
+static struct drm_prop_enum_list drm_scaling_mode_enum_list[] =
+{
+       { DRM_MODE_SCALE_NONE, "None" },
+       { DRM_MODE_SCALE_FULLSCREEN, "Full" },
+       { DRM_MODE_SCALE_CENTER, "Center" },
+       { DRM_MODE_SCALE_ASPECT, "Full aspect" },
+};
+
+static struct drm_prop_enum_list drm_dithering_mode_enum_list[] =
+{
+       { DRM_MODE_DITHERING_OFF, "Off" },
+       { DRM_MODE_DITHERING_ON, "On" },
+       { DRM_MODE_DITHERING_AUTO, "Automatic" },
+};
+
+/*
+ * Non-global properties, but "required" for certain connectors.
+ */
+static struct drm_prop_enum_list drm_dvi_i_select_enum_list[] =
+{
+       { DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
+       { DRM_MODE_SUBCONNECTOR_DVID,      "DVI-D"     }, /* DVI-I  */
+       { DRM_MODE_SUBCONNECTOR_DVIA,      "DVI-A"     }, /* DVI-I  */
+};
+
+DRM_ENUM_NAME_FN(drm_get_dvi_i_select_name, drm_dvi_i_select_enum_list)
+
+static struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] =
+{
+       { DRM_MODE_SUBCONNECTOR_Unknown,   "Unknown"   }, /* DVI-I and TV-out */
+       { DRM_MODE_SUBCONNECTOR_DVID,      "DVI-D"     }, /* DVI-I  */
+       { DRM_MODE_SUBCONNECTOR_DVIA,      "DVI-A"     }, /* DVI-I  */
+};
+
+DRM_ENUM_NAME_FN(drm_get_dvi_i_subconnector_name,
+                drm_dvi_i_subconnector_enum_list)
+
+static struct drm_prop_enum_list drm_tv_select_enum_list[] =
+{
+       { DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
+       { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
+       { DRM_MODE_SUBCONNECTOR_SVIDEO,    "SVIDEO"    }, /* TV-out */
+       { DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
+       { DRM_MODE_SUBCONNECTOR_SCART,     "SCART"     }, /* TV-out */
+};
+
+DRM_ENUM_NAME_FN(drm_get_tv_select_name, drm_tv_select_enum_list)
+
+static struct drm_prop_enum_list drm_tv_subconnector_enum_list[] =
+{
+       { DRM_MODE_SUBCONNECTOR_Unknown,   "Unknown"   }, /* DVI-I and TV-out */
+       { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
+       { DRM_MODE_SUBCONNECTOR_SVIDEO,    "SVIDEO"    }, /* TV-out */
+       { DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
+       { DRM_MODE_SUBCONNECTOR_SCART,     "SCART"     }, /* TV-out */
+};
+
+DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
+                drm_tv_subconnector_enum_list)
+
+static struct drm_prop_enum_list drm_dirty_info_enum_list[] = {
+       { DRM_MODE_DIRTY_OFF,      "Off"      },
+       { DRM_MODE_DIRTY_ON,       "On"       },
+       { DRM_MODE_DIRTY_ANNOTATE, "Annotate" },
+};
+
+DRM_ENUM_NAME_FN(drm_get_dirty_info_name,
+                drm_dirty_info_enum_list)
+
+struct drm_conn_prop_enum_list {
+       int type;
+       char *name;
+       int count;
+};
+
+/*
+ * Connector and encoder types.
+ */
+static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
+{      { DRM_MODE_CONNECTOR_Unknown, "Unknown", 0 },
+       { DRM_MODE_CONNECTOR_VGA, "VGA", 0 },
+       { DRM_MODE_CONNECTOR_DVII, "DVI-I", 0 },
+       { DRM_MODE_CONNECTOR_DVID, "DVI-D", 0 },
+       { DRM_MODE_CONNECTOR_DVIA, "DVI-A", 0 },
+       { DRM_MODE_CONNECTOR_Composite, "Composite", 0 },
+       { DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO", 0 },
+       { DRM_MODE_CONNECTOR_LVDS, "LVDS", 0 },
+       { DRM_MODE_CONNECTOR_Component, "Component", 0 },
+       { DRM_MODE_CONNECTOR_9PinDIN, "DIN", 0 },
+       { DRM_MODE_CONNECTOR_DisplayPort, "DP", 0 },
+       { DRM_MODE_CONNECTOR_HDMIA, "HDMI-A", 0 },
+       { DRM_MODE_CONNECTOR_HDMIB, "HDMI-B", 0 },
+       { DRM_MODE_CONNECTOR_TV, "TV", 0 },
+       { DRM_MODE_CONNECTOR_eDP, "eDP", 0 },
+};
+
+static struct drm_prop_enum_list drm_encoder_enum_list[] =
+{      { DRM_MODE_ENCODER_NONE, "None" },
+       { DRM_MODE_ENCODER_DAC, "DAC" },
+       { DRM_MODE_ENCODER_TMDS, "TMDS" },
+       { DRM_MODE_ENCODER_LVDS, "LVDS" },
+       { DRM_MODE_ENCODER_TVDAC, "TV" },
+};
+
+char *drm_get_encoder_name(struct drm_encoder *encoder)
+{
+       static char buf[32];
+
+       snprintf(buf, 32, "%s-%d",
+                drm_encoder_enum_list[encoder->encoder_type].name,
+                encoder->base.id);
+       return buf;
+}
+EXPORT_SYMBOL(drm_get_encoder_name);
+
+char *drm_get_connector_name(struct drm_connector *connector)
+{
+       static char buf[32];
+
+       snprintf(buf, 32, "%s-%d",
+                drm_connector_enum_list[connector->connector_type].name,
+                connector->connector_type_id);
+       return buf;
+}
+EXPORT_SYMBOL(drm_get_connector_name);
+
+char *drm_get_connector_status_name(enum drm_connector_status status)
+{
+       if (status == connector_status_connected)
+               return "connected";
+       else if (status == connector_status_disconnected)
+               return "disconnected";
+       else
+               return "unknown";
+}
+
+/**
+ * drm_mode_object_get - allocate a new identifier
+ * @dev: DRM device
+ * @ptr: object pointer, used to generate unique ID
+ * @type: object type
+ *
+ * LOCKING:
+ *
+ * Create a unique identifier based on @ptr in @dev's identifier space.  Used
+ * for tracking modes, CRTCs and connectors.
+ *
+ * RETURNS:
+ * New unique (relative to other objects in @dev) integer identifier for the
+ * object.
+ */
+static int drm_mode_object_get(struct drm_device *dev,
+                              struct drm_mode_object *obj, uint32_t obj_type)
+{
+       int new_id = 0;
+       int ret;
+
+again:
+       if (idr_pre_get(&dev->mode_config.crtc_idr, GFP_KERNEL) == 0) {
+               DRM_ERROR("Ran out memory getting a mode number\n");
+               return -EINVAL;
+       }
+
+       mutex_lock(&dev->mode_config.idr_mutex);
+       ret = idr_get_new_above(&dev->mode_config.crtc_idr, obj, 1, &new_id);
+       mutex_unlock(&dev->mode_config.idr_mutex);
+       if (ret == -EAGAIN)
+               goto again;
+
+       obj->id = new_id;
+       obj->type = obj_type;
+       return 0;
+}
+
+/**
+ * drm_mode_object_put - free an identifier
+ * @dev: DRM device
+ * @id: ID to free
+ *
+ * LOCKING:
+ * Caller must hold DRM mode_config lock.
+ *
+ * Free @id from @dev's unique identifier pool.
+ */
+static void drm_mode_object_put(struct drm_device *dev,
+                               struct drm_mode_object *object)
+{
+       mutex_lock(&dev->mode_config.idr_mutex);
+       idr_remove(&dev->mode_config.crtc_idr, object->id);
+       mutex_unlock(&dev->mode_config.idr_mutex);
+}
+
+struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
+               uint32_t id, uint32_t type)
+{
+       struct drm_mode_object *obj = NULL;
+
+       mutex_lock(&dev->mode_config.idr_mutex);
+       obj = idr_find(&dev->mode_config.crtc_idr, id);
+       if (!obj || (obj->type != type) || (obj->id != id))
+               obj = NULL;
+       mutex_unlock(&dev->mode_config.idr_mutex);
+
+       return obj;
+}
+EXPORT_SYMBOL(drm_mode_object_find);
+
+/**
+ * drm_framebuffer_init - initialize a framebuffer
+ * @dev: DRM device
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Allocates an ID for the framebuffer's parent mode object, sets its mode
+ * functions & device file and adds it to the master fd list.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure.
+ */
+int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
+                        const struct drm_framebuffer_funcs *funcs)
+{
+       int ret;
+
+       ret = drm_mode_object_get(dev, &fb->base, DRM_MODE_OBJECT_FB);
+       if (ret) {
+               return ret;
+       }
+
+       fb->dev = dev;
+       fb->funcs = funcs;
+       dev->mode_config.num_fb++;
+       list_add(&fb->head, &dev->mode_config.fb_list);
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_framebuffer_init);
+
+/**
+ * drm_framebuffer_cleanup - remove a framebuffer object
+ * @fb: framebuffer to remove
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Scans all the CRTCs in @dev's mode_config.  If they're using @fb, removes
+ * it, setting it to NULL.
+ */
+void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
+{
+       struct drm_device *dev = fb->dev;
+       struct drm_crtc *crtc;
+       struct drm_mode_set set;
+       int ret;
+
+       /* remove from any CRTC */
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               if (crtc->fb == fb) {
+                       /* should turn off the crtc */
+                       memset(&set, 0, sizeof(struct drm_mode_set));
+                       set.crtc = crtc;
+                       set.fb = NULL;
+                       ret = crtc->funcs->set_config(&set);
+                       if (ret)
+                               DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc);
+               }
+       }
+
+       drm_mode_object_put(dev, &fb->base);
+       list_del(&fb->head);
+       dev->mode_config.num_fb--;
+}
+EXPORT_SYMBOL(drm_framebuffer_cleanup);
+
+/**
+ * drm_crtc_init - Initialise a new CRTC object
+ * @dev: DRM device
+ * @crtc: CRTC object to init
+ * @funcs: callbacks for the new CRTC
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Inits a new object created as base part of a driver crtc object.
+ */
+void drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
+                  const struct drm_crtc_funcs *funcs)
+{
+       crtc->dev = dev;
+       crtc->funcs = funcs;
+
+       mutex_lock(&dev->mode_config.mutex);
+       drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC);
+
+       list_add_tail(&crtc->head, &dev->mode_config.crtc_list);
+       dev->mode_config.num_crtc++;
+       mutex_unlock(&dev->mode_config.mutex);
+}
+EXPORT_SYMBOL(drm_crtc_init);
+
+/**
+ * drm_crtc_cleanup - Cleans up the core crtc usage.
+ * @crtc: CRTC to cleanup
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Cleanup @crtc. Removes from drm modesetting space
+ * does NOT free object, caller does that.
+ */
+void drm_crtc_cleanup(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+
+       if (crtc->gamma_store) {
+               kfree(crtc->gamma_store);
+               crtc->gamma_store = NULL;
+       }
+
+       drm_mode_object_put(dev, &crtc->base);
+       list_del(&crtc->head);
+       dev->mode_config.num_crtc--;
+}
+EXPORT_SYMBOL(drm_crtc_cleanup);
+
+/**
+ * drm_mode_probed_add - add a mode to a connector's probed mode list
+ * @connector: connector the new mode is to be added to
+ * @mode: mode data
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Add @mode to @connector's mode list for later use.
+ */
+void drm_mode_probed_add(struct drm_connector *connector,
+                        struct drm_display_mode *mode)
+{
+       list_add(&mode->head, &connector->probed_modes);
+}
+EXPORT_SYMBOL(drm_mode_probed_add);
+
+/**
+ * drm_mode_remove - remove and free a mode
+ * @connector: connector list to modify
+ * @mode: mode to remove
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Remove @mode from @connector's mode list, then free it.
+ */
+void drm_mode_remove(struct drm_connector *connector,
+                    struct drm_display_mode *mode)
+{
+       list_del(&mode->head);
+       kfree(mode);
+}
+EXPORT_SYMBOL(drm_mode_remove);
+
+/**
+ * drm_connector_init - Init a preallocated connector
+ * @dev: DRM device
+ * @connector: the connector to init
+ * @funcs: callbacks for this connector
+ * @name: user visible name of the connector
+ *
+ * LOCKING:
+ * Caller must hold @dev's mode_config lock.
+ *
+ * Initialises a preallocated connector. Connectors should be
+ * subclassed as part of driver connector objects.
+ */
+void drm_connector_init(struct drm_device *dev,
+                    struct drm_connector *connector,
+                    const struct drm_connector_funcs *funcs,
+                    int connector_type)
+{
+       mutex_lock(&dev->mode_config.mutex);
+
+       connector->dev = dev;
+       connector->funcs = funcs;
+       drm_mode_object_get(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR);
+       connector->connector_type = connector_type;
+       connector->connector_type_id =
+               ++drm_connector_enum_list[connector_type].count; /* TODO */
+       INIT_LIST_HEAD(&connector->user_modes);
+       INIT_LIST_HEAD(&connector->probed_modes);
+       INIT_LIST_HEAD(&connector->modes);
+       connector->edid_blob_ptr = NULL;
+
+       list_add_tail(&connector->head, &dev->mode_config.connector_list);
+       dev->mode_config.num_connector++;
+
+       drm_connector_attach_property(connector,
+                                     dev->mode_config.edid_property, 0);
+
+       drm_connector_attach_property(connector,
+                                     dev->mode_config.dpms_property, 0);
+
+       mutex_unlock(&dev->mode_config.mutex);
+}
+EXPORT_SYMBOL(drm_connector_init);
+
+/**
+ * drm_connector_cleanup - cleans up an initialised connector
+ * @connector: connector to cleanup
+ *
+ * LOCKING:
+ * Caller must hold @dev's mode_config lock.
+ *
+ * Cleans up the connector but doesn't free the object.
+ */
+void drm_connector_cleanup(struct drm_connector *connector)
+{
+       struct drm_device *dev = connector->dev;
+       struct drm_display_mode *mode, *t;
+
+       list_for_each_entry_safe(mode, t, &connector->probed_modes, head)
+               drm_mode_remove(connector, mode);
+
+       list_for_each_entry_safe(mode, t, &connector->modes, head)
+               drm_mode_remove(connector, mode);
+
+       list_for_each_entry_safe(mode, t, &connector->user_modes, head)
+               drm_mode_remove(connector, mode);
+
+       mutex_lock(&dev->mode_config.mutex);
+       drm_mode_object_put(dev, &connector->base);
+       list_del(&connector->head);
+       mutex_unlock(&dev->mode_config.mutex);
+}
+EXPORT_SYMBOL(drm_connector_cleanup);
+
+void drm_encoder_init(struct drm_device *dev,
+                     struct drm_encoder *encoder,
+                     const struct drm_encoder_funcs *funcs,
+                     int encoder_type)
+{
+       mutex_lock(&dev->mode_config.mutex);
+
+       encoder->dev = dev;
+
+       drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER);
+       encoder->encoder_type = encoder_type;
+       encoder->funcs = funcs;
+
+       list_add_tail(&encoder->head, &dev->mode_config.encoder_list);
+       dev->mode_config.num_encoder++;
+
+       mutex_unlock(&dev->mode_config.mutex);
+}
+EXPORT_SYMBOL(drm_encoder_init);
+
+void drm_encoder_cleanup(struct drm_encoder *encoder)
+{
+       struct drm_device *dev = encoder->dev;
+       mutex_lock(&dev->mode_config.mutex);
+       drm_mode_object_put(dev, &encoder->base);
+       list_del(&encoder->head);
+       mutex_unlock(&dev->mode_config.mutex);
+}
+EXPORT_SYMBOL(drm_encoder_cleanup);
+
+/**
+ * drm_mode_create - create a new display mode
+ * @dev: DRM device
+ *
+ * LOCKING:
+ * Caller must hold DRM mode_config lock.
+ *
+ * Create a new drm_display_mode, give it an ID, and return it.
+ *
+ * RETURNS:
+ * Pointer to new mode on success, NULL on error.
+ */
+struct drm_display_mode *drm_mode_create(struct drm_device *dev)
+{
+       struct drm_display_mode *nmode;
+
+       nmode = kzalloc(sizeof(struct drm_display_mode), GFP_KERNEL);
+       if (!nmode)
+               return NULL;
+
+       drm_mode_object_get(dev, &nmode->base, DRM_MODE_OBJECT_MODE);
+       return nmode;
+}
+EXPORT_SYMBOL(drm_mode_create);
+
+/**
+ * drm_mode_destroy - remove a mode
+ * @dev: DRM device
+ * @mode: mode to remove
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Free @mode's unique identifier, then free it.
+ */
+void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode)
+{
+       drm_mode_object_put(dev, &mode->base);
+
+       kfree(mode);
+}
+EXPORT_SYMBOL(drm_mode_destroy);
+
+static int drm_mode_create_standard_connector_properties(struct drm_device *dev)
+{
+       struct drm_property *edid;
+       struct drm_property *dpms;
+       int i;
+
+       /*
+        * Standard properties (apply to all connectors)
+        */
+       edid = drm_property_create(dev, DRM_MODE_PROP_BLOB |
+                                  DRM_MODE_PROP_IMMUTABLE,
+                                  "EDID", 0);
+       dev->mode_config.edid_property = edid;
+
+       dpms = drm_property_create(dev, DRM_MODE_PROP_ENUM,
+                                  "DPMS", ARRAY_SIZE(drm_dpms_enum_list));
+       for (i = 0; i < ARRAY_SIZE(drm_dpms_enum_list); i++)
+               drm_property_add_enum(dpms, i, drm_dpms_enum_list[i].type,
+                                     drm_dpms_enum_list[i].name);
+       dev->mode_config.dpms_property = dpms;
+
+       return 0;
+}
+
+/**
+ * drm_mode_create_dvi_i_properties - create DVI-I specific connector properties
+ * @dev: DRM device
+ *
+ * Called by a driver the first time a DVI-I connector is made.
+ */
+int drm_mode_create_dvi_i_properties(struct drm_device *dev)
+{
+       struct drm_property *dvi_i_selector;
+       struct drm_property *dvi_i_subconnector;
+       int i;
+
+       if (dev->mode_config.dvi_i_select_subconnector_property)
+               return 0;
+
+       dvi_i_selector =
+               drm_property_create(dev, DRM_MODE_PROP_ENUM,
+                                   "select subconnector",
+                                   ARRAY_SIZE(drm_dvi_i_select_enum_list));
+       for (i = 0; i < ARRAY_SIZE(drm_dvi_i_select_enum_list); i++)
+               drm_property_add_enum(dvi_i_selector, i,
+                                     drm_dvi_i_select_enum_list[i].type,
+                                     drm_dvi_i_select_enum_list[i].name);
+       dev->mode_config.dvi_i_select_subconnector_property = dvi_i_selector;
+
+       dvi_i_subconnector =
+               drm_property_create(dev, DRM_MODE_PROP_ENUM |
+                                   DRM_MODE_PROP_IMMUTABLE,
+                                   "subconnector",
+                                   ARRAY_SIZE(drm_dvi_i_subconnector_enum_list));
+       for (i = 0; i < ARRAY_SIZE(drm_dvi_i_subconnector_enum_list); i++)
+               drm_property_add_enum(dvi_i_subconnector, i,
+                                     drm_dvi_i_subconnector_enum_list[i].type,
+                                     drm_dvi_i_subconnector_enum_list[i].name);
+       dev->mode_config.dvi_i_subconnector_property = dvi_i_subconnector;
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_dvi_i_properties);
+
+/**
+ * drm_create_tv_properties - create TV specific connector properties
+ * @dev: DRM device
+ * @num_modes: number of different TV formats (modes) supported
+ * @modes: array of pointers to strings containing name of each format
+ *
+ * Called by a driver's TV initialization routine, this function creates
+ * the TV specific connector properties for a given device.  Caller is
+ * responsible for allocating a list of format names and passing them to
+ * this routine.
+ */
+int drm_mode_create_tv_properties(struct drm_device *dev, int num_modes,
+                                 char *modes[])
+{
+       struct drm_property *tv_selector;
+       struct drm_property *tv_subconnector;
+       int i;
+
+       if (dev->mode_config.tv_select_subconnector_property)
+               return 0;
+
+       /*
+        * Basic connector properties
+        */
+       tv_selector = drm_property_create(dev, DRM_MODE_PROP_ENUM,
+                                         "select subconnector",
+                                         ARRAY_SIZE(drm_tv_select_enum_list));
+       for (i = 0; i < ARRAY_SIZE(drm_tv_select_enum_list); i++)
+               drm_property_add_enum(tv_selector, i,
+                                     drm_tv_select_enum_list[i].type,
+                                     drm_tv_select_enum_list[i].name);
+       dev->mode_config.tv_select_subconnector_property = tv_selector;
+
+       tv_subconnector =
+               drm_property_create(dev, DRM_MODE_PROP_ENUM |
+                                   DRM_MODE_PROP_IMMUTABLE, "subconnector",
+                                   ARRAY_SIZE(drm_tv_subconnector_enum_list));
+       for (i = 0; i < ARRAY_SIZE(drm_tv_subconnector_enum_list); i++)
+               drm_property_add_enum(tv_subconnector, i,
+                                     drm_tv_subconnector_enum_list[i].type,
+                                     drm_tv_subconnector_enum_list[i].name);
+       dev->mode_config.tv_subconnector_property = tv_subconnector;
+
+       /*
+        * Other, TV specific properties: margins & TV modes.
+        */
+       dev->mode_config.tv_left_margin_property =
+               drm_property_create(dev, DRM_MODE_PROP_RANGE,
+                                   "left margin", 2);
+       dev->mode_config.tv_left_margin_property->values[0] = 0;
+       dev->mode_config.tv_left_margin_property->values[1] = 100;
+
+       dev->mode_config.tv_right_margin_property =
+               drm_property_create(dev, DRM_MODE_PROP_RANGE,
+                                   "right margin", 2);
+       dev->mode_config.tv_right_margin_property->values[0] = 0;
+       dev->mode_config.tv_right_margin_property->values[1] = 100;
+
+       dev->mode_config.tv_top_margin_property =
+               drm_property_create(dev, DRM_MODE_PROP_RANGE,
+                                   "top margin", 2);
+       dev->mode_config.tv_top_margin_property->values[0] = 0;
+       dev->mode_config.tv_top_margin_property->values[1] = 100;
+
+       dev->mode_config.tv_bottom_margin_property =
+               drm_property_create(dev, DRM_MODE_PROP_RANGE,
+                                   "bottom margin", 2);
+       dev->mode_config.tv_bottom_margin_property->values[0] = 0;
+       dev->mode_config.tv_bottom_margin_property->values[1] = 100;
+
+       dev->mode_config.tv_mode_property =
+               drm_property_create(dev, DRM_MODE_PROP_ENUM,
+                                   "mode", num_modes);
+       for (i = 0; i < num_modes; i++)
+               drm_property_add_enum(dev->mode_config.tv_mode_property, i,
+                                     i, modes[i]);
+
+       dev->mode_config.tv_brightness_property =
+               drm_property_create(dev, DRM_MODE_PROP_RANGE,
+                                   "brightness", 2);
+       dev->mode_config.tv_brightness_property->values[0] = 0;
+       dev->mode_config.tv_brightness_property->values[1] = 100;
+
+       dev->mode_config.tv_contrast_property =
+               drm_property_create(dev, DRM_MODE_PROP_RANGE,
+                                   "contrast", 2);
+       dev->mode_config.tv_contrast_property->values[0] = 0;
+       dev->mode_config.tv_contrast_property->values[1] = 100;
+
+       dev->mode_config.tv_flicker_reduction_property =
+               drm_property_create(dev, DRM_MODE_PROP_RANGE,
+                                   "flicker reduction", 2);
+       dev->mode_config.tv_flicker_reduction_property->values[0] = 0;
+       dev->mode_config.tv_flicker_reduction_property->values[1] = 100;
+
+       dev->mode_config.tv_overscan_property =
+               drm_property_create(dev, DRM_MODE_PROP_RANGE,
+                                   "overscan", 2);
+       dev->mode_config.tv_overscan_property->values[0] = 0;
+       dev->mode_config.tv_overscan_property->values[1] = 100;
+
+       dev->mode_config.tv_saturation_property =
+               drm_property_create(dev, DRM_MODE_PROP_RANGE,
+                                   "saturation", 2);
+       dev->mode_config.tv_saturation_property->values[0] = 0;
+       dev->mode_config.tv_saturation_property->values[1] = 100;
+
+       dev->mode_config.tv_hue_property =
+               drm_property_create(dev, DRM_MODE_PROP_RANGE,
+                                   "hue", 2);
+       dev->mode_config.tv_hue_property->values[0] = 0;
+       dev->mode_config.tv_hue_property->values[1] = 100;
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_tv_properties);
+
+/**
+ * drm_mode_create_scaling_mode_property - create scaling mode property
+ * @dev: DRM device
+ *
+ * Called by a driver the first time it's needed, must be attached to desired
+ * connectors.
+ */
+int drm_mode_create_scaling_mode_property(struct drm_device *dev)
+{
+       struct drm_property *scaling_mode;
+       int i;
+
+       if (dev->mode_config.scaling_mode_property)
+               return 0;
+
+       scaling_mode =
+               drm_property_create(dev, DRM_MODE_PROP_ENUM, "scaling mode",
+                                   ARRAY_SIZE(drm_scaling_mode_enum_list));
+       for (i = 0; i < ARRAY_SIZE(drm_scaling_mode_enum_list); i++)
+               drm_property_add_enum(scaling_mode, i,
+                                     drm_scaling_mode_enum_list[i].type,
+                                     drm_scaling_mode_enum_list[i].name);
+
+       dev->mode_config.scaling_mode_property = scaling_mode;
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_scaling_mode_property);
+
+/**
+ * drm_mode_create_dithering_property - create dithering property
+ * @dev: DRM device
+ *
+ * Called by a driver the first time it's needed, must be attached to desired
+ * connectors.
+ */
+int drm_mode_create_dithering_property(struct drm_device *dev)
+{
+       struct drm_property *dithering_mode;
+       int i;
+
+       if (dev->mode_config.dithering_mode_property)
+               return 0;
+
+       dithering_mode =
+               drm_property_create(dev, DRM_MODE_PROP_ENUM, "dithering",
+                                   ARRAY_SIZE(drm_dithering_mode_enum_list));
+       for (i = 0; i < ARRAY_SIZE(drm_dithering_mode_enum_list); i++)
+               drm_property_add_enum(dithering_mode, i,
+                                     drm_dithering_mode_enum_list[i].type,
+                                     drm_dithering_mode_enum_list[i].name);
+       dev->mode_config.dithering_mode_property = dithering_mode;
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_dithering_property);
+
+/**
+ * drm_mode_create_dirty_property - create dirty property
+ * @dev: DRM device
+ *
+ * Called by a driver the first time it's needed, must be attached to desired
+ * connectors.
+ */
+int drm_mode_create_dirty_info_property(struct drm_device *dev)
+{
+       struct drm_property *dirty_info;
+       int i;
+
+       if (dev->mode_config.dirty_info_property)
+               return 0;
+
+       dirty_info =
+               drm_property_create(dev, DRM_MODE_PROP_ENUM |
+                                   DRM_MODE_PROP_IMMUTABLE,
+                                   "dirty",
+                                   ARRAY_SIZE(drm_dirty_info_enum_list));
+       for (i = 0; i < ARRAY_SIZE(drm_dirty_info_enum_list); i++)
+               drm_property_add_enum(dirty_info, i,
+                                     drm_dirty_info_enum_list[i].type,
+                                     drm_dirty_info_enum_list[i].name);
+       dev->mode_config.dirty_info_property = dirty_info;
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_dirty_info_property);
+
+/**
+ * drm_mode_config_init - initialize DRM mode_configuration structure
+ * @dev: DRM device
+ *
+ * LOCKING:
+ * None, should happen single threaded at init time.
+ *
+ * Initialize @dev's mode_config structure, used for tracking the graphics
+ * configuration of @dev.
+ */
+void drm_mode_config_init(struct drm_device *dev)
+{
+       mutex_init(&dev->mode_config.mutex);
+       mutex_init(&dev->mode_config.idr_mutex);
+       INIT_LIST_HEAD(&dev->mode_config.fb_list);
+       INIT_LIST_HEAD(&dev->mode_config.crtc_list);
+       INIT_LIST_HEAD(&dev->mode_config.connector_list);
+       INIT_LIST_HEAD(&dev->mode_config.encoder_list);
+       INIT_LIST_HEAD(&dev->mode_config.property_list);
+       INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
+       idr_init(&dev->mode_config.crtc_idr);
+
+       mutex_lock(&dev->mode_config.mutex);
+       drm_mode_create_standard_connector_properties(dev);
+       mutex_unlock(&dev->mode_config.mutex);
+
+       /* Just to be sure */
+       dev->mode_config.num_fb = 0;
+       dev->mode_config.num_connector = 0;
+       dev->mode_config.num_crtc = 0;
+       dev->mode_config.num_encoder = 0;
+}
+EXPORT_SYMBOL(drm_mode_config_init);
+
+int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group)
+{
+       uint32_t total_objects = 0;
+
+       total_objects += dev->mode_config.num_crtc;
+       total_objects += dev->mode_config.num_connector;
+       total_objects += dev->mode_config.num_encoder;
+
+       if (total_objects == 0)
+               return -EINVAL;
+
+       group->id_list = kzalloc(total_objects * sizeof(uint32_t), GFP_KERNEL);
+       if (!group->id_list)
+               return -ENOMEM;
+
+       group->num_crtcs = 0;
+       group->num_connectors = 0;
+       group->num_encoders = 0;
+       return 0;
+}
+
+/**
+ * drm_mode_group_init_legacy_group - build a group containing every object
+ * @dev: DRM device
+ * @group: group to fill
+ *
+ * Initialises @group and packs the IDs of all of @dev's objects into
+ * group->id_list in three contiguous runs: [CRTCs | encoders | connectors].
+ * Readers index into the array using the per-type counters as run offsets
+ * (see drm_mode_getresources()).
+ *
+ * RETURNS:
+ * Zero on success, or the error from drm_mode_group_init().
+ */
+int drm_mode_group_init_legacy_group(struct drm_device *dev,
+                                    struct drm_mode_group *group)
+{
+       struct drm_crtc *crtc;
+       struct drm_encoder *encoder;
+       struct drm_connector *connector;
+       int ret;
+
+       if ((ret = drm_mode_group_init(dev, group)))
+               return ret;
+
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+               group->id_list[group->num_crtcs++] = crtc->base.id;
+
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+               group->id_list[group->num_crtcs + group->num_encoders++] =
+               encoder->base.id;
+
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+               group->id_list[group->num_crtcs + group->num_encoders +
+                              group->num_connectors++] = connector->base.id;
+
+       return 0;
+}
+
+/**
+ * drm_mode_config_cleanup - free up DRM mode_config info
+ * @dev: DRM device
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Free up all the connectors and CRTCs associated with this DRM device, then
+ * free up the framebuffers and associated buffer objects.
+ *
+ * FIXME: cleanup any dangling user buffer objects too
+ */
+void drm_mode_config_cleanup(struct drm_device *dev)
+{
+       struct drm_connector *connector, *ot;
+       struct drm_crtc *crtc, *ct;
+       struct drm_encoder *encoder, *enct;
+       struct drm_framebuffer *fb, *fbt;
+       struct drm_property *property, *pt;
+
+       /* _safe iteration throughout: each destroy() unlinks the entry. */
+       list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list,
+                                head) {
+               encoder->funcs->destroy(encoder);
+       }
+
+       list_for_each_entry_safe(connector, ot,
+                                &dev->mode_config.connector_list, head) {
+               connector->funcs->destroy(connector);
+       }
+
+       list_for_each_entry_safe(property, pt, &dev->mode_config.property_list,
+                                head) {
+               drm_property_destroy(dev, property);
+       }
+
+       /* fbs and CRTCs go last so nothing above still references them. */
+       list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
+               fb->funcs->destroy(fb);
+       }
+
+       list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
+               crtc->funcs->destroy(crtc);
+       }
+
+}
+EXPORT_SYMBOL(drm_mode_config_cleanup);
+
+/**
+ * drm_crtc_convert_to_umode - convert a drm_display_mode into a modeinfo
+ * @out: drm_mode_modeinfo struct to return to the user
+ * @in: drm_display_mode to use
+ *
+ * LOCKING:
+ * None.
+ *
+ * Convert a drm_display_mode into a drm_mode_modeinfo structure to return to
+ * the user.
+ */
+void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out,
+                              struct drm_display_mode *in)
+{
+       out->clock = in->clock;
+       out->hdisplay = in->hdisplay;
+       out->hsync_start = in->hsync_start;
+       out->hsync_end = in->hsync_end;
+       out->htotal = in->htotal;
+       out->hskew = in->hskew;
+       out->vdisplay = in->vdisplay;
+       out->vsync_start = in->vsync_start;
+       out->vsync_end = in->vsync_end;
+       out->vtotal = in->vtotal;
+       out->vscan = in->vscan;
+       out->vrefresh = in->vrefresh;
+       out->flags = in->flags;
+       out->type = in->type;
+       /* strncpy may not NUL-terminate; force termination explicitly. */
+       strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
+       out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
+}
+
+/**
+ * drm_crtc_convert_umode - convert a modeinfo into a drm_display_mode
+ * @out: drm_display_mode to return to the caller
+ * @in: drm_mode_modeinfo to use
+ *
+ * LOCKING:
+ * None.
+ *
+ * Convert a drm_mode_modeinfo into a drm_display_mode structure to return to
+ * the caller.
+ */
+void drm_crtc_convert_umode(struct drm_display_mode *out,
+                           struct drm_mode_modeinfo *in)
+{
+       out->clock = in->clock;
+       out->hdisplay = in->hdisplay;
+       out->hsync_start = in->hsync_start;
+       out->hsync_end = in->hsync_end;
+       out->htotal = in->htotal;
+       out->hskew = in->hskew;
+       out->vdisplay = in->vdisplay;
+       out->vsync_start = in->vsync_start;
+       out->vsync_end = in->vsync_end;
+       out->vtotal = in->vtotal;
+       out->vscan = in->vscan;
+       out->vrefresh = in->vrefresh;
+       out->flags = in->flags;
+       out->type = in->type;
+       /* strncpy may not NUL-terminate; force termination explicitly. */
+       strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
+       out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
+}
+
+/**
+ * drm_mode_getresources - get graphics configuration
+ * @inode: inode from the ioctl
+ * @filp: file * from the ioctl
+ * @cmd: cmd from ioctl
+ * @arg: arg from ioctl
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Construct a set of configuration description structures and return
+ * them to the user, including CRTC, connector and framebuffer configuration.
+ *
+ * This is a two-pass ioctl: userspace first calls with the count_* fields
+ * set to 0 to learn the sizes, then again with buffers large enough for
+ * the reported counts.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_getresources(struct drm_device *dev, void *data,
+                         struct drm_file *file_priv)
+{
+       struct drm_mode_card_res *card_res = data;
+       struct list_head *lh;
+       struct drm_framebuffer *fb;
+       struct drm_connector *connector;
+       struct drm_crtc *crtc;
+       struct drm_encoder *encoder;
+       int ret = 0;
+       int connector_count = 0;
+       int crtc_count = 0;
+       int fb_count = 0;
+       int encoder_count = 0;
+       int copied = 0, i;
+       uint32_t __user *fb_id;
+       uint32_t __user *crtc_id;
+       uint32_t __user *connector_id;
+       uint32_t __user *encoder_id;
+       struct drm_mode_group *mode_group;
+
+       mutex_lock(&dev->mode_config.mutex);
+
+       /*
+        * For the non-control nodes we need to limit the list of resources
+        * by IDs in the group list for this node
+        */
+       /* Framebuffers are per-file, so they are always listed in full. */
+       list_for_each(lh, &file_priv->fbs)
+               fb_count++;
+
+       mode_group = &file_priv->master->minor->mode_group;
+       if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
+
+               /* Control node: count every object on the device. */
+               list_for_each(lh, &dev->mode_config.crtc_list)
+                       crtc_count++;
+
+               list_for_each(lh, &dev->mode_config.connector_list)
+                       connector_count++;
+
+               list_for_each(lh, &dev->mode_config.encoder_list)
+                       encoder_count++;
+       } else {
+
+               crtc_count = mode_group->num_crtcs;
+               connector_count = mode_group->num_connectors;
+               encoder_count = mode_group->num_encoders;
+       }
+
+       card_res->max_height = dev->mode_config.max_height;
+       card_res->min_height = dev->mode_config.min_height;
+       card_res->max_width = dev->mode_config.max_width;
+       card_res->min_width = dev->mode_config.min_width;
+
+       /* handle this in 4 parts */
+       /* FBs */
+       if (card_res->count_fbs >= fb_count) {
+               copied = 0;
+               fb_id = (uint32_t __user *)(unsigned long)card_res->fb_id_ptr;
+               list_for_each_entry(fb, &file_priv->fbs, head) {
+                       if (put_user(fb->base.id, fb_id + copied)) {
+                               ret = -EFAULT;
+                               goto out;
+                       }
+                       copied++;
+               }
+       }
+       card_res->count_fbs = fb_count;
+
+       /* CRTCs */
+       if (card_res->count_crtcs >= crtc_count) {
+               copied = 0;
+               crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr;
+               if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
+                       list_for_each_entry(crtc, &dev->mode_config.crtc_list,
+                                           head) {
+                               DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
+                               if (put_user(crtc->base.id, crtc_id + copied)) {
+                                       ret = -EFAULT;
+                                       goto out;
+                               }
+                               copied++;
+                       }
+               } else {
+                       /* CRTCs occupy the first run of the group id_list. */
+                       for (i = 0; i < mode_group->num_crtcs; i++) {
+                               if (put_user(mode_group->id_list[i],
+                                            crtc_id + copied)) {
+                                       ret = -EFAULT;
+                                       goto out;
+                               }
+                               copied++;
+                       }
+               }
+       }
+       card_res->count_crtcs = crtc_count;
+
+       /* Encoders */
+       if (card_res->count_encoders >= encoder_count) {
+               copied = 0;
+               encoder_id = (uint32_t __user *)(unsigned long)card_res->encoder_id_ptr;
+               if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
+                       list_for_each_entry(encoder,
+                                           &dev->mode_config.encoder_list,
+                                           head) {
+                               DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", encoder->base.id,
+                                               drm_get_encoder_name(encoder));
+                               if (put_user(encoder->base.id, encoder_id +
+                                            copied)) {
+                                       ret = -EFAULT;
+                                       goto out;
+                               }
+                               copied++;
+                       }
+               } else {
+                       /* Encoder run starts after the CRTC run. */
+                       for (i = mode_group->num_crtcs; i < mode_group->num_crtcs + mode_group->num_encoders; i++) {
+                               if (put_user(mode_group->id_list[i],
+                                            encoder_id + copied)) {
+                                       ret = -EFAULT;
+                                       goto out;
+                               }
+                               copied++;
+                       }
+
+               }
+       }
+       card_res->count_encoders = encoder_count;
+
+       /* Connectors */
+       if (card_res->count_connectors >= connector_count) {
+               copied = 0;
+               connector_id = (uint32_t __user *)(unsigned long)card_res->connector_id_ptr;
+               if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
+                       list_for_each_entry(connector,
+                                           &dev->mode_config.connector_list,
+                                           head) {
+                               DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+                                       connector->base.id,
+                                       drm_get_connector_name(connector));
+                               if (put_user(connector->base.id,
+                                            connector_id + copied)) {
+                                       ret = -EFAULT;
+                                       goto out;
+                               }
+                               copied++;
+                       }
+               } else {
+                       /* Connector run follows the CRTC and encoder runs. */
+                       int start = mode_group->num_crtcs +
+                               mode_group->num_encoders;
+                       for (i = start; i < start + mode_group->num_connectors; i++) {
+                               if (put_user(mode_group->id_list[i],
+                                            connector_id + copied)) {
+                                       ret = -EFAULT;
+                                       goto out;
+                               }
+                               copied++;
+                       }
+               }
+       }
+       card_res->count_connectors = connector_count;
+
+       DRM_DEBUG_KMS("CRTC[%d] CONNECTORS[%d] ENCODERS[%d]\n", card_res->count_crtcs,
+                 card_res->count_connectors, card_res->count_encoders);
+
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+/**
+ * drm_mode_getcrtc - get CRTC configuration
+ * @inode: inode from the ioctl
+ * @filp: file * from the ioctl
+ * @cmd: cmd from ioctl
+ * @arg: arg from ioctl
+ *
+ * LOCKING:
+ * Takes the mode config lock.
+ *
+ * Construct a CRTC configuration structure to return to the user:
+ * position, gamma size, bound fb (0 if none), and the current mode if
+ * the CRTC is enabled.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_getcrtc(struct drm_device *dev,
+                    void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_crtc *crtc_resp = data;
+       struct drm_crtc *crtc;
+       struct drm_mode_object *obj;
+       int ret = 0;
+
+       mutex_lock(&dev->mode_config.mutex);
+
+       obj = drm_mode_object_find(dev, crtc_resp->crtc_id,
+                                  DRM_MODE_OBJECT_CRTC);
+       if (!obj) {
+               ret = -EINVAL;
+               goto out;
+       }
+       crtc = obj_to_crtc(obj);
+
+       crtc_resp->x = crtc->x;
+       crtc_resp->y = crtc->y;
+       crtc_resp->gamma_size = crtc->gamma_size;
+       if (crtc->fb)
+               crtc_resp->fb_id = crtc->fb->base.id;
+       else
+               crtc_resp->fb_id = 0;
+
+       if (crtc->enabled) {
+
+               drm_crtc_convert_to_umode(&crtc_resp->mode, &crtc->mode);
+               crtc_resp->mode_valid = 1;
+
+       } else {
+               crtc_resp->mode_valid = 0;
+       }
+
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+/**
+ * drm_mode_getconnector - get connector configuration
+ * @inode: inode from the ioctl
+ * @filp: file * from the ioctl
+ * @cmd: cmd from ioctl
+ * @arg: arg from ioctl
+ *
+ * LOCKING:
+ * Takes the mode config lock.
+ *
+ * Construct a connector configuration structure to return to the user:
+ * modes, properties with their current values, and attached encoders.
+ * This is a two-pass ioctl: the first call (count fields zero) triggers
+ * a mode probe and reports sizes, the second fills the user buffers.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_getconnector(struct drm_device *dev, void *data,
+                         struct drm_file *file_priv)
+{
+       struct drm_mode_get_connector *out_resp = data;
+       struct drm_mode_object *obj;
+       struct drm_connector *connector;
+       struct drm_display_mode *mode;
+       int mode_count = 0;
+       int props_count = 0;
+       int encoders_count = 0;
+       int ret = 0;
+       int copied = 0;
+       int i;
+       struct drm_mode_modeinfo u_mode;
+       struct drm_mode_modeinfo __user *mode_ptr;
+       uint32_t __user *prop_ptr;
+       uint64_t __user *prop_values;
+       uint32_t __user *encoder_ptr;
+
+       memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo));
+
+       DRM_DEBUG_KMS("[CONNECTOR:%d:?]\n", out_resp->connector_id);
+
+       mutex_lock(&dev->mode_config.mutex);
+
+       obj = drm_mode_object_find(dev, out_resp->connector_id,
+                                  DRM_MODE_OBJECT_CONNECTOR);
+       if (!obj) {
+               ret = -EINVAL;
+               goto out;
+       }
+       connector = obj_to_connector(obj);
+
+       /* Slot value 0 marks an unused property/encoder entry. */
+       for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
+               if (connector->property_ids[i] != 0) {
+                       props_count++;
+               }
+       }
+
+       for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+               if (connector->encoder_ids[i] != 0) {
+                       encoders_count++;
+               }
+       }
+
+       /* count_modes == 0 means "first pass": probe the hardware now. */
+       if (out_resp->count_modes == 0) {
+               connector->funcs->fill_modes(connector,
+                                            dev->mode_config.max_width,
+                                            dev->mode_config.max_height);
+       }
+
+       /* delayed so we get modes regardless of pre-fill_modes state */
+       list_for_each_entry(mode, &connector->modes, head)
+               mode_count++;
+
+       out_resp->connector_id = connector->base.id;
+       out_resp->connector_type = connector->connector_type;
+       out_resp->connector_type_id = connector->connector_type_id;
+       out_resp->mm_width = connector->display_info.width_mm;
+       out_resp->mm_height = connector->display_info.height_mm;
+       out_resp->subpixel = connector->display_info.subpixel_order;
+       out_resp->connection = connector->status;
+       if (connector->encoder)
+               out_resp->encoder_id = connector->encoder->base.id;
+       else
+               out_resp->encoder_id = 0;
+
+       /*
+        * This ioctl is called twice, once to determine how much space is
+        * needed, and the 2nd time to fill it.
+        */
+       if ((out_resp->count_modes >= mode_count) && mode_count) {
+               copied = 0;
+               mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
+               list_for_each_entry(mode, &connector->modes, head) {
+                       drm_crtc_convert_to_umode(&u_mode, mode);
+                       if (copy_to_user(mode_ptr + copied,
+                                        &u_mode, sizeof(u_mode))) {
+                               ret = -EFAULT;
+                               goto out;
+                       }
+                       copied++;
+               }
+       }
+       out_resp->count_modes = mode_count;
+
+       if ((out_resp->count_props >= props_count) && props_count) {
+               copied = 0;
+               prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
+               prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
+               for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
+                       if (connector->property_ids[i] != 0) {
+                               if (put_user(connector->property_ids[i],
+                                            prop_ptr + copied)) {
+                                       ret = -EFAULT;
+                                       goto out;
+                               }
+
+                               if (put_user(connector->property_values[i],
+                                            prop_values + copied)) {
+                                       ret = -EFAULT;
+                                       goto out;
+                               }
+                               copied++;
+                       }
+               }
+       }
+       out_resp->count_props = props_count;
+
+       if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
+               copied = 0;
+               encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
+               for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+                       if (connector->encoder_ids[i] != 0) {
+                               if (put_user(connector->encoder_ids[i],
+                                            encoder_ptr + copied)) {
+                                       ret = -EFAULT;
+                                       goto out;
+                               }
+                               copied++;
+                       }
+               }
+       }
+       out_resp->count_encoders = encoders_count;
+
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+/**
+ * drm_mode_getencoder - get encoder configuration
+ * @dev: DRM device
+ * @data: ioctl payload (struct drm_mode_get_encoder)
+ * @file_priv: DRM file private
+ *
+ * LOCKING:
+ * Takes the mode config lock.
+ *
+ * Look up the encoder by ID and return its type, currently bound CRTC
+ * (0 if none) and the possible-CRTCs / possible-clones bitmasks.
+ *
+ * RETURNS:
+ * Zero on success, -EINVAL if the ID is unknown.
+ */
+int drm_mode_getencoder(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv)
+{
+       struct drm_mode_get_encoder *enc_resp = data;
+       struct drm_mode_object *obj;
+       struct drm_encoder *encoder;
+       int ret = 0;
+
+       mutex_lock(&dev->mode_config.mutex);
+       obj = drm_mode_object_find(dev, enc_resp->encoder_id,
+                                  DRM_MODE_OBJECT_ENCODER);
+       if (!obj) {
+               ret = -EINVAL;
+               goto out;
+       }
+       encoder = obj_to_encoder(obj);
+
+       if (encoder->crtc)
+               enc_resp->crtc_id = encoder->crtc->base.id;
+       else
+               enc_resp->crtc_id = 0;
+       enc_resp->encoder_type = encoder->encoder_type;
+       enc_resp->encoder_id = encoder->base.id;
+       enc_resp->possible_crtcs = encoder->possible_crtcs;
+       enc_resp->possible_clones = encoder->possible_clones;
+
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+/**
+ * drm_mode_setcrtc - set CRTC configuration
+ * @inode: inode from the ioctl
+ * @filp: file * from the ioctl
+ * @cmd: cmd from ioctl
+ * @arg: arg from ioctl
+ *
+ * LOCKING:
+ * Takes the mode config lock.
+ *
+ * Build a new CRTC configuration based on user request: resolve the CRTC,
+ * framebuffer (fb_id == -1 keeps the currently bound fb), mode, and
+ * connector list, then hand the assembled drm_mode_set to the driver's
+ * set_config hook.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_setcrtc(struct drm_device *dev, void *data,
+                    struct drm_file *file_priv)
+{
+       struct drm_mode_config *config = &dev->mode_config;
+       struct drm_mode_crtc *crtc_req = data;
+       struct drm_mode_object *obj;
+       struct drm_crtc *crtc, *crtcfb;
+       struct drm_connector **connector_set = NULL, *connector;
+       struct drm_framebuffer *fb = NULL;
+       struct drm_display_mode *mode = NULL;
+       struct drm_mode_set set;
+       uint32_t __user *set_connectors_ptr;
+       int ret = 0;
+       int i;
+
+       mutex_lock(&dev->mode_config.mutex);
+       obj = drm_mode_object_find(dev, crtc_req->crtc_id,
+                                  DRM_MODE_OBJECT_CRTC);
+       if (!obj) {
+               DRM_DEBUG_KMS("Unknown CRTC ID %d\n", crtc_req->crtc_id);
+               ret = -EINVAL;
+               goto out;
+       }
+       crtc = obj_to_crtc(obj);
+       DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
+
+       if (crtc_req->mode_valid) {
+               /* If we have a mode we need a framebuffer. */
+               /* If we pass -1, set the mode with the currently bound fb */
+               if (crtc_req->fb_id == -1) {
+                       list_for_each_entry(crtcfb,
+                                           &dev->mode_config.crtc_list, head) {
+                               if (crtcfb == crtc) {
+                                       DRM_DEBUG_KMS("Using current fb for "
+                                                       "setmode\n");
+                                       fb = crtc->fb;
+                               }
+                       }
+               } else {
+                       obj = drm_mode_object_find(dev, crtc_req->fb_id,
+                                                  DRM_MODE_OBJECT_FB);
+                       if (!obj) {
+                               DRM_DEBUG_KMS("Unknown FB ID%d\n",
+                                               crtc_req->fb_id);
+                               ret = -EINVAL;
+                               goto out;
+                       }
+                       fb = obj_to_fb(obj);
+               }
+
+               /*
+                * drm_mode_create() can fail under memory pressure; without
+                * this check drm_crtc_convert_umode() would dereference NULL.
+                */
+               mode = drm_mode_create(dev);
+               if (!mode) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
+               drm_crtc_convert_umode(mode, &crtc_req->mode);
+               drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+       }
+
+       if (crtc_req->count_connectors == 0 && mode) {
+               DRM_DEBUG_KMS("Count connectors is 0 but mode set\n");
+               ret = -EINVAL;
+               goto out;
+       }
+
+       if (crtc_req->count_connectors > 0 && (!mode || !fb)) {
+               DRM_DEBUG_KMS("Count connectors is %d but no mode or fb set\n",
+                         crtc_req->count_connectors);
+               ret = -EINVAL;
+               goto out;
+       }
+
+       if (crtc_req->count_connectors > 0) {
+               u32 out_id;
+
+               /* Avoid unbounded kernel memory allocation */
+               if (crtc_req->count_connectors > config->num_connector) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+
+               connector_set = kmalloc(crtc_req->count_connectors *
+                                       sizeof(struct drm_connector *),
+                                       GFP_KERNEL);
+               if (!connector_set) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
+
+               for (i = 0; i < crtc_req->count_connectors; i++) {
+                       set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
+                       if (get_user(out_id, &set_connectors_ptr[i])) {
+                               ret = -EFAULT;
+                               goto out;
+                       }
+
+                       obj = drm_mode_object_find(dev, out_id,
+                                                  DRM_MODE_OBJECT_CONNECTOR);
+                       if (!obj) {
+                               DRM_DEBUG_KMS("Connector id %d unknown\n",
+                                               out_id);
+                               ret = -EINVAL;
+                               goto out;
+                       }
+                       connector = obj_to_connector(obj);
+                       DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+                                       connector->base.id,
+                                       drm_get_connector_name(connector));
+
+                       connector_set[i] = connector;
+               }
+       }
+
+       set.crtc = crtc;
+       set.x = crtc_req->x;
+       set.y = crtc_req->y;
+       set.mode = mode;
+       set.connectors = connector_set;
+       set.num_connectors = crtc_req->count_connectors;
+       set.fb = fb;
+       ret = crtc->funcs->set_config(&set);
+
+out:
+       /*
+        * NOTE(review): 'mode' is never freed here.  The helper set_config
+        * path copies the mode by value, so later kernels destroy it at this
+        * point; confirm this driver's set_config does not keep the pointer
+        * before adding drm_mode_destroy(dev, mode) here.
+        */
+       kfree(connector_set);
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+/**
+ * drm_mode_cursor_ioctl - set or move a hardware cursor
+ * @dev: DRM device
+ * @data: ioctl payload (struct drm_mode_cursor)
+ * @file_priv: DRM file private
+ *
+ * LOCKING:
+ * Takes the mode config lock.
+ *
+ * Depending on req->flags, binds a cursor buffer (DRM_MODE_CURSOR_BO;
+ * handle 0 turns the cursor off) and/or moves it (DRM_MODE_CURSOR_MOVE)
+ * via the CRTC's cursor hooks.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_cursor_ioctl(struct drm_device *dev,
+                       void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_cursor *req = data;
+       struct drm_mode_object *obj;
+       struct drm_crtc *crtc;
+       int ret = 0;
+
+       if (!req->flags) {
+               DRM_ERROR("no operation set\n");
+               return -EINVAL;
+       }
+
+       mutex_lock(&dev->mode_config.mutex);
+       obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC);
+       if (!obj) {
+               DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id);
+               ret = -EINVAL;
+               goto out;
+       }
+       crtc = obj_to_crtc(obj);
+
+       if (req->flags & DRM_MODE_CURSOR_BO) {
+               if (!crtc->funcs->cursor_set) {
+                       DRM_ERROR("crtc does not support cursor\n");
+                       ret = -ENXIO;
+                       goto out;
+               }
+               /* Turns off the cursor if handle is 0 */
+               ret = crtc->funcs->cursor_set(crtc, file_priv, req->handle,
+                                             req->width, req->height);
+       }
+
+       if (req->flags & DRM_MODE_CURSOR_MOVE) {
+               if (crtc->funcs->cursor_move) {
+                       ret = crtc->funcs->cursor_move(crtc, req->x, req->y);
+               } else {
+                       DRM_ERROR("crtc does not support cursor\n");
+                       ret = -EFAULT;
+                       goto out;
+               }
+       }
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+/**
+ * drm_mode_addfb - add an FB to the graphics configuration
+ * @inode: inode from the ioctl
+ * @filp: file * from the ioctl
+ * @cmd: cmd from ioctl
+ * @arg: arg from ioctl
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Add a new FB to the specified CRTC, given a user request.  The new
+ * framebuffer is created by the driver's fb_create hook and tracked on
+ * this file's fb list so it is cleaned up on file release.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_addfb(struct drm_device *dev,
+                  void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_fb_cmd *r = data;
+       struct drm_mode_config *config = &dev->mode_config;
+       struct drm_framebuffer *fb;
+       int ret = 0;
+
+       /* Reject dimensions outside the driver-advertised limits. */
+       if ((config->min_width > r->width) || (r->width > config->max_width)) {
+               DRM_ERROR("mode new framebuffer width not within limits\n");
+               return -EINVAL;
+       }
+       if ((config->min_height > r->height) || (r->height > config->max_height)) {
+               DRM_ERROR("mode new framebuffer height not within limits\n");
+               return -EINVAL;
+       }
+
+       mutex_lock(&dev->mode_config.mutex);
+
+       /* TODO check buffer is sufficently large */
+       /* TODO setup destructor callback */
+
+       fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
+       if (IS_ERR(fb)) {
+               DRM_ERROR("could not create framebuffer\n");
+               ret = PTR_ERR(fb);
+               goto out;
+       }
+
+       r->fb_id = fb->base.id;
+       list_add(&fb->filp_head, &file_priv->fbs);
+       DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
+
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+/**
+ * drm_mode_rmfb - remove an FB from the configuration
+ * @inode: inode from the ioctl
+ * @filp: file * from the ioctl
+ * @cmd: cmd from ioctl
+ * @arg: arg from ioctl
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Remove the FB specified by the user.  Only framebuffers on this file's
+ * own fb list may be removed.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_rmfb(struct drm_device *dev,
+                  void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_object *obj;
+       struct drm_framebuffer *fb = NULL;
+       struct drm_framebuffer *fbl = NULL;
+       uint32_t *id = data;
+       int ret = 0;
+       int found = 0;
+
+       mutex_lock(&dev->mode_config.mutex);
+       obj = drm_mode_object_find(dev, *id, DRM_MODE_OBJECT_FB);
+       /* TODO check that we realy get a framebuffer back. */
+       if (!obj) {
+               DRM_ERROR("mode invalid framebuffer id\n");
+               ret = -EINVAL;
+               goto out;
+       }
+       fb = obj_to_fb(obj);
+
+       /* Refuse to remove fbs created by another file. */
+       list_for_each_entry(fbl, &file_priv->fbs, filp_head)
+               if (fb == fbl)
+                       found = 1;
+
+       if (!found) {
+               DRM_ERROR("tried to remove a fb that we didn't own\n");
+               ret = -EINVAL;
+               goto out;
+       }
+
+       /* TODO release all crtc connected to the framebuffer */
+       /* TODO unhock the destructor from the buffer object */
+
+       list_del(&fb->filp_head);
+       fb->funcs->destroy(fb);
+
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+/**
+ * drm_mode_getfb - get FB info
+ * @inode: inode from the ioctl
+ * @filp: file * from the ioctl
+ * @cmd: cmd from ioctl
+ * @arg: arg from ioctl
+ *
+ * LOCKING:
+ * Takes the mode config lock.
+ *
+ * Lookup the FB given its ID and return info about it: dimensions, depth,
+ * bpp, pitch, and a driver-created buffer handle.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_getfb(struct drm_device *dev,
+                  void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_fb_cmd *r = data;
+       struct drm_mode_object *obj;
+       struct drm_framebuffer *fb;
+       int ret = 0;
+
+       mutex_lock(&dev->mode_config.mutex);
+       obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
+       if (!obj) {
+               DRM_ERROR("invalid framebuffer id\n");
+               ret = -EINVAL;
+               goto out;
+       }
+       fb = obj_to_fb(obj);
+
+       r->height = fb->height;
+       r->width = fb->width;
+       r->depth = fb->depth;
+       r->bpp = fb->bits_per_pixel;
+       r->pitch = fb->pitch;
+       /*
+        * create_handle can fail (e.g. -ENOMEM); propagate that instead of
+        * returning success with an uninitialised r->handle.
+        */
+       ret = fb->funcs->create_handle(fb, file_priv, &r->handle);
+
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+/**
+ * drm_mode_dirtyfb_ioctl - flush dirty regions of a framebuffer
+ * @dev: DRM device
+ * @data: ioctl payload (struct drm_mode_fb_dirty_cmd)
+ * @file_priv: DRM file private
+ *
+ * LOCKING:
+ * Takes the mode config lock.
+ *
+ * Copies the user-supplied clip rectangles and passes them to the
+ * framebuffer's dirty hook.  Returns -ENOSYS if the driver has no dirty
+ * support.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
+                          void *data, struct drm_file *file_priv)
+{
+       struct drm_clip_rect __user *clips_ptr;
+       struct drm_clip_rect *clips = NULL;
+       struct drm_mode_fb_dirty_cmd *r = data;
+       struct drm_mode_object *obj;
+       struct drm_framebuffer *fb;
+       unsigned flags;
+       int num_clips;
+       int ret = 0;
+
+       mutex_lock(&dev->mode_config.mutex);
+       obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
+       if (!obj) {
+               DRM_ERROR("invalid framebuffer id\n");
+               ret = -EINVAL;
+               goto out_err1;
+       }
+       fb = obj_to_fb(obj);
+
+       /*
+        * Cap the user-controlled clip count so num_clips * sizeof(*clips)
+        * cannot overflow and userspace cannot force huge allocations
+        * (CVE-2012-0044; 256 matches DRM_MODE_FB_DIRTY_MAX_CLIPS in later
+        * kernels).
+        */
+       if (r->num_clips > 256) {
+               ret = -EINVAL;
+               goto out_err1;
+       }
+
+       num_clips = r->num_clips;
+       clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
+
+       /* A clip count without a pointer (or vice versa) is malformed. */
+       if (!num_clips != !clips_ptr) {
+               ret = -EINVAL;
+               goto out_err1;
+       }
+
+       flags = DRM_MODE_FB_DIRTY_FLAGS & r->flags;
+
+       /* If userspace annotates copy, clips must come in pairs */
+       if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY && (num_clips % 2)) {
+               ret = -EINVAL;
+               goto out_err1;
+       }
+
+       if (num_clips && clips_ptr) {
+               clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
+               if (!clips) {
+                       ret = -ENOMEM;
+                       goto out_err1;
+               }
+
+               ret = copy_from_user(clips, clips_ptr,
+                                    num_clips * sizeof(*clips));
+               if (ret) {
+                       ret = -EFAULT;
+                       goto out_err2;
+               }
+       }
+
+       if (fb->funcs->dirty) {
+               ret = fb->funcs->dirty(fb, file_priv, flags, r->color,
+                                      clips, num_clips);
+       } else {
+               ret = -ENOSYS;
+               goto out_err2;
+       }
+
+out_err2:
+       kfree(clips);
+out_err1:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+
+/**
+ * drm_fb_release - remove and free the FBs on this file
+ * @filp: file * from the ioctl
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Destroy all the FBs associated with @filp.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+void drm_fb_release(struct drm_file *priv)
+{
+       struct drm_device *dev = priv->minor->dev;
+       struct drm_framebuffer *fb, *tfb;
+
+       mutex_lock(&dev->mode_config.mutex);
+       /* Unlink and destroy every fb still owned by this file. */
+       list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
+               list_del(&fb->filp_head);
+               /* driver-specific teardown; presumably frees fb — the
+                * callback contract is not visible here */
+               fb->funcs->destroy(fb);
+       }
+       mutex_unlock(&dev->mode_config.mutex);
+}
+
+/**
+ * drm_mode_attachmode - add a mode to the user mode list
+ * @dev: DRM device
+ * @connector: connector to add the mode to
+ * @mode: mode to add
+ *
+ * Add @mode to @connector's user mode list.
+ */
+static int drm_mode_attachmode(struct drm_device *dev,
+                              struct drm_connector *connector,
+                              struct drm_display_mode *mode)
+{
+       /* Append @mode to the connector's user-requested mode list.
+        * Cannot fail; always reports success. */
+       list_add_tail(&mode->head, &connector->user_modes);
+       return 0;
+}
+
+/*
+ * drm_mode_attachmode_crtc - attach a user mode to every connector on a CRTC
+ *
+ * The first matching connector consumes @mode itself; subsequent matches
+ * get duplicates, since a mode can only sit on one list at a time.
+ */
+int drm_mode_attachmode_crtc(struct drm_device *dev, struct drm_crtc *crtc,
+                            struct drm_display_mode *mode)
+{
+       struct drm_connector *connector;
+       int ret = 0;
+       struct drm_display_mode *dup_mode;
+       int need_dup = 0;
+
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               /* NOTE(review): stops at the first connector without an
+                * encoder; `continue` may be intended here — confirm. */
+               if (!connector->encoder)
+                       break;
+               if (connector->encoder->crtc == crtc) {
+                       if (need_dup) {
+                               /* Duplication allocates and can fail; the
+                                * old code passed a NULL dup_mode straight
+                                * into list_add_tail() on OOM. */
+                               dup_mode = drm_mode_duplicate(dev, mode);
+                               if (!dup_mode)
+                                       return -ENOMEM;
+                       } else
+                               dup_mode = mode;
+                       ret = drm_mode_attachmode(dev, connector, dup_mode);
+                       if (ret)
+                               return ret;
+                       need_dup = 1;
+               }
+       }
+       return 0;
+}
+EXPORT_SYMBOL(drm_mode_attachmode_crtc);
+
+static int drm_mode_detachmode(struct drm_device *dev,
+                              struct drm_connector *connector,
+                              struct drm_display_mode *mode)
+{
+       struct drm_display_mode *cur, *tmp;
+
+       /* Remove and destroy the first user mode equal to @mode, if any. */
+       list_for_each_entry_safe(cur, tmp, &connector->user_modes, head) {
+               if (!drm_mode_equal(cur, mode))
+                       continue;
+               list_del(&cur->head);
+               drm_mode_destroy(dev, cur);
+               return 0;
+       }
+
+       /* No matching mode was attached to this connector. */
+       return -EINVAL;
+}
+
+int drm_mode_detachmode_crtc(struct drm_device *dev, struct drm_display_mode *mode)
+{
+       struct drm_connector *connector;
+
+       /* Best-effort: try every connector and ignore "not attached". */
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+               drm_mode_detachmode(dev, connector, mode);
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_mode_detachmode_crtc);
+
+/**
+ * drm_fb_attachmode - Attach a user mode to an connector
+ * @inode: inode from the ioctl
+ * @filp: file * from the ioctl
+ * @cmd: cmd from ioctl
+ * @arg: arg from ioctl
+ *
+ * This attaches a user specified mode to an connector.
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+/*
+ * drm_mode_attachmode_ioctl - attach a user-supplied mode to a connector
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_attachmode_ioctl(struct drm_device *dev,
+                             void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_mode_cmd *cmd = data;
+       struct drm_mode_object *mo;
+       struct drm_connector *connector;
+       struct drm_display_mode *mode;
+       int ret = 0;
+
+       mutex_lock(&dev->mode_config.mutex);
+
+       mo = drm_mode_object_find(dev, cmd->connector_id,
+                                 DRM_MODE_OBJECT_CONNECTOR);
+       if (mo == NULL) {
+               ret = -EINVAL;
+               goto unlock;
+       }
+       connector = obj_to_connector(mo);
+
+       mode = drm_mode_create(dev);
+       if (mode == NULL) {
+               ret = -ENOMEM;
+               goto unlock;
+       }
+
+       /* Translate the ioctl mode description into a kernel mode. */
+       drm_crtc_convert_umode(mode, &cmd->mode);
+
+       ret = drm_mode_attachmode(dev, connector, mode);
+unlock:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+
+/**
+ * drm_fb_detachmode - Detach a user specified mode from an connector
+ * @inode: inode from the ioctl
+ * @filp: file * from the ioctl
+ * @cmd: cmd from ioctl
+ * @arg: arg from ioctl
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_detachmode_ioctl(struct drm_device *dev,
+                             void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_object *obj;
+       struct drm_mode_mode_cmd *mode_cmd = data;
+       struct drm_connector *connector;
+       struct drm_display_mode mode;
+       struct drm_mode_modeinfo *umode = &mode_cmd->mode;
+       int ret = 0;
+
+       mutex_lock(&dev->mode_config.mutex);
+
+       obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR);
+       if (!obj) {
+               ret = -EINVAL;
+               goto out;
+       }
+       connector = obj_to_connector(obj);
+
+       /* A stack copy suffices here: detachmode matches by content
+        * (drm_mode_equal), not by object identity. */
+       drm_crtc_convert_umode(&mode, umode);
+       ret = drm_mode_detachmode(dev, connector, &mode);
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+/*
+ * drm_property_create - allocate and register a mode property
+ *
+ * Allocates the property and, when @num_values is non-zero, its backing
+ * value table, then registers it as a mode object on @dev.
+ *
+ * RETURNS:
+ * The new property, or NULL on allocation failure.
+ */
+struct drm_property *drm_property_create(struct drm_device *dev, int flags,
+                                        const char *name, int num_values)
+{
+       struct drm_property *property = NULL;
+
+       property = kzalloc(sizeof(struct drm_property), GFP_KERNEL);
+       if (!property)
+               return NULL;
+
+       if (num_values) {
+               property->values = kzalloc(sizeof(uint64_t)*num_values, GFP_KERNEL);
+               if (!property->values)
+                       goto fail;
+       }
+
+       drm_mode_object_get(dev, &property->base, DRM_MODE_OBJECT_PROPERTY);
+       property->flags = flags;
+       property->num_values = num_values;
+       INIT_LIST_HEAD(&property->enum_blob_list);
+
+       if (name) {
+               strncpy(property->name, name, DRM_PROP_NAME_LEN);
+               /* strncpy does not terminate when name fills the buffer;
+                * terminate explicitly, as the rest of this file does. */
+               property->name[DRM_PROP_NAME_LEN-1] = '\0';
+       }
+
+       list_add_tail(&property->head, &dev->mode_config.property_list);
+       return property;
+fail:
+       kfree(property);
+       return NULL;
+}
+EXPORT_SYMBOL(drm_property_create);
+
+/*
+ * drm_property_add_enum - add (or rename) an enum entry on a property
+ *
+ * @index selects the slot in property->values[] that mirrors the entry's
+ * value.  An existing entry with the same value is renamed in place.
+ *
+ * RETURNS:
+ * Zero on success, -EINVAL for non-enum properties or out-of-range
+ * @index, -ENOMEM on allocation failure.
+ */
+int drm_property_add_enum(struct drm_property *property, int index,
+                         uint64_t value, const char *name)
+{
+       struct drm_property_enum *prop_enum;
+
+       if (!(property->flags & DRM_MODE_PROP_ENUM))
+               return -EINVAL;
+
+       /* If the value already exists, just update its name. */
+       if (!list_empty(&property->enum_blob_list)) {
+               list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
+                       if (prop_enum->value == value) {
+                               strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
+                               prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
+                               return 0;
+                       }
+               }
+       }
+
+       /* New entry: reject an out-of-range index rather than writing
+        * past the end of property->values[]. */
+       if (index < 0 || index >= property->num_values)
+               return -EINVAL;
+
+       prop_enum = kzalloc(sizeof(struct drm_property_enum), GFP_KERNEL);
+       if (!prop_enum)
+               return -ENOMEM;
+
+       strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
+       prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
+       prop_enum->value = value;
+
+       property->values[index] = value;
+       list_add_tail(&prop_enum->head, &property->enum_blob_list);
+       return 0;
+}
+EXPORT_SYMBOL(drm_property_add_enum);
+
+/*
+ * drm_property_destroy - free a property and everything hanging off it
+ */
+void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
+{
+       struct drm_property_enum *prop_enum, *pt;
+
+       /* Free every enum/blob entry attached to the property. */
+       list_for_each_entry_safe(prop_enum, pt, &property->enum_blob_list, head) {
+               list_del(&prop_enum->head);
+               kfree(prop_enum);
+       }
+
+       /* kfree(NULL) is a no-op; the num_values guard was redundant
+        * since values is only allocated when num_values != 0. */
+       kfree(property->values);
+       drm_mode_object_put(dev, &property->base);
+       list_del(&property->head);
+       kfree(property);
+}
+EXPORT_SYMBOL(drm_property_destroy);
+
+int drm_connector_attach_property(struct drm_connector *connector,
+                              struct drm_property *property, uint64_t init_val)
+{
+       int slot;
+
+       /* Claim the first free property slot on the connector. */
+       for (slot = 0; slot < DRM_CONNECTOR_MAX_PROPERTY; slot++) {
+               if (connector->property_ids[slot] != 0)
+                       continue;
+               connector->property_ids[slot] = property->base.id;
+               connector->property_values[slot] = init_val;
+               return 0;
+       }
+
+       /* Every slot is already in use. */
+       return -EINVAL;
+}
+EXPORT_SYMBOL(drm_connector_attach_property);
+
+int drm_connector_property_set_value(struct drm_connector *connector,
+                                 struct drm_property *property, uint64_t value)
+{
+       int slot;
+
+       /* Store the value in the slot that holds this property's id. */
+       for (slot = 0; slot < DRM_CONNECTOR_MAX_PROPERTY; slot++) {
+               if (connector->property_ids[slot] != property->base.id)
+                       continue;
+               connector->property_values[slot] = value;
+               return 0;
+       }
+
+       /* Property was never attached to this connector. */
+       return -EINVAL;
+}
+EXPORT_SYMBOL(drm_connector_property_set_value);
+
+int drm_connector_property_get_value(struct drm_connector *connector,
+                                 struct drm_property *property, uint64_t *val)
+{
+       int slot;
+
+       /* Look up the stored value for this property's id. */
+       for (slot = 0; slot < DRM_CONNECTOR_MAX_PROPERTY; slot++) {
+               if (connector->property_ids[slot] != property->base.id)
+                       continue;
+               *val = connector->property_values[slot];
+               return 0;
+       }
+
+       /* Property was never attached to this connector. */
+       return -EINVAL;
+}
+EXPORT_SYMBOL(drm_connector_property_get_value);
+
+/*
+ * drm_mode_getproperty_ioctl - return property metadata to userspace
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Two-pass ioctl convention: the entry counts are always written back,
+ * and the tables are only copied out when the user buffers (as declared
+ * by the incoming counts) are large enough, so userspace can call once
+ * to size its buffers and again to fetch the data.
+ */
+int drm_mode_getproperty_ioctl(struct drm_device *dev,
+                              void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_object *obj;
+       struct drm_mode_get_property *out_resp = data;
+       struct drm_property *property;
+       int enum_count = 0;
+       int blob_count = 0;
+       int value_count = 0;
+       int ret = 0, i;
+       int copied;
+       struct drm_property_enum *prop_enum;
+       struct drm_mode_property_enum __user *enum_ptr;
+       struct drm_property_blob *prop_blob;
+       uint32_t *blob_id_ptr;
+       uint64_t __user *values_ptr;
+       uint32_t __user *blob_length_ptr;
+
+       mutex_lock(&dev->mode_config.mutex);
+       obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
+       if (!obj) {
+               ret = -EINVAL;
+               goto done;
+       }
+       property = obj_to_property(obj);
+
+       /* First pass: count entries so the counts can be reported. */
+       if (property->flags & DRM_MODE_PROP_ENUM) {
+               list_for_each_entry(prop_enum, &property->enum_blob_list, head)
+                       enum_count++;
+       } else if (property->flags & DRM_MODE_PROP_BLOB) {
+               list_for_each_entry(prop_blob, &property->enum_blob_list, head)
+                       blob_count++;
+       }
+
+       value_count = property->num_values;
+
+       strncpy(out_resp->name, property->name, DRM_PROP_NAME_LEN);
+       out_resp->name[DRM_PROP_NAME_LEN-1] = 0;
+       out_resp->flags = property->flags;
+
+       /* Copy the raw value table only if the user buffer is big enough. */
+       if ((out_resp->count_values >= value_count) && value_count) {
+               values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
+               for (i = 0; i < value_count; i++) {
+                       if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
+                               ret = -EFAULT;
+                               goto done;
+                       }
+               }
+       }
+       out_resp->count_values = value_count;
+
+       if (property->flags & DRM_MODE_PROP_ENUM) {
+               if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
+                       copied = 0;
+                       enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
+                       list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
+
+                               if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
+                                       ret = -EFAULT;
+                                       goto done;
+                               }
+
+                               if (copy_to_user(&enum_ptr[copied].name,
+                                                &prop_enum->name, DRM_PROP_NAME_LEN)) {
+                                       ret = -EFAULT;
+                                       goto done;
+                               }
+                               copied++;
+                       }
+               }
+               out_resp->count_enum_blobs = enum_count;
+       }
+
+       /* Blob properties reuse enum_blob_ptr for the blob ids and
+        * values_ptr for the blob lengths. */
+       if (property->flags & DRM_MODE_PROP_BLOB) {
+               if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
+                       copied = 0;
+                       blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
+                       blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
+
+                       list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
+                               if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
+                                       ret = -EFAULT;
+                                       goto done;
+                               }
+
+                               if (put_user(prop_blob->length, blob_length_ptr + copied)) {
+                                       ret = -EFAULT;
+                                       goto done;
+                               }
+
+                               copied++;
+                       }
+               }
+               out_resp->count_enum_blobs = blob_count;
+       }
+done:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+static struct drm_property_blob *drm_property_create_blob(struct drm_device *dev, int length,
+                                                         void *data)
+{
+       struct drm_property_blob *blob;
+
+       /* Reject empty payloads. */
+       if (length == 0 || data == NULL)
+               return NULL;
+
+       /* Header and payload share one allocation; the copied data
+        * starts immediately after the struct. */
+       blob = kzalloc(sizeof(*blob) + length, GFP_KERNEL);
+       if (blob == NULL)
+               return NULL;
+
+       blob->data = (char *)blob + sizeof(*blob);
+       blob->length = length;
+       memcpy(blob->data, data, length);
+
+       drm_mode_object_get(dev, &blob->base, DRM_MODE_OBJECT_BLOB);
+       list_add_tail(&blob->head, &dev->mode_config.property_blob_list);
+       return blob;
+}
+
+/* Unregister the blob's mode object, unlink it and free it. */
+static void drm_property_destroy_blob(struct drm_device *dev,
+                              struct drm_property_blob *blob)
+{
+       drm_mode_object_put(dev, &blob->base);
+       list_del(&blob->head);
+       kfree(blob);
+}
+
+/*
+ * drm_mode_getblob_ioctl - copy a property blob's data to userspace
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * The data is copied only when the caller's declared length matches the
+ * blob exactly; the real length is always reported back so userspace
+ * can size its buffer and retry.
+ */
+int drm_mode_getblob_ioctl(struct drm_device *dev,
+                          void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_object *obj;
+       struct drm_mode_get_blob *out_resp = data;
+       struct drm_property_blob *blob;
+       int ret = 0;
+       void *blob_ptr;
+
+       mutex_lock(&dev->mode_config.mutex);
+       obj = drm_mode_object_find(dev, out_resp->blob_id, DRM_MODE_OBJECT_BLOB);
+       if (!obj) {
+               ret = -EINVAL;
+               goto done;
+       }
+       blob = obj_to_blob(obj);
+
+       if (out_resp->length == blob->length) {
+               blob_ptr = (void *)(unsigned long)out_resp->data;
+               if (copy_to_user(blob_ptr, blob->data, blob->length)){
+                       ret = -EFAULT;
+                       goto done;
+               }
+       }
+       out_resp->length = blob->length;
+
+done:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+/*
+ * drm_mode_connector_update_edid_property - replace a connector's EDID blob
+ *
+ * Destroys any previous EDID blob, then either clears the property
+ * (@edid == NULL) or stores a new blob holding the EDID data.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_connector_update_edid_property(struct drm_connector *connector,
+                                           struct edid *edid)
+{
+       struct drm_device *dev = connector->dev;
+       int ret = 0, size;
+
+       if (connector->edid_blob_ptr)
+               drm_property_destroy_blob(dev, connector->edid_blob_ptr);
+
+       /* Delete edid, when there is none. */
+       if (!edid) {
+               connector->edid_blob_ptr = NULL;
+               ret = drm_connector_property_set_value(connector, dev->mode_config.edid_property, 0);
+               return ret;
+       }
+
+       size = EDID_LENGTH * (1 + edid->extensions);
+       connector->edid_blob_ptr = drm_property_create_blob(connector->dev,
+                                                           size, edid);
+       /* Blob allocation can fail; the old code dereferenced a NULL
+        * edid_blob_ptr below in that case. */
+       if (!connector->edid_blob_ptr)
+               return -EINVAL;
+
+       ret = drm_connector_property_set_value(connector,
+                                              dev->mode_config.edid_property,
+                                              connector->edid_blob_ptr->base.id);
+
+       return ret;
+}
+EXPORT_SYMBOL(drm_mode_connector_update_edid_property);
+
+/*
+ * drm_mode_connector_property_set_ioctl - set a connector property from
+ * userspace
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Validates that the property is attached to the connector, is mutable,
+ * and that the value is in range (range props) or in the value table
+ * (other props) before invoking the driver hook.
+ */
+int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
+                                      void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_connector_set_property *out_resp = data;
+       struct drm_mode_object *obj;
+       struct drm_property *property;
+       struct drm_connector *connector;
+       int ret = -EINVAL;
+       int i;
+
+       mutex_lock(&dev->mode_config.mutex);
+
+       obj = drm_mode_object_find(dev, out_resp->connector_id, DRM_MODE_OBJECT_CONNECTOR);
+       if (!obj) {
+               goto out;
+       }
+       connector = obj_to_connector(obj);
+
+       /* The property must actually be attached to this connector. */
+       for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
+               if (connector->property_ids[i] == out_resp->prop_id)
+                       break;
+       }
+
+       if (i == DRM_CONNECTOR_MAX_PROPERTY) {
+               goto out;
+       }
+
+       obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
+       if (!obj) {
+               goto out;
+       }
+       property = obj_to_property(obj);
+
+       if (property->flags & DRM_MODE_PROP_IMMUTABLE)
+               goto out;
+
+       if (property->flags & DRM_MODE_PROP_RANGE) {
+               /* Range properties: values[0]/values[1] are min/max. */
+               if (out_resp->value < property->values[0])
+                       goto out;
+
+               if (out_resp->value > property->values[1])
+                       goto out;
+       } else {
+               /* Enum/other properties: value must be in the table. */
+               int found = 0;
+               for (i = 0; i < property->num_values; i++) {
+                       if (property->values[i] == out_resp->value) {
+                               found = 1;
+                               break;
+                       }
+               }
+               if (!found) {
+                       goto out;
+               }
+       }
+
+       /* Do DPMS ourselves */
+       if (property == connector->dev->mode_config.dpms_property) {
+               if (connector->funcs->dpms)
+                       (*connector->funcs->dpms)(connector, (int) out_resp->value);
+               ret = 0;
+       } else if (connector->funcs->set_property)
+               ret = connector->funcs->set_property(connector, property, out_resp->value);
+
+       /* store the property value if successful */
+       if (!ret)
+               drm_connector_property_set_value(connector, property, out_resp->value);
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+int drm_mode_connector_attach_encoder(struct drm_connector *connector,
+                                     struct drm_encoder *encoder)
+{
+       int slot;
+
+       /* Record the encoder's id in the first free slot. */
+       for (slot = 0; slot < DRM_CONNECTOR_MAX_ENCODER; slot++) {
+               if (connector->encoder_ids[slot] != 0)
+                       continue;
+               connector->encoder_ids[slot] = encoder->base.id;
+               return 0;
+       }
+
+       /* All encoder slots occupied. */
+       return -ENOMEM;
+}
+EXPORT_SYMBOL(drm_mode_connector_attach_encoder);
+
+void drm_mode_connector_detach_encoder(struct drm_connector *connector,
+                                   struct drm_encoder *encoder)
+{
+       int slot;
+
+       /* Clear the slot holding this encoder's id, if present, and
+        * drop the active-encoder pointer when it matches. */
+       for (slot = 0; slot < DRM_CONNECTOR_MAX_ENCODER; slot++) {
+               if (connector->encoder_ids[slot] != encoder->base.id)
+                       continue;
+               connector->encoder_ids[slot] = 0;
+               if (connector->encoder == encoder)
+                       connector->encoder = NULL;
+               break;
+       }
+}
+EXPORT_SYMBOL(drm_mode_connector_detach_encoder);
+
+bool drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
+                                 int gamma_size)
+{
+       /* Three channels of gamma_size 16-bit entries each (R, G, B). */
+       crtc->gamma_size = gamma_size;
+       crtc->gamma_store = kzalloc(gamma_size * sizeof(uint16_t) * 3,
+                                   GFP_KERNEL);
+       if (crtc->gamma_store != NULL)
+               return true;
+
+       crtc->gamma_size = 0;
+       return false;
+}
+EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size);
+
+/*
+ * drm_mode_gamma_set_ioctl - load a gamma LUT from userspace
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * gamma_store is laid out as three consecutive channel tables (red,
+ * green, blue), each gamma_size 16-bit entries long; the three user
+ * arrays are copied in at the matching offsets.
+ */
+int drm_mode_gamma_set_ioctl(struct drm_device *dev,
+                            void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_crtc_lut *crtc_lut = data;
+       struct drm_mode_object *obj;
+       struct drm_crtc *crtc;
+       void *r_base, *g_base, *b_base;
+       int size;
+       int ret = 0;
+
+       mutex_lock(&dev->mode_config.mutex);
+       obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
+       if (!obj) {
+               ret = -EINVAL;
+               goto out;
+       }
+       crtc = obj_to_crtc(obj);
+
+       /* memcpy into gamma store */
+       if (crtc_lut->gamma_size != crtc->gamma_size) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       size = crtc_lut->gamma_size * (sizeof(uint16_t));
+       r_base = crtc->gamma_store;
+       if (copy_from_user(r_base, (void __user *)(unsigned long)crtc_lut->red, size)) {
+               ret = -EFAULT;
+               goto out;
+       }
+
+       g_base = r_base + size;
+       if (copy_from_user(g_base, (void __user *)(unsigned long)crtc_lut->green, size)) {
+               ret = -EFAULT;
+               goto out;
+       }
+
+       b_base = g_base + size;
+       if (copy_from_user(b_base, (void __user *)(unsigned long)crtc_lut->blue, size)) {
+               ret = -EFAULT;
+               goto out;
+       }
+
+       /* Hand the complete LUT to the driver. */
+       crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size);
+
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+
+}
+
+/*
+ * drm_mode_gamma_get_ioctl - copy the current gamma LUT to userspace
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Mirror of drm_mode_gamma_set_ioctl: reads the three channel tables
+ * out of gamma_store (red, green, blue, in that order).
+ */
+int drm_mode_gamma_get_ioctl(struct drm_device *dev,
+                            void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_crtc_lut *crtc_lut = data;
+       struct drm_mode_object *obj;
+       struct drm_crtc *crtc;
+       void *r_base, *g_base, *b_base;
+       int size;
+       int ret = 0;
+
+       mutex_lock(&dev->mode_config.mutex);
+       obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
+       if (!obj) {
+               ret = -EINVAL;
+               goto out;
+       }
+       crtc = obj_to_crtc(obj);
+
+       /* memcpy into gamma store */
+       if (crtc_lut->gamma_size != crtc->gamma_size) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       size = crtc_lut->gamma_size * (sizeof(uint16_t));
+       r_base = crtc->gamma_store;
+       if (copy_to_user((void __user *)(unsigned long)crtc_lut->red, r_base, size)) {
+               ret = -EFAULT;
+               goto out;
+       }
+
+       g_base = r_base + size;
+       if (copy_to_user((void __user *)(unsigned long)crtc_lut->green, g_base, size)) {
+               ret = -EFAULT;
+               goto out;
+       }
+
+       b_base = g_base + size;
+       if (copy_to_user((void __user *)(unsigned long)crtc_lut->blue, b_base, size)) {
+               ret = -EFAULT;
+               goto out;
+       }
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+/*
+ * drm_mode_page_flip_ioctl - schedule a page flip on a CRTC
+ *
+ * LOCKING:
+ * Takes mode config lock; event-space accounting under dev->event_lock.
+ *
+ * When DRM_MODE_PAGE_FLIP_EVENT is set, event space is reserved and a
+ * completion event is allocated before the flip is queued; on failure
+ * that reservation must be undone.
+ */
+int drm_mode_page_flip_ioctl(struct drm_device *dev,
+                            void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_crtc_page_flip *page_flip = data;
+       struct drm_mode_object *obj;
+       struct drm_crtc *crtc;
+       struct drm_framebuffer *fb;
+       struct drm_pending_vblank_event *e = NULL;
+       unsigned long flags;
+       int ret = -EINVAL;
+
+       if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
+           page_flip->reserved != 0)
+               return -EINVAL;
+
+       mutex_lock(&dev->mode_config.mutex);
+       obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC);
+       if (!obj)
+               goto out;
+       crtc = obj_to_crtc(obj);
+
+       if (crtc->fb == NULL) {
+               /* The framebuffer is currently unbound, presumably
+                * due to a hotplug event, that userspace has not
+                * yet discovered.
+                */
+               ret = -EBUSY;
+               goto out;
+       }
+
+       if (crtc->funcs->page_flip == NULL)
+               goto out;
+
+       obj = drm_mode_object_find(dev, page_flip->fb_id, DRM_MODE_OBJECT_FB);
+       if (!obj)
+               goto out;
+       fb = obj_to_fb(obj);
+
+       if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
+               ret = -ENOMEM;
+               spin_lock_irqsave(&dev->event_lock, flags);
+               if (file_priv->event_space < sizeof e->event) {
+                       spin_unlock_irqrestore(&dev->event_lock, flags);
+                       goto out;
+               }
+               file_priv->event_space -= sizeof e->event;
+               spin_unlock_irqrestore(&dev->event_lock, flags);
+
+               e = kzalloc(sizeof *e, GFP_KERNEL);
+               if (e == NULL) {
+                       spin_lock_irqsave(&dev->event_lock, flags);
+                       file_priv->event_space += sizeof e->event;
+                       spin_unlock_irqrestore(&dev->event_lock, flags);
+                       goto out;
+               }
+
+               e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
+               e->event.base.length = sizeof e->event;
+               e->event.user_data = page_flip->user_data;
+               e->base.event = &e->event.base;
+               e->base.file_priv = file_priv;
+               e->base.destroy =
+                       (void (*) (struct drm_pending_event *)) kfree;
+       }
+
+       ret = crtc->funcs->page_flip(crtc, fb, e);
+       if (ret) {
+               /* Undo the event reservation only when one was made
+                * above; unconditionally crediting event_space here
+                * corrupted the accounting for event-less flips. */
+               if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
+                       spin_lock_irqsave(&dev->event_lock, flags);
+                       file_priv->event_space += sizeof e->event;
+                       spin_unlock_irqrestore(&dev->event_lock, flags);
+                       kfree(e);
+               }
+       }
+
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
diff --git a/services4/3rdparty/linux_drm/drm_crtc_helper.c b/services4/3rdparty/linux_drm/drm_crtc_helper.c
new file mode 100644 (file)
index 0000000..2d4e17a
--- /dev/null
@@ -0,0 +1,949 @@
+/*
+ * Copyright (c) 2006-2008 Intel Corporation
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ *
+ * DRM core CRTC related functions
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ *
+ * Authors:
+ *      Keith Packard
+ *     Eric Anholt <eric@anholt.net>
+ *      Dave Airlie <airlied@linux.ie>
+ *      Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+#include "drmP.h"
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+#include "drm_fb_helper.h"
+
+/* Module parameter "poll" (rw for root): master switch for connector
+ * output polling; checked by the poll work and the enable/hpd paths. */
+static bool drm_kms_helper_poll = true;
+module_param_named(poll, drm_kms_helper_poll, bool, 0600);
+
+/*
+ * Mark modes on @connector's mode list invalid when they need an interlace
+ * or doublescan capability missing from @flags.  When @flags allows both,
+ * nothing can be culled, so return early.
+ */
+static void drm_mode_validate_flag(struct drm_connector *connector,
+                                  int flags)
+{
+       struct drm_display_mode *mode, *t;
+
+       if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE))
+               return;
+
+       list_for_each_entry_safe(mode, t, &connector->modes, head) {
+               if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
+                               !(flags & DRM_MODE_FLAG_INTERLACE))
+                       mode->status = MODE_NO_INTERLACE;
+               if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) &&
+                               !(flags & DRM_MODE_FLAG_DBLSCAN))
+                       mode->status = MODE_NO_DBLESCAN;
+       }
+
+       return;
+}
+
+/**
+ * drm_helper_probe_single_connector_modes - get complete set of display modes
+ * @connector: connector to probe
+ * @maxX: max width for modes
+ * @maxY: max height for modes
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Based on @dev's mode_config layout, scan all the connectors and try to detect
+ * modes on them.  Modes will first be added to the connector's probed_modes
+ * list, then culled (based on validity and the @maxX, @maxY parameters) and
+ * put into the normal modes list.
+ *
+ * Intended to be used either at bootup time or when major configuration
+ * changes have occurred.
+ *
+ * FIXME: take into account monitor limits
+ *
+ * RETURNS:
+ * Number of modes found on @connector.
+ */
+int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
+                                           uint32_t maxX, uint32_t maxY)
+{
+       struct drm_device *dev = connector->dev;
+       struct drm_display_mode *mode, *t;
+       struct drm_connector_helper_funcs *connector_funcs =
+               connector->helper_private;
+       int count = 0;
+       int mode_flags = 0;
+
+       DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
+                       drm_get_connector_name(connector));
+       /* set all modes to the unverified state */
+       list_for_each_entry_safe(mode, t, &connector->modes, head)
+               mode->status = MODE_UNVERIFIED;
+
+       if (connector->force) {
+               if (connector->force == DRM_FORCE_ON)
+                       connector->status = connector_status_connected;
+               else
+                       connector->status = connector_status_disconnected;
+               if (connector->funcs->force)
+                       connector->funcs->force(connector);
+       } else {
+               connector->status = connector->funcs->detect(connector, true);
+               drm_kms_helper_poll_enable(dev);
+       }
+
+       if (connector->status == connector_status_disconnected) {
+               DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
+                       connector->base.id, drm_get_connector_name(connector));
+               drm_mode_connector_update_edid_property(connector, NULL);
+               goto prune;
+       }
+
+       /* Fall back to a default 1024x768 set when connected but EDID-less */
+       count = (*connector_funcs->get_modes)(connector);
+       if (count == 0 && connector->status == connector_status_connected)
+               count = drm_add_modes_noedid(connector, 1024, 768);
+       if (count == 0)
+               goto prune;
+
+       drm_mode_connector_list_update(connector);
+
+       if (maxX && maxY)
+               drm_mode_validate_size(dev, &connector->modes, maxX,
+                                      maxY, 0);
+
+       if (connector->interlace_allowed)
+               mode_flags |= DRM_MODE_FLAG_INTERLACE;
+       if (connector->doublescan_allowed)
+               mode_flags |= DRM_MODE_FLAG_DBLSCAN;
+       drm_mode_validate_flag(connector, mode_flags);
+
+       list_for_each_entry_safe(mode, t, &connector->modes, head) {
+               if (mode->status == MODE_OK)
+                       mode->status = connector_funcs->mode_valid(connector,
+                                                                  mode);
+       }
+
+prune:
+       drm_mode_prune_invalid(dev, &connector->modes, true);
+
+       if (list_empty(&connector->modes))
+               return 0;
+
+       drm_mode_sort(&connector->modes);
+
+       DRM_DEBUG_KMS("[CONNECTOR:%d:%s] probed modes :\n", connector->base.id,
+                       drm_get_connector_name(connector));
+       list_for_each_entry_safe(mode, t, &connector->modes, head) {
+               mode->vrefresh = drm_mode_vrefresh(mode);
+
+               drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+               drm_mode_debug_printmodeline(mode);
+       }
+
+       return count;
+}
+EXPORT_SYMBOL(drm_helper_probe_single_connector_modes);
+
+/**
+ * drm_helper_encoder_in_use - check if a given encoder is in use
+ * @encoder: encoder to check
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Walk @encoder's DRM device's mode_config and see if it's in use.
+ *
+ * RETURNS:
+ * True if @encoder is part of the mode_config, false otherwise.
+ */
+bool drm_helper_encoder_in_use(struct drm_encoder *encoder)
+{
+       struct drm_connector *connector;
+       struct drm_device *dev = encoder->dev;
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+               if (connector->encoder == encoder)
+                       return true;
+       return false;
+}
+EXPORT_SYMBOL(drm_helper_encoder_in_use);
+
+/**
+ * drm_helper_crtc_in_use - check if a given CRTC is in a mode_config
+ * @crtc: CRTC to check
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Walk @crtc's DRM device's mode_config and see if it's in use.
+ *
+ * RETURNS:
+ * True if @crtc is part of the mode_config, false otherwise.
+ */
+bool drm_helper_crtc_in_use(struct drm_crtc *crtc)
+{
+       struct drm_encoder *encoder;
+       struct drm_device *dev = crtc->dev;
+       /* FIXME: Locking around list access? (caller is documented above as
+        * holding the mode config lock) */
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+               if (encoder->crtc == crtc && drm_helper_encoder_in_use(encoder))
+                       return true;
+       return false;
+}
+EXPORT_SYMBOL(drm_helper_crtc_in_use);
+
+/*
+ * Power down @encoder: prefer its dedicated disable hook, fall back to
+ * forcing DPMS off when no disable hook is provided.
+ */
+static void
+drm_encoder_disable(struct drm_encoder *encoder)
+{
+       struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+
+       if (encoder_funcs->disable)
+               (*encoder_funcs->disable)(encoder);
+       else
+               (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
+}
+
+/**
+ * drm_helper_disable_unused_functions - disable unused objects
+ * @dev: DRM device
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * If a connector or CRTC isn't part of @dev's mode_config, it can be disabled
+ * by calling its dpms function, which should power it off.
+ */
+void drm_helper_disable_unused_functions(struct drm_device *dev)
+{
+       struct drm_encoder *encoder;
+       struct drm_connector *connector;
+       struct drm_crtc *crtc;
+
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               if (!connector->encoder)
+                       continue;
+               if (connector->status == connector_status_disconnected)
+                       connector->encoder = NULL;
+       }
+
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+               if (!drm_helper_encoder_in_use(encoder)) {
+                       drm_encoder_disable(encoder);
+                       /* disconnect encoder from any connector */
+                       encoder->crtc = NULL;
+               }
+       }
+
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+               crtc->enabled = drm_helper_crtc_in_use(crtc);
+               if (!crtc->enabled) {
+                       if (crtc_funcs->disable)
+                               (*crtc_funcs->disable)(crtc);
+                       else
+                               (*crtc_funcs->dpms)(crtc, DRM_MODE_DPMS_OFF);
+                       crtc->fb = NULL;
+               }
+       }
+}
+EXPORT_SYMBOL(drm_helper_disable_unused_functions);
+
+/**
+ * drm_encoder_crtc_ok - can a given crtc drive a given encoder?
+ * @encoder: encoder to test
+ * @crtc: crtc to test
+ *
+ * Return false if @encoder can't be driven by @crtc, true otherwise.
+ */
+static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
+                               struct drm_crtc *crtc)
+{
+       struct drm_device *dev;
+       struct drm_crtc *tmp;
+       int crtc_mask = 1;
+
+       WARN(!crtc, "checking null crtc?\n");
+
+       dev = crtc->dev;
+
+       /* @crtc's bit in encoder->possible_crtcs is its index in crtc_list */
+       list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) {
+               if (tmp == crtc)
+                       break;
+               crtc_mask <<= 1;
+       }
+
+       if (encoder->possible_crtcs & crtc_mask)
+               return true;
+       return false;
+}
+
+/*
+ * Check the CRTC we're going to map each output to vs. its current
+ * CRTC.  If they don't match, we have to disable the output and the CRTC
+ * since the driver will have to re-route things.
+ */
+static void
+drm_crtc_prepare_encoders(struct drm_device *dev)
+{
+       struct drm_encoder_helper_funcs *encoder_funcs;
+       struct drm_encoder *encoder;
+
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+               encoder_funcs = encoder->helper_private;
+               /* Disable unused encoders */
+               if (encoder->crtc == NULL)
+                       drm_encoder_disable(encoder);
+               /* Disable encoders whose CRTC is about to change.
+                * NOTE(review): not an else-if, so an encoder may be disabled
+                * twice when crtc is NULL and get_crtc() returns non-NULL —
+                * presumably drm_encoder_disable() is idempotent; confirm. */
+               if (encoder_funcs->get_crtc &&
+                   encoder->crtc != (*encoder_funcs->get_crtc)(encoder))
+                       drm_encoder_disable(encoder);
+       }
+}
+
+/**
+ * drm_crtc_helper_set_mode - set a mode
+ * @crtc: CRTC to program
+ * @mode: mode to use
+ * @x: horizontal offset into the framebuffer
+ * @y: vertical offset into the framebuffer
+ * @old_fb: previous framebuffer, passed to the driver's mode_set hook
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Try to set @mode on @crtc.  Give @crtc and its associated connectors a chance
+ * to fixup or reject the mode prior to trying to set it.
+ *
+ * RETURNS:
+ * True if the mode was set successfully, or false otherwise.
+ */
+bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
+                             struct drm_display_mode *mode,
+                             int x, int y,
+                             struct drm_framebuffer *old_fb)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_display_mode *adjusted_mode, saved_mode;
+       struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+       struct drm_encoder_helper_funcs *encoder_funcs;
+       int saved_x, saved_y;
+       struct drm_encoder *encoder;
+       bool ret = true;
+
+       crtc->enabled = drm_helper_crtc_in_use(crtc);
+
+       /* Nothing to program for an unused CRTC; checked before duplicating
+        * the mode so nothing can leak on this path.
+        */
+       if (!crtc->enabled)
+               return true;
+
+       adjusted_mode = drm_mode_duplicate(dev, mode);
+       if (!adjusted_mode)
+               return false;
+
+       saved_mode = crtc->mode;
+       saved_x = crtc->x;
+       saved_y = crtc->y;
+
+       /* Update crtc values up front so the driver can rely on them for mode
+        * setting.
+        */
+       crtc->mode = *mode;
+       crtc->x = x;
+       crtc->y = y;
+
+       /* Pass our mode to the connectors and the CRTC to give them a chance to
+        * adjust it according to limitations or connector properties, and also
+        * a chance to reject the mode entirely.
+        */
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+
+               if (encoder->crtc != crtc)
+                       continue;
+               encoder_funcs = encoder->helper_private;
+               if (!(ret = encoder_funcs->mode_fixup(encoder, mode,
+                                                     adjusted_mode))) {
+                       goto done;
+               }
+       }
+
+       if (!(ret = crtc_funcs->mode_fixup(crtc, mode, adjusted_mode))) {
+               goto done;
+       }
+       DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
+
+       /* Prepare the encoders and CRTCs before setting the mode. */
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+
+               if (encoder->crtc != crtc)
+                       continue;
+               encoder_funcs = encoder->helper_private;
+               /* Disable the encoders as the first thing we do. */
+               encoder_funcs->prepare(encoder);
+       }
+
+       drm_crtc_prepare_encoders(dev);
+
+       crtc_funcs->prepare(crtc);
+
+       /* Set up the DPLL and any encoders state that needs to adjust or depend
+        * on the DPLL.
+        */
+       ret = !crtc_funcs->mode_set(crtc, mode, adjusted_mode, x, y, old_fb);
+       if (!ret)
+           goto done;
+
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+
+               if (encoder->crtc != crtc)
+                       continue;
+
+               DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
+                       encoder->base.id, drm_get_encoder_name(encoder),
+                       mode->base.id, mode->name);
+               encoder_funcs = encoder->helper_private;
+               encoder_funcs->mode_set(encoder, mode, adjusted_mode);
+       }
+
+       /* Now enable the clocks, plane, pipe, and connectors that we set up. */
+       crtc_funcs->commit(crtc);
+
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+
+               if (encoder->crtc != crtc)
+                       continue;
+
+               encoder_funcs = encoder->helper_private;
+               encoder_funcs->commit(encoder);
+
+       }
+
+       /* FIXME: add subpixel order */
+done:
+       /* Destroy the duplicated mode on both success and failure paths;
+        * previously only the success path freed it, leaking adjusted_mode
+        * whenever a fixup or mode_set hook failed.
+        */
+       drm_mode_destroy(dev, adjusted_mode);
+       if (!ret) {
+               crtc->mode = saved_mode;
+               crtc->x = saved_x;
+               crtc->y = saved_y;
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL(drm_crtc_helper_set_mode);
+
+
+
+/**
+ * drm_crtc_helper_set_config - set a new config from userspace
+ * @set: mode set configuration: target crtc, mode, x/y offsets, the
+ *       connector list to drive from that crtc, and the framebuffer
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Setup a new configuration, provided by the user in @set, and enable it.
+ * A shallow backup of all crtcs, encoders and connectors is taken first so
+ * the previous state can be restored if anything fails.
+ *
+ * RETURNS:
+ * Zero on success, negative errno on failure.
+ */
+int drm_crtc_helper_set_config(struct drm_mode_set *set)
+{
+       struct drm_device *dev;
+       struct drm_crtc *save_crtcs, *new_crtc, *crtc;
+       struct drm_encoder *save_encoders, *new_encoder, *encoder;
+       struct drm_framebuffer *old_fb = NULL;
+       bool mode_changed = false; /* if true do a full mode set */
+       bool fb_changed = false; /* if true and !mode_changed just do a flip */
+       struct drm_connector *save_connectors, *connector;
+       int count = 0, ro, fail = 0;
+       struct drm_crtc_helper_funcs *crtc_funcs;
+       int ret = 0;
+       int i;
+
+       DRM_DEBUG_KMS("\n");
+
+       if (!set)
+               return -EINVAL;
+
+       if (!set->crtc)
+               return -EINVAL;
+
+       if (!set->crtc->helper_private)
+               return -EINVAL;
+
+       crtc_funcs = set->crtc->helper_private;
+
+       if (set->fb) {
+               DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
+                               set->crtc->base.id, set->fb->base.id,
+                               (int)set->num_connectors, set->x, set->y);
+       } else {
+               DRM_DEBUG_KMS("[CRTC:%d] [NOFB] #connectors=%d (x y) (%i %i)\n",
+                               set->crtc->base.id, (int)set->num_connectors,
+                               set->x, set->y);
+       }
+
+       dev = set->crtc->dev;
+
+       /* Allocate space for the backup of all (non-pointer) crtc, encoder and
+        * connector data. */
+       save_crtcs = kzalloc(dev->mode_config.num_crtc *
+                            sizeof(struct drm_crtc), GFP_KERNEL);
+       if (!save_crtcs)
+               return -ENOMEM;
+
+       save_encoders = kzalloc(dev->mode_config.num_encoder *
+                               sizeof(struct drm_encoder), GFP_KERNEL);
+       if (!save_encoders) {
+               kfree(save_crtcs);
+               return -ENOMEM;
+       }
+
+       save_connectors = kzalloc(dev->mode_config.num_connector *
+                               sizeof(struct drm_connector), GFP_KERNEL);
+       if (!save_connectors) {
+               kfree(save_crtcs);
+               kfree(save_encoders);
+               return -ENOMEM;
+       }
+
+       /* Copy data. Note that driver private data is not affected.
+        * Should anything bad happen only the expected state is
+        * restored, not the drivers personal bookkeeping.
+        */
+       count = 0;
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               save_crtcs[count++] = *crtc;
+       }
+
+       count = 0;
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+               save_encoders[count++] = *encoder;
+       }
+
+       count = 0;
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               save_connectors[count++] = *connector;
+       }
+
+       /* We should be able to check here if the fb has the same properties
+        * and then just flip_or_move it */
+       if (set->crtc->fb != set->fb) {
+               /* If we have no fb then treat it as a full mode set */
+               if (set->crtc->fb == NULL) {
+                       DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
+                       mode_changed = true;
+               } else if (set->fb == NULL) {
+                       mode_changed = true;
+               } else
+                       fb_changed = true;
+       }
+
+       if (set->x != set->crtc->x || set->y != set->crtc->y)
+               fb_changed = true;
+
+       if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
+               DRM_DEBUG_KMS("modes are different, full mode set\n");
+               drm_mode_debug_printmodeline(&set->crtc->mode);
+               drm_mode_debug_printmodeline(set->mode);
+               mode_changed = true;
+       }
+
+       /* a) traverse passed in connector list and get encoders for them */
+       count = 0;
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               struct drm_connector_helper_funcs *connector_funcs =
+                       connector->helper_private;
+               new_encoder = connector->encoder;
+               for (ro = 0; ro < set->num_connectors; ro++) {
+                       if (set->connectors[ro] == connector) {
+                               new_encoder = connector_funcs->best_encoder(connector);
+                               /* if we can't get an encoder for a connector
+                                  we are setting now - then fail */
+                               if (new_encoder == NULL)
+                                       /* don't break so fail path works correct */
+                                       fail = 1;
+                               break;
+                       }
+               }
+
+               if (new_encoder != connector->encoder) {
+                       DRM_DEBUG_KMS("encoder changed, full mode switch\n");
+                       mode_changed = true;
+                       /* If the encoder is reused for another connector, then
+                        * the appropriate crtc will be set later.
+                        */
+                       if (connector->encoder)
+                               connector->encoder->crtc = NULL;
+                       connector->encoder = new_encoder;
+               }
+       }
+
+       if (fail) {
+               ret = -EINVAL;
+               goto fail;
+       }
+
+       count = 0;
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               if (!connector->encoder)
+                       continue;
+
+               if (connector->encoder->crtc == set->crtc)
+                       new_crtc = NULL;
+               else
+                       new_crtc = connector->encoder->crtc;
+
+               for (ro = 0; ro < set->num_connectors; ro++) {
+                       if (set->connectors[ro] == connector)
+                               new_crtc = set->crtc;
+               }
+
+               /* Make sure the new CRTC will work with the encoder */
+               if (new_crtc &&
+                   !drm_encoder_crtc_ok(connector->encoder, new_crtc)) {
+                       ret = -EINVAL;
+                       goto fail;
+               }
+               if (new_crtc != connector->encoder->crtc) {
+                       DRM_DEBUG_KMS("crtc changed, full mode switch\n");
+                       mode_changed = true;
+                       connector->encoder->crtc = new_crtc;
+               }
+               if (new_crtc) {
+                       DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
+                               connector->base.id, drm_get_connector_name(connector),
+                               new_crtc->base.id);
+               } else {
+                       DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
+                               connector->base.id, drm_get_connector_name(connector));
+               }
+       }
+
+       /* mode_set_base is not a required function */
+       if (fb_changed && !crtc_funcs->mode_set_base)
+               mode_changed = true;
+
+       if (mode_changed) {
+               set->crtc->enabled = (set->mode != NULL);
+               if (set->mode != NULL) {
+                       DRM_DEBUG_KMS("attempting to set mode from"
+                                       " userspace\n");
+                       drm_mode_debug_printmodeline(set->mode);
+                       old_fb = set->crtc->fb;
+                       set->crtc->fb = set->fb;
+                       if (!drm_crtc_helper_set_mode(set->crtc, set->mode,
+                                                     set->x, set->y,
+                                                     old_fb)) {
+                               DRM_ERROR("failed to set mode on [CRTC:%d]\n",
+                                         set->crtc->base.id);
+                               ret = -EINVAL;
+                               goto fail;
+                       }
+               }
+               drm_helper_disable_unused_functions(dev);
+       } else if (fb_changed) {
+               set->crtc->x = set->x;
+               set->crtc->y = set->y;
+
+               old_fb = set->crtc->fb;
+               if (set->crtc->fb != set->fb)
+                       set->crtc->fb = set->fb;
+               ret = crtc_funcs->mode_set_base(set->crtc,
+                                               set->x, set->y, old_fb);
+               if (ret != 0)
+                       goto fail;
+       }
+       DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
+       for (i = 0; i < set->num_connectors; i++) {
+               DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
+                             drm_get_connector_name(set->connectors[i]));
+               set->connectors[i]->dpms = DRM_MODE_DPMS_ON;
+       }
+
+       kfree(save_connectors);
+       kfree(save_encoders);
+       kfree(save_crtcs);
+       return 0;
+
+fail:
+       /* Restore all previous data. */
+       count = 0;
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               *crtc = save_crtcs[count++];
+       }
+
+       count = 0;
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+               *encoder = save_encoders[count++];
+       }
+
+       count = 0;
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               *connector = save_connectors[count++];
+       }
+
+       kfree(save_connectors);
+       kfree(save_encoders);
+       kfree(save_crtcs);
+       return ret;
+}
+EXPORT_SYMBOL(drm_crtc_helper_set_config);
+
+/*
+ * Return the "most on" DPMS level (numerically lowest; DRM_MODE_DPMS_ON
+ * is 0) among all connectors driven by @encoder, defaulting to OFF.
+ */
+static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder)
+{
+       int dpms = DRM_MODE_DPMS_OFF;
+       struct drm_connector *connector;
+       struct drm_device *dev = encoder->dev;
+
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+               if (connector->encoder == encoder)
+                       if (connector->dpms < dpms)
+                               dpms = connector->dpms;
+       return dpms;
+}
+
+/*
+ * Return the "most on" DPMS level (numerically lowest) among all
+ * connectors whose encoder is driven by @crtc, defaulting to OFF.
+ */
+static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
+{
+       int dpms = DRM_MODE_DPMS_OFF;
+       struct drm_connector *connector;
+       struct drm_device *dev = crtc->dev;
+
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+               if (connector->encoder && connector->encoder->crtc == crtc)
+                       if (connector->dpms < dpms)
+                               dpms = connector->dpms;
+       return dpms;
+}
+
+/**
+ * drm_helper_connector_dpms - connector-level DPMS helper
+ * @connector: affected connector
+ * @mode: DPMS mode
+ *
+ * Calls the low-level connector DPMS function, then
+ * calls appropriate encoder and crtc DPMS functions as well
+ */
+void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
+{
+       struct drm_encoder *encoder = connector->encoder;
+       struct drm_crtc *crtc = encoder ? encoder->crtc : NULL;
+       int old_dpms;
+
+       if (mode == connector->dpms)
+               return;
+
+       old_dpms = connector->dpms;
+       connector->dpms = mode;
+
+       /* from off to on, do crtc then encoder */
+       if (mode < old_dpms) {
+               if (crtc) {
+                       struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+                       if (crtc_funcs->dpms)
+                               (*crtc_funcs->dpms) (crtc,
+                                                    drm_helper_choose_crtc_dpms(crtc));
+               }
+               if (encoder) {
+                       struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+                       if (encoder_funcs->dpms)
+                               (*encoder_funcs->dpms) (encoder,
+                                                       drm_helper_choose_encoder_dpms(encoder));
+               }
+       }
+
+       /* from on to off, do encoder then crtc */
+       if (mode > old_dpms) {
+               if (encoder) {
+                       struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+                       if (encoder_funcs->dpms)
+                               (*encoder_funcs->dpms) (encoder,
+                                                       drm_helper_choose_encoder_dpms(encoder));
+               }
+               if (crtc) {
+                       struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+                       if (crtc_funcs->dpms)
+                               (*crtc_funcs->dpms) (crtc,
+                                                    drm_helper_choose_crtc_dpms(crtc));
+               }
+       }
+
+       return;
+}
+EXPORT_SYMBOL(drm_helper_connector_dpms);
+
+/*
+ * Copy the geometry fields of a userspace framebuffer command into the
+ * in-kernel framebuffer object.  Always returns 0.
+ */
+int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
+                                  struct drm_mode_fb_cmd *mode_cmd)
+{
+       fb->width = mode_cmd->width;
+       fb->height = mode_cmd->height;
+       fb->pitch = mode_cmd->pitch;
+       fb->bits_per_pixel = mode_cmd->bpp;
+       fb->depth = mode_cmd->depth;
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct);
+
+/*
+ * Re-program every enabled CRTC with its saved mode (typically after a
+ * suspend/resume cycle), re-apply the saved DPMS state, and disable any
+ * functions left unused.  Always returns 0.
+ */
+int drm_helper_resume_force_mode(struct drm_device *dev)
+{
+       struct drm_crtc *crtc;
+       struct drm_encoder *encoder;
+       struct drm_encoder_helper_funcs *encoder_funcs;
+       struct drm_crtc_helper_funcs *crtc_funcs;
+       int ret; /* holds the bool result of drm_crtc_helper_set_mode() */
+
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+
+               if (!crtc->enabled)
+                       continue;
+
+               ret = drm_crtc_helper_set_mode(crtc, &crtc->mode,
+                                              crtc->x, crtc->y, crtc->fb);
+
+               if (ret == false)
+                       DRM_ERROR("failed to set mode on crtc %p\n", crtc);
+
+               /* Turn off outputs that were already powered off */
+               if (drm_helper_choose_crtc_dpms(crtc)) {
+                       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+
+                               if(encoder->crtc != crtc)
+                                       continue;
+
+                               encoder_funcs = encoder->helper_private;
+                               if (encoder_funcs->dpms)
+                                       (*encoder_funcs->dpms) (encoder,
+                                                               drm_helper_choose_encoder_dpms(encoder));
+                       }
+
+                       crtc_funcs = crtc->helper_private;
+                       if (crtc_funcs->dpms)
+                               (*crtc_funcs->dpms) (crtc,
+                                                    drm_helper_choose_crtc_dpms(crtc));
+               }
+       }
+       /* disable the unused connectors while restoring the modesetting */
+       drm_helper_disable_unused_functions(dev);
+       return 0;
+}
+EXPORT_SYMBOL(drm_helper_resume_force_mode);
+
+#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
+/*
+ * Delayed-work handler for connector output polling: re-detect every
+ * pollable connector, fire a hotplug uevent (and the driver's
+ * output_poll_changed hook) if any status changed, and re-queue itself
+ * while at least one connector still wants connect/disconnect polling.
+ */
+static void output_poll_execute(struct work_struct *work)
+{
+       struct delayed_work *delayed_work = to_delayed_work(work);
+       struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_work);
+       struct drm_connector *connector;
+       enum drm_connector_status old_status;
+       bool repoll = false, changed = false;
+
+       if (!drm_kms_helper_poll)
+               return;
+
+       mutex_lock(&dev->mode_config.mutex);
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+
+               /* if this is HPD or polled don't check it -
+                  TV out for instance */
+               if (!connector->polled)
+                       continue;
+
+               else if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT))
+                       repoll = true;
+
+               old_status = connector->status;
+               /* if we are connected and don't want to poll for disconnect
+                  skip it */
+               if (old_status == connector_status_connected &&
+                   !(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT) &&
+                   !(connector->polled & DRM_CONNECTOR_POLL_HPD))
+                       continue;
+
+               connector->status = connector->funcs->detect(connector, false);
+               DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
+                             connector->base.id,
+                             drm_get_connector_name(connector),
+                             old_status, connector->status);
+               if (old_status != connector->status)
+                       changed = true;
+       }
+
+       mutex_unlock(&dev->mode_config.mutex);
+
+       if (changed) {
+               /* send a uevent + call fbdev */
+               drm_sysfs_hotplug_event(dev);
+               if (dev->mode_config.funcs->output_poll_changed)
+                       dev->mode_config.funcs->output_poll_changed(dev);
+       }
+
+       if (repoll)
+               queue_delayed_work(system_nrt_wq, delayed_work, DRM_OUTPUT_POLL_PERIOD);
+}
+
+/* Stop output polling, waiting for an in-flight poll pass to finish. */
+void drm_kms_helper_poll_disable(struct drm_device *dev)
+{
+       if (!dev->mode_config.poll_enabled)
+               return;
+       cancel_delayed_work_sync(&dev->mode_config.output_poll_work);
+}
+EXPORT_SYMBOL(drm_kms_helper_poll_disable);
+
+/*
+ * (Re)start output polling, but only if polling is enabled for the device,
+ * the module parameter allows it, and at least one connector is pollable.
+ */
+void drm_kms_helper_poll_enable(struct drm_device *dev)
+{
+       bool poll = false;
+       struct drm_connector *connector;
+
+       if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll)
+               return;
+
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               if (connector->polled)
+                       poll = true;
+       }
+
+       if (poll)
+               queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
+}
+EXPORT_SYMBOL(drm_kms_helper_poll_enable);
+
+/* Initialise the poll work, mark polling enabled, and kick it off. */
+void drm_kms_helper_poll_init(struct drm_device *dev)
+{
+       INIT_DELAYED_WORK(&dev->mode_config.output_poll_work, output_poll_execute);
+       dev->mode_config.poll_enabled = true;
+
+       drm_kms_helper_poll_enable(dev);
+}
+EXPORT_SYMBOL(drm_kms_helper_poll_init);
+
+/* Tear down output polling (currently just disables it). */
+void drm_kms_helper_poll_fini(struct drm_device *dev)
+{
+       drm_kms_helper_poll_disable(dev);
+}
+EXPORT_SYMBOL(drm_kms_helper_poll_fini);
+
+/*
+ * Hotplug-interrupt entry point: run the poll pass immediately (delay 0)
+ * instead of waiting for the next scheduled period.
+ */
+void drm_helper_hpd_irq_event(struct drm_device *dev)
+{
+       if (!dev->mode_config.poll_enabled)
+               return;
+
+       /* kill timer and schedule immediate execution, this doesn't block */
+       cancel_delayed_work(&dev->mode_config.output_poll_work);
+       if (drm_kms_helper_poll)
+               queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0);
+}
+EXPORT_SYMBOL(drm_helper_hpd_irq_event);
diff --git a/services4/3rdparty/linux_drm/drm_debugfs.c b/services4/3rdparty/linux_drm/drm_debugfs.c
new file mode 100644 (file)
index 0000000..9d8c892
--- /dev/null
@@ -0,0 +1,236 @@
+/**
+ * \file drm_debugfs.c
+ * debugfs support for DRM
+ *
+ * \author Ben Gamari <bgamari@gmail.com>
+ */
+
+/*
+ * Created: Sun Dec 21 13:08:50 2008 by bgamari@gmail.com
+ *
+ * Copyright 2008 Ben Gamari <bgamari@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include "drmP.h"
+
+#if defined(CONFIG_DEBUG_FS)
+
+/***************************************************
+ * Initialization, etc.
+ **************************************************/
+
+static struct drm_info_list drm_debugfs_list[] = {
+       {"name", drm_name_info, 0},
+       {"vm", drm_vm_info, 0},
+       {"clients", drm_clients_info, 0},
+       {"queues", drm_queues_info, 0},
+       {"bufs", drm_bufs_info, 0},
+       {"gem_names", drm_gem_name_info, DRIVER_GEM},
+#if DRM_DEBUG_CODE
+       {"vma", drm_vma_info, 0},
+#endif
+};
+#define DRM_DEBUGFS_ENTRIES ARRAY_SIZE(drm_debugfs_list)
+
+
+static int drm_debugfs_open(struct inode *inode, struct file *file)
+{
+       struct drm_info_node *node = inode->i_private;
+
+       return single_open(file, node->info_ent->show, node);
+}
+
+
+static const struct file_operations drm_debugfs_fops = {
+       .owner = THIS_MODULE,
+       .open = drm_debugfs_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
+
+
+/**
+ * Initialize a given set of debugfs files for a device
+ *
+ * \param files The array of files to create
+ * \param count The number of files given
+ * \param root DRI debugfs dir entry.
+ * \param minor device minor number
+ * \return Zero on success, non-zero on failure
+ *
+ * Create a given set of debugfs files represented by an array of
+ * drm_info_list entries in the given root directory.
+ */
+int drm_debugfs_create_files(struct drm_info_list *files, int count,
+                            struct dentry *root, struct drm_minor *minor)
+{
+       struct drm_device *dev = minor->dev;
+       struct dentry *ent;
+       struct drm_info_node *tmp;
+       /* parent dir name for error reports is taken from root->d_name */
+       int i, ret;
+
+       for (i = 0; i < count; i++) {
+               u32 features = files[i].driver_features;
+
+               if (features != 0 &&
+                   (dev->driver->driver_features & features) != features)
+                       continue;
+
+               tmp = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
+               if (tmp == NULL) {
+                       ret = -1;
+                       goto fail;
+               }
+               ent = debugfs_create_file(files[i].name, S_IFREG | S_IRUGO,
+                                         root, tmp, &drm_debugfs_fops);
+               if (!ent) {
+                       DRM_ERROR("Cannot create /sys/kernel/debug/dri/%s/%s\n",
+                                 root->d_name.name, files[i].name);
+                       kfree(tmp);
+                       ret = -1;
+                       goto fail;
+               }
+
+               tmp->minor = minor;
+               tmp->dent = ent;
+               tmp->info_ent = &files[i];
+               list_add(&(tmp->list), &(minor->debugfs_nodes.list));
+       }
+       return 0;
+
+fail:
+       drm_debugfs_remove_files(files, count, minor);
+       return ret;
+}
+EXPORT_SYMBOL(drm_debugfs_create_files);
+
+/**
+ * Initialize the DRI debugfs filesystem for a device
+ *
+ * \param dev DRM device
+ * \param minor device minor number
+ * \param root DRI debugfs dir entry.
+ *
+ * Create the DRI debugfs root entry "/sys/kernel/debug/dri", the device debugfs root entry
+ * "/sys/kernel/debug/dri/%minor%/", and each entry in debugfs_list as
+ * "/sys/kernel/debug/dri/%minor%/%name%".
+ */
+int drm_debugfs_init(struct drm_minor *minor, int minor_id,
+                    struct dentry *root)
+{
+       struct drm_device *dev = minor->dev;
+       char name[64];
+       int ret;
+
+       INIT_LIST_HEAD(&minor->debugfs_nodes.list);
+       sprintf(name, "%d", minor_id);
+       minor->debugfs_root = debugfs_create_dir(name, root);
+       if (!minor->debugfs_root) {
+               DRM_ERROR("Cannot create /sys/kernel/debug/dri/%s\n", name);
+               return -1;
+       }
+
+       ret = drm_debugfs_create_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES,
+                                      minor->debugfs_root, minor);
+       if (ret) {
+               debugfs_remove(minor->debugfs_root);
+               minor->debugfs_root = NULL;
+               DRM_ERROR("Failed to create core drm debugfs files\n");
+               return ret;
+       }
+
+       if (dev->driver->debugfs_init) {
+               ret = dev->driver->debugfs_init(minor);
+               if (ret) {
+                       DRM_ERROR("DRM: Driver failed to initialize "
+                                 "/sys/kernel/debug/dri.\n");
+                       return ret;
+               }
+       }
+       return 0;
+}
+
+
+/**
+ * Remove a list of debugfs files
+ *
+ * \param files The list of files
+ * \param count The number of files
+ * \param minor The minor of which we should remove the files
+ * \return always zero.
+ *
+ * Remove all debugfs entries created by debugfs_init().
+ */
+int drm_debugfs_remove_files(struct drm_info_list *files, int count,
+                            struct drm_minor *minor)
+{
+       struct list_head *pos, *q;
+       struct drm_info_node *tmp;
+       int i;
+
+       for (i = 0; i < count; i++) {
+               list_for_each_safe(pos, q, &minor->debugfs_nodes.list) {
+                       tmp = list_entry(pos, struct drm_info_node, list);
+                       if (tmp->info_ent == &files[i]) {
+                               debugfs_remove(tmp->dent);
+                               list_del(pos);
+                               kfree(tmp);
+                       }
+               }
+       }
+       return 0;
+}
+EXPORT_SYMBOL(drm_debugfs_remove_files);
+
+/**
+ * Cleanup the debugfs filesystem resources.
+ *
+ * \param minor device minor number.
+ * \return always zero.
+ *
+ * Remove all debugfs entries created by debugfs_init().
+ */
+int drm_debugfs_cleanup(struct drm_minor *minor)
+{
+       struct drm_device *dev = minor->dev;
+
+       if (!minor->debugfs_root)
+               return 0;
+
+       if (dev->driver->debugfs_cleanup)
+               dev->driver->debugfs_cleanup(minor);
+
+       drm_debugfs_remove_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES, minor);
+
+       debugfs_remove(minor->debugfs_root);
+       minor->debugfs_root = NULL;
+
+       return 0;
+}
+
+#endif /* CONFIG_DEBUG_FS */
+
diff --git a/services4/3rdparty/linux_drm/drm_dma.c b/services4/3rdparty/linux_drm/drm_dma.c
new file mode 100644 (file)
index 0000000..252cbd7
--- /dev/null
@@ -0,0 +1,161 @@
+/**
+ * \file drm_dma.c
+ * DMA IOCTL and function support
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+
+/**
+ * Initialize the DMA data.
+ *
+ * \param dev DRM device.
+ * \return zero on success or a negative value on failure.
+ *
+ * Allocate and initialize a drm_device_dma structure.
+ */
+int drm_dma_setup(struct drm_device *dev)
+{
+       int i;
+
+       dev->dma = kzalloc(sizeof(*dev->dma), GFP_KERNEL);
+       if (!dev->dma)
+               return -ENOMEM;
+
+       for (i = 0; i <= DRM_MAX_ORDER; i++)
+               memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0]));
+
+       return 0;
+}
+
+/**
+ * Cleanup the DMA resources.
+ *
+ * \param dev DRM device.
+ *
+ * Free all pages associated with DMA buffers, the buffers and pages lists, and
+ * finally the drm_device::dma structure itself.
+ */
+void drm_dma_takedown(struct drm_device *dev)
+{
+       struct drm_device_dma *dma = dev->dma;
+       int i, j;
+
+       if (!dma)
+               return;
+
+       /* Clear dma buffers */
+       for (i = 0; i <= DRM_MAX_ORDER; i++) {
+               if (dma->bufs[i].seg_count) {
+                       DRM_DEBUG("order %d: buf_count = %d,"
+                                 " seg_count = %d\n",
+                                 i,
+                                 dma->bufs[i].buf_count,
+                                 dma->bufs[i].seg_count);
+                       for (j = 0; j < dma->bufs[i].seg_count; j++) {
+                               if (dma->bufs[i].seglist[j]) {
+                                       drm_pci_free(dev, dma->bufs[i].seglist[j]);
+                               }
+                       }
+                       kfree(dma->bufs[i].seglist);
+               }
+               if (dma->bufs[i].buf_count) {
+                       for (j = 0; j < dma->bufs[i].buf_count; j++) {
+                               kfree(dma->bufs[i].buflist[j].dev_private);
+                       }
+                       kfree(dma->bufs[i].buflist);
+               }
+       }
+
+       kfree(dma->buflist);
+       kfree(dma->pagelist);
+       kfree(dev->dma);
+       dev->dma = NULL;
+}
+
+/**
+ * Free a buffer.
+ *
+ * \param dev DRM device.
+ * \param buf buffer to free.
+ *
+ * Resets the fields of \p buf.
+ */
+void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf)
+{
+       if (!buf)
+               return;
+
+       buf->waiting = 0;
+       buf->pending = 0;
+       buf->file_priv = NULL;
+       buf->used = 0;
+
+       if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE)
+           && waitqueue_active(&buf->dma_wait)) {
+               wake_up_interruptible(&buf->dma_wait);
+       }
+}
+
+/**
+ * Reclaim the buffers.
+ *
+ * \param file_priv DRM file private.
+ *
+ * Frees each buffer associated with \p file_priv not already on the hardware.
+ */
+void drm_core_reclaim_buffers(struct drm_device *dev,
+                             struct drm_file *file_priv)
+{
+       struct drm_device_dma *dma = dev->dma;
+       int i;
+
+       if (!dma)
+               return;
+       for (i = 0; i < dma->buf_count; i++) {
+               if (dma->buflist[i]->file_priv == file_priv) {
+                       switch (dma->buflist[i]->list) {
+                       case DRM_LIST_NONE:
+                               drm_free_buffer(dev, dma->buflist[i]);
+                               break;
+                       case DRM_LIST_WAIT:
+                               dma->buflist[i]->list = DRM_LIST_RECLAIM;
+                               break;
+                       default:
+                               /* Buffer already on hardware. */
+                               break;
+                       }
+               }
+       }
+}
+
+EXPORT_SYMBOL(drm_core_reclaim_buffers);
diff --git a/services4/3rdparty/linux_drm/drm_dp_i2c_helper.c b/services4/3rdparty/linux_drm/drm_dp_i2c_helper.c
new file mode 100644 (file)
index 0000000..f7eba0a
--- /dev/null
@@ -0,0 +1,208 @@
+/*
+ * Copyright © 2009 Keith Packard
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/i2c.h>
+#include "drm_dp_helper.h"
+#include "drmP.h"
+
+/* Run a single AUX_CH I2C transaction, writing/reading data as necessary */
+static int
+i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
+                           uint8_t write_byte, uint8_t *read_byte)
+{
+       struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+       int ret;
+       
+       ret = (*algo_data->aux_ch)(adapter, mode,
+                                  write_byte, read_byte);
+       return ret;
+}
+
+/*
+ * I2C over AUX CH
+ */
+
+/*
+ * Send the address. If the I2C link is running, this 'restarts'
+ * the connection with the new address, this is used for doing
+ * a write followed by a read (as needed for DDC)
+ */
+static int
+i2c_algo_dp_aux_address(struct i2c_adapter *adapter, u16 address, bool reading)
+{
+       struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+       int mode = MODE_I2C_START;
+       int ret;
+
+       if (reading)
+               mode |= MODE_I2C_READ;
+       else
+               mode |= MODE_I2C_WRITE;
+       algo_data->address = address;
+       algo_data->running = true;
+       ret = i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
+       return ret;
+}
+
+/*
+ * Stop the I2C transaction. This closes out the link, sending
+ * a bare address packet with the MOT bit turned off
+ */
+static void
+i2c_algo_dp_aux_stop(struct i2c_adapter *adapter, bool reading)
+{
+       struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+       int mode = MODE_I2C_STOP;
+
+       if (reading)
+               mode |= MODE_I2C_READ;
+       else
+               mode |= MODE_I2C_WRITE;
+       if (algo_data->running) {
+               (void) i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
+               algo_data->running = false;
+       }
+}
+
+/*
+ * Write a single byte to the current I2C address; the
+ * I2C link must be running or this returns -EIO
+ */
+static int
+i2c_algo_dp_aux_put_byte(struct i2c_adapter *adapter, u8 byte)
+{
+       struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+       int ret;
+
+       if (!algo_data->running)
+               return -EIO;
+
+       ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_WRITE, byte, NULL);
+       return ret;
+}
+
+/*
+ * Read a single byte from the current I2C address, the
+ * I2C link must be running or this returns -EIO
+ */
+static int
+i2c_algo_dp_aux_get_byte(struct i2c_adapter *adapter, u8 *byte_ret)
+{
+       struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+       int ret;
+
+       if (!algo_data->running)
+               return -EIO;
+
+       ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_READ, 0, byte_ret);
+       return ret;
+}
+
+static int
+i2c_algo_dp_aux_xfer(struct i2c_adapter *adapter,
+                    struct i2c_msg *msgs,
+                    int num)
+{
+       int ret = 0;
+       bool reading = false;
+       int m;
+       int b;
+
+       for (m = 0; m < num; m++) {
+               u16 len = msgs[m].len;
+               u8 *buf = msgs[m].buf;
+               reading = (msgs[m].flags & I2C_M_RD) != 0;
+               ret = i2c_algo_dp_aux_address(adapter, msgs[m].addr, reading);
+               if (ret < 0)
+                       break;
+               if (reading) {
+                       for (b = 0; b < len; b++) {
+                               ret = i2c_algo_dp_aux_get_byte(adapter, &buf[b]);
+                               if (ret < 0)
+                                       break;
+                       }
+               } else {
+                       for (b = 0; b < len; b++) {
+                               ret = i2c_algo_dp_aux_put_byte(adapter, buf[b]);
+                               if (ret < 0)
+                                       break;
+                       }
+               }
+               if (ret < 0)
+                       break;
+       }
+       if (ret >= 0)
+               ret = num;
+       i2c_algo_dp_aux_stop(adapter, reading);
+       DRM_DEBUG_KMS("dp_aux_xfer return %d\n", ret);
+       return ret;
+}
+
+static u32
+i2c_algo_dp_aux_functionality(struct i2c_adapter *adapter)
+{
+       return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+              I2C_FUNC_SMBUS_READ_BLOCK_DATA |
+              I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
+              I2C_FUNC_10BIT_ADDR;
+}
+
+static const struct i2c_algorithm i2c_dp_aux_algo = {
+       .master_xfer    = i2c_algo_dp_aux_xfer,
+       .functionality  = i2c_algo_dp_aux_functionality,
+};
+
+static void
+i2c_dp_aux_reset_bus(struct i2c_adapter *adapter)
+{
+       (void) i2c_algo_dp_aux_address(adapter, 0, false);
+       (void) i2c_algo_dp_aux_stop(adapter, false);
+                                          
+}
+
+static int
+i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter)
+{
+       adapter->algo = &i2c_dp_aux_algo;
+       adapter->retries = 3;
+       i2c_dp_aux_reset_bus(adapter);
+       return 0;
+}
+
+int
+i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
+{
+       int error;
+       
+       error = i2c_dp_aux_prepare_bus(adapter);
+       if (error)
+               return error;
+       error = i2c_add_adapter(adapter);
+       return error;
+}
+EXPORT_SYMBOL(i2c_dp_aux_add_bus);
diff --git a/services4/3rdparty/linux_drm/drm_drawable.c b/services4/3rdparty/linux_drm/drm_drawable.c
new file mode 100644 (file)
index 0000000..c53c976
--- /dev/null
@@ -0,0 +1,198 @@
+/**
+ * \file drm_drawable.c
+ * IOCTLs for drawables
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ * \author Michel Dänzer <michel@tungstengraphics.com>
+ */
+
+/*
+ * Created: Tue Feb  2 08:37:54 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, North Dakota.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+
+/**
+ * Allocate drawable ID and memory to store information about it.
+ */
+int drm_adddraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       unsigned long irqflags;
+       struct drm_draw *draw = data;
+       int new_id = 0;
+       int ret;
+
+again:
+       if (idr_pre_get(&dev->drw_idr, GFP_KERNEL) == 0) {
+               DRM_ERROR("Out of memory expanding drawable idr\n");
+               return -ENOMEM;
+       }
+
+       spin_lock_irqsave(&dev->drw_lock, irqflags);
+       ret = idr_get_new_above(&dev->drw_idr, NULL, 1, &new_id);
+       if (ret == -EAGAIN) {
+               spin_unlock_irqrestore(&dev->drw_lock, irqflags);
+               goto again;
+       }
+
+       spin_unlock_irqrestore(&dev->drw_lock, irqflags);
+
+       draw->handle = new_id;
+
+       DRM_DEBUG("%d\n", draw->handle);
+
+       return 0;
+}
+
+/**
+ * Free drawable ID and memory to store information about it.
+ */
+int drm_rmdraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       struct drm_draw *draw = data;
+       unsigned long irqflags;
+       struct drm_drawable_info *info;
+
+       spin_lock_irqsave(&dev->drw_lock, irqflags);
+
+       info = drm_get_drawable_info(dev, draw->handle);
+       if (info == NULL) {
+               spin_unlock_irqrestore(&dev->drw_lock, irqflags);
+               return -EINVAL;
+       }
+       kfree(info->rects);
+       kfree(info);
+
+       idr_remove(&dev->drw_idr, draw->handle);
+
+       spin_unlock_irqrestore(&dev->drw_lock, irqflags);
+       DRM_DEBUG("%d\n", draw->handle);
+       return 0;
+}
+
+int drm_update_drawable_info(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       struct drm_update_draw *update = data;
+       unsigned long irqflags;
+       struct drm_clip_rect *rects;
+       struct drm_drawable_info *info;
+       int err;
+
+       info = idr_find(&dev->drw_idr, update->handle);
+       if (!info) {
+               info = kzalloc(sizeof(*info), GFP_KERNEL);
+               if (!info)
+                       return -ENOMEM;
+               if (IS_ERR(idr_replace(&dev->drw_idr, info, update->handle))) {
+                       DRM_ERROR("No such drawable %d\n", update->handle);
+                       kfree(info);
+                       return -EINVAL;
+               }
+       }
+
+       switch (update->type) {
+       case DRM_DRAWABLE_CLIPRECTS:
+               if (update->num == 0)
+                       rects = NULL;
+               else if (update->num != info->num_rects) {
+                       rects = kmalloc(update->num *
+                                       sizeof(struct drm_clip_rect),
+                                       GFP_KERNEL);
+               } else
+                       rects = info->rects;
+
+               if (update->num && !rects) {
+                       DRM_ERROR("Failed to allocate cliprect memory\n");
+                       err = -ENOMEM;
+                       goto error;
+               }
+
+               if (update->num && DRM_COPY_FROM_USER(rects,
+                                                    (struct drm_clip_rect __user *)
+                                                    (unsigned long)update->data,
+                                                    update->num *
+                                                    sizeof(*rects))) {
+                       DRM_ERROR("Failed to copy cliprects from userspace\n");
+                       err = -EFAULT;
+                       goto error;
+               }
+
+               spin_lock_irqsave(&dev->drw_lock, irqflags);
+
+               if (rects != info->rects) {
+                       kfree(info->rects);
+               }
+
+               info->rects = rects;
+               info->num_rects = update->num;
+
+               spin_unlock_irqrestore(&dev->drw_lock, irqflags);
+
+               DRM_DEBUG("Updated %d cliprects for drawable %d\n",
+                         info->num_rects, update->handle);
+               break;
+       default:
+               DRM_ERROR("Invalid update type %d\n", update->type);
+               return -EINVAL;
+       }
+
+       return 0;
+
+error:
+       if (rects != info->rects)
+               kfree(rects);
+
+       return err;
+}
+
+/**
+ * Caller must hold the drawable spinlock!
+ */
+struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev, drm_drawable_t id)
+{
+       return idr_find(&dev->drw_idr, id);
+}
+EXPORT_SYMBOL(drm_get_drawable_info);
+
+static int drm_drawable_free(int idr, void *p, void *data)
+{
+       struct drm_drawable_info *info = p;
+
+       if (info) {
+               kfree(info->rects);
+               kfree(info);
+       }
+
+       return 0;
+}
+
+void drm_drawable_free_all(struct drm_device *dev)
+{
+       idr_for_each(&dev->drw_idr, drm_drawable_free, NULL);
+       idr_remove_all(&dev->drw_idr);
+}
diff --git a/services4/3rdparty/linux_drm/drm_drv.c b/services4/3rdparty/linux_drm/drm_drv.c
new file mode 100644 (file)
index 0000000..271835a
--- /dev/null
@@ -0,0 +1,529 @@
+/**
+ * \file drm_drv.c
+ * Generic driver template
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ *
+ * To use this template, you must at least define the following (samples
+ * given for the MGA driver):
+ *
+ * \code
+ * #define DRIVER_AUTHOR       "VA Linux Systems, Inc."
+ *
+ * #define DRIVER_NAME         "mga"
+ * #define DRIVER_DESC         "Matrox G200/G400"
+ * #define DRIVER_DATE         "20001127"
+ *
+ * #define drm_x               mga_##x
+ * \endcode
+ */
+
+/*
+ * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include "drmP.h"
+#include "drm_core.h"
+
+
+static int drm_version(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv);
+
+#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
+       [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0}
+
+/** Ioctl table */
+static struct drm_ioctl_desc drm_ioctls[] = {
+       DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0),
+       DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
+       DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
+       DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0),
+       DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
+       DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
+       DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
+       /* The DRM_IOCTL_DMA ioctl should be defined by the driver. */
+       DRM_IOCTL_DEF(DRM_IOCTL_DMA, NULL, DRM_AUTH),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+
+#if __OS_HAS_AGP
+       DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+#endif
+
+       DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER|DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED)
+};
+
+#define DRM_CORE_IOCTL_COUNT   ARRAY_SIZE( drm_ioctls )
+
+/**
+ * Take down the DRM device.
+ *
+ * \param dev DRM device structure.
+ *
+ * Frees every resource in \p dev.
+ *
+ * \sa drm_device
+ */
+int drm_lastclose(struct drm_device * dev)
+{
+       struct drm_vma_entry *vma, *vma_temp;
+       int i;
+
+       DRM_DEBUG("\n");
+
+       if (dev->driver->lastclose)
+               dev->driver->lastclose(dev);
+       DRM_DEBUG("driver lastclose completed\n");
+
+       if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET))
+               drm_irq_uninstall(dev);
+
+       mutex_lock(&dev->struct_mutex);
+
+       /* Clear AGP information */
+       if (drm_core_has_AGP(dev) && dev->agp &&
+                       !drm_core_check_feature(dev, DRIVER_MODESET)) {
+               struct drm_agp_mem *entry, *tempe;
+
+               /* Remove AGP resources, but leave dev->agp
+                  intact until drv_cleanup is called. */
+               list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
+                       if (entry->bound)
+                               drm_unbind_agp(entry->memory);
+                       drm_free_agp(entry->memory, entry->pages);
+                       kfree(entry);
+               }
+               INIT_LIST_HEAD(&dev->agp->memory);
+
+               if (dev->agp->acquired)
+                       drm_agp_release(dev);
+
+               dev->agp->acquired = 0;
+               dev->agp->enabled = 0;
+       }
+       if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg &&
+           !drm_core_check_feature(dev, DRIVER_MODESET)) {
+               drm_sg_cleanup(dev->sg);
+               dev->sg = NULL;
+       }
+
+       /* Clear vma list (only built for debugging) */
+       list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
+               list_del(&vma->head);
+               kfree(vma);
+       }
+
+       if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist) {
+               for (i = 0; i < dev->queue_count; i++) {
+                       kfree(dev->queuelist[i]);
+                       dev->queuelist[i] = NULL;
+               }
+               kfree(dev->queuelist);
+               dev->queuelist = NULL;
+       }
+       dev->queue_count = 0;
+
+       if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
+           !drm_core_check_feature(dev, DRIVER_MODESET))
+               drm_dma_takedown(dev);
+
+       dev->dev_mapping = NULL;
+       mutex_unlock(&dev->struct_mutex);
+
+       DRM_DEBUG("lastclose completed\n");
+       return 0;
+}
+
+/**
+ * Module initialization. Called via init_module at module load time, or via
+ * linux/init/main.c (this is not currently supported).
+ *
+ * \return zero on success or a negative number on failure.
+ *
+ * Initializes an array of drm_device structures, and attempts to
+ * initialize all available devices, using consecutive minors, registering the
+ * stubs and initializing the device.
+ *
+ * Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
+ * after the initialization for driver customization.
+ */
+int drm_init(struct drm_driver *driver)
+{
+       DRM_DEBUG("\n");
+       INIT_LIST_HEAD(&driver->device_list);
+
+       if (driver->driver_features & DRIVER_USE_PLATFORM_DEVICE)
+               return drm_platform_init(driver);
+       else
+               return drm_pci_init(driver);
+}
+
+EXPORT_SYMBOL(drm_init);
+
+void drm_exit(struct drm_driver *driver)
+{
+       struct drm_device *dev, *tmp;
+       DRM_DEBUG("\n");
+
+       if (driver->driver_features & DRIVER_MODESET) {
+               pci_unregister_driver(&driver->pci_driver);
+       } else {
+               list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
+                       drm_put_dev(dev);
+       }
+
+       DRM_INFO("Module unloaded\n");
+}
+
+EXPORT_SYMBOL(drm_exit);
+
+/** File operations structure */
+static const struct file_operations drm_stub_fops = {
+       .owner = THIS_MODULE,
+       .open = drm_stub_open,
+       .llseek = noop_llseek,
+};
+
+static int __init drm_core_init(void)
+{
+       int ret = -ENOMEM;
+
+       drm_global_init();
+       idr_init(&drm_minors_idr);
+
+       if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
+               goto err_p1;
+
+       drm_class = drm_sysfs_create(THIS_MODULE, "drm");
+       if (IS_ERR(drm_class)) {
+               printk(KERN_ERR "DRM: Error creating drm class.\n");
+               ret = PTR_ERR(drm_class);
+               goto err_p2;
+       }
+
+       drm_proc_root = proc_mkdir("dri", NULL);
+       if (!drm_proc_root) {
+               DRM_ERROR("Cannot create /proc/dri\n");
+               ret = -1;
+               goto err_p3;
+       }
+
+       drm_debugfs_root = debugfs_create_dir("dri", NULL);
+       if (!drm_debugfs_root) {
+               DRM_ERROR("Cannot create /sys/kernel/debug/dri\n");
+               ret = -1;
+               goto err_p3;
+       }
+
+       DRM_INFO("Initialized %s %d.%d.%d %s\n",
+                CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
+       return 0;
+err_p3:
+       drm_sysfs_destroy();
+err_p2:
+       unregister_chrdev(DRM_MAJOR, "drm");
+
+       idr_destroy(&drm_minors_idr);
+err_p1:
+       return ret;
+}
+
+static void __exit drm_core_exit(void)
+{
+       remove_proc_entry("dri", NULL);
+       debugfs_remove(drm_debugfs_root);
+       drm_sysfs_destroy();
+
+       unregister_chrdev(DRM_MAJOR, "drm");
+
+       idr_remove_all(&drm_minors_idr);
+       idr_destroy(&drm_minors_idr);
+}
+
+module_init(drm_core_init);
+module_exit(drm_core_exit);
+
+/**
+ * Copy an IOCTL return string to user space
+ */
+static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
+{
+       int len;
+
+       /* don't overflow userbuf */
+       len = strlen(value);
+       if (len > *buf_len)
+               len = *buf_len;
+
+       /* let userspace know exact length of driver value (which could be
+        * larger than the userspace-supplied buffer) */
+       *buf_len = strlen(value);
+
+       /* finally, try filling in the userbuf */
+       if (len && buf)
+               if (copy_to_user(buf, value, len))
+                       return -EFAULT;
+       return 0;
+}
+
+/**
+ * Get version information
+ *
+ * \param inode device inode.
+ * \param filp file pointer.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_version structure.
+ * \return zero on success or negative number on failure.
+ *
+ * Fills in the version information in \p arg.
+ */
+static int drm_version(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv)
+{
+       struct drm_version *version = data;
+       int err;
+
+       version->version_major = dev->driver->major;
+       version->version_minor = dev->driver->minor;
+       version->version_patchlevel = dev->driver->patchlevel;
+       err = drm_copy_field(version->name, &version->name_len,
+                       dev->driver->name);
+       if (!err)
+               err = drm_copy_field(version->date, &version->date_len,
+                               dev->driver->date);
+       if (!err)
+               err = drm_copy_field(version->desc, &version->desc_len,
+                               dev->driver->desc);
+
+       return err;
+}
+
+/**
+ * Called whenever a process performs an ioctl on /dev/drm.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument.
+ * \return zero on success or negative number on failure.
+ *
+ * Looks up the ioctl function in the ::ioctls table, checking for root
+ * privileges if so required, and dispatches to the respective function.
+ */
+long drm_ioctl(struct file *filp,
+             unsigned int cmd, unsigned long arg)
+{
+       struct drm_file *file_priv = filp->private_data;
+       struct drm_device *dev;
+       struct drm_ioctl_desc *ioctl;
+       drm_ioctl_t *func;
+       unsigned int nr = DRM_IOCTL_NR(cmd);
+       int retcode = -EINVAL;
+       char stack_kdata[128];
+       char *kdata = NULL;
+       unsigned int usize, asize;
+
+       dev = file_priv->minor->dev;
+       atomic_inc(&dev->ioctl_count);
+       atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
+       ++file_priv->ioctl_count;
+
+       DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
+                 task_pid_nr(current), cmd, nr,
+                 (long)old_encode_dev(file_priv->minor->device),
+                 file_priv->authenticated);
+
+       if ((nr >= DRM_CORE_IOCTL_COUNT) &&
+           ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
+               goto err_i1;
+       if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) &&
+           (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
+               u32 drv_size;
+               ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
+               drv_size = _IOC_SIZE(ioctl->cmd_drv);
+               usize = asize = _IOC_SIZE(cmd);
+               if (drv_size > asize)
+                       asize = drv_size;
+       }
+       else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
+               ioctl = &drm_ioctls[nr];
+               cmd = ioctl->cmd;
+               usize = asize = _IOC_SIZE(cmd);
+       } else
+               goto err_i1;
+
+       /* Do not trust userspace, use our own definition */
+       func = ioctl->func;
+       /* is there a local override? */
+       if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl)
+               func = dev->driver->dma_ioctl;
+
+       if (!func) {
+               DRM_DEBUG("no function\n");
+               retcode = -EINVAL;
+       } else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) ||
+                  ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) ||
+                  ((ioctl->flags & DRM_MASTER) && !file_priv->is_master) ||
+                  (!(ioctl->flags & DRM_CONTROL_ALLOW) && (file_priv->minor->type == DRM_MINOR_CONTROL))) {
+               retcode = -EACCES;
+       } else {
+               if (cmd & (IOC_IN | IOC_OUT)) {
+                       if (asize <= sizeof(stack_kdata)) {
+                               kdata = stack_kdata;
+                       } else {
+                               kdata = kmalloc(asize, GFP_KERNEL);
+                               if (!kdata) {
+                                       retcode = -ENOMEM;
+                                       goto err_i1;
+                               }
+                       }
+               }
+
+               if (cmd & IOC_IN) {
+                       if (copy_from_user(kdata, (void __user *)arg,
+                                          usize) != 0) {
+                               retcode = -EFAULT;
+                               goto err_i1;
+                       }
+               } else
+                       memset(kdata, 0, usize);
+
+               if (ioctl->flags & DRM_UNLOCKED)
+                       retcode = func(dev, kdata, file_priv);
+               else {
+                       mutex_lock(&drm_global_mutex);
+                       retcode = func(dev, kdata, file_priv);
+                       mutex_unlock(&drm_global_mutex);
+               }
+
+               if (cmd & IOC_OUT) {
+                       if (copy_to_user((void __user *)arg, kdata,
+                                        usize) != 0)
+                               retcode = -EFAULT;
+               }
+       }
+
+      err_i1:
+       if (kdata != stack_kdata)
+               kfree(kdata);
+       atomic_dec(&dev->ioctl_count);
+       if (retcode)
+               DRM_DEBUG("ret = %x\n", retcode);
+       return retcode;
+}
+
+EXPORT_SYMBOL(drm_ioctl);
+
+struct drm_local_map *drm_getsarea(struct drm_device *dev)
+{
+       struct drm_map_list *entry;
+
+       list_for_each_entry(entry, &dev->maplist, head) {
+               if (entry->map && entry->map->type == _DRM_SHM &&
+                   (entry->map->flags & _DRM_CONTAINS_LOCK)) {
+                       return entry->map;
+               }
+       }
+       return NULL;
+}
+EXPORT_SYMBOL(drm_getsarea);
diff --git a/services4/3rdparty/linux_drm/drm_edid.c b/services4/3rdparty/linux_drm/drm_edid.c
new file mode 100644 (file)
index 0000000..a245d17
--- /dev/null
@@ -0,0 +1,1506 @@
+/*
+ * Copyright (c) 2006 Luc Verhaegen (quirks list)
+ * Copyright (c) 2007-2008 Intel Corporation
+ *   Jesse Barnes <jesse.barnes@intel.com>
+ * Copyright 2010 Red Hat, Inc.
+ *
+ * DDC probing routines (drm_ddc_read & drm_do_probe_ddc_edid) originally from
+ * FB layer.
+ *   Copyright (C) 2006 Dennis Munsie <dmunsie@cecropia.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include "drmP.h"
+#include "drm_edid.h"
+#include "drm_edid_modes.h"
+
+#define version_greater(edid, maj, min) \
+       (((edid)->version > (maj)) || \
+        ((edid)->version == (maj) && (edid)->revision > (min)))
+
+#define EDID_EST_TIMINGS 16
+#define EDID_STD_TIMINGS 8
+#define EDID_DETAILED_TIMINGS 4
+
+/*
+ * EDID blocks out in the wild have a variety of bugs, try to collect
+ * them here (note that userspace may work around broken monitors first,
+ * but fixes should make their way here so that the kernel "just works"
+ * on as many displays as possible).
+ */
+
+/* First detailed mode wrong, use largest 60Hz mode */
+#define EDID_QUIRK_PREFER_LARGE_60             (1 << 0)
+/* Reported 135MHz pixel clock is too high, needs adjustment */
+#define EDID_QUIRK_135_CLOCK_TOO_HIGH          (1 << 1)
+/* Prefer the largest mode at 75 Hz */
+#define EDID_QUIRK_PREFER_LARGE_75             (1 << 2)
+/* Detail timing is in cm not mm */
+#define EDID_QUIRK_DETAILED_IN_CM              (1 << 3)
+/* Detailed timing descriptors have bogus size values, so just take the
+ * maximum size and use that.
+ */
+#define EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE   (1 << 4)
+/* Monitor forgot to set the first detailed is preferred bit. */
+#define EDID_QUIRK_FIRST_DETAILED_PREFERRED    (1 << 5)
+/* use +hsync +vsync for detailed mode */
+#define EDID_QUIRK_DETAILED_SYNC_PP            (1 << 6)
+
+struct detailed_mode_closure {
+       struct drm_connector *connector;
+       struct edid *edid;
+       bool preferred;
+       u32 quirks;
+       int modes;
+};
+
+#define LEVEL_DMT      0
+#define LEVEL_GTF      1
+#define LEVEL_GTF2     2
+#define LEVEL_CVT      3
+
+static struct edid_quirk {
+       char *vendor;
+       int product_id;
+       u32 quirks;
+} edid_quirk_list[] = {
+       /* Acer AL1706 */
+       { "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 },
+       /* Acer F51 */
+       { "API", 0x7602, EDID_QUIRK_PREFER_LARGE_60 },
+       /* Unknown Acer */
+       { "ACR", 2423, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
+
+       /* Belinea 10 15 55 */
+       { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
+       { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },
+
+       /* Envision Peripherals, Inc. EN-7100e */
+       { "EPI", 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH },
+       /* Envision EN2028 */
+       { "EPI", 8232, EDID_QUIRK_PREFER_LARGE_60 },
+
+       /* Funai Electronics PM36B */
+       { "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 |
+         EDID_QUIRK_DETAILED_IN_CM },
+
+       /* LG Philips LCD LP154W01-A5 */
+       { "LPL", 0, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
+       { "LPL", 0x2a00, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
+
+       /* Philips 107p5 CRT */
+       { "PHL", 57364, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
+
+       /* Proview AY765C */
+       { "PTS", 765, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
+
+       /* Samsung SyncMaster 205BW.  Note: irony */
+       { "SAM", 541, EDID_QUIRK_DETAILED_SYNC_PP },
+       /* Samsung SyncMaster 22[5-6]BW */
+       { "SAM", 596, EDID_QUIRK_PREFER_LARGE_60 },
+       { "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 },
+};
+
+/*** DDC fetch and block validation ***/
+
+static const u8 edid_header[] = {
+       0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
+};
+
+/*
+ * Sanity check the EDID block (base or extension).  Return 0 if the block
+ * doesn't check out, or 1 if it's valid.
+ */
+static bool
+drm_edid_block_valid(u8 *raw_edid)
+{
+       int i;
+       u8 csum = 0;
+       struct edid *edid = (struct edid *)raw_edid;
+
+       if (raw_edid[0] == 0x00) {
+               int score = 0;
+
+               for (i = 0; i < sizeof(edid_header); i++)
+                       if (raw_edid[i] == edid_header[i])
+                               score++;
+
+               if (score == 8) ;
+               else if (score >= 6) {
+                       DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
+                       memcpy(raw_edid, edid_header, sizeof(edid_header));
+               } else {
+                       goto bad;
+               }
+       }
+
+       for (i = 0; i < EDID_LENGTH; i++)
+               csum += raw_edid[i];
+       if (csum) {
+               DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum);
+
+               /* allow CEA to slide through, switches mangle this */
+               if (raw_edid[0] != 0x02)
+                       goto bad;
+       }
+
+       /* per-block-type checks */
+       switch (raw_edid[0]) {
+       case 0: /* base */
+               if (edid->version != 1) {
+                       DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
+                       goto bad;
+               }
+
+               if (edid->revision > 4)
+                       DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n");
+               break;
+
+       default:
+               break;
+       }
+
+       return 1;
+
+bad:
+       if (raw_edid) {
+               DRM_ERROR("Raw EDID:\n");
+               print_hex_dump_bytes(KERN_ERR, DUMP_PREFIX_NONE, raw_edid, EDID_LENGTH);
+               printk("\n");
+       }
+       return 0;
+}
+
+/**
+ * drm_edid_is_valid - sanity check EDID data
+ * @edid: EDID data
+ *
+ * Sanity-check an entire EDID record (including extensions)
+ */
+bool drm_edid_is_valid(struct edid *edid)
+{
+       int i;
+       u8 *raw = (u8 *)edid;
+
+       if (!edid)
+               return false;
+
+       for (i = 0; i <= edid->extensions; i++)
+               if (!drm_edid_block_valid(raw + i * EDID_LENGTH))
+                       return false;
+
+       return true;
+}
+EXPORT_SYMBOL(drm_edid_is_valid);
+
+#define DDC_ADDR 0x50
+#define DDC_SEGMENT_ADDR 0x30
+/**
+ * Get EDID information via I2C.
+ *
+ * \param adapter : i2c device adaptor
+ * \param buf     : EDID data buffer to be filled
+ * \param len     : EDID data buffer length
+ * \return 0 on success or -1 on failure.
+ *
+ * Try to fetch EDID information by calling i2c driver function.
+ */
+static int
+drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
+                     int block, int len)
+{
+       unsigned char start = block * EDID_LENGTH;
+       struct i2c_msg msgs[] = {
+               {
+                       .addr   = DDC_ADDR,
+                       .flags  = 0,
+                       .len    = 1,
+                       .buf    = &start,
+               }, {
+                       .addr   = DDC_ADDR,
+                       .flags  = I2C_M_RD,
+                       .len    = len,
+                       .buf    = buf,
+               }
+       };
+
+       if (i2c_transfer(adapter, msgs, 2) == 2)
+               return 0;
+
+       return -1;
+}
+
+static u8 *
+drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
+{
+       int i, j = 0, valid_extensions = 0;
+       u8 *block, *new;
+
+       if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
+               return NULL;
+
+       /* base block fetch */
+       for (i = 0; i < 4; i++) {
+               if (drm_do_probe_ddc_edid(adapter, block, 0, EDID_LENGTH))
+                       goto out;
+               if (drm_edid_block_valid(block))
+                       break;
+       }
+       if (i == 4)
+               goto carp;
+
+       /* if there's no extensions, we're done */
+       if (block[0x7e] == 0)
+               return block;
+
+       new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL);
+       if (!new)
+               goto out;
+       block = new;
+
+       for (j = 1; j <= block[0x7e]; j++) {
+               for (i = 0; i < 4; i++) {
+                       if (drm_do_probe_ddc_edid(adapter,
+                                 block + (valid_extensions + 1) * EDID_LENGTH,
+                                 j, EDID_LENGTH))
+                               goto out;
+                       if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH)) {
+                               valid_extensions++;
+                               break;
+                       }
+               }
+               if (i == 4)
+                       dev_warn(connector->dev->dev,
+                        "%s: Ignoring invalid EDID block %d.\n",
+                        drm_get_connector_name(connector), j);
+       }
+
+       if (valid_extensions != block[0x7e]) {
+               block[EDID_LENGTH-1] += block[0x7e] - valid_extensions;
+               block[0x7e] = valid_extensions;
+               new = krealloc(block, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
+               if (!new)
+                       goto out;
+               block = new;
+       }
+
+       return block;
+
+carp:
+       dev_warn(connector->dev->dev, "%s: EDID block %d invalid.\n",
+                drm_get_connector_name(connector), j);
+
+out:
+       kfree(block);
+       return NULL;
+}
+
+/**
+ * Probe DDC presence.
+ *
+ * \param adapter : i2c device adaptor
+ * \return 1 on success
+ */
+static bool
+drm_probe_ddc(struct i2c_adapter *adapter)
+{
+       unsigned char out;
+
+       return (drm_do_probe_ddc_edid(adapter, &out, 0, 1) == 0);
+}
+
+/**
+ * drm_get_edid - get EDID data, if available
+ * @connector: connector we're probing
+ * @adapter: i2c adapter to use for DDC
+ *
+ * Poke the given i2c channel to grab EDID data if possible.  If found,
+ * attach it to the connector.
+ *
+ * Return edid data or NULL if we couldn't find any.
+ */
+struct edid *drm_get_edid(struct drm_connector *connector,
+                         struct i2c_adapter *adapter)
+{
+       struct edid *edid = NULL;
+
+       /* Only attempt the (slow) full EDID fetch if something ACKs DDC. */
+       if (drm_probe_ddc(adapter))
+               edid = (struct edid *)drm_do_get_edid(connector, adapter);
+
+       /* Cache the raw blob on the connector; NULL clears any stale EDID.
+        * Ownership of the allocation stays with the caller/connector. */
+       connector->display_info.raw_edid = (char *)edid;
+
+       return edid;
+
+}
+EXPORT_SYMBOL(drm_get_edid);
+
+/*** EDID parsing ***/
+
+/**
+ * edid_vendor - match a string against EDID's obfuscated vendor field
+ * @edid: EDID to match
+ * @vendor: vendor string
+ *
+ * Returns true if @vendor is in @edid, false otherwise
+ */
+static bool edid_vendor(struct edid *edid, char *vendor)
+{
+       char edid_vendor[3];
+
+       /* mfg_id packs three letters as 5-bit values (1 = 'A'); unpack by
+        * adding '@' (0x40) to each 5-bit field. */
+       edid_vendor[0] = ((edid->mfg_id[0] & 0x7c) >> 2) + '@';
+       edid_vendor[1] = (((edid->mfg_id[0] & 0x3) << 3) |
+                         ((edid->mfg_id[1] & 0xe0) >> 5)) + '@';
+       edid_vendor[2] = (edid->mfg_id[1] & 0x1f) + '@';
+
+       /* edid_vendor[] is not NUL-terminated, hence the bounded compare. */
+       return !strncmp(edid_vendor, vendor, 3);
+}
+
+/**
+ * edid_get_quirks - return quirk flags for a given EDID
+ * @edid: EDID to process
+ *
+ * This tells subsequent routines what fixes they need to apply.
+ * Returns 0 when no quirk entry matches this vendor/product pair.
+ */
+static u32 edid_get_quirks(struct edid *edid)
+{
+       struct edid_quirk *quirk;
+       int i;
+
+       /* Linear scan is fine; the quirk table is small and this runs
+        * only on (re)probe. */
+       for (i = 0; i < ARRAY_SIZE(edid_quirk_list); i++) {
+               quirk = &edid_quirk_list[i];
+
+               if (edid_vendor(edid, quirk->vendor) &&
+                   (EDID_PRODUCT_ID(edid) == quirk->product_id))
+                       return quirk->quirks;
+       }
+
+       return 0;
+}
+
+#define MODE_SIZE(m) ((m)->hdisplay * (m)->vdisplay)
+/* Distance of mode (m)'s vrefresh from target refresh (r).  Previously this
+ * macro ignored its second argument and silently captured the caller-local
+ * 'target_refresh' variable; all call sites pass target_refresh, so using
+ * (r) is behavior-identical but no longer fragile. */
+#define MODE_REFRESH_DIFF(m,r) (abs((m)->vrefresh - (r)))
+
+/**
+ * edid_fixup_preferred - set preferred modes based on quirk list
+ * @connector: has mode list to fix up
+ * @quirks: quirks list
+ *
+ * Walk the mode list for @connector, clearing the preferred status
+ * on existing modes and setting it anew for the right mode ala @quirks.
+ */
+static void edid_fixup_preferred(struct drm_connector *connector,
+                                u32 quirks)
+{
+       struct drm_display_mode *t, *cur_mode, *preferred_mode;
+       int target_refresh = 0;
+
+       if (list_empty(&connector->probed_modes))
+               return;
+
+       /* target_refresh stays 0 when neither quirk bit is set; the size
+        * comparison below then dominates the selection. */
+       if (quirks & EDID_QUIRK_PREFER_LARGE_60)
+               target_refresh = 60;
+       if (quirks & EDID_QUIRK_PREFER_LARGE_75)
+               target_refresh = 75;
+
+       /* Seed with the first probed mode, then scan for a better one. */
+       preferred_mode = list_first_entry(&connector->probed_modes,
+                                         struct drm_display_mode, head);
+
+       list_for_each_entry_safe(cur_mode, t, &connector->probed_modes, head) {
+               cur_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
+
+               if (cur_mode == preferred_mode)
+                       continue;
+
+               /* Largest mode is preferred */
+               if (MODE_SIZE(cur_mode) > MODE_SIZE(preferred_mode))
+                       preferred_mode = cur_mode;
+
+               /* At a given size, try to get closest to target refresh */
+               if ((MODE_SIZE(cur_mode) == MODE_SIZE(preferred_mode)) &&
+                   MODE_REFRESH_DIFF(cur_mode, target_refresh) <
+                   MODE_REFRESH_DIFF(preferred_mode, target_refresh)) {
+                       preferred_mode = cur_mode;
+               }
+       }
+
+       preferred_mode->type |= DRM_MODE_TYPE_PREFERRED;
+}
+
+/*
+ * drm_mode_find_dmt - look up a DMT mode by size and refresh rate
+ * @dev: DRM device (modes are allocated against it)
+ * @hsize/@vsize: active width/height in pixels
+ * @fresh: vertical refresh in Hz
+ *
+ * Returns a freshly duplicated mode (caller owns it) or NULL if no
+ * DMT entry matches exactly.
+ */
+struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
+                                          int hsize, int vsize, int fresh)
+{
+       int i;
+       struct drm_display_mode *ptr, *mode;
+
+       mode = NULL;
+       for (i = 0; i < drm_num_dmt_modes; i++) {
+               ptr = &drm_dmt_modes[i];
+               if (hsize == ptr->hdisplay &&
+                       vsize == ptr->vdisplay &&
+                       fresh == drm_mode_vrefresh(ptr)) {
+                       /* get the expected default mode */
+                       mode = drm_mode_duplicate(dev, ptr);
+                       break;
+               }
+       }
+       return mode;
+}
+EXPORT_SYMBOL(drm_mode_find_dmt);
+
+/* Callback type used when walking detailed-timing descriptors. */
+typedef void detailed_cb(struct detailed_timing *timing, void *closure);
+
+/*
+ * Invoke @cb on each 18-byte detailed descriptor in a CEA extension
+ * block @ext.  Byte 1 is the revision, byte 2 the offset of the first
+ * descriptor.
+ */
+static void
+cea_for_each_detailed_block(u8 *ext, detailed_cb *cb, void *closure)
+{
+       int i, n = 0;
+       u8 rev = ext[0x01], d = ext[0x02];
+       u8 *det_base = ext + d;
+
+       switch (rev) {
+       case 0:
+               /* can't happen */
+               return;
+       case 1:
+               /* have to infer how many blocks we have, check pixel clock */
+               for (i = 0; i < 6; i++)
+                       if (det_base[18*i] || det_base[18*i+1])
+                               n++;
+               break;
+       default:
+               /* explicit count */
+               n = min(ext[0x03] & 0x0f, 6);
+               break;
+       }
+
+       for (i = 0; i < n; i++)
+               cb((struct detailed_timing *)(det_base + 18 * i), closure);
+}
+
+/*
+ * Invoke @cb on each detailed descriptor in a VTB extension block.
+ * Byte 1 is the version (only 1 is understood), byte 2 the descriptor
+ * count; descriptors start at offset 5 and are capped at 6.
+ */
+static void
+vtb_for_each_detailed_block(u8 *ext, detailed_cb *cb, void *closure)
+{
+       unsigned int i, n = min((int)ext[0x02], 6);
+       u8 *det_base = ext + 5;
+
+       if (ext[0x01] != 1)
+               return; /* unknown version */
+
+       for (i = 0; i < n; i++)
+               cb((struct detailed_timing *)(det_base + 18 * i), closure);
+}
+
+/*
+ * Walk every detailed-timing descriptor in the EDID: first the four in
+ * the base block, then those inside any CEA/VTB extension blocks
+ * (raw_edid[0x7e] is the extension count).
+ */
+static void
+drm_for_each_detailed_block(u8 *raw_edid, detailed_cb *cb, void *closure)
+{
+       int i;
+       struct edid *edid = (struct edid *)raw_edid;
+
+       if (edid == NULL)
+               return;
+
+       for (i = 0; i < EDID_DETAILED_TIMINGS; i++)
+               cb(&(edid->detailed_timings[i]), closure);
+
+       for (i = 1; i <= raw_edid[0x7e]; i++) {
+               u8 *ext = raw_edid + (i * EDID_LENGTH);
+               switch (*ext) {
+               case CEA_EXT:
+                       cea_for_each_detailed_block(ext, cb, closure);
+                       break;
+               case VTB_EXT:
+                       vtb_for_each_detailed_block(ext, cb, closure);
+                       break;
+               default:
+                       break;
+               }
+       }
+}
+
+/*
+ * detailed_cb: set *data to true if this descriptor is a monitor-range
+ * descriptor whose byte 15 advertises reduced-blanking support.
+ * Note: only ever sets true; caller must pre-initialize the flag.
+ */
+static void
+is_rb(struct detailed_timing *t, void *data)
+{
+       u8 *r = (u8 *)t;
+       if (r[3] == EDID_DETAIL_MONITOR_RANGE)
+               if (r[15] & 0x10)
+                       *(bool *)data = true;
+}
+
+/* EDID 1.4 defines this explicitly.  For EDID 1.3, we guess, badly. */
+static bool
+drm_monitor_supports_rb(struct edid *edid)
+{
+       if (edid->revision >= 4) {
+               /* Must start false: is_rb() only ever sets the flag to
+                * true, and no monitor-range descriptor may exist at all,
+                * in which case the previous uninitialized 'ret' was
+                * returned (undefined behavior). */
+               bool ret = false;
+               drm_for_each_detailed_block((u8 *)edid, is_rb, &ret);
+               return ret;
+       }
+
+       return ((edid->input & DRM_EDID_INPUT_DIGITAL) != 0);
+}
+
+/*
+ * detailed_cb: remember (in *data) the monitor-range descriptor that
+ * carries secondary-GTF data (byte 10 == 0x02 per EDID spec).
+ */
+static void
+find_gtf2(struct detailed_timing *t, void *data)
+{
+       u8 *r = (u8 *)t;
+       if (r[3] == EDID_DETAIL_MONITOR_RANGE && r[10] == 0x02)
+               *(u8 **)data = r;
+}
+
+/* Secondary GTF curve kicks in above some break frequency */
+/* Horizontal break frequency in kHz (byte 12 is stored in units of 2 kHz);
+ * 0 means no secondary curve present. */
+static int
+drm_gtf2_hbreak(struct edid *edid)
+{
+       u8 *r = NULL;
+       drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+       return r ? (r[12] * 2) : 0;
+}
+
+/* Secondary-curve C parameter (times 2), 0 if absent. */
+static int
+drm_gtf2_2c(struct edid *edid)
+{
+       u8 *r = NULL;
+       drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+       return r ? r[13] : 0;
+}
+
+/* Secondary-curve M parameter (little-endian 16-bit), 0 if absent. */
+static int
+drm_gtf2_m(struct edid *edid)
+{
+       u8 *r = NULL;
+       drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+       return r ? (r[15] << 8) + r[14] : 0;
+}
+
+/* Secondary-curve K parameter, 0 if absent. */
+static int
+drm_gtf2_k(struct edid *edid)
+{
+       u8 *r = NULL;
+       drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+       return r ? r[16] : 0;
+}
+
+/* Secondary-curve J parameter (times 2), 0 if absent. */
+static int
+drm_gtf2_2j(struct edid *edid)
+{
+       u8 *r = NULL;
+       drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+       return r ? r[17] : 0;
+}
+
+/**
+ * standard_timing_level - get std. timing level(CVT/GTF/DMT)
+ * @edid: EDID block to scan
+ *
+ * Decide which timing formula to use when synthesizing modes from the
+ * standard-timings bytes, based on EDID revision and declared features.
+ */
+static int standard_timing_level(struct edid *edid)
+{
+       if (edid->revision >= 2) {
+               if (edid->revision >= 4 && (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF))
+                       return LEVEL_CVT;
+               /* A secondary GTF curve implies GTF2 even on 1.2/1.3. */
+               if (drm_gtf2_hbreak(edid))
+                       return LEVEL_GTF2;
+               return LEVEL_GTF;
+       }
+       return LEVEL_DMT;
+}
+
+/*
+ * 0 is reserved.  The spec says 0x01 fill for unused timings.  Some old
+ * monitors fill with ascii space (0x20) instead.
+ * Returns nonzero when the (a, b) byte pair is such filler, i.e. not a
+ * real standard timing.
+ */
+static int
+bad_std_timing(u8 a, u8 b)
+{
+       return (a == 0x00 && b == 0x00) ||
+              (a == 0x01 && b == 0x01) ||
+              (a == 0x20 && b == 0x20);
+}
+
+/**
+ * drm_mode_std - convert standard mode info (width, height, refresh) into mode
+ * @connector: connector the mode will be probed against
+ * @edid: EDID block (for timing level and quirk context)
+ * @t: standard timing params
+ * @revision: EDID revision (affects 16:10 vs square aspect for code 0)
+ *
+ * Take the standard timing params (in this case width, aspect, and refresh)
+ * and convert them into a real mode using CVT/GTF/DMT.
+ * Returns a newly allocated mode or NULL (bad timing, duplicate, or
+ * allocation failure).
+ */
+static struct drm_display_mode *
+drm_mode_std(struct drm_connector *connector, struct edid *edid,
+            struct std_timing *t, int revision)
+{
+       struct drm_device *dev = connector->dev;
+       struct drm_display_mode *m, *mode = NULL;
+       int hsize, vsize;
+       int vrefresh_rate;
+       unsigned aspect_ratio = (t->vfreq_aspect & EDID_TIMING_ASPECT_MASK)
+               >> EDID_TIMING_ASPECT_SHIFT;
+       unsigned vfreq = (t->vfreq_aspect & EDID_TIMING_VFREQ_MASK)
+               >> EDID_TIMING_VFREQ_SHIFT;
+       int timing_level = standard_timing_level(edid);
+
+       if (bad_std_timing(t->hsize, t->vfreq_aspect))
+               return NULL;
+
+       /* According to the EDID spec, the hdisplay = hsize * 8 + 248 */
+       hsize = t->hsize * 8 + 248;
+       /* vrefresh_rate = vfreq + 60 */
+       vrefresh_rate = vfreq + 60;
+       /* the vdisplay is calculated based on the aspect ratio */
+       if (aspect_ratio == 0) {
+               /* EDID 1.3+ redefined code 0 from 1:1 to 16:10 */
+               if (revision < 3)
+                       vsize = hsize;
+               else
+                       vsize = (hsize * 10) / 16;
+       } else if (aspect_ratio == 1)
+               vsize = (hsize * 3) / 4;
+       else if (aspect_ratio == 2)
+               vsize = (hsize * 4) / 5;
+       else
+               vsize = (hsize * 9) / 16;
+
+       /* HDTV hack, part 1 */
+       if (vrefresh_rate == 60 &&
+           ((hsize == 1360 && vsize == 765) ||
+            (hsize == 1368 && vsize == 769))) {
+               hsize = 1366;
+               vsize = 768;
+       }
+
+       /*
+        * If this connector already has a mode for this size and refresh
+        * rate (because it came from detailed or CVT info), use that
+        * instead.  This way we don't have to guess at interlace or
+        * reduced blanking.
+        */
+       list_for_each_entry(m, &connector->probed_modes, head)
+               if (m->hdisplay == hsize && m->vdisplay == vsize &&
+                   drm_mode_vrefresh(m) == vrefresh_rate)
+                       return NULL;
+
+       /* HDTV hack, part 2 */
+       if (hsize == 1366 && vsize == 768 && vrefresh_rate == 60) {
+               mode = drm_cvt_mode(dev, 1366, 768, vrefresh_rate, 0, 0,
+                                   false);
+               /* drm_cvt_mode() allocates and can fail; the old code
+                * dereferenced a NULL mode here on allocation failure. */
+               if (!mode)
+                       return NULL;
+               mode->hdisplay = 1366;
+               mode->hsync_start = mode->hsync_start - 1;
+               mode->hsync_end = mode->hsync_end - 1;
+               return mode;
+       }
+
+       /* check whether it can be found in default mode table */
+       mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate);
+       if (mode)
+               return mode;
+
+       switch (timing_level) {
+       case LEVEL_DMT:
+               break;
+       case LEVEL_GTF:
+               mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
+               break;
+       case LEVEL_GTF2:
+               /*
+                * This is potentially wrong if there's ever a monitor with
+                * more than one ranges section, each claiming a different
+                * secondary GTF curve.  Please don't do that.
+                */
+               mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
+               if (drm_mode_hsync(mode) > drm_gtf2_hbreak(edid)) {
+                       kfree(mode);
+                       mode = drm_gtf_mode_complex(dev, hsize, vsize,
+                                                   vrefresh_rate, 0, 0,
+                                                   drm_gtf2_m(edid),
+                                                   drm_gtf2_2c(edid),
+                                                   drm_gtf2_k(edid),
+                                                   drm_gtf2_2j(edid));
+               }
+               break;
+       case LEVEL_CVT:
+               mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0,
+                                   false);
+               break;
+       }
+       return mode;
+}
+
+/*
+ * EDID is delightfully ambiguous about how interlaced modes are to be
+ * encoded.  Our internal representation is of frame height, but some
+ * HDTV detailed timings are encoded as field height.
+ *
+ * The format list here is from CEA, in frame size.  Technically we
+ * should be checking refresh rate too.  Whatever.
+ */
+static void
+drm_mode_do_interlace_quirk(struct drm_display_mode *mode,
+                           struct detailed_pixel_timing *pt)
+{
+       int i;
+       static const struct {
+               int w, h;
+       } cea_interlaced[] = {
+               { 1920, 1080 },
+               {  720,  480 },
+               { 1440,  480 },
+               { 2880,  480 },
+               {  720,  576 },
+               { 1440,  576 },
+               { 2880,  576 },
+       };
+
+       if (!(pt->misc & DRM_EDID_PT_INTERLACED))
+               return;
+
+       /* Field-height entries get doubled back to frame height; the
+        * |= 1 keeps vtotal odd as interlaced timings require. */
+       for (i = 0; i < ARRAY_SIZE(cea_interlaced); i++) {
+               if ((mode->hdisplay == cea_interlaced[i].w) &&
+                   (mode->vdisplay == cea_interlaced[i].h / 2)) {
+                       mode->vdisplay *= 2;
+                       mode->vsync_start *= 2;
+                       mode->vsync_end *= 2;
+                       mode->vtotal *= 2;
+                       mode->vtotal |= 1;
+               }
+       }
+
+       mode->flags |= DRM_MODE_FLAG_INTERLACE;
+}
+
+/**
+ * drm_mode_detailed - create a new mode from an EDID detailed timing section
+ * @dev: DRM device (needed to create new mode)
+ * @edid: EDID block
+ * @timing: EDID detailed timing info
+ * @quirks: quirks to apply
+ *
+ * An EDID detailed timing block contains enough info for us to create and
+ * return a new struct drm_display_mode.  Returns NULL for tiny, stereo,
+ * or zero-sync-width timings, or on allocation failure.
+ */
+static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
+                                                 struct edid *edid,
+                                                 struct detailed_timing *timing,
+                                                 u32 quirks)
+{
+       struct drm_display_mode *mode;
+       struct detailed_pixel_timing *pt = &timing->data.pixel_data;
+       /* Each field is split into a low byte plus high nibble/bits packed
+        * into shared "hi" bytes; reassemble them here. */
+       unsigned hactive = (pt->hactive_hblank_hi & 0xf0) << 4 | pt->hactive_lo;
+       unsigned vactive = (pt->vactive_vblank_hi & 0xf0) << 4 | pt->vactive_lo;
+       unsigned hblank = (pt->hactive_hblank_hi & 0xf) << 8 | pt->hblank_lo;
+       unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo;
+       unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) << 2 | pt->hsync_offset_lo;
+       unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) << 4 | pt->hsync_pulse_width_lo;
+       unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) >> 2 | pt->vsync_offset_pulse_width_lo >> 4;
+       unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 4 | (pt->vsync_offset_pulse_width_lo & 0xf);
+
+       /* ignore tiny modes */
+       if (hactive < 64 || vactive < 64)
+               return NULL;
+
+       if (pt->misc & DRM_EDID_PT_STEREO) {
+               printk(KERN_WARNING "stereo mode not supported\n");
+               return NULL;
+       }
+       if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) {
+               /* warn but keep going; separate sync is assumed below */
+               printk(KERN_WARNING "composite sync not supported\n");
+       }
+
+       /* it is incorrect if hsync/vsync width is zero */
+       if (!hsync_pulse_width || !vsync_pulse_width) {
+               DRM_DEBUG_KMS("Incorrect Detailed timing. "
+                               "Wrong Hsync/Vsync pulse width\n");
+               return NULL;
+       }
+       mode = drm_mode_create(dev);
+       if (!mode)
+               return NULL;
+
+       mode->type = DRM_MODE_TYPE_DRIVER;
+
+       /* NOTE: this quirk writes back into the (cached) EDID data */
+       if (quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH)
+               timing->pixel_clock = cpu_to_le16(1088);
+
+       /* pixel_clock is stored in units of 10 kHz; mode->clock is kHz */
+       mode->clock = le16_to_cpu(timing->pixel_clock) * 10;
+
+       mode->hdisplay = hactive;
+       mode->hsync_start = mode->hdisplay + hsync_offset;
+       mode->hsync_end = mode->hsync_start + hsync_pulse_width;
+       mode->htotal = mode->hdisplay + hblank;
+
+       mode->vdisplay = vactive;
+       mode->vsync_start = mode->vdisplay + vsync_offset;
+       mode->vsync_end = mode->vsync_start + vsync_pulse_width;
+       mode->vtotal = mode->vdisplay + vblank;
+
+       /* Some EDIDs have bogus h/vtotal values */
+       if (mode->hsync_end > mode->htotal)
+               mode->htotal = mode->hsync_end + 1;
+       if (mode->vsync_end > mode->vtotal)
+               mode->vtotal = mode->vsync_end + 1;
+
+       drm_mode_do_interlace_quirk(mode, pt);
+
+       drm_mode_set_name(mode);
+
+       if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
+               pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
+       }
+
+       mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ?
+               DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
+       mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
+               DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
+
+       mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4;
+       mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf) << 8;
+
+       if (quirks & EDID_QUIRK_DETAILED_IN_CM) {
+               mode->width_mm *= 10;
+               mode->height_mm *= 10;
+       }
+
+       if (quirks & EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE) {
+               mode->width_mm = edid->width_cm * 10;
+               mode->height_mm = edid->height_cm * 10;
+       }
+
+       return mode;
+}
+
+/*
+ * True if @mode looks like a CVT reduced-blanking timing: CVT-RB fixes
+ * hblank at 160, hsync offset at 80, hsync width at 32 and vsync
+ * offset (front porch) at 3.
+ */
+static bool
+mode_is_rb(struct drm_display_mode *mode)
+{
+       return (mode->htotal - mode->hdisplay == 160) &&
+              (mode->hsync_end - mode->hdisplay == 80) &&
+              (mode->hsync_end - mode->hsync_start == 32) &&
+              (mode->vsync_start - mode->vdisplay == 3);
+}
+
+/*
+ * Check @mode's hsync frequency against the monitor range descriptor
+ * @t.  EDID 1.4 adds offset flag bits (t[4]) that extend the 8-bit
+ * min/max fields by 255 kHz.
+ */
+static bool
+mode_in_hsync_range(struct drm_display_mode *mode, struct edid *edid, u8 *t)
+{
+       int hsync, hmin, hmax;
+
+       hmin = t[7];
+       if (edid->revision >= 4)
+           hmin += ((t[4] & 0x04) ? 255 : 0);
+       hmax = t[8];
+       if (edid->revision >= 4)
+           hmax += ((t[4] & 0x08) ? 255 : 0);
+       hsync = drm_mode_hsync(mode);
+
+       return (hsync <= hmax && hsync >= hmin);
+}
+
+/*
+ * Check @mode's vertical refresh against the monitor range descriptor
+ * @t, with the same EDID 1.4 +255 offset flags as the hsync check.
+ */
+static bool
+mode_in_vsync_range(struct drm_display_mode *mode, struct edid *edid, u8 *t)
+{
+       int vsync, vmin, vmax;
+
+       vmin = t[5];
+       if (edid->revision >= 4)
+           vmin += ((t[4] & 0x01) ? 255 : 0);
+       vmax = t[6];
+       if (edid->revision >= 4)
+           vmax += ((t[4] & 0x02) ? 255 : 0);
+       vsync = drm_mode_vrefresh(mode);
+
+       return (vsync <= vmax && vsync >= vmin);
+}
+
+/*
+ * Maximum pixel clock (in kHz) allowed by monitor range descriptor @t,
+ * or 0 when unspecified.  t[9] is in units of 10 MHz.
+ */
+static u32
+range_pixel_clock(struct edid *edid, u8 *t)
+{
+       /* unspecified */
+       if (t[9] == 0 || t[9] == 255)
+               return 0;
+
+       /* 1.4 with CVT support gives us real precision, yay */
+       if (edid->revision >= 4 && t[10] == 0x04)
+               return (t[9] * 10000) - ((t[12] >> 2) * 250);
+
+       /* 1.3 is pathetic, so fuzz up a bit */
+       return t[9] * 10000 + 5001;
+}
+
+/*
+ * True when @mode fits entirely inside the limits of the monitor range
+ * descriptor @timing: hsync, vsync, pixel clock, (1.4) max hactive, and
+ * reduced-blanking capability.
+ */
+static bool
+mode_in_range(struct drm_display_mode *mode, struct edid *edid,
+             struct detailed_timing *timing)
+{
+       u32 max_clock;
+       u8 *t = (u8 *)timing;
+
+       if (!mode_in_hsync_range(mode, edid, t))
+               return false;
+
+       if (!mode_in_vsync_range(mode, edid, t))
+               return false;
+
+       /* 0 from range_pixel_clock() means "no limit specified" */
+       if ((max_clock = range_pixel_clock(edid, t)))
+               if (mode->clock > max_clock)
+                       return false;
+
+       /* 1.4 max horizontal check */
+       if (edid->revision >= 4 && t[10] == 0x04)
+               if (t[13] && mode->hdisplay > 8 * (t[13] + (256 * (t[12]&0x3))))
+                       return false;
+
+       if (mode_is_rb(mode) && !drm_monitor_supports_rb(edid))
+               return false;
+
+       return true;
+}
+
+/*
+ * XXX If drm_dmt_modes ever regrows the CVT-R modes (and it will) this will
+ * need to account for them.
+ *
+ * Add every DMT mode that fits inside the monitor's range descriptor;
+ * returns the number of modes added.
+ */
+static int
+drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
+                       struct detailed_timing *timing)
+{
+       int i, modes = 0;
+       struct drm_display_mode *newmode;
+       struct drm_device *dev = connector->dev;
+
+       for (i = 0; i < drm_num_dmt_modes; i++) {
+               if (mode_in_range(drm_dmt_modes + i, edid, timing)) {
+                       newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
+                       if (newmode) {
+                               drm_mode_probed_add(connector, newmode);
+                               modes++;
+                       }
+               }
+       }
+
+       return modes;
+}
+
+/*
+ * detailed_cb: for a monitor-range descriptor on a GTF-capable display,
+ * infer additional modes from the DMT table and tally them in the
+ * closure.
+ */
+static void
+do_inferred_modes(struct detailed_timing *timing, void *c)
+{
+       struct detailed_mode_closure *closure = c;
+       struct detailed_non_pixel *data = &timing->data.other_data;
+       int gtf = (closure->edid->features & DRM_EDID_FEATURE_DEFAULT_GTF);
+
+       if (gtf && data->type == EDID_DETAIL_MONITOR_RANGE)
+               closure->modes += drm_gtf_modes_for_range(closure->connector,
+                                                         closure->edid,
+                                                         timing);
+}
+
+/*
+ * Infer modes from range descriptors (EDID 1.1+ only); returns the
+ * number of modes added to @connector's probed list.
+ */
+static int
+add_inferred_modes(struct drm_connector *connector, struct edid *edid)
+{
+       struct detailed_mode_closure closure = {
+               connector, edid, 0, 0, 0
+       };
+
+       if (version_greater(edid, 1, 0))
+               drm_for_each_detailed_block((u8 *)edid, do_inferred_modes,
+                                           &closure);
+
+       return closure.modes;
+}
+
+static int
+drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing)
+{
+       int i, j, m, modes = 0;
+       struct drm_display_mode *mode;
+       u8 *est = ((u8 *)timing) + 5;
+
+       for (i = 0; i < 6; i++) {
+               for (j = 7; j > 0; j--) {
+                       m = (i * 8) + (7 - j);
+                       if (m >= ARRAY_SIZE(est3_modes))
+                               break;
+                       if (est[i] & (1 << j)) {
+                               mode = drm_mode_find_dmt(connector->dev,
+                                                        est3_modes[m].w,
+                                                        est3_modes[m].h,
+                                                        est3_modes[m].r
+                                                        /*, est3_modes[m].rb */);
+                               if (mode) {
+                                       drm_mode_probed_add(connector, mode);
+                                       modes++;
+                               }
+                       }
+               }
+       }
+
+       return modes;
+}
+
+/*
+ * detailed_cb: tally modes from "established timings III" descriptors
+ * into the closure.
+ */
+static void
+do_established_modes(struct detailed_timing *timing, void *c)
+{
+       struct detailed_mode_closure *closure = c;
+       struct detailed_non_pixel *data = &timing->data.other_data;
+
+       if (data->type == EDID_DETAIL_EST_TIMINGS)
+               closure->modes += drm_est3_modes(closure->connector, timing);
+}
+
+/**
+ * add_established_modes - get est. modes from EDID and add them
+ * @connector: connector to add modes to
+ * @edid: EDID block to scan
+ *
+ * Each EDID block contains a bitmap of the supported "established modes" list
+ * (defined above).  Tease them out and add them to the global modes list.
+ * Returns the total number of modes added (classic bitmap + est3
+ * descriptors).
+ */
+static int
+add_established_modes(struct drm_connector *connector, struct edid *edid)
+{
+       struct drm_device *dev = connector->dev;
+       /* Fold the three established-timing bytes into one 17-bit mask
+        * indexing edid_est_modes[]. */
+       unsigned long est_bits = edid->established_timings.t1 |
+               (edid->established_timings.t2 << 8) |
+               ((edid->established_timings.mfg_rsvd & 0x80) << 9);
+       int i, modes = 0;
+       struct detailed_mode_closure closure = {
+               connector, edid, 0, 0, 0
+       };
+
+       for (i = 0; i <= EDID_EST_TIMINGS; i++) {
+               if (est_bits & (1<<i)) {
+                       struct drm_display_mode *newmode;
+                       newmode = drm_mode_duplicate(dev, &edid_est_modes[i]);
+                       if (newmode) {
+                               drm_mode_probed_add(connector, newmode);
+                               modes++;
+                       }
+               }
+       }
+
+       if (version_greater(edid, 1, 0))
+                   drm_for_each_detailed_block((u8 *)edid,
+                                               do_established_modes, &closure);
+
+       return modes + closure.modes;
+}
+
+/*
+ * detailed_cb: a descriptor can carry six extra standard-timing byte
+ * pairs; convert each to a mode and tally into the closure.
+ */
+static void
+do_standard_modes(struct detailed_timing *timing, void *c)
+{
+       struct detailed_mode_closure *closure = c;
+       struct detailed_non_pixel *data = &timing->data.other_data;
+       struct drm_connector *connector = closure->connector;
+       struct edid *edid = closure->edid;
+
+       if (data->type == EDID_DETAIL_STD_MODES) {
+               int i;
+               for (i = 0; i < 6; i++) {
+                       struct std_timing *std;
+                       struct drm_display_mode *newmode;
+
+                       std = &data->data.timings[i];
+                       newmode = drm_mode_std(connector, edid, std,
+                                              edid->revision);
+                       if (newmode) {
+                               drm_mode_probed_add(connector, newmode);
+                               closure->modes++;
+                       }
+               }
+       }
+}
+
+/**
+ * add_standard_modes - get std. modes from EDID and add them
+ * @connector: connector to add modes to
+ * @edid: EDID block to scan
+ *
+ * Standard modes can be calculated using the appropriate standard (DMT,
+ * GTF or CVT. Grab them from @edid and add them to the list.
+ * Returns the number of modes added (base block + descriptors).
+ */
+static int
+add_standard_modes(struct drm_connector *connector, struct edid *edid)
+{
+       int i, modes = 0;
+       struct detailed_mode_closure closure = {
+               connector, edid, 0, 0, 0
+       };
+
+       for (i = 0; i < EDID_STD_TIMINGS; i++) {
+               struct drm_display_mode *newmode;
+
+               newmode = drm_mode_std(connector, edid,
+                                      &edid->standard_timings[i],
+                                      edid->revision);
+               if (newmode) {
+                       drm_mode_probed_add(connector, newmode);
+                       modes++;
+               }
+       }
+
+       if (version_greater(edid, 1, 0))
+               drm_for_each_detailed_block((u8 *)edid, do_standard_modes,
+                                           &closure);
+
+       /* XXX should also look for standard codes in VTB blocks */
+
+       return modes + closure.modes;
+}
+
+/*
+ * Add modes from a CVT 3-byte-code descriptor (four codes per
+ * descriptor).  Each code encodes height, aspect ratio and a bitmap of
+ * supported refresh rates.  Returns the number of modes added.
+ */
+static int drm_cvt_modes(struct drm_connector *connector,
+                        struct detailed_timing *timing)
+{
+       int i, j, modes = 0;
+       struct drm_display_mode *newmode;
+       struct drm_device *dev = connector->dev;
+       struct cvt_timing *cvt;
+       const int rates[] = { 60, 85, 75, 60, 50 };
+       const u8 empty[3] = { 0, 0, 0 };
+
+       for (i = 0; i < 4; i++) {
+               int uninitialized_var(width), height;
+               cvt = &(timing->data.other_data.data.cvt[i]);
+
+               /* all-zero code means unused slot */
+               if (!memcmp(cvt->code, empty, 3))
+                       continue;
+
+               /* stored as (lines/2 - 1) with high bits in code[1] */
+               height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 4) + 1) * 2;
+               switch (cvt->code[1] & 0x0c) {
+               case 0x00:
+                       width = height * 4 / 3;
+                       break;
+               case 0x04:
+                       width = height * 16 / 9;
+                       break;
+               case 0x08:
+                       width = height * 16 / 10;
+                       break;
+               case 0x0c:
+                       width = height * 15 / 9;
+                       break;
+               }
+
+               /* NOTE(review): j starts at 1, so the 'j == 0' reduced-
+                * blanking argument is never true and rate bit 0 (RB
+                * 60Hz) is never examined — looks intentional for non-RB
+                * rates only, but confirm against the CVT descriptor
+                * definition. */
+               for (j = 1; j < 5; j++) {
+                       if (cvt->code[2] & (1 << j)) {
+                               newmode = drm_cvt_mode(dev, width, height,
+                                                      rates[j], j == 0,
+                                                      false, false);
+                               if (newmode) {
+                                       drm_mode_probed_add(connector, newmode);
+                                       modes++;
+                               }
+                       }
+               }
+       }
+
+       return modes;
+}
+
+/*
+ * detailed_cb: tally modes from CVT 3-byte-code descriptors into the
+ * closure.
+ */
+static void
+do_cvt_mode(struct detailed_timing *timing, void *c)
+{
+       struct detailed_mode_closure *closure = c;
+       struct detailed_non_pixel *data = &timing->data.other_data;
+
+       if (data->type == EDID_DETAIL_CVT_3BYTE)
+               closure->modes += drm_cvt_modes(closure->connector, timing);
+}
+
+/*
+ * Add CVT modes from descriptors (EDID 1.3+ only, since the CVT 3-byte
+ * descriptor was introduced then); returns the number of modes added.
+ */
+static int
+add_cvt_modes(struct drm_connector *connector, struct edid *edid)
+{
+       struct detailed_mode_closure closure = {
+               connector, edid, 0, 0, 0
+       };
+
+       if (version_greater(edid, 1, 2))
+               drm_for_each_detailed_block((u8 *)edid, do_cvt_mode, &closure);
+
+       /* XXX should also look for CVT codes in VTB blocks */
+
+       return closure.modes;
+}
+
+/*
+ * detailed_cb: a descriptor with a nonzero pixel clock is a real
+ * detailed timing; convert it to a mode.  The first such mode gets the
+ * PREFERRED flag if the closure asked for it, then the flag is cleared
+ * so only one mode is marked.
+ */
+static void
+do_detailed_mode(struct detailed_timing *timing, void *c)
+{
+       struct detailed_mode_closure *closure = c;
+       struct drm_display_mode *newmode;
+
+       if (timing->pixel_clock) {
+               newmode = drm_mode_detailed(closure->connector->dev,
+                                           closure->edid, timing,
+                                           closure->quirks);
+               if (!newmode)
+                       return;
+
+               if (closure->preferred)
+                       newmode->type |= DRM_MODE_TYPE_PREFERRED;
+
+               drm_mode_probed_add(closure->connector, newmode);
+               closure->modes++;
+               closure->preferred = 0;
+       }
+}
+
+/*
+ * add_detailed_modes - Add modes from detailed timings
+ * @connector: attached connector
+ * @edid: EDID block to scan
+ * @quirks: quirks to apply
+ *
+ * Returns the number of modes added.  For EDID <= 1.3 the first
+ * detailed timing is preferred only if the feature bit says so; from
+ * 1.4 on it is always preferred.
+ */
+static int
+add_detailed_modes(struct drm_connector *connector, struct edid *edid,
+                  u32 quirks)
+{
+       struct detailed_mode_closure closure = {
+               connector,
+               edid,
+               1,
+               quirks,
+               0
+       };
+
+       if (closure.preferred && !version_greater(edid, 1, 3))
+               closure.preferred =
+                   (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);
+
+       drm_for_each_detailed_block((u8 *)edid, do_detailed_mode, &closure);
+
+       return closure.modes;
+}
+
+#define HDMI_IDENTIFIER 0x000C03       /* HDMI vendor IEEE OUI, little-endian */
+#define AUDIO_BLOCK    0x01            /* CEA-861 data block tag: audio */
+#define VENDOR_BLOCK    0x03           /* CEA-861 data block tag: vendor specific */
+#define EDID_BASIC_AUDIO       (1 << 6)        /* CEA ext. byte 3: basic audio flag */
+
+/**
+ * Search EDID for CEA extension block.
+ */
+static u8 *drm_find_cea_extension(struct edid *edid)
+{
+       u8 *edid_ext = NULL;
+       int i;
+
+       /* No EDID or EDID extensions */
+       if (edid == NULL || edid->extensions == 0)
+               return NULL;
+
+       /* Find CEA extension */
+       for (i = 0; i < edid->extensions; i++) {
+               edid_ext = (u8 *)edid + EDID_LENGTH * (i + 1);
+               if (edid_ext[0] == CEA_EXT)
+                       break;
+       }
+
+       if (i == edid->extensions)
+               return NULL;
+
+       return edid_ext;
+}
+
+/**
+ * drm_detect_hdmi_monitor - detect whether monitor is hdmi.
+ * @edid: monitor EDID information
+ *
+ * Parse the CEA extension according to CEA-861-B.
+ * Return true if HDMI, false if not or unknown.
+ */
+bool drm_detect_hdmi_monitor(struct edid *edid)
+{
+       u8 *edid_ext;
+       int i, hdmi_id;
+       int start_offset, end_offset;
+       bool is_hdmi = false;
+
+       edid_ext = drm_find_cea_extension(edid);
+       if (!edid_ext)
+               goto end;
+
+       /* Data block offset in CEA extension block */
+       start_offset = 4;
+       end_offset = edid_ext[2];
+
+       /*
+        * Because HDMI identifier is in Vendor Specific Block,
+        * search it from all data blocks of CEA extension.
+        */
+       for (i = start_offset; i < end_offset;
+               /* Increased by data block len */
+               i += ((edid_ext[i] & 0x1f) + 1)) {
+               /* Find vendor specific block */
+               if ((edid_ext[i] >> 5) == VENDOR_BLOCK) {
+                       hdmi_id = edid_ext[i + 1] | (edid_ext[i + 2] << 8) |
+                                 edid_ext[i + 3] << 16;
+                       /* Find HDMI identifier */
+                       if (hdmi_id == HDMI_IDENTIFIER)
+                               is_hdmi = true;
+                       break;
+               }
+       }
+
+end:
+       return is_hdmi;
+}
+EXPORT_SYMBOL(drm_detect_hdmi_monitor);
+
+/**
+ * drm_detect_monitor_audio - check monitor audio capability
+ *
+ * Monitor should have CEA extension block.
+ * If monitor has 'basic audio', but no CEA audio blocks, it's 'basic
+ * audio' only. If there is any audio extension block and supported
+ * audio format, assume at least 'basic audio' support, even if 'basic
+ * audio' is not defined in EDID.
+ *
+ */
+bool drm_detect_monitor_audio(struct edid *edid)
+{
+       u8 *edid_ext;
+       int i, j;
+       bool has_audio = false;
+       int start_offset, end_offset;
+
+       edid_ext = drm_find_cea_extension(edid);
+       if (!edid_ext)
+               goto end;
+
+       has_audio = ((edid_ext[3] & EDID_BASIC_AUDIO) != 0);
+
+       if (has_audio) {
+               DRM_DEBUG_KMS("Monitor has basic audio support\n");
+               goto end;
+       }
+
+       /* Data block offset in CEA extension block */
+       start_offset = 4;
+       end_offset = edid_ext[2];
+
+       for (i = start_offset; i < end_offset;
+                       i += ((edid_ext[i] & 0x1f) + 1)) {
+               if ((edid_ext[i] >> 5) == AUDIO_BLOCK) {
+                       has_audio = true;
+                       for (j = 1; j < (edid_ext[i] & 0x1f); j += 3)
+                               DRM_DEBUG_KMS("CEA audio format %d\n",
+                                             (edid_ext[i + j] >> 3) & 0xf);
+                       goto end;
+               }
+       }
+end:
+       return has_audio;
+}
+EXPORT_SYMBOL(drm_detect_monitor_audio);
+
+/**
+ * drm_add_edid_modes - add modes from EDID data, if available
+ * @connector: connector we're probing
+ * @edid: edid data
+ *
+ * Add the specified modes to the connector's mode list.
+ *
+ * Return number of modes added or 0 if we couldn't find any.
+ */
+int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
+{
+       int num_modes = 0;
+       u32 quirks;
+
+       if (edid == NULL) {
+               return 0;
+       }
+       if (!drm_edid_is_valid(edid)) {
+               dev_warn(connector->dev->dev, "%s: EDID invalid.\n",
+                        drm_get_connector_name(connector));
+               return 0;
+       }
+
+       quirks = edid_get_quirks(edid);
+
+       /*
+        * EDID spec says modes should be preferred in this order:
+        * - preferred detailed mode
+        * - other detailed modes from base block
+        * - detailed modes from extension blocks
+        * - CVT 3-byte code modes
+        * - standard timing codes
+        * - established timing codes
+        * - modes inferred from GTF or CVT range information
+        *
+        * We get this pretty much right.
+        *
+        * XXX order for additional mode types in extension blocks?
+        */
+       num_modes += add_detailed_modes(connector, edid, quirks);
+       num_modes += add_cvt_modes(connector, edid);
+       num_modes += add_standard_modes(connector, edid);
+       num_modes += add_established_modes(connector, edid);
+       num_modes += add_inferred_modes(connector, edid);
+
+       if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
+               edid_fixup_preferred(connector, quirks);
+
+       connector->display_info.width_mm = edid->width_cm * 10;
+       connector->display_info.height_mm = edid->height_cm * 10;
+
+       return num_modes;
+}
+EXPORT_SYMBOL(drm_add_edid_modes);
+
+/**
+ * drm_add_modes_noedid - add modes for the connectors without EDID
+ * @connector: connector we're probing
+ * @hdisplay: the horizontal display limit
+ * @vdisplay: the vertical display limit
+ *
+ * Add the specified modes to the connector's mode list. A mode is only
+ * added when its hdisplay/vdisplay do not exceed the given limits.
+ *
+ * Return number of modes added or 0 if we couldn't find any.
+ */
+int drm_add_modes_noedid(struct drm_connector *connector,
+                       int hdisplay, int vdisplay)
+{
+       int i, count, num_modes = 0;
+       struct drm_display_mode *mode, *ptr;
+       struct drm_device *dev = connector->dev;
+
+       count = sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
+       if (hdisplay < 0)
+               hdisplay = 0;
+       if (vdisplay < 0)
+               vdisplay = 0;
+
+       for (i = 0; i < count; i++) {
+               ptr = &drm_dmt_modes[i];
+               if (hdisplay && vdisplay) {
+                       /*
+                        * Only when two are valid, they will be used to check
+                        * whether the mode should be added to the mode list of
+                        * the connector.
+                        */
+                       if (ptr->hdisplay > hdisplay ||
+                                       ptr->vdisplay > vdisplay)
+                               continue;
+               }
+               if (drm_mode_vrefresh(ptr) > 61)
+                       continue;
+               mode = drm_mode_duplicate(dev, ptr);
+               if (mode) {
+                       drm_mode_probed_add(connector, mode);
+                       num_modes++;
+               }
+       }
+       return num_modes;
+}
+EXPORT_SYMBOL(drm_add_modes_noedid);
diff --git a/services4/3rdparty/linux_drm/drm_encoder_slave.c b/services4/3rdparty/linux_drm/drm_encoder_slave.c
new file mode 100644 (file)
index 0000000..d62c064
--- /dev/null
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2009 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drm_encoder_slave.h"
+
+/**
+ * drm_i2c_encoder_init - Initialize an I2C slave encoder
+ * @dev:       DRM device.
+ * @encoder:    Encoder to be attached to the I2C device. You aren't
+ *             required to have called drm_encoder_init() before.
+ * @adap:      I2C adapter that will be used to communicate with
+ *             the device.
+ * @info:      Information that will be used to create the I2C device.
+ *             Required fields are @addr and @type.
+ *
+ * Create an I2C device on the specified bus (the module containing its
+ * driver is transparently loaded) and attach it to the specified
+ * &drm_encoder_slave. The @slave_funcs field will be initialized with
+ * the hooks provided by the slave driver.
+ *
+ * If @info->platform_data is non-NULL it will be used as the initial
+ * slave config.
+ *
+ * Returns 0 on success or a negative errno on failure, in particular,
+ * -ENODEV is returned when no matching driver is found.
+ */
+int drm_i2c_encoder_init(struct drm_device *dev,
+                        struct drm_encoder_slave *encoder,
+                        struct i2c_adapter *adap,
+                        const struct i2c_board_info *info)
+{
+       char modalias[sizeof(I2C_MODULE_PREFIX)
+                     + I2C_NAME_SIZE];
+       struct module *module = NULL;
+       struct i2c_client *client;
+       struct drm_i2c_encoder_driver *encoder_drv;
+       int err = 0;
+
+       snprintf(modalias, sizeof(modalias),
+                "%s%s", I2C_MODULE_PREFIX, info->type);
+       request_module(modalias);
+
+       client = i2c_new_device(adap, info);
+       if (!client) {
+               err = -ENOMEM;
+               goto fail;
+       }
+
+       if (!client->driver) {
+               err = -ENODEV;
+               goto fail_unregister;
+       }
+
+       module = client->driver->driver.owner;
+       if (!try_module_get(module)) {
+               err = -ENODEV;
+               goto fail_unregister;
+       }
+
+       encoder->bus_priv = client;
+
+       encoder_drv = to_drm_i2c_encoder_driver(client->driver);
+
+       err = encoder_drv->encoder_init(client, dev, encoder);
+       if (err)
+               goto fail_unregister;
+
+       if (info->platform_data)
+               encoder->slave_funcs->set_config(&encoder->base,
+                                                info->platform_data);
+
+       return 0;
+
+fail_unregister:
+       i2c_unregister_device(client);
+       module_put(module);
+fail:
+       return err;
+}
+EXPORT_SYMBOL(drm_i2c_encoder_init);
+
+/**
+ * drm_i2c_encoder_destroy - Unregister the I2C device backing an encoder
+ * @drm_encoder:       Encoder to be unregistered.
+ *
+ * This should be called from the @destroy method of an I2C slave
+ * encoder driver once I2C access is no longer needed.
+ */
+void drm_i2c_encoder_destroy(struct drm_encoder *drm_encoder)
+{
+       struct drm_encoder_slave *encoder = to_encoder_slave(drm_encoder);
+       struct i2c_client *client = drm_i2c_encoder_get_client(drm_encoder);
+       struct module *module = client->driver->driver.owner;
+
+       i2c_unregister_device(client);
+       encoder->bus_priv = NULL;
+
+       module_put(module);
+}
+EXPORT_SYMBOL(drm_i2c_encoder_destroy);
diff --git a/services4/3rdparty/linux_drm/drm_fb_helper.c b/services4/3rdparty/linux_drm/drm_fb_helper.c
new file mode 100644 (file)
index 0000000..d2849e4
--- /dev/null
@@ -0,0 +1,1532 @@
+/*
+ * Copyright (c) 2006-2009 Red Hat Inc.
+ * Copyright (c) 2006-2008 Intel Corporation
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ *
+ * DRM framebuffer helper functions
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ *
+ * Authors:
+ *      Dave Airlie <airlied@linux.ie>
+ *      Jesse Barnes <jesse.barnes@intel.com>
+ */
+#include <linux/kernel.h>
+#include <linux/sysrq.h>
+#include <linux/slab.h>
+#include <linux/fb.h>
+#include "drmP.h"
+#include "drm_crtc.h"
+#include "drm_fb_helper.h"
+#include "drm_crtc_helper.h"
+
+MODULE_AUTHOR("David Airlie, Jesse Barnes");
+MODULE_DESCRIPTION("DRM KMS helper");
+MODULE_LICENSE("GPL and additional rights");
+
+static LIST_HEAD(kernel_fb_helper_list);
+
+/* simple single crtc case helper function */
+int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
+{
+       struct drm_device *dev = fb_helper->dev;
+       struct drm_connector *connector;
+       int i;
+
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               struct drm_fb_helper_connector *fb_helper_connector;
+
+               fb_helper_connector = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL);
+               if (!fb_helper_connector)
+                       goto fail;
+
+               fb_helper_connector->connector = connector;
+               fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector;
+       }
+       return 0;
+fail:
+       for (i = 0; i < fb_helper->connector_count; i++) {
+               kfree(fb_helper->connector_info[i]);
+               fb_helper->connector_info[i] = NULL;
+       }
+       fb_helper->connector_count = 0;
+       return -ENOMEM;
+}
+EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors);
+
+/**
+ * drm_fb_helper_connector_parse_command_line - parse command line for connector
+ * @fb_helper_conn: fb helper connector to parse the line for
+ * @mode_option: per connector mode option
+ *
+ * This parses the connector specific then generic command lines for
+ * modes and options to configure the connector.
+ *
+ * This uses the same parameters as the fb modedb.c, except for extra
+ *     <xres>x<yres>[M][R][-<bpp>][@<refresh>][i][m][eDd]
+ *
+ * plus an optional trailing flag: 'e' (force enable), 'D' (force enable digital) or 'd' (force disable)
+ */
+static bool drm_fb_helper_connector_parse_command_line(struct drm_fb_helper_connector *fb_helper_conn,
+                                                      const char *mode_option)
+{
+       const char *name;
+       unsigned int namelen;
+       int res_specified = 0, bpp_specified = 0, refresh_specified = 0;
+       unsigned int xres = 0, yres = 0, bpp = 32, refresh = 0;
+       int yres_specified = 0, cvt = 0, rb = 0, interlace = 0, margins = 0;
+       int i;
+       enum drm_connector_force force = DRM_FORCE_UNSPECIFIED;
+       struct drm_fb_helper_cmdline_mode *cmdline_mode;
+       struct drm_connector *connector;
+
+       if (!fb_helper_conn)
+               return false;
+       connector = fb_helper_conn->connector;
+
+       cmdline_mode = &fb_helper_conn->cmdline_mode;
+       if (!mode_option)
+               mode_option = fb_mode_option;
+
+       if (!mode_option) {
+               cmdline_mode->specified = false;
+               return false;
+       }
+
+       name = mode_option;
+       namelen = strlen(name);
+       for (i = namelen-1; i >= 0; i--) {
+               switch (name[i]) {
+               case '@':
+                       namelen = i;
+                       if (!refresh_specified && !bpp_specified &&
+                           !yres_specified) {
+                               refresh = simple_strtol(&name[i+1], NULL, 10);
+                               refresh_specified = 1;
+                               if (cvt || rb)
+                                       cvt = 0;
+                       } else
+                               goto done;
+                       break;
+               case '-':
+                       namelen = i;
+                       if (!bpp_specified && !yres_specified) {
+                               bpp = simple_strtol(&name[i+1], NULL, 10);
+                               bpp_specified = 1;
+                               if (cvt || rb)
+                                       cvt = 0;
+                       } else
+                               goto done;
+                       break;
+               case 'x':
+                       if (!yres_specified) {
+                               yres = simple_strtol(&name[i+1], NULL, 10);
+                               yres_specified = 1;
+                       } else
+                               goto done;
+               case '0' ... '9':
+                       break;
+               case 'M':
+                       if (!yres_specified)
+                               cvt = 1;
+                       break;
+               case 'R':
+                       if (cvt)
+                               rb = 1;
+                       break;
+               case 'm':
+                       if (!cvt)
+                               margins = 1;
+                       break;
+               case 'i':
+                       if (!cvt)
+                               interlace = 1;
+                       break;
+               case 'e':
+                       force = DRM_FORCE_ON;
+                       break;
+               case 'D':
+                       if ((connector->connector_type != DRM_MODE_CONNECTOR_DVII) &&
+                           (connector->connector_type != DRM_MODE_CONNECTOR_HDMIB))
+                               force = DRM_FORCE_ON;
+                       else
+                               force = DRM_FORCE_ON_DIGITAL;
+                       break;
+               case 'd':
+                       force = DRM_FORCE_OFF;
+                       break;
+               default:
+                       goto done;
+               }
+       }
+       if (i < 0 && yres_specified) {
+               xres = simple_strtol(name, NULL, 10);
+               res_specified = 1;
+       }
+done:
+
+       DRM_DEBUG_KMS("cmdline mode for connector %s %dx%d@%dHz%s%s%s\n",
+               drm_get_connector_name(connector), xres, yres,
+               (refresh) ? refresh : 60, (rb) ? " reduced blanking" :
+               "", (margins) ? " with margins" : "", (interlace) ?
+               " interlaced" : "");
+
+       if (force) {
+               const char *s;
+               switch (force) {
+               case DRM_FORCE_OFF: s = "OFF"; break;
+               case DRM_FORCE_ON_DIGITAL: s = "ON - dig"; break;
+               default:
+               case DRM_FORCE_ON: s = "ON"; break;
+               }
+
+               DRM_INFO("forcing %s connector %s\n",
+                        drm_get_connector_name(connector), s);
+               connector->force = force;
+       }
+
+       if (res_specified) {
+               cmdline_mode->specified = true;
+               cmdline_mode->xres = xres;
+               cmdline_mode->yres = yres;
+       }
+
+       if (refresh_specified) {
+               cmdline_mode->refresh_specified = true;
+               cmdline_mode->refresh = refresh;
+       }
+
+       if (bpp_specified) {
+               cmdline_mode->bpp_specified = true;
+               cmdline_mode->bpp = bpp;
+       }
+       cmdline_mode->rb = rb ? true : false;
+       cmdline_mode->cvt = cvt  ? true : false;
+       cmdline_mode->interlace = interlace ? true : false;
+
+       return true;
+}
+
+static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper)
+{
+       struct drm_fb_helper_connector *fb_helper_conn;
+       int i;
+
+       for (i = 0; i < fb_helper->connector_count; i++) {
+               char *option = NULL;
+
+               fb_helper_conn = fb_helper->connector_info[i];
+
+               /* do something on return - turn off connector maybe */
+               if (fb_get_options(drm_get_connector_name(fb_helper_conn->connector), &option))
+                       continue;
+
+               drm_fb_helper_connector_parse_command_line(fb_helper_conn, option);
+       }
+       return 0;
+}
+
+static void drm_fb_helper_save_lut_atomic(struct drm_crtc *crtc, struct drm_fb_helper *helper)
+{
+       uint16_t *r_base, *g_base, *b_base;
+       int i;
+
+       r_base = crtc->gamma_store;
+       g_base = r_base + crtc->gamma_size;
+       b_base = g_base + crtc->gamma_size;
+
+       for (i = 0; i < crtc->gamma_size; i++)
+               helper->funcs->gamma_get(crtc, &r_base[i], &g_base[i], &b_base[i], i);
+}
+
+static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc)
+{
+       uint16_t *r_base, *g_base, *b_base;
+
+       r_base = crtc->gamma_store;
+       g_base = r_base + crtc->gamma_size;
+       b_base = g_base + crtc->gamma_size;
+
+       crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size);
+}
+
+int drm_fb_helper_debug_enter(struct fb_info *info)
+{
+       struct drm_fb_helper *helper = info->par;
+       struct drm_crtc_helper_funcs *funcs;
+       int i;
+
+       if (list_empty(&kernel_fb_helper_list))
+               return false;
+
+       list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) {
+               for (i = 0; i < helper->crtc_count; i++) {
+                       struct drm_mode_set *mode_set =
+                               &helper->crtc_info[i].mode_set;
+
+                       if (!mode_set->crtc->enabled)
+                               continue;
+
+                       funcs = mode_set->crtc->helper_private;
+                       drm_fb_helper_save_lut_atomic(mode_set->crtc, helper);
+                       funcs->mode_set_base_atomic(mode_set->crtc,
+                                                   mode_set->fb,
+                                                   mode_set->x,
+                                                   mode_set->y,
+                                                   ENTER_ATOMIC_MODE_SET);
+               }
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_fb_helper_debug_enter);
+
+/* Find the real fb for a given fb helper CRTC */
+static struct drm_framebuffer *drm_mode_config_fb(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_crtc *c;
+
+       list_for_each_entry(c, &dev->mode_config.crtc_list, head) {
+               if (crtc->base.id == c->base.id)
+                       return c->fb;
+       }
+
+       return NULL;
+}
+
+int drm_fb_helper_debug_leave(struct fb_info *info)
+{
+       struct drm_fb_helper *helper = info->par;
+       struct drm_crtc *crtc;
+       struct drm_crtc_helper_funcs *funcs;
+       struct drm_framebuffer *fb;
+       int i;
+
+       for (i = 0; i < helper->crtc_count; i++) {
+               struct drm_mode_set *mode_set = &helper->crtc_info[i].mode_set;
+               crtc = mode_set->crtc;
+               funcs = crtc->helper_private;
+               fb = drm_mode_config_fb(crtc);
+
+               if (!crtc->enabled)
+                       continue;
+
+               if (!fb) {
+                       DRM_ERROR("no fb to restore??\n");
+                       continue;
+               }
+
+               drm_fb_helper_restore_lut_atomic(mode_set->crtc);
+               funcs->mode_set_base_atomic(mode_set->crtc, fb, crtc->x,
+                                           crtc->y, LEAVE_ATOMIC_MODE_SET);
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_fb_helper_debug_leave);
+
+bool drm_fb_helper_force_kernel_mode(void)
+{
+       int i = 0;
+       bool ret, error = false;
+       struct drm_fb_helper *helper;
+
+       if (list_empty(&kernel_fb_helper_list))
+               return false;
+
+       list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) {
+               for (i = 0; i < helper->crtc_count; i++) {
+                       struct drm_mode_set *mode_set = &helper->crtc_info[i].mode_set;
+                       ret = drm_crtc_helper_set_config(mode_set);
+                       if (ret)
+                               error = true;
+               }
+       }
+       return error;
+}
+
+int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed,
+                       void *panic_str)
+{
+       printk(KERN_ERR "panic occurred, switching back to text console\n");
+       return drm_fb_helper_force_kernel_mode();
+       return 0;
+}
+EXPORT_SYMBOL(drm_fb_helper_panic);
+
+static struct notifier_block paniced = {
+       .notifier_call = drm_fb_helper_panic,
+};
+
+/**
+ * drm_fb_helper_restore - restore the framebuffer console (kernel) config
+ *
+ * Restores the kernel's fbcon mode; used for lastclose & panic paths.
+ */
+void drm_fb_helper_restore(void)
+{
+       bool ret;
+       ret = drm_fb_helper_force_kernel_mode();
+       if (ret == true)
+               DRM_ERROR("Failed to restore crtc configuration\n");
+}
+EXPORT_SYMBOL(drm_fb_helper_restore);
+
+#ifdef CONFIG_MAGIC_SYSRQ
+static void drm_fb_helper_restore_work_fn(struct work_struct *ignored)
+{
+       drm_fb_helper_restore();
+}
+static DECLARE_WORK(drm_fb_helper_restore_work, drm_fb_helper_restore_work_fn);
+
+static void drm_fb_helper_sysrq(int dummy1)
+{
+       schedule_work(&drm_fb_helper_restore_work);
+}
+
+static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = {
+       .handler = drm_fb_helper_sysrq,
+       .help_msg = "force-fb(V)",
+       .action_msg = "Restore framebuffer console",
+};
+#else
+static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { };
+#endif
+
+static void drm_fb_helper_on(struct fb_info *info)
+{
+       struct drm_fb_helper *fb_helper = info->par;
+       struct drm_device *dev = fb_helper->dev;
+       struct drm_crtc *crtc;
+       struct drm_crtc_helper_funcs *crtc_funcs;
+       struct drm_connector *connector;
+       struct drm_encoder *encoder;
+       int i, j;
+
+       /*
+        * For each CRTC in this fb, turn the crtc on then,
+        * find all associated encoders and turn them on.
+        */
+       mutex_lock(&dev->mode_config.mutex);
+       for (i = 0; i < fb_helper->crtc_count; i++) {
+               crtc = fb_helper->crtc_info[i].mode_set.crtc;
+               crtc_funcs = crtc->helper_private;
+
+               if (!crtc->enabled)
+                       continue;
+
+               crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+
+               /* Walk the connectors & encoders on this fb turning them on */
+               for (j = 0; j < fb_helper->connector_count; j++) {
+                       connector = fb_helper->connector_info[j]->connector;
+                       connector->dpms = DRM_MODE_DPMS_ON;
+                       drm_connector_property_set_value(connector,
+                                                        dev->mode_config.dpms_property,
+                                                        DRM_MODE_DPMS_ON);
+               }
+               /* Found a CRTC on this fb, now find encoders */
+               list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+                       if (encoder->crtc == crtc) {
+                               struct drm_encoder_helper_funcs *encoder_funcs;
+
+                               encoder_funcs = encoder->helper_private;
+                               encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
+                       }
+               }
+       }
+       mutex_unlock(&dev->mode_config.mutex);
+}
+
+static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
+{
+       struct drm_fb_helper *fb_helper = info->par;
+       struct drm_device *dev = fb_helper->dev;
+       struct drm_crtc *crtc;
+       struct drm_crtc_helper_funcs *crtc_funcs;
+       struct drm_connector *connector;
+       struct drm_encoder *encoder;
+       int i, j;
+
+       /*
+        * For each CRTC in this fb, find all associated encoders
+        * and turn them off, then turn off the CRTC.
+        */
+       mutex_lock(&dev->mode_config.mutex);
+       for (i = 0; i < fb_helper->crtc_count; i++) {
+               crtc = fb_helper->crtc_info[i].mode_set.crtc;
+               crtc_funcs = crtc->helper_private;
+
+               if (!crtc->enabled)
+                       continue;
+
+               /* Walk the connectors on this fb and mark them off */
+               for (j = 0; j < fb_helper->connector_count; j++) {
+                       connector = fb_helper->connector_info[j]->connector;
+                       connector->dpms = dpms_mode;
+                       drm_connector_property_set_value(connector,
+                                                        dev->mode_config.dpms_property,
+                                                        dpms_mode);
+               }
+               /* Found a CRTC on this fb, now find encoders */
+               list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+                       if (encoder->crtc == crtc) {
+                               struct drm_encoder_helper_funcs *encoder_funcs;
+
+                               encoder_funcs = encoder->helper_private;
+                               encoder_funcs->dpms(encoder, dpms_mode);
+                       }
+               }
+               crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+       }
+       mutex_unlock(&dev->mode_config.mutex);
+}
+
+int drm_fb_helper_blank(int blank, struct fb_info *info)
+{
+       switch (blank) {
+       /* Display: On; HSync: On, VSync: On */
+       case FB_BLANK_UNBLANK:
+               drm_fb_helper_on(info);
+               break;
+       /* Display: Off; HSync: On, VSync: On */
+       case FB_BLANK_NORMAL:
+               drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY);
+               break;
+       /* Display: Off; HSync: Off, VSync: On */
+       case FB_BLANK_HSYNC_SUSPEND:
+               drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY);
+               break;
+       /* Display: Off; HSync: On, VSync: Off */
+       case FB_BLANK_VSYNC_SUSPEND:
+               drm_fb_helper_off(info, DRM_MODE_DPMS_SUSPEND);
+               break;
+       /* Display: Off; HSync: Off, VSync: Off */
+       case FB_BLANK_POWERDOWN:
+               drm_fb_helper_off(info, DRM_MODE_DPMS_OFF);
+               break;
+       }
+       return 0;
+}
+EXPORT_SYMBOL(drm_fb_helper_blank);
+
+static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
+{
+       int i;
+
+       for (i = 0; i < helper->connector_count; i++)
+               kfree(helper->connector_info[i]);
+       kfree(helper->connector_info);
+       for (i = 0; i < helper->crtc_count; i++)
+               kfree(helper->crtc_info[i].mode_set.connectors);
+       kfree(helper->crtc_info);
+}
+
+/*
+ * Set up the per-crtc and per-connector tracking arrays for a new
+ * fbdev helper and bind one tracking slot to each of the device's
+ * CRTCs.  Returns 0 on success or -ENOMEM, in which case everything
+ * allocated here has been released again.
+ */
+int drm_fb_helper_init(struct drm_device *dev,
+                      struct drm_fb_helper *fb_helper,
+                      int crtc_count, int max_conn_count)
+{
+       struct drm_crtc *crtc;
+       int i;
+
+       fb_helper->dev = dev;
+
+       INIT_LIST_HEAD(&fb_helper->kernel_fb_list);
+
+       fb_helper->crtc_info = kcalloc(crtc_count, sizeof(struct drm_fb_helper_crtc), GFP_KERNEL);
+       if (!fb_helper->crtc_info)
+               return -ENOMEM;
+
+       fb_helper->crtc_count = crtc_count;
+       fb_helper->connector_info = kcalloc(dev->mode_config.num_connector, sizeof(struct drm_fb_helper_connector *), GFP_KERNEL);
+       if (!fb_helper->connector_info) {
+               kfree(fb_helper->crtc_info);
+               return -ENOMEM;
+       }
+       fb_helper->connector_count = 0;
+
+       for (i = 0; i < crtc_count; i++) {
+               struct drm_connector **conns;
+
+               conns = kcalloc(max_conn_count,
+                               sizeof(struct drm_connector *),
+                               GFP_KERNEL);
+               if (!conns) {
+                       /* frees connector_info and every array allocated so far */
+                       drm_fb_helper_crtc_free(fb_helper);
+                       return -ENOMEM;
+               }
+               fb_helper->crtc_info[i].mode_set.connectors = conns;
+               fb_helper->crtc_info[i].mode_set.num_connectors = 0;
+       }
+
+       /* bind the tracking slots to the device's CRTCs, in list order */
+       i = 0;
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               fb_helper->crtc_info[i].crtc_id = crtc->base.id;
+               fb_helper->crtc_info[i].mode_set.crtc = crtc;
+               i++;
+       }
+       fb_helper->conn_limit = max_conn_count;
+       return 0;
+}
+EXPORT_SYMBOL(drm_fb_helper_init);
+
+/* Tear down a helper previously set up with drm_fb_helper_init(). */
+void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
+{
+       if (!list_empty(&fb_helper->kernel_fb_list)) {
+               list_del(&fb_helper->kernel_fb_list);
+               /* last registered helper gone: drop the global panic/sysrq hooks */
+               if (list_empty(&kernel_fb_helper_list)) {
+                       printk(KERN_INFO "drm: unregistered panic notifier\n");
+                       atomic_notifier_chain_unregister(&panic_notifier_list,
+                                                        &paniced);
+                       unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
+               }
+       }
+
+       drm_fb_helper_crtc_free(fb_helper);
+}
+EXPORT_SYMBOL(drm_fb_helper_fini);
+
+/*
+ * Program one fbdev colour register on @crtc.
+ *
+ * For truecolor visuals the value never reaches the hardware: it is
+ * packed into the software pseudo-palette consumed by fbcon.  For
+ * other visuals it is forwarded into the crtc gamma ramp through the
+ * driver's gamma_set/gamma_get hooks.  Returns 0, or -EINVAL when
+ * @regno is out of range for the current depth.
+ */
+static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
+                    u16 blue, u16 regno, struct fb_info *info)
+{
+       struct drm_fb_helper *fb_helper = info->par;
+       struct drm_framebuffer *fb = fb_helper->fb;
+       int pindex;
+
+       if (info->fix.visual == FB_VISUAL_TRUECOLOR) {
+               u32 *palette;
+               u32 value;
+               /* place color in pseudopalette */
+               if (regno > 16)
+                       return -EINVAL;
+               palette = (u32 *)info->pseudo_palette;
+               /* scale each 16-bit channel down to the visual's bitfield width */
+               red >>= (16 - info->var.red.length);
+               green >>= (16 - info->var.green.length);
+               blue >>= (16 - info->var.blue.length);
+               value = (red << info->var.red.offset) |
+                       (green << info->var.green.offset) |
+                       (blue << info->var.blue.offset);
+               palette[regno] = value;
+               return 0;
+       }
+
+       pindex = regno;
+
+       if (fb->bits_per_pixel == 16) {
+               /* 16bpp: spread the colormap index over the wider gamma ramp */
+               pindex = regno << 3;
+
+               if (fb->depth == 16 && regno > 63)
+                       return -EINVAL;
+               if (fb->depth == 15 && regno > 31)
+                       return -EINVAL;
+
+               if (fb->depth == 16) {
+                       u16 r, g, b;
+                       int i;
+                       if (regno < 32) {
+                               /* 5-bit red/blue: replicate one entry across 8 ramp slots */
+                               for (i = 0; i < 8; i++)
+                                       fb_helper->funcs->gamma_set(crtc, red,
+                                               green, blue, pindex + i);
+                       }
+
+                       /*
+                        * Re-read the ramp so the red/blue already stored
+                        * there survive while 6-bit green is written at
+                        * finer granularity.
+                        * NOTE(review): 'g' is fetched but never used -
+                        * presumably intentional, but verify against the
+                        * driver gamma_get contract.
+                        */
+                       fb_helper->funcs->gamma_get(crtc, &r,
+                                                   &g, &b,
+                                                   pindex >> 1);
+
+                       for (i = 0; i < 4; i++)
+                               fb_helper->funcs->gamma_set(crtc, r,
+                                                           green, b,
+                                                           (pindex >> 1) + i);
+               }
+       }
+
+       /* 8bpp pseudocolor (and 15-depth handled above): direct 1:1 entry */
+       if (fb->depth != 16)
+               fb_helper->funcs->gamma_set(crtc, red, green, blue, pindex);
+       return 0;
+}
+
+/*
+ * fbdev .fb_setcmap hook: push the colormap in @cmap to every crtc
+ * driven by this helper, then ask the driver to reload its LUT.
+ * Returns 0 on success or the first error from setcolreg().
+ */
+int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
+{
+       struct drm_fb_helper *fb_helper = info->par;
+       struct drm_crtc_helper_funcs *crtc_funcs;
+       u16 *red, *green, *blue, *transp;
+       struct drm_crtc *crtc;
+       int i, j, rc = 0;
+       int start;
+
+       for (i = 0; i < fb_helper->crtc_count; i++) {
+               crtc = fb_helper->crtc_info[i].mode_set.crtc;
+               crtc_funcs = crtc->helper_private;
+
+               red = cmap->red;
+               green = cmap->green;
+               blue = cmap->blue;
+               transp = cmap->transp;
+               start = cmap->start;
+
+               /*
+                * Walk the colormap with a dedicated index: the previous
+                * code reused 'i' here, clobbering the outer crtc loop
+                * counter and breaking multi-crtc colormap updates.
+                */
+               for (j = 0; j < cmap->len; j++) {
+                       u16 hred, hgreen, hblue, htransp = 0xffff;
+
+                       hred = *red++;
+                       hgreen = *green++;
+                       hblue = *blue++;
+
+                       if (transp)
+                               htransp = *transp++;
+
+                       rc = setcolreg(crtc, hred, hgreen, hblue, start++, info);
+                       if (rc)
+                               return rc;
+               }
+               crtc_funcs->load_lut(crtc);
+       }
+       return rc;
+}
+EXPORT_SYMBOL(drm_fb_helper_setcmap);
+
+/*
+ * fbdev .fb_check_var hook: validate a requested variable screeninfo
+ * against the helper's fixed-size framebuffer and normalise the RGBA
+ * bitfield layout for the requested depth.  The fb object cannot be
+ * resized here, so anything larger than the current fb is rejected
+ * with -EINVAL.
+ */
+int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
+                           struct fb_info *info)
+{
+       struct drm_fb_helper *fb_helper = info->par;
+       struct drm_framebuffer *fb = fb_helper->fb;
+       int depth;
+
+       /* no clock programming via fbdev; also refuse while kgdb owns the console */
+       if (var->pixclock != 0 || in_dbg_master())
+               return -EINVAL;
+
+       /* Need to resize the fb object !!! */
+       if (var->bits_per_pixel > fb->bits_per_pixel || var->xres > fb->width || var->yres > fb->height) {
+               DRM_DEBUG("fb userspace requested width/height/bpp is greater than current fb "
+                         "object %dx%d-%d > %dx%d-%d\n", var->xres, var->yres, var->bits_per_pixel,
+                         fb->width, fb->height, fb->bits_per_pixel);
+               return -EINVAL;
+       }
+
+       /* infer colour depth from bpp plus the green/alpha field widths */
+       switch (var->bits_per_pixel) {
+       case 16:
+               depth = (var->green.length == 6) ? 16 : 15;
+               break;
+       case 32:
+               depth = (var->transp.length > 0) ? 32 : 24;
+               break;
+       default:
+               depth = var->bits_per_pixel;
+               break;
+       }
+
+       /* force the canonical bitfield layout for the chosen depth */
+       switch (depth) {
+       case 8:
+               var->red.offset = 0;
+               var->green.offset = 0;
+               var->blue.offset = 0;
+               var->red.length = 8;
+               var->green.length = 8;
+               var->blue.length = 8;
+               var->transp.length = 0;
+               var->transp.offset = 0;
+               break;
+       case 15:
+               var->red.offset = 10;
+               var->green.offset = 5;
+               var->blue.offset = 0;
+               var->red.length = 5;
+               var->green.length = 5;
+               var->blue.length = 5;
+               var->transp.length = 1;
+               var->transp.offset = 15;
+               break;
+       case 16:
+               var->red.offset = 11;
+               var->green.offset = 5;
+               var->blue.offset = 0;
+               var->red.length = 5;
+               var->green.length = 6;
+               var->blue.length = 5;
+               var->transp.length = 0;
+               var->transp.offset = 0;
+               break;
+       case 24:
+               var->red.offset = 16;
+               var->green.offset = 8;
+               var->blue.offset = 0;
+               var->red.length = 8;
+               var->green.length = 8;
+               var->blue.length = 8;
+               var->transp.length = 0;
+               var->transp.offset = 0;
+               break;
+       case 32:
+               var->red.offset = 16;
+               var->green.offset = 8;
+               var->blue.offset = 0;
+               var->red.length = 8;
+               var->green.length = 8;
+               var->blue.length = 8;
+               var->transp.length = 8;
+               var->transp.offset = 24;
+               break;
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
+EXPORT_SYMBOL(drm_fb_helper_check_var);
+
+/*
+ * fbdev .fb_set_par hook: (re)apply the stored mode_set on every crtc
+ * under the mode_config lock.  This is what lets fbcon perform the
+ * actual mode init.
+ */
+int drm_fb_helper_set_par(struct fb_info *info)
+{
+       struct drm_fb_helper *fb_helper = info->par;
+       struct drm_device *dev = fb_helper->dev;
+       struct fb_var_screeninfo *var = &info->var;
+       int ret = 0;
+       int i;
+
+       if (var->pixclock != 0) {
+               DRM_ERROR("PIXEL CLOCK SET\n");
+               return -EINVAL;
+       }
+
+       mutex_lock(&dev->mode_config.mutex);
+       for (i = 0; i < fb_helper->crtc_count; i++) {
+               struct drm_mode_set *set = &fb_helper->crtc_info[i].mode_set;
+
+               ret = set->crtc->funcs->set_config(set);
+               if (ret)
+                       break;
+       }
+       mutex_unlock(&dev->mode_config.mutex);
+       if (ret)
+               return ret;
+
+       /* a hotplug that arrived while fbcon was inactive is serviced now */
+       if (fb_helper->delayed_hotplug) {
+               fb_helper->delayed_hotplug = false;
+               drm_fb_helper_hotplug_event(fb_helper);
+       }
+       return 0;
+}
+EXPORT_SYMBOL(drm_fb_helper_set_par);
+
+/*
+ * fbdev .fb_pan_display hook: update each crtc's scanout offset to the
+ * requested x/y and mirror the accepted offsets back into info->var.
+ */
+int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
+                             struct fb_info *info)
+{
+       struct drm_fb_helper *fb_helper = info->par;
+       struct drm_device *dev = fb_helper->dev;
+       int ret = 0;
+       int i;
+
+       mutex_lock(&dev->mode_config.mutex);
+       for (i = 0; i < fb_helper->crtc_count; i++) {
+               struct drm_mode_set *modeset = &fb_helper->crtc_info[i].mode_set;
+
+               modeset->x = var->xoffset;
+               modeset->y = var->yoffset;
+
+               /* only crtcs that actually drive a connector get programmed */
+               if (!modeset->num_connectors)
+                       continue;
+
+               ret = modeset->crtc->funcs->set_config(modeset);
+               if (!ret) {
+                       info->var.xoffset = var->xoffset;
+                       info->var.yoffset = var->yoffset;
+               }
+       }
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+EXPORT_SYMBOL(drm_fb_helper_pan_display);
+
+/*
+ * Work out the surface size/depth that satisfies every active crtc,
+ * ask the driver (via funcs->fb_probe) to allocate a single shared
+ * framebuffer of that size, and register it with the fbdev layer.
+ * On the first registered fb the panic notifier and sysrq-v restore
+ * hook are installed.  Returns 0 or a negative error code.
+ */
+int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
+                                 int preferred_bpp)
+{
+       int new_fb = 0;
+       int crtc_count = 0;
+       int i;
+       struct fb_info *info;
+       struct drm_fb_helper_surface_size sizes;
+       int gamma_size = 0;
+
+       /* defaults: 32bpp/24-depth, sizes narrowed below */
+       memset(&sizes, 0, sizeof(struct drm_fb_helper_surface_size));
+       sizes.surface_depth = 24;
+       sizes.surface_bpp = 32;
+       sizes.fb_width = (unsigned)-1;
+       sizes.fb_height = (unsigned)-1;
+
+       /* if driver picks 8 or 16 by default use that
+          for both depth/bpp */
+       if (preferred_bpp != sizes.surface_bpp) {
+               sizes.surface_depth = sizes.surface_bpp = preferred_bpp;
+       }
+       /* first up get a count of crtcs now in use and new min/maxes width/heights */
+       for (i = 0; i < fb_helper->connector_count; i++) {
+               struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i];
+               struct drm_fb_helper_cmdline_mode *cmdline_mode;
+
+               cmdline_mode = &fb_helper_conn->cmdline_mode;
+
+               /* a command-line bpp on any connector overrides the default */
+               if (cmdline_mode->bpp_specified) {
+                       switch (cmdline_mode->bpp) {
+                       case 8:
+                               sizes.surface_depth = sizes.surface_bpp = 8;
+                               break;
+                       case 15:
+                               sizes.surface_depth = 15;
+                               sizes.surface_bpp = 16;
+                               break;
+                       case 16:
+                               sizes.surface_depth = sizes.surface_bpp = 16;
+                               break;
+                       case 24:
+                               sizes.surface_depth = sizes.surface_bpp = 24;
+                               break;
+                       case 32:
+                               sizes.surface_depth = 24;
+                               sizes.surface_bpp = 32;
+                               break;
+                       }
+                       break;
+               }
+       }
+
+       /*
+        * fb_width/fb_height shrink to the smallest desired mode (so the
+        * console fits on every head); surface_width/height grow to the
+        * largest (so the allocation covers every head).
+        */
+       crtc_count = 0;
+       for (i = 0; i < fb_helper->crtc_count; i++) {
+               struct drm_display_mode *desired_mode;
+               desired_mode = fb_helper->crtc_info[i].desired_mode;
+
+               if (desired_mode) {
+                       if (gamma_size == 0)
+                               gamma_size = fb_helper->crtc_info[i].mode_set.crtc->gamma_size;
+                       if (desired_mode->hdisplay < sizes.fb_width)
+                               sizes.fb_width = desired_mode->hdisplay;
+                       if (desired_mode->vdisplay < sizes.fb_height)
+                               sizes.fb_height = desired_mode->vdisplay;
+                       if (desired_mode->hdisplay > sizes.surface_width)
+                               sizes.surface_width = desired_mode->hdisplay;
+                       if (desired_mode->vdisplay > sizes.surface_height)
+                               sizes.surface_height = desired_mode->vdisplay;
+                       crtc_count++;
+               }
+       }
+
+       if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) {
+               /* hmm everyone went away - assume VGA cable just fell out
+                  and will come back later. */
+               DRM_INFO("Cannot find any crtc or sizes - going 1024x768\n");
+               sizes.fb_width = sizes.surface_width = 1024;
+               sizes.fb_height = sizes.surface_height = 768;
+       }
+
+       /* push down into drivers */
+       new_fb = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes);
+       if (new_fb < 0)
+               return new_fb;
+
+       info = fb_helper->fbdev;
+
+       /* set the fb pointer */
+       for (i = 0; i < fb_helper->crtc_count; i++) {
+               fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb;
+       }
+
+       if (new_fb) {
+               info->var.pixclock = 0;
+               /*
+                * NOTE(review): on register_framebuffer() failure the fb
+                * allocated by fb_probe above is not torn down - verify
+                * whether the driver cleans up on its own.
+                */
+               if (register_framebuffer(info) < 0) {
+                       return -EINVAL;
+               }
+
+               printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
+                      info->fix.id);
+
+       } else {
+               /* fb already registered earlier: just reapply the modes */
+               drm_fb_helper_set_par(info);
+       }
+
+       /* Switch back to kernel console on panic */
+       /* multi card linked list maybe */
+       if (list_empty(&kernel_fb_helper_list)) {
+               printk(KERN_INFO "drm: registered panic notifier\n");
+               atomic_notifier_chain_register(&panic_notifier_list,
+                                              &paniced);
+               register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
+       }
+       if (new_fb)
+               list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list);
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_fb_helper_single_fb_probe);
+
+/*
+ * Fill the mode-invariant parts of the fbdev fixed screeninfo from the
+ * framebuffer's @pitch (bytes per scanline) and colour @depth.  Depth 8
+ * is exposed as a pseudocolor visual, everything else as truecolor.
+ */
+void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
+                           uint32_t depth)
+{
+       info->fix.type = FB_TYPE_PACKED_PIXELS;
+       info->fix.visual = depth == 8 ? FB_VISUAL_PSEUDOCOLOR :
+               FB_VISUAL_TRUECOLOR;
+       info->fix.type_aux = 0;         /* was assigned twice; once is enough */
+       info->fix.xpanstep = 1; /* doing it in hw */
+       info->fix.ypanstep = 1; /* doing it in hw */
+       info->fix.ywrapstep = 0;
+       info->fix.accel = FB_ACCEL_NONE;
+
+       info->fix.line_length = pitch;
+}
+EXPORT_SYMBOL(drm_fb_helper_fill_fix);
+
+/*
+ * Fill the mode-dependent parts of the fbdev variable screeninfo from
+ * the helper's framebuffer: virtual resolution, bpp and the RGBA
+ * bitfield layout matching fb->depth, plus the visible resolution
+ * @fb_width x @fb_height.
+ */
+void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
+                           uint32_t fb_width, uint32_t fb_height)
+{
+       struct drm_framebuffer *fb = fb_helper->fb;
+       info->pseudo_palette = fb_helper->pseudo_palette;
+       info->var.xres_virtual = fb->width;
+       info->var.yres_virtual = fb->height;
+       info->var.bits_per_pixel = fb->bits_per_pixel;
+       info->var.xoffset = 0;
+       info->var.yoffset = 0;
+       info->var.activate = FB_ACTIVATE_NOW;
+       info->var.height = -1;  /* physical dimensions unknown */
+       info->var.width = -1;
+
+       switch (fb->depth) {
+       case 8:
+               info->var.red.offset = 0;
+               info->var.green.offset = 0;
+               info->var.blue.offset = 0;
+               info->var.red.length = 8; /* 8bit DAC */
+               info->var.green.length = 8;
+               info->var.blue.length = 8;
+               info->var.transp.offset = 0;
+               info->var.transp.length = 0;
+               break;
+       case 15:
+               info->var.red.offset = 10;
+               info->var.green.offset = 5;
+               info->var.blue.offset = 0;
+               info->var.red.length = 5;
+               info->var.green.length = 5;
+               info->var.blue.length = 5;
+               info->var.transp.offset = 15;
+               info->var.transp.length = 1;
+               break;
+       case 16:
+               info->var.red.offset = 11;
+               info->var.green.offset = 5;
+               info->var.blue.offset = 0;
+               info->var.red.length = 5;
+               info->var.green.length = 6;
+               info->var.blue.length = 5;
+               info->var.transp.offset = 0;
+               /* was left unset here: clear any stale alpha width so
+                * this case matches every other depth */
+               info->var.transp.length = 0;
+               break;
+       case 24:
+               info->var.red.offset = 16;
+               info->var.green.offset = 8;
+               info->var.blue.offset = 0;
+               info->var.red.length = 8;
+               info->var.green.length = 8;
+               info->var.blue.length = 8;
+               info->var.transp.offset = 0;
+               info->var.transp.length = 0;
+               break;
+       case 32:
+               info->var.red.offset = 16;
+               info->var.green.offset = 8;
+               info->var.blue.offset = 0;
+               info->var.red.length = 8;
+               info->var.green.length = 8;
+               info->var.blue.length = 8;
+               info->var.transp.offset = 24;
+               info->var.transp.length = 8;
+               break;
+       default:
+               break;
+       }
+
+       info->var.xres = fb_width;
+       info->var.yres = fb_height;
+}
+EXPORT_SYMBOL(drm_fb_helper_fill_var);
+
+/*
+ * Ask every connector tracked by the helper to refresh its mode list,
+ * capped at @maxX x @maxY.  Returns the total number of modes found.
+ */
+static int drm_fb_helper_probe_connector_modes(struct drm_fb_helper *fb_helper,
+                                              uint32_t maxX,
+                                              uint32_t maxY)
+{
+       int count = 0;
+       int i;
+
+       for (i = 0; i < fb_helper->connector_count; i++) {
+               struct drm_connector *connector =
+                       fb_helper->connector_info[i]->connector;
+
+               count += connector->funcs->fill_modes(connector, maxX, maxY);
+       }
+
+       return count;
+}
+
+/*
+ * Return the connector's preferred mode that fits within
+ * @width x @height, or NULL if none qualifies.
+ */
+static struct drm_display_mode *drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector, int width, int height)
+{
+       struct drm_display_mode *m;
+
+       list_for_each_entry(m, &fb_connector->connector->modes, head) {
+               if (drm_mode_width(m) > width || drm_mode_height(m) > height)
+                       continue;
+               if (m->type & DRM_MODE_TYPE_PREFERRED)
+                       return m;
+       }
+       return NULL;
+}
+
+/* Did the user force a mode for this connector on the kernel command line? */
+static bool drm_has_cmdline_mode(struct drm_fb_helper_connector *fb_connector)
+{
+       return fb_connector->cmdline_mode.specified;
+}
+
+/*
+ * Resolve the mode forced on the kernel command line for this
+ * connector.  A matching mode already probed from the sink is
+ * preferred; failing that a CVT or GTF mode is synthesized and added
+ * to the connector's mode list.  Returns NULL when no command line
+ * mode was specified, or when mode generation fails.
+ */
+static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
+                                                     int width, int height)
+{
+       struct drm_fb_helper_cmdline_mode *cmdline_mode;
+       struct drm_display_mode *mode = NULL;
+
+       cmdline_mode = &fb_helper_conn->cmdline_mode;
+       if (cmdline_mode->specified == false)
+               return mode;
+
+       /* attempt to find a matching mode in the list of modes
+        *  we have gotten so far, if not add a CVT mode that conforms
+        */
+       if (cmdline_mode->rb || cmdline_mode->margins)
+               goto create_mode;
+
+       list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) {
+               /* check width/height */
+               if (mode->hdisplay != cmdline_mode->xres ||
+                   mode->vdisplay != cmdline_mode->yres)
+                       continue;
+
+               if (cmdline_mode->refresh_specified) {
+                       if (mode->vrefresh != cmdline_mode->refresh)
+                               continue;
+               }
+
+               if (cmdline_mode->interlace) {
+                       if (!(mode->flags & DRM_MODE_FLAG_INTERLACE))
+                               continue;
+               }
+               return mode;
+       }
+
+create_mode:
+       if (cmdline_mode->cvt)
+               mode = drm_cvt_mode(fb_helper_conn->connector->dev,
+                                   cmdline_mode->xres, cmdline_mode->yres,
+                                   cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
+                                   cmdline_mode->rb, cmdline_mode->interlace,
+                                   cmdline_mode->margins);
+       else
+               mode = drm_gtf_mode(fb_helper_conn->connector->dev,
+                                   cmdline_mode->xres, cmdline_mode->yres,
+                                   cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
+                                   cmdline_mode->interlace,
+                                   cmdline_mode->margins);
+       /* mode allocation can fail; don't dereference NULL below */
+       if (!mode)
+               return NULL;
+       drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+       list_add(&mode->head, &fb_helper_conn->connector->modes);
+       return mode;
+}
+
+/*
+ * Is @connector usable for the initial config?  In strict mode only a
+ * definitely-connected connector qualifies; otherwise anything that is
+ * not explicitly disconnected does (covers "unknown" status).
+ */
+static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
+{
+       if (strict)
+               return connector->status == connector_status_connected;
+       return connector->status != connector_status_disconnected;
+}
+
+/*
+ * Fill @enabled with one flag per tracked connector.  First apply the
+ * strict connected test; if that enables nothing at all, fall back to
+ * the lenient test so connectors with unknown status still light up.
+ */
+static void drm_enable_connectors(struct drm_fb_helper *fb_helper,
+                                 bool *enabled)
+{
+       bool any_enabled = false;
+       int i;
+
+       for (i = 0; i < fb_helper->connector_count; i++) {
+               struct drm_connector *connector =
+                       fb_helper->connector_info[i]->connector;
+
+               enabled[i] = drm_connector_enabled(connector, true);
+               DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id,
+                         enabled[i] ? "yes" : "no");
+               any_enabled |= enabled[i];
+       }
+
+       if (any_enabled)
+               return;
+
+       for (i = 0; i < fb_helper->connector_count; i++)
+               enabled[i] = drm_connector_enabled(
+                               fb_helper->connector_info[i]->connector, false);
+}
+
+/*
+ * Try to drive every enabled connector with one identical (cloned)
+ * mode.  Only attempted on single-crtc devices with more than one
+ * enabled connector: first by matching identical command line modes,
+ * then by looking for a common 1024x768 DMT mode.  On success fills
+ * @modes and returns true; otherwise returns false so the caller can
+ * fall back to per-connector modes.
+ */
+static bool drm_target_cloned(struct drm_fb_helper *fb_helper,
+                             struct drm_display_mode **modes,
+                             bool *enabled, int width, int height)
+{
+       int count, i, j;
+       bool can_clone = false;
+       struct drm_fb_helper_connector *fb_helper_conn;
+       struct drm_display_mode *dmt_mode, *mode;
+
+       /* only contemplate cloning in the single crtc case */
+       if (fb_helper->crtc_count > 1)
+               return false;
+
+       count = 0;
+       for (i = 0; i < fb_helper->connector_count; i++) {
+               if (enabled[i])
+                       count++;
+       }
+
+       /* only contemplate cloning if more than one connector is enabled */
+       if (count <= 1)
+               return false;
+
+       /* check the command line or if nothing common pick 1024x768 */
+       can_clone = true;
+       for (i = 0; i < fb_helper->connector_count; i++) {
+               if (!enabled[i])
+                       continue;
+               fb_helper_conn = fb_helper->connector_info[i];
+               modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height);
+               if (!modes[i]) {
+                       can_clone = false;
+                       break;
+               }
+               /* every forced mode must be identical to all the earlier ones */
+               for (j = 0; j < i; j++) {
+                       if (!enabled[j])
+                               continue;
+                       if (!drm_mode_equal(modes[j], modes[i]))
+                               can_clone = false;
+               }
+       }
+
+       if (can_clone) {
+               DRM_DEBUG_KMS("can clone using command line\n");
+               return true;
+       }
+
+       /* try and find a 1024x768 mode on each connector */
+       can_clone = true;
+       /*
+        * NOTE(review): the drm_mode_find_dmt() result is handed to
+        * drm_mode_equal() without a NULL check and is never freed -
+        * verify both points against the upstream helper.
+        */
+       dmt_mode = drm_mode_find_dmt(fb_helper->dev, 1024, 768, 60);
+
+       for (i = 0; i < fb_helper->connector_count; i++) {
+
+               if (!enabled[i])
+                       continue;
+
+               fb_helper_conn = fb_helper->connector_info[i];
+               list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) {
+                       if (drm_mode_equal(mode, dmt_mode))
+                               modes[i] = mode;
+               }
+               if (!modes[i])
+                       can_clone = false;
+       }
+
+       if (can_clone) {
+               DRM_DEBUG_KMS("can clone using 1024x768\n");
+               return true;
+       }
+       DRM_INFO("kms: can't enable cloning when we probably wanted to.\n");
+       return false;
+}
+
+/*
+ * Pick a mode for every enabled connector independently: the command
+ * line mode wins, then the sink's preferred mode, then simply the
+ * first mode on the probed list.  Always returns true.
+ */
+static bool drm_target_preferred(struct drm_fb_helper *fb_helper,
+                                struct drm_display_mode **modes,
+                                bool *enabled, int width, int height)
+{
+       int i;
+
+       for (i = 0; i < fb_helper->connector_count; i++) {
+               struct drm_fb_helper_connector *fb_helper_conn =
+                       fb_helper->connector_info[i];
+
+               if (!enabled[i])
+                       continue;
+
+               DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
+                             fb_helper_conn->connector->base.id);
+
+               /* got for command line mode first */
+               modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height);
+               if (!modes[i]) {
+                       DRM_DEBUG_KMS("looking for preferred mode on connector %d\n",
+                                     fb_helper_conn->connector->base.id);
+                       modes[i] = drm_has_preferred_mode(fb_helper_conn, width, height);
+               }
+               /* No preferred modes, pick one off the list */
+               if (!modes[i] && !list_empty(&fb_helper_conn->connector->modes))
+                       modes[i] = list_entry(fb_helper_conn->connector->modes.next,
+                                             struct drm_display_mode, head);
+               DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
+                         "none");
+       }
+       return true;
+}
+
+/*
+ * Recursive exhaustive search assigning a crtc to connector @n and all
+ * connectors after it.  Each candidate assignment is scored (bonuses
+ * for connected status, a command line mode and a preferred mode, plus
+ * the best achievable score of the remaining connectors) and the best
+ * combination found is left in @best_crtcs.  Returns the best total
+ * score; a connector with no mode (modes[n] == NULL) simply passes the
+ * recursion through.
+ */
+static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
+                         struct drm_fb_helper_crtc **best_crtcs,
+                         struct drm_display_mode **modes,
+                         int n, int width, int height)
+{
+       int c, o;
+       struct drm_device *dev = fb_helper->dev;
+       struct drm_connector *connector;
+       struct drm_connector_helper_funcs *connector_funcs;
+       struct drm_encoder *encoder;
+       struct drm_fb_helper_crtc *best_crtc;
+       int my_score, best_score, score;
+       struct drm_fb_helper_crtc **crtcs, *crtc;
+       struct drm_fb_helper_connector *fb_helper_conn;
+
+       /* past the last connector: empty assignment, score 0 */
+       if (n == fb_helper->connector_count)
+               return 0;
+
+       fb_helper_conn = fb_helper->connector_info[n];
+       connector = fb_helper_conn->connector;
+
+       /* baseline: leave this connector unassigned and solve the rest */
+       best_crtcs[n] = NULL;
+       best_crtc = NULL;
+       best_score = drm_pick_crtcs(fb_helper, best_crtcs, modes, n+1, width, height);
+       if (modes[n] == NULL)
+               return best_score;
+
+       /* scratch assignment array for the candidate branches below */
+       crtcs = kzalloc(dev->mode_config.num_connector *
+                       sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
+       if (!crtcs)
+               return best_score;
+
+       my_score = 1;
+       if (connector->status == connector_status_connected)
+               my_score++;
+       if (drm_has_cmdline_mode(fb_helper_conn))
+               my_score++;
+       if (drm_has_preferred_mode(fb_helper_conn, width, height))
+               my_score++;
+
+       connector_funcs = connector->helper_private;
+       encoder = connector_funcs->best_encoder(connector);
+       if (!encoder)
+               goto out;
+
+       /* select a crtc for this connector and then attempt to configure
+          remaining connectors */
+       for (c = 0; c < fb_helper->crtc_count; c++) {
+               crtc = &fb_helper->crtc_info[c];
+
+               if ((encoder->possible_crtcs & (1 << c)) == 0) {
+                       continue;
+               }
+
+               /* is this crtc already claimed by an earlier connector? */
+               for (o = 0; o < n; o++)
+                       if (best_crtcs[o] == crtc)
+                               break;
+
+               if (o < n) {
+                       /* ignore cloning unless only a single crtc */
+                       if (fb_helper->crtc_count > 1)
+                               continue;
+
+                       if (!drm_mode_equal(modes[o], modes[n]))
+                               continue;
+               }
+
+               /* take this crtc, inherit assignments [0..n), recurse */
+               crtcs[n] = crtc;
+               memcpy(crtcs, best_crtcs, n * sizeof(struct drm_fb_helper_crtc *));
+               score = my_score + drm_pick_crtcs(fb_helper, crtcs, modes, n + 1,
+                                                 width, height);
+               if (score > best_score) {
+                       best_crtc = crtc;
+                       best_score = score;
+                       memcpy(best_crtcs, crtcs,
+                              dev->mode_config.num_connector *
+                              sizeof(struct drm_fb_helper_crtc *));
+               }
+       }
+out:
+       kfree(crtcs);
+       return best_score;
+}
+
+/*
+ * Compute the initial crtc <-> connector <-> mode assignment and store
+ * it in each crtc_info's mode_set.  Nothing is committed to hardware
+ * here; drm_fb_helper_set_par() applies the result later.
+ */
+static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
+{
+       struct drm_device *dev = fb_helper->dev;
+       struct drm_fb_helper_crtc **crtcs;
+       struct drm_display_mode **modes;
+       struct drm_encoder *encoder;
+       struct drm_mode_set *modeset;
+       bool *enabled;
+       int width, height;
+       int i, ret;
+
+       DRM_DEBUG_KMS("\n");
+
+       width = dev->mode_config.max_width;
+       height = dev->mode_config.max_height;
+
+       /* clean out all the encoder/crtc combos */
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+               encoder->crtc = NULL;
+       }
+
+       crtcs = kcalloc(dev->mode_config.num_connector,
+                       sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
+       modes = kcalloc(dev->mode_config.num_connector,
+                       sizeof(struct drm_display_mode *), GFP_KERNEL);
+       enabled = kcalloc(dev->mode_config.num_connector,
+                         sizeof(bool), GFP_KERNEL);
+       /* previously dereferenced unchecked; bail out cleanly on OOM */
+       if (!crtcs || !modes || !enabled)
+               goto out;
+
+       drm_enable_connectors(fb_helper, enabled);
+
+       /* try a cloned config first, then per-connector preferred modes */
+       ret = drm_target_cloned(fb_helper, modes, enabled, width, height);
+       if (!ret) {
+               ret = drm_target_preferred(fb_helper, modes, enabled, width, height);
+               if (!ret)
+                       DRM_ERROR("Unable to find initial modes\n");
+       }
+
+       DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n", width, height);
+
+       drm_pick_crtcs(fb_helper, crtcs, modes, 0, width, height);
+
+       /* need to set the modesets up here for use later */
+       /* fill out the connector<->crtc mappings into the modesets */
+       for (i = 0; i < fb_helper->crtc_count; i++) {
+               modeset = &fb_helper->crtc_info[i].mode_set;
+               modeset->num_connectors = 0;
+       }
+
+       for (i = 0; i < fb_helper->connector_count; i++) {
+               struct drm_display_mode *mode = modes[i];
+               struct drm_fb_helper_crtc *fb_crtc = crtcs[i];
+
+               if (mode && fb_crtc) {
+                       /* only touch fb_crtc after it is known non-NULL
+                        * (it was previously dereferenced before this check) */
+                       modeset = &fb_crtc->mode_set;
+                       DRM_DEBUG_KMS("desired mode %s set on crtc %d\n",
+                                     mode->name, fb_crtc->mode_set.crtc->base.id);
+                       fb_crtc->desired_mode = mode;
+                       if (modeset->mode)
+                               drm_mode_destroy(dev, modeset->mode);
+                       modeset->mode = drm_mode_duplicate(dev,
+                                                          fb_crtc->desired_mode);
+                       modeset->connectors[modeset->num_connectors++] = fb_helper->connector_info[i]->connector;
+               }
+       }
+
+out:
+       kfree(crtcs);
+       kfree(modes);
+       kfree(enabled);
+}
+
+/**
+ * drm_helper_initial_config - setup a sane initial connector configuration
+ * @dev: DRM device
+ *
+ * LOCKING:
+ * Called at init time, must take mode config lock.
+ *
+ * Scan the CRTCs and connectors and try to put together an initial setup.
+ * At the moment, this is a cloned configuration across all heads with
+ * a new framebuffer object as the backing store.
+ *
+ * RETURNS:
+ * Zero if everything went ok, nonzero otherwise.
+ */
+bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
+{
+       struct drm_device *dev = fb_helper->dev;
+       int count = 0;
+
+       /* disable all the possible outputs/crtcs before entering KMS mode */
+       drm_helper_disable_unused_functions(fb_helper->dev);
+
+       drm_fb_helper_parse_command_line(fb_helper);
+
+       count = drm_fb_helper_probe_connector_modes(fb_helper,
+                                                   dev->mode_config.max_width,
+                                                   dev->mode_config.max_height);
+       /*
+        * we shouldn't end up with no modes here.
+        */
+       if (count == 0) {
+               printk(KERN_INFO "No connectors reported connected with modes\n");
+       }
+       drm_setup_crtcs(fb_helper);
+
+       return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
+}
+EXPORT_SYMBOL(drm_fb_helper_initial_config);
+
+bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
+{
+       int count = 0;
+       u32 max_width, max_height, bpp_sel;
+       bool bound = false, crtcs_bound = false;
+       struct drm_crtc *crtc;
+
+       if (!fb_helper->fb)
+               return false;
+
+       list_for_each_entry(crtc, &fb_helper->dev->mode_config.crtc_list, head) {
+               if (crtc->fb)
+                       crtcs_bound = true;
+               if (crtc->fb == fb_helper->fb)
+                       bound = true;
+       }
+
+       if (!bound && crtcs_bound) {
+               fb_helper->delayed_hotplug = true;
+               return false;
+       }
+       DRM_DEBUG_KMS("\n");
+
+       max_width = fb_helper->fb->width;
+       max_height = fb_helper->fb->height;
+       bpp_sel = fb_helper->fb->bits_per_pixel;
+
+       count = drm_fb_helper_probe_connector_modes(fb_helper, max_width,
+                                                   max_height);
+       drm_setup_crtcs(fb_helper);
+
+       return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
+}
+EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
+
diff --git a/services4/3rdparty/linux_drm/drm_fops.c b/services4/3rdparty/linux_drm/drm_fops.c
new file mode 100644 (file)
index 0000000..a39794b
--- /dev/null
@@ -0,0 +1,651 @@
+/**
+ * \file drm_fops.c
+ * File operations for DRM
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Daryll Strauss <daryll@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include <linux/poll.h>
+#include <linux/slab.h>
+
+/* from BKL pushdown: note that nothing else serializes idr_find() */
+DEFINE_MUTEX(drm_global_mutex);
+EXPORT_SYMBOL(drm_global_mutex);
+
+static int drm_open_helper(struct inode *inode, struct file *filp,
+                          struct drm_device * dev);
+
+static int drm_setup(struct drm_device * dev)
+{
+       int i;
+       int ret;
+
+       if (dev->driver->firstopen) {
+               ret = dev->driver->firstopen(dev);
+               if (ret != 0)
+                       return ret;
+       }
+
+       atomic_set(&dev->ioctl_count, 0);
+       atomic_set(&dev->vma_count, 0);
+
+       if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
+           !drm_core_check_feature(dev, DRIVER_MODESET)) {
+               dev->buf_use = 0;
+               atomic_set(&dev->buf_alloc, 0);
+
+               i = drm_dma_setup(dev);
+               if (i < 0)
+                       return i;
+       }
+
+       for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
+               atomic_set(&dev->counts[i], 0);
+
+       dev->sigdata.lock = NULL;
+
+       dev->queue_count = 0;
+       dev->queue_reserved = 0;
+       dev->queue_slots = 0;
+       dev->queuelist = NULL;
+       dev->context_flag = 0;
+       dev->interrupt_flag = 0;
+       dev->dma_flag = 0;
+       dev->last_context = 0;
+       dev->last_switch = 0;
+       dev->last_checked = 0;
+       init_waitqueue_head(&dev->context_wait);
+       dev->if_version = 0;
+
+       dev->ctx_start = 0;
+       dev->lck_start = 0;
+
+       dev->buf_async = NULL;
+       init_waitqueue_head(&dev->buf_readers);
+       init_waitqueue_head(&dev->buf_writers);
+
+       DRM_DEBUG("\n");
+
+       /*
+        * The kernel's context could be created here, but is now created
+        * in drm_dma_enqueue.  This is more resource-efficient for
+        * hardware that does not do DMA, but may mean that
+        * drm_select_queue fails between the time the interrupt is
+        * initialized and the time the queues are initialized.
+        */
+
+       return 0;
+}
+
+/**
+ * Open file.
+ *
+ * \param inode device inode
+ * \param filp file pointer.
+ * \return zero on success or a negative number on failure.
+ *
+ * Searches the DRM device with the same minor number, calls open_helper(), and
+ * increments the device open count. If the open count was previously zero,
+ * i.e., it's the first time the device is opened, then calls setup().
+ */
+int drm_open(struct inode *inode, struct file *filp)
+{
+       struct drm_device *dev = NULL;
+       int minor_id = iminor(inode);
+       struct drm_minor *minor;
+       int retcode = 0;
+
+       minor = idr_find(&drm_minors_idr, minor_id);
+       if (!minor)
+               return -ENODEV;
+
+       if (!(dev = minor->dev))
+               return -ENODEV;
+
+       retcode = drm_open_helper(inode, filp, dev);
+       if (!retcode) {
+               atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
+               if (!dev->open_count++)
+                       retcode = drm_setup(dev);
+       }
+       if (!retcode) {
+               mutex_lock(&dev->struct_mutex);
+               if (minor->type == DRM_MINOR_LEGACY) {
+                       if (dev->dev_mapping == NULL)
+                               dev->dev_mapping = inode->i_mapping;
+                       else if (dev->dev_mapping != inode->i_mapping)
+                               retcode = -ENODEV;
+               }
+               mutex_unlock(&dev->struct_mutex);
+       }
+
+       return retcode;
+}
+EXPORT_SYMBOL(drm_open);
+
+/**
+ * File \c open operation.
+ *
+ * \param inode device inode.
+ * \param filp file pointer.
+ *
+ * Puts the dev->fops corresponding to the device minor number into
+ * \p filp, call the \c open method, and restore the file operations.
+ */
+int drm_stub_open(struct inode *inode, struct file *filp)
+{
+       struct drm_device *dev = NULL;
+       struct drm_minor *minor;
+       int minor_id = iminor(inode);
+       int err = -ENODEV;
+       const struct file_operations *old_fops;
+
+       DRM_DEBUG("\n");
+
+       mutex_lock(&drm_global_mutex);
+       minor = idr_find(&drm_minors_idr, minor_id);
+       if (!minor)
+               goto out;
+
+       if (!(dev = minor->dev))
+               goto out;
+
+       old_fops = filp->f_op;
+       filp->f_op = fops_get(&dev->driver->fops);
+       if (filp->f_op == NULL) {
+               filp->f_op = old_fops;
+               goto out;
+       }
+       if (filp->f_op->open && (err = filp->f_op->open(inode, filp))) {
+               fops_put(filp->f_op);
+               filp->f_op = fops_get(old_fops);
+       }
+       fops_put(old_fops);
+
+out:
+       mutex_unlock(&drm_global_mutex);
+       return err;
+}
+
+/**
+ * Check whether DRI will run on this CPU.
+ *
+ * \return non-zero if the DRI will run on this CPU, or zero otherwise.
+ */
+static int drm_cpu_valid(void)
+{
+#if defined(__i386__)
+       if (boot_cpu_data.x86 == 3)
+               return 0;       /* No cmpxchg on a 386 */
+#endif
+#if defined(__sparc__) && !defined(__sparc_v9__)
+       return 0;               /* No cmpxchg before v9 sparc. */
+#endif
+       return 1;
+}
+
+/**
+ * Called whenever a process opens /dev/drm.
+ *
+ * \param inode device inode.
+ * \param filp file pointer.
+ * \param dev device.
+ * \return zero on success or a negative number on failure.
+ *
+ * Creates and initializes a drm_file structure for the file private data in \p
+ * filp and add it into the double linked list in \p dev.
+ */
+static int drm_open_helper(struct inode *inode, struct file *filp,
+                          struct drm_device * dev)
+{
+       int minor_id = iminor(inode);
+       struct drm_file *priv;
+       int ret;
+
+       if (filp->f_flags & O_EXCL)
+               return -EBUSY;  /* No exclusive opens */
+       if (!drm_cpu_valid())
+               return -EINVAL;
+
+       DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id);
+
+       priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       filp->private_data = priv;
+       priv->filp = filp;
+       priv->uid = current_euid();
+       priv->pid = task_pid_nr(current);
+       priv->minor = idr_find(&drm_minors_idr, minor_id);
+       priv->ioctl_count = 0;
+       /* for compatibility root is always authenticated */
+       priv->authenticated = capable(CAP_SYS_ADMIN);
+       priv->lock_count = 0;
+
+       INIT_LIST_HEAD(&priv->lhead);
+       INIT_LIST_HEAD(&priv->fbs);
+       INIT_LIST_HEAD(&priv->event_list);
+       init_waitqueue_head(&priv->event_wait);
+       priv->event_space = 4096; /* set aside 4k for event buffer */
+
+       if (dev->driver->driver_features & DRIVER_GEM)
+               drm_gem_open(dev, priv);
+
+       if (dev->driver->open) {
+               ret = dev->driver->open(dev, priv);
+               if (ret < 0)
+                       goto out_free;
+       }
+
+
+       /* if there is no current master make this fd it */
+       mutex_lock(&dev->struct_mutex);
+       if (!priv->minor->master) {
+               /* create a new master */
+               priv->minor->master = drm_master_create(priv->minor);
+               if (!priv->minor->master) {
+                       mutex_unlock(&dev->struct_mutex);
+                       ret = -ENOMEM;
+                       goto out_free;
+               }
+
+               priv->is_master = 1;
+               /* take another reference for the copy in the local file priv */
+               priv->master = drm_master_get(priv->minor->master);
+
+               priv->authenticated = 1;
+
+               mutex_unlock(&dev->struct_mutex);
+               if (dev->driver->master_create) {
+                       ret = dev->driver->master_create(dev, priv->master);
+                       if (ret) {
+                               mutex_lock(&dev->struct_mutex);
+                               /* drop both references if this fails */
+                               drm_master_put(&priv->minor->master);
+                               drm_master_put(&priv->master);
+                               mutex_unlock(&dev->struct_mutex);
+                               goto out_free;
+                       }
+               }
+               mutex_lock(&dev->struct_mutex);
+               if (dev->driver->master_set) {
+                       ret = dev->driver->master_set(dev, priv, true);
+                       if (ret) {
+                               /* drop both references if this fails */
+                               drm_master_put(&priv->minor->master);
+                               drm_master_put(&priv->master);
+                               mutex_unlock(&dev->struct_mutex);
+                               goto out_free;
+                       }
+               }
+               mutex_unlock(&dev->struct_mutex);
+       } else {
+               /* get a reference to the master */
+               priv->master = drm_master_get(priv->minor->master);
+               mutex_unlock(&dev->struct_mutex);
+       }
+
+       mutex_lock(&dev->struct_mutex);
+       list_add(&priv->lhead, &dev->filelist);
+       mutex_unlock(&dev->struct_mutex);
+
+#ifdef __alpha__
+       /*
+        * Default the hose
+        */
+       if (!dev->hose) {
+               struct pci_dev *pci_dev;
+               pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
+               if (pci_dev) {
+                       dev->hose = pci_dev->sysdata;
+                       pci_dev_put(pci_dev);
+               }
+               if (!dev->hose) {
+                       struct pci_bus *b = pci_bus_b(pci_root_buses.next);
+                       if (b)
+                               dev->hose = b->sysdata;
+               }
+       }
+#endif
+
+       return 0;
+      out_free:
+       kfree(priv);
+       filp->private_data = NULL;
+       return ret;
+}
+
+/** No-op. */
+int drm_fasync(int fd, struct file *filp, int on)
+{
+       struct drm_file *priv = filp->private_data;
+       struct drm_device *dev = priv->minor->dev;
+
+       DRM_DEBUG("fd = %d, device = 0x%lx\n", fd,
+                 (long)old_encode_dev(priv->minor->device));
+       return fasync_helper(fd, filp, on, &dev->buf_async);
+}
+EXPORT_SYMBOL(drm_fasync);
+
+/*
+ * Reclaim locked buffers; note that this may be a bad idea if the current
+ * context doesn't have the hw lock...
+ */
+static void drm_reclaim_locked_buffers(struct drm_device *dev, struct file *f)
+{
+       struct drm_file *file_priv = f->private_data;
+
+       if (drm_i_have_hw_lock(dev, file_priv)) {
+               dev->driver->reclaim_buffers_locked(dev, file_priv);
+       } else {
+               unsigned long _end = jiffies + 3 * DRM_HZ;
+               int locked = 0;
+
+               drm_idlelock_take(&file_priv->master->lock);
+
+               /*
+                * Wait for a while.
+                */
+               do {
+                       spin_lock_bh(&file_priv->master->lock.spinlock);
+                       locked = file_priv->master->lock.idle_has_lock;
+                       spin_unlock_bh(&file_priv->master->lock.spinlock);
+                       if (locked)
+                               break;
+                       schedule();
+               } while (!time_after_eq(jiffies, _end));
+
+               if (!locked) {
+                       DRM_ERROR("reclaim_buffers_locked() deadlock. Please rework this\n"
+                                 "\tdriver to use reclaim_buffers_idlelocked() instead.\n"
+                                 "\tI will go on reclaiming the buffers anyway.\n");
+               }
+
+               dev->driver->reclaim_buffers_locked(dev, file_priv);
+               drm_idlelock_release(&file_priv->master->lock);
+       }
+}
+
+static void drm_master_release(struct drm_device *dev, struct file *filp)
+{
+       struct drm_file *file_priv = filp->private_data;
+
+       if (dev->driver->reclaim_buffers_locked &&
+           file_priv->master->lock.hw_lock)
+               drm_reclaim_locked_buffers(dev, filp);
+
+       if (dev->driver->reclaim_buffers_idlelocked &&
+           file_priv->master->lock.hw_lock) {
+               drm_idlelock_take(&file_priv->master->lock);
+               dev->driver->reclaim_buffers_idlelocked(dev, file_priv);
+               drm_idlelock_release(&file_priv->master->lock);
+       }
+
+
+       if (drm_i_have_hw_lock(dev, file_priv)) {
+               DRM_DEBUG("File %p released, freeing lock for context %d\n",
+                         filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
+               drm_lock_free(&file_priv->master->lock,
+                             _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
+       }
+
+       if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
+           !dev->driver->reclaim_buffers_locked) {
+               dev->driver->reclaim_buffers(dev, file_priv);
+       }
+}
+
+static void drm_events_release(struct drm_file *file_priv)
+{
+       struct drm_device *dev = file_priv->minor->dev;
+       struct drm_pending_event *e, *et;
+       struct drm_pending_vblank_event *v, *vt;
+       unsigned long flags;
+
+       spin_lock_irqsave(&dev->event_lock, flags);
+
+       /* Remove pending flips */
+       list_for_each_entry_safe(v, vt, &dev->vblank_event_list, base.link)
+               if (v->base.file_priv == file_priv) {
+                       list_del(&v->base.link);
+                       drm_vblank_put(dev, v->pipe);
+                       v->base.destroy(&v->base);
+               }
+
+       /* Remove unconsumed events */
+       list_for_each_entry_safe(e, et, &file_priv->event_list, link)
+               e->destroy(e);
+
+       spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+/**
+ * Release file.
+ *
+ * \param inode device inode
+ * \param file_priv DRM file private.
+ * \return zero on success or a negative number on failure.
+ *
+ * If the hardware lock is held then free it, and take it again for the kernel
+ * context since it's necessary to reclaim buffers. Unlink the file private
+ * data from its list and free it. Decreases the open count and if it reaches
+ * zero calls drm_lastclose().
+ */
+int drm_release(struct inode *inode, struct file *filp)
+{
+       struct drm_file *file_priv = filp->private_data;
+       struct drm_device *dev = file_priv->minor->dev;
+       int retcode = 0;
+
+       mutex_lock(&drm_global_mutex);
+
+       DRM_DEBUG("open_count = %d\n", dev->open_count);
+
+       if (dev->driver->preclose)
+               dev->driver->preclose(dev, file_priv);
+
+       /* ========================================================
+        * Begin inline drm_release
+        */
+
+       DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
+                 task_pid_nr(current),
+                 (long)old_encode_dev(file_priv->minor->device),
+                 dev->open_count);
+
+       /* if the master has gone away we can't do anything with the lock */
+       if (file_priv->minor->master)
+               drm_master_release(dev, filp);
+
+       drm_events_release(file_priv);
+
+       if (dev->driver->driver_features & DRIVER_GEM)
+               drm_gem_release(dev, file_priv);
+
+       if (dev->driver->driver_features & DRIVER_MODESET)
+               drm_fb_release(file_priv);
+
+       mutex_lock(&dev->ctxlist_mutex);
+       if (!list_empty(&dev->ctxlist)) {
+               struct drm_ctx_list *pos, *n;
+
+               list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
+                       if (pos->tag == file_priv &&
+                           pos->handle != DRM_KERNEL_CONTEXT) {
+                               if (dev->driver->context_dtor)
+                                       dev->driver->context_dtor(dev,
+                                                                 pos->handle);
+
+                               drm_ctxbitmap_free(dev, pos->handle);
+
+                               list_del(&pos->head);
+                               kfree(pos);
+                               --dev->ctx_count;
+                       }
+               }
+       }
+       mutex_unlock(&dev->ctxlist_mutex);
+
+       mutex_lock(&dev->struct_mutex);
+
+       if (file_priv->is_master) {
+               struct drm_master *master = file_priv->master;
+               struct drm_file *temp;
+               list_for_each_entry(temp, &dev->filelist, lhead) {
+                       if ((temp->master == file_priv->master) &&
+                           (temp != file_priv))
+                               temp->authenticated = 0;
+               }
+
+               /**
+                * Since the master is disappearing, so is the
+                * possibility to lock.
+                */
+
+               if (master->lock.hw_lock) {
+                       if (dev->sigdata.lock == master->lock.hw_lock)
+                               dev->sigdata.lock = NULL;
+                       master->lock.hw_lock = NULL;
+                       master->lock.file_priv = NULL;
+                       wake_up_interruptible_all(&master->lock.lock_queue);
+               }
+
+               if (file_priv->minor->master == file_priv->master) {
+                       /* drop the reference held by the minor */
+                       if (dev->driver->master_drop)
+                               dev->driver->master_drop(dev, file_priv, true);
+                       drm_master_put(&file_priv->minor->master);
+               }
+       }
+
+       /* drop the reference held by the file priv */
+       drm_master_put(&file_priv->master);
+       file_priv->is_master = 0;
+       list_del(&file_priv->lhead);
+       mutex_unlock(&dev->struct_mutex);
+
+       if (dev->driver->postclose)
+               dev->driver->postclose(dev, file_priv);
+       kfree(file_priv);
+
+       /* ========================================================
+        * End inline drm_release
+        */
+
+       atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
+       if (!--dev->open_count) {
+               if (atomic_read(&dev->ioctl_count)) {
+                       DRM_ERROR("Device busy: %d\n",
+                                 atomic_read(&dev->ioctl_count));
+                       retcode = -EBUSY;
+               } else
+                       retcode = drm_lastclose(dev);
+       }
+       mutex_unlock(&drm_global_mutex);
+
+       return retcode;
+}
+EXPORT_SYMBOL(drm_release);
+
+static bool
+drm_dequeue_event(struct drm_file *file_priv,
+                 size_t total, size_t max, struct drm_pending_event **out)
+{
+       struct drm_device *dev = file_priv->minor->dev;
+       struct drm_pending_event *e;
+       unsigned long flags;
+       bool ret = false;
+
+       spin_lock_irqsave(&dev->event_lock, flags);
+
+       *out = NULL;
+       if (list_empty(&file_priv->event_list))
+               goto out;
+       e = list_first_entry(&file_priv->event_list,
+                            struct drm_pending_event, link);
+       if (e->event->length + total > max)
+               goto out;
+
+       file_priv->event_space += e->event->length;
+       list_del(&e->link);
+       *out = e;
+       ret = true;
+
+out:
+       spin_unlock_irqrestore(&dev->event_lock, flags);
+       return ret;
+}
+
+ssize_t drm_read(struct file *filp, char __user *buffer,
+                size_t count, loff_t *offset)
+{
+       struct drm_file *file_priv = filp->private_data;
+       struct drm_pending_event *e;
+       size_t total;
+       ssize_t ret;
+
+       ret = wait_event_interruptible(file_priv->event_wait,
+                                      !list_empty(&file_priv->event_list));
+       if (ret < 0)
+               return ret;
+
+       total = 0;
+       while (drm_dequeue_event(file_priv, total, count, &e)) {
+               if (copy_to_user(buffer + total,
+                                e->event, e->event->length)) {
+                       total = -EFAULT;
+                       break;
+               }
+
+               total += e->event->length;
+               e->destroy(e);
+       }
+
+       return total;
+}
+EXPORT_SYMBOL(drm_read);
+
+unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)
+{
+       struct drm_file *file_priv = filp->private_data;
+       unsigned int mask = 0;
+
+       poll_wait(filp, &file_priv->event_wait, wait);
+
+       if (!list_empty(&file_priv->event_list))
+               mask |= POLLIN | POLLRDNORM;
+
+       return mask;
+}
+EXPORT_SYMBOL(drm_poll);
diff --git a/services4/3rdparty/linux_drm/drm_gem.c b/services4/3rdparty/linux_drm/drm_gem.c
new file mode 100644 (file)
index 0000000..ea1c4b0
--- /dev/null
@@ -0,0 +1,581 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/module.h>
+#include <linux/mman.h>
+#include <linux/pagemap.h>
+#include "drmP.h"
+
+/** @file drm_gem.c
+ *
+ * This file provides some of the base ioctls and library routines for
+ * the graphics memory manager implemented by each device driver.
+ *
+ * Because various devices have different requirements in terms of
+ * synchronization and migration strategies, implementing that is left up to
+ * the driver, and all that the general API provides should be generic --
+ * allocating objects, reading/writing data with the cpu, freeing objects.
+ * Even there, platform-dependent optimizations for reading/writing data with
+ * the CPU mean we'll likely hook those out to driver-specific calls.  However,
+ * the DRI2 implementation wants to have at least allocate/mmap be generic.
+ *
+ * The goal was to have swap-backed object allocation managed through
+ * struct file.  However, file descriptors as handles to a struct file have
+ * two major failings:
+ * - Process limits prevent more than 1024 or so being used at a time by
+ *   default.
+ * - Inability to allocate high fds will aggravate the X Server's select()
+ *   handling, and likely that of many GL client applications as well.
+ *
+ * This led to a plan of using our own integer IDs (called handles, following
+ * DRM terminology) to mimic fds, and implement the fd syscalls we need as
+ * ioctls.  The objects themselves will still include the struct file so
+ * that we can transition to fds if the required kernel infrastructure shows
+ * up at a later date, and as our interface with shmfs for memory allocation.
+ */
+
+/*
+ * We make up offsets for buffer objects so we can recognize them at
+ * mmap time.
+ */
+
+/* pgoff in mmap is an unsigned long, so we need to make sure that
+ * the faked up offset will fit
+ */
+
+#if BITS_PER_LONG == 64
+#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
+#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
+#else
+#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
+#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
+#endif
+
+/**
+ * Initialize the GEM device fields: the global-name idr/lock plus the
+ * mmap-offset machinery (hash table and drm_mm range allocator) used
+ * to hand out fake file offsets for GEM objects.
+ *
+ * Returns 0 on success or -ENOMEM on any allocation failure.
+ */
+int
+drm_gem_init(struct drm_device *dev)
+{
+       struct drm_gem_mm *mm;
+
+       spin_lock_init(&dev->object_name_lock);
+       idr_init(&dev->object_name_idr);
+
+       mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
+       if (!mm) {
+               DRM_ERROR("out of memory\n");
+               return -ENOMEM;
+       }
+
+       dev->mm_private = mm;
+
+       /*
+        * NOTE(review): on the failure paths below, mm is freed but
+        * dev->mm_private is left pointing at the freed memory;
+        * presumably init failure is treated as fatal by the caller —
+        * confirm.
+        */
+       if (drm_ht_create(&mm->offset_hash, 19)) {
+               kfree(mm);
+               return -ENOMEM;
+       }
+
+       if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
+                       DRM_FILE_PAGE_OFFSET_SIZE)) {
+               drm_ht_remove(&mm->offset_hash);
+               kfree(mm);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+/**
+ * Counterpart of drm_gem_init(): tears down the mmap-offset manager
+ * and hash table and clears dev->mm_private.
+ */
+void
+drm_gem_destroy(struct drm_device *dev)
+{
+       struct drm_gem_mm *mm = dev->mm_private;
+
+       drm_mm_takedown(&mm->offset_manager);
+       drm_ht_remove(&mm->offset_hash);
+       kfree(mm);
+       dev->mm_private = NULL;
+}
+
+/**
+ * Initialize an already allocated GEM object of the specified size with
+ * shmfs backing store. @size must be page-aligned.
+ *
+ * Returns 0 on success or -ENOMEM.
+ */
+int drm_gem_object_init(struct drm_device *dev,
+                       struct drm_gem_object *obj, size_t size)
+{
+       BUG_ON((size & (PAGE_SIZE - 1)) != 0);
+
+       obj->dev = dev;
+       /*
+        * NOTE(review): the concrete PTR_ERR from shmem_file_setup() is
+        * discarded and collapsed to -ENOMEM.
+        */
+       obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
+       if (IS_ERR(obj->filp))
+               return -ENOMEM;
+
+       kref_init(&obj->refcount);
+       atomic_set(&obj->handle_count, 0);
+       obj->size = size;
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_gem_object_init);
+
+/**
+ * Allocate a GEM object of the specified size with shmfs backing store.
+ *
+ * Runs the driver's optional gem_init_object() hook after the generic
+ * init. Returns the new object, or NULL on any failure.
+ */
+struct drm_gem_object *
+drm_gem_object_alloc(struct drm_device *dev, size_t size)
+{
+       struct drm_gem_object *obj;
+
+       obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+       if (!obj)
+               goto free;
+
+       if (drm_gem_object_init(dev, obj, size) != 0)
+               goto free;
+
+       /* The driver hook is optional; failure unwinds the shmem file. */
+       if (dev->driver->gem_init_object != NULL &&
+           dev->driver->gem_init_object(obj) != 0) {
+               goto fput;
+       }
+       return obj;
+fput:
+       /* Object_init mangles the global counters - readjust them. */
+       fput(obj->filp);
+free:
+       kfree(obj);
+       return NULL;
+}
+EXPORT_SYMBOL(drm_gem_object_alloc);
+
+/**
+ * Removes the mapping from handle to filp for this object.
+ *
+ * Returns 0, or -EINVAL if @handle is not in this file's idr. On
+ * success the handle reference (and with it one regular reference)
+ * is dropped.
+ */
+static int
+drm_gem_handle_delete(struct drm_file *filp, u32 handle)
+{
+       struct drm_device *dev;
+       struct drm_gem_object *obj;
+
+       /* This is gross. The idr system doesn't let us try a delete and
+        * return an error code.  It just spews if you fail at deleting.
+        * So, we have to grab a lock around finding the object and then
+        * doing the delete on it and dropping the refcount, or the user
+        * could race us to double-decrement the refcount and cause a
+        * use-after-free later.  Given the frequency of our handle lookups,
+        * we may want to use ida for number allocation and a hash table
+        * for the pointers, anyway.
+        */
+       spin_lock(&filp->table_lock);
+
+       /* Check if we currently have a reference on the object */
+       obj = idr_find(&filp->object_idr, handle);
+       if (obj == NULL) {
+               spin_unlock(&filp->table_lock);
+               return -EINVAL;
+       }
+       dev = obj->dev;
+
+       /* Release reference and decrement refcount. */
+       idr_remove(&filp->object_idr, handle);
+       spin_unlock(&filp->table_lock);
+
+       /* Must be done without table_lock held: may free the object. */
+       drm_gem_object_handle_unreference_unlocked(obj);
+
+       return 0;
+}
+
+/**
+ * Create a handle for this object. This adds a handle reference
+ * to the object, which includes a regular reference count. Callers
+ * will likely want to dereference the object afterwards.
+ *
+ * Handles start at 1 (0 is reserved as "no handle"). Returns 0 or a
+ * negative errno from the idr layer.
+ */
+int
+drm_gem_handle_create(struct drm_file *file_priv,
+                      struct drm_gem_object *obj,
+                      u32 *handlep)
+{
+       int     ret;
+
+       /*
+        * Get the user-visible handle using idr.
+        *
+        * Pre-3.9 idr API: preload outside the spinlock, allocate under
+        * it, and retry the whole sequence on -EAGAIN (another thread
+        * may have consumed the preloaded node).
+        */
+again:
+       /* ensure there is space available to allocate a handle */
+       if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
+               return -ENOMEM;
+
+       /* do the allocation under our spinlock */
+       spin_lock(&file_priv->table_lock);
+       ret = idr_get_new_above(&file_priv->object_idr, obj, 1, (int *)handlep);
+       spin_unlock(&file_priv->table_lock);
+       if (ret == -EAGAIN)
+               goto again;
+
+       if (ret != 0)
+               return ret;
+
+       drm_gem_object_handle_reference(obj);
+       return 0;
+}
+EXPORT_SYMBOL(drm_gem_handle_create);
+
+/** Returns a reference to the object named by the handle.
+ *
+ * On success the caller owns one regular reference and must drop it
+ * with drm_gem_object_unreference*(). Returns NULL for an unknown
+ * handle.
+ */
+struct drm_gem_object *
+drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
+                     u32 handle)
+{
+       struct drm_gem_object *obj;
+
+       spin_lock(&filp->table_lock);
+
+       /* Check if we currently have a reference on the object */
+       obj = idr_find(&filp->object_idr, handle);
+       if (obj == NULL) {
+               spin_unlock(&filp->table_lock);
+               return NULL;
+       }
+
+       /* Take the reference while table_lock still pins the entry. */
+       drm_gem_object_reference(obj);
+
+       spin_unlock(&filp->table_lock);
+
+       return obj;
+}
+EXPORT_SYMBOL(drm_gem_object_lookup);
+
+/**
+ * Releases the handle to an mm object.
+ *
+ * GEM_CLOSE ioctl: thin wrapper around drm_gem_handle_delete().
+ * Returns -ENODEV if the driver does not advertise DRIVER_GEM.
+ */
+int
+drm_gem_close_ioctl(struct drm_device *dev, void *data,
+                   struct drm_file *file_priv)
+{
+       struct drm_gem_close *args = data;
+       int ret;
+
+       if (!(dev->driver->driver_features & DRIVER_GEM))
+               return -ENODEV;
+
+       ret = drm_gem_handle_delete(file_priv, args->handle);
+
+       return ret;
+}
+
+/**
+ * Create a global name for an object, returning the name.
+ *
+ * Note that the name does not hold a reference; when the object
+ * is freed, the name goes away.
+ *
+ * The name table itself holds one regular reference, dropped by
+ * drm_gem_object_handle_free() when the last handle goes away.
+ */
+int
+drm_gem_flink_ioctl(struct drm_device *dev, void *data,
+                   struct drm_file *file_priv)
+{
+       struct drm_gem_flink *args = data;
+       struct drm_gem_object *obj;
+       int ret;
+
+       if (!(dev->driver->driver_features & DRIVER_GEM))
+               return -ENOENT == 0 ? 0 : -ENODEV;
+
+       obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+       if (obj == NULL)
+               return -ENOENT;
+
+again:
+       /* Pre-3.9 idr pattern: preload, allocate under lock, retry. */
+       if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) {
+               ret = -ENOMEM;
+               goto err;
+       }
+
+       spin_lock(&dev->object_name_lock);
+       if (!obj->name) {
+               ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
+                                       &obj->name);
+               /*
+                * NOTE(review): args->name is copied before ret is
+                * checked; on -EAGAIN the value is overwritten on the
+                * retry, so this looks benign — confirm.
+                */
+               args->name = (uint64_t) obj->name;
+               spin_unlock(&dev->object_name_lock);
+
+               if (ret == -EAGAIN)
+                       goto again;
+
+               if (ret != 0)
+                       goto err;
+
+               /* Allocate a reference for the name table.  */
+               drm_gem_object_reference(obj);
+       } else {
+               /* Already flinked: just report the existing name. */
+               args->name = (uint64_t) obj->name;
+               spin_unlock(&dev->object_name_lock);
+               ret = 0;
+       }
+
+err:
+       drm_gem_object_unreference_unlocked(obj);
+       return ret;
+}
+
+/**
+ * Open an object using the global name, returning a handle and the size.
+ *
+ * This handle (of course) holds a reference to the object, so the object
+ * will not go away until the handle is deleted.
+ */
+int
+drm_gem_open_ioctl(struct drm_device *dev, void *data,
+                  struct drm_file *file_priv)
+{
+       struct drm_gem_open *args = data;
+       struct drm_gem_object *obj;
+       int ret;
+       u32 handle;
+
+       if (!(dev->driver->driver_features & DRIVER_GEM))
+               return -ENODEV;
+
+       /* Take a temporary reference under the name lock so the object
+        * cannot vanish between lookup and handle creation. */
+       spin_lock(&dev->object_name_lock);
+       obj = idr_find(&dev->object_name_idr, (int) args->name);
+       if (obj)
+               drm_gem_object_reference(obj);
+       spin_unlock(&dev->object_name_lock);
+       if (!obj)
+               return -ENOENT;
+
+       /* The new handle takes its own reference; drop the temporary one. */
+       ret = drm_gem_handle_create(file_priv, obj, &handle);
+       drm_gem_object_unreference_unlocked(obj);
+       if (ret)
+               return ret;
+
+       args->handle = handle;
+       args->size = obj->size;
+
+       return 0;
+}
+
+/**
+ * Called at device open time, sets up the structure for handling refcounting
+ * of mm objects: the per-file handle idr and its spinlock.
+ */
+void
+drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
+{
+       idr_init(&file_private->object_idr);
+       spin_lock_init(&file_private->table_lock);
+}
+
+/**
+ * Called at device close to release the file's
+ * handle references on objects.
+ *
+ * idr_for_each() callback: drops the handle reference for one entry.
+ * Always returns 0 so iteration continues.
+ */
+static int
+drm_gem_object_release_handle(int id, void *ptr, void *data)
+{
+       struct drm_gem_object *obj = ptr;
+
+       drm_gem_object_handle_unreference_unlocked(obj);
+
+       return 0;
+}
+
+/**
+ * Called at close time when the filp is going away.
+ *
+ * Releases any remaining references on objects by this filp, then
+ * empties and destroys the per-file handle idr.
+ */
+void
+drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
+{
+       idr_for_each(&file_private->object_idr,
+                    &drm_gem_object_release_handle, NULL);
+
+       idr_remove_all(&file_private->object_idr);
+       idr_destroy(&file_private->object_idr);
+}
+
+/* Drop the object's shmem backing file. For drivers to call from their
+ * gem_free_object() hook. */
+void
+drm_gem_object_release(struct drm_gem_object *obj)
+{
+       fput(obj->filp);
+}
+EXPORT_SYMBOL(drm_gem_object_release);
+
+/**
+ * Called after the last reference to the object has been lost.
+ * Must be called holding struct_mutex
+ *
+ * Frees the object
+ *
+ * NOTE(review): the cast below treats the kref as the object itself,
+ * which is only valid while refcount is the first member of
+ * struct drm_gem_object — container_of() would be safer; confirm the
+ * struct layout in drmP.h.
+ */
+void
+drm_gem_object_free(struct kref *kref)
+{
+       struct drm_gem_object *obj = (struct drm_gem_object *) kref;
+       struct drm_device *dev = obj->dev;
+
+       BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+
+       if (dev->driver->gem_free_object != NULL)
+               dev->driver->gem_free_object(obj);
+}
+EXPORT_SYMBOL(drm_gem_object_free);
+
+/* kref release callback that must never fire: used by
+ * drm_gem_object_handle_free() to assert the name-table reference is
+ * never the last one. */
+static void drm_gem_object_ref_bug(struct kref *list_kref)
+{
+       BUG();
+}
+
+/**
+ * Called after the last handle to the object has been closed
+ *
+ * Removes any name for the object. Note that this must be
+ * called before drm_gem_object_free or we'll be touching
+ * freed memory
+ */
+void drm_gem_object_handle_free(struct drm_gem_object *obj)
+{
+       struct drm_device *dev = obj->dev;
+
+       /* Remove any name for this object */
+       spin_lock(&dev->object_name_lock);
+       if (obj->name) {
+               idr_remove(&dev->object_name_idr, obj->name);
+               obj->name = 0;
+               spin_unlock(&dev->object_name_lock);
+               /*
+                * The object name held a reference to this object, drop
+                * that now.
+                *
+                * This cannot be the last reference, since the handle
+                * holds one too.
+                */
+               kref_put(&obj->refcount, drm_gem_object_ref_bug);
+       } else
+               spin_unlock(&dev->object_name_lock);
+
+}
+EXPORT_SYMBOL(drm_gem_object_handle_free);
+
+/* vm_operations open hook: a new VMA (fork, mremap, partial unmap)
+ * takes its own reference on the backing GEM object. */
+void drm_gem_vm_open(struct vm_area_struct *vma)
+{
+       struct drm_gem_object *obj = vma->vm_private_data;
+
+       drm_gem_object_reference(obj);
+
+       mutex_lock(&obj->dev->struct_mutex);
+       drm_vm_open_locked(vma);
+       mutex_unlock(&obj->dev->struct_mutex);
+}
+EXPORT_SYMBOL(drm_gem_vm_open);
+
+/* vm_operations close hook: drops the mapping's reference taken in
+ * drm_gem_vm_open()/drm_gem_mmap(). */
+void drm_gem_vm_close(struct vm_area_struct *vma)
+{
+       struct drm_gem_object *obj = vma->vm_private_data;
+
+       mutex_lock(&obj->dev->struct_mutex);
+       drm_vm_close_locked(vma);
+       drm_gem_object_unreference(obj);
+       mutex_unlock(&obj->dev->struct_mutex);
+}
+EXPORT_SYMBOL(drm_gem_vm_close);
+
+
+/**
+ * drm_gem_mmap - memory map routine for GEM objects
+ * @filp: DRM file pointer
+ * @vma: VMA for the area to be mapped
+ *
+ * If a driver supports GEM object mapping, mmap calls on the DRM file
+ * descriptor will end up here.
+ *
+ * If we find the object based on the offset passed in (vma->vm_pgoff will
+ * contain the fake offset we created when the GTT map ioctl was called on
+ * the object), we set up the driver fault handler so that any accesses
+ * to the object can be trapped, to perform migration, GTT binding, surface
+ * register allocation, or performance monitoring.
+ */
+int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+       struct drm_file *priv = filp->private_data;
+       struct drm_device *dev = priv->minor->dev;
+       struct drm_gem_mm *mm = dev->mm_private;
+       struct drm_local_map *map = NULL;
+       struct drm_gem_object *obj;
+       struct drm_hash_item *hash;
+       int ret = 0;
+
+       mutex_lock(&dev->struct_mutex);
+
+       /* Not a GEM fake offset: fall back to the legacy map mmap path. */
+       if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
+               mutex_unlock(&dev->struct_mutex);
+               return drm_mmap(filp, vma);
+       }
+
+       map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
+       if (!map ||
+           ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
+               ret =  -EPERM;
+               goto out_unlock;
+       }
+
+       /* Check for valid size. */
+       if (map->size < vma->vm_end - vma->vm_start) {
+               ret = -EINVAL;
+               goto out_unlock;
+       }
+
+       obj = map->handle;
+       if (!obj->dev->driver->gem_vm_ops) {
+               ret = -EINVAL;
+               goto out_unlock;
+       }
+
+       /* Faults populate the mapping; no pages are inserted up front. */
+       vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
+       vma->vm_ops = obj->dev->driver->gem_vm_ops;
+       vma->vm_private_data = map->handle;
+       vma->vm_page_prot =  pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+
+       /* Take a ref for this mapping of the object, so that the fault
+        * handler can dereference the mmap offset's pointer to the object.
+        * This reference is cleaned up by the corresponding vm_close
+        * (which should happen whether the vma was created by this call, or
+        * by a vm_open due to mremap or partial unmap or whatever).
+        */
+       drm_gem_object_reference(obj);
+
+       vma->vm_file = filp;    /* Needed for drm_vm_open() */
+       drm_vm_open_locked(vma);
+
+out_unlock:
+       mutex_unlock(&dev->struct_mutex);
+
+       return ret;
+}
+EXPORT_SYMBOL(drm_gem_mmap);
diff --git a/services4/3rdparty/linux_drm/drm_global.c b/services4/3rdparty/linux_drm/drm_global.c
new file mode 100644 (file)
index 0000000..c87dc96
--- /dev/null
@@ -0,0 +1,112 @@
+/**************************************************************************
+ *
+ * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include "drm_global.h"
+
+/* One refcounted singleton slot per DRM_GLOBAL_* type. */
+struct drm_global_item {
+       struct mutex mutex;     /* serializes ref/unref on this slot */
+       void *object;           /* shared object, NULL while unused */
+       int refcount;           /* number of drm_global_reference holders */
+};
+
+/* Process-wide table of shared objects, indexed by global_type. */
+static struct drm_global_item glob[DRM_GLOBAL_NUM];
+
+/* Reset every global-item slot to the empty state. Called once at
+ * module init, before any drm_global_item_ref(). */
+void drm_global_init(void)
+{
+       int i;
+
+       for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
+               struct drm_global_item *item = &glob[i];
+               mutex_init(&item->mutex);
+               item->object = NULL;
+               item->refcount = 0;
+       }
+}
+
+/* Sanity check at module exit: BUGs if any slot still holds an object
+ * or an outstanding reference. */
+void drm_global_release(void)
+{
+       int i;
+       for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
+               struct drm_global_item *item = &glob[i];
+               BUG_ON(item->object != NULL);
+               BUG_ON(item->refcount != 0);
+       }
+}
+
+/**
+ * Take a reference on the shared object for ref->global_type, creating
+ * it (kzalloc of ref->size followed by ref->init()) on first use.
+ * On success, ref->object points at the shared instance.
+ *
+ * NOTE(review): on the error path the kzalloc'd item->object is never
+ * kfree()d (leak), and item->object is cleared only after
+ * mutex_unlock(), outside the lock — both fixed in later upstream
+ * drm_global.c; confirm before reuse. The local @object is also
+ * assigned but never used.
+ */
+int drm_global_item_ref(struct drm_global_reference *ref)
+{
+       int ret;
+       struct drm_global_item *item = &glob[ref->global_type];
+       void *object;
+
+       mutex_lock(&item->mutex);
+       if (item->refcount == 0) {
+               /* First user creates the shared object. */
+               item->object = kzalloc(ref->size, GFP_KERNEL);
+               if (unlikely(item->object == NULL)) {
+                       ret = -ENOMEM;
+                       goto out_err;
+               }
+
+               ref->object = item->object;
+               ret = ref->init(ref);
+               if (unlikely(ret != 0))
+                       goto out_err;
+
+       }
+       ++item->refcount;
+       ref->object = item->object;
+       object = item->object;
+       mutex_unlock(&item->mutex);
+       return 0;
+out_err:
+       mutex_unlock(&item->mutex);
+       item->object = NULL;
+       return ret;
+}
+EXPORT_SYMBOL(drm_global_item_ref);
+
+/* Drop a reference taken by drm_global_item_ref(); the last holder
+ * triggers ref->release() and clears the slot. */
+void drm_global_item_unref(struct drm_global_reference *ref)
+{
+       struct drm_global_item *item = &glob[ref->global_type];
+
+       mutex_lock(&item->mutex);
+       BUG_ON(item->refcount == 0);
+       BUG_ON(ref->object != item->object);
+       if (--item->refcount == 0) {
+               ref->release(ref);
+               item->object = NULL;
+       }
+       mutex_unlock(&item->mutex);
+}
+EXPORT_SYMBOL(drm_global_item_unref);
+
diff --git a/services4/3rdparty/linux_drm/drm_hashtab.c b/services4/3rdparty/linux_drm/drm_hashtab.c
new file mode 100644 (file)
index 0000000..a93d7b4
--- /dev/null
@@ -0,0 +1,207 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ **************************************************************************/
+/*
+ * Simple open hash tab implementation.
+ *
+ * Authors:
+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include "drmP.h"
+#include "drm_hashtab.h"
+#include <linux/hash.h>
+#include <linux/slab.h>
+
+/**
+ * Create an open hash table with 2^order buckets. Small tables use
+ * kcalloc; larger ones (or kcalloc failure) fall back to vmalloc,
+ * recorded in ht->use_vmalloc so drm_ht_remove() frees correctly.
+ * Returns 0 or -ENOMEM.
+ */
+int drm_ht_create(struct drm_open_hash *ht, unsigned int order)
+{
+       unsigned int i;
+
+       ht->size = 1 << order;
+       ht->order = order;
+       ht->fill = 0;
+       ht->table = NULL;
+       ht->use_vmalloc = ((ht->size * sizeof(*ht->table)) > PAGE_SIZE);
+       if (!ht->use_vmalloc) {
+               ht->table = kcalloc(ht->size, sizeof(*ht->table), GFP_KERNEL);
+       }
+       if (!ht->table) {
+               ht->use_vmalloc = 1;
+               ht->table = vmalloc(ht->size*sizeof(*ht->table));
+       }
+       if (!ht->table) {
+               DRM_ERROR("Out of memory for hash table\n");
+               return -ENOMEM;
+       }
+       for (i=0; i< ht->size; ++i) {
+               INIT_HLIST_HEAD(&ht->table[i]);
+       }
+       return 0;
+}
+EXPORT_SYMBOL(drm_ht_create);
+
+/* Debug helper: dump every entry in the bucket that @key hashes to. */
+void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
+{
+       struct drm_hash_item *entry;
+       struct hlist_head *h_list;
+       struct hlist_node *list;
+       unsigned int hashed_key;
+       int count = 0;
+
+       hashed_key = hash_long(key, ht->order);
+       DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key);
+       h_list = &ht->table[hashed_key];
+       hlist_for_each(list, h_list) {
+               entry = hlist_entry(list, struct drm_hash_item, head);
+               DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key);
+       }
+}
+
+/* Find the hlist node for @key, or NULL. Buckets are kept sorted in
+ * ascending key order by drm_ht_insert_item(), so the scan can stop
+ * early at the first larger key. */
+static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
+                                         unsigned long key)
+{
+       struct drm_hash_item *entry;
+       struct hlist_head *h_list;
+       struct hlist_node *list;
+       unsigned int hashed_key;
+
+       hashed_key = hash_long(key, ht->order);
+       h_list = &ht->table[hashed_key];
+       hlist_for_each(list, h_list) {
+               entry = hlist_entry(list, struct drm_hash_item, head);
+               if (entry->key == key)
+                       return list;
+               if (entry->key > key)
+                       break;
+       }
+       return NULL;
+}
+
+
+/**
+ * Insert @item, keeping its bucket sorted by ascending key.
+ * Returns -EINVAL if the key is already present.
+ *
+ * NOTE(review): ht->fill is decremented by the remove paths but never
+ * incremented here — the counter appears vestigial; confirm before
+ * relying on it.
+ */
+int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
+{
+       struct drm_hash_item *entry;
+       struct hlist_head *h_list;
+       struct hlist_node *list, *parent;
+       unsigned int hashed_key;
+       unsigned long key = item->key;
+
+       hashed_key = hash_long(key, ht->order);
+       h_list = &ht->table[hashed_key];
+       parent = NULL;
+       /* Walk to the insertion point; remember the predecessor. */
+       hlist_for_each(list, h_list) {
+               entry = hlist_entry(list, struct drm_hash_item, head);
+               if (entry->key == key)
+                       return -EINVAL;
+               if (entry->key > key)
+                       break;
+               parent = list;
+       }
+       if (parent) {
+               hlist_add_after(parent, &item->head);
+       } else {
+               hlist_add_head(&item->head, h_list);
+       }
+       return 0;
+}
+EXPORT_SYMBOL(drm_ht_insert_item);
+
+/*
+ * Just insert an item and return any "bits" bit key that hasn't been
+ * used before.
+ *
+ * Linear-probes the (shifted, offset) key space starting from a
+ * seed-derived position, wrapping within the bits-wide mask, and gives
+ * up with -EINVAL only after trying every slot once.
+ */
+int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
+                             unsigned long seed, int bits, int shift,
+                             unsigned long add)
+{
+       int ret;
+       unsigned long mask = (1 << bits) - 1;
+       unsigned long first, unshifted_key;
+
+       unshifted_key = hash_long(seed, bits);
+       first = unshifted_key;
+       do {
+               item->key = (unshifted_key << shift) + add;
+               ret = drm_ht_insert_item(ht, item);
+               if (ret)
+                       unshifted_key = (unshifted_key + 1) & mask;
+       } while(ret && (unshifted_key != first));
+
+       if (ret) {
+               DRM_ERROR("Available key bit space exhausted\n");
+               return -EINVAL;
+       }
+       return 0;
+}
+EXPORT_SYMBOL(drm_ht_just_insert_please);
+
+/* Look up @key; on success stores the containing drm_hash_item in
+ * *@item and returns 0, otherwise returns -EINVAL. */
+int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
+                    struct drm_hash_item **item)
+{
+       struct hlist_node *list;
+
+       list = drm_ht_find_key(ht, key);
+       if (!list)
+               return -EINVAL;
+
+       *item = hlist_entry(list, struct drm_hash_item, head);
+       return 0;
+}
+EXPORT_SYMBOL(drm_ht_find_item);
+
+/* Remove the entry for @key, if present. Returns 0 on removal,
+ * -EINVAL if the key is not in the table. (Not exported.) */
+int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
+{
+       struct hlist_node *list;
+
+       list = drm_ht_find_key(ht, key);
+       if (list) {
+               hlist_del_init(list);
+               ht->fill--;
+               return 0;
+       }
+       return -EINVAL;
+}
+
+/* Unlink a known-present item directly; caller guarantees it is in
+ * the table. Always returns 0. */
+int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
+{
+       hlist_del_init(&item->head);
+       ht->fill--;
+       return 0;
+}
+EXPORT_SYMBOL(drm_ht_remove_item);
+
+/* Free the bucket array, using vfree or kfree to match how
+ * drm_ht_create() allocated it. Safe to call twice (table is NULLed). */
+void drm_ht_remove(struct drm_open_hash *ht)
+{
+       if (ht->table) {
+               if (ht->use_vmalloc)
+                       vfree(ht->table);
+               else
+                       kfree(ht->table);
+               ht->table = NULL;
+       }
+}
+EXPORT_SYMBOL(drm_ht_remove);
diff --git a/services4/3rdparty/linux_drm/drm_info.c b/services4/3rdparty/linux_drm/drm_info.c
new file mode 100644 (file)
index 0000000..3cdbaf3
--- /dev/null
@@ -0,0 +1,325 @@
+/**
+ * \file drm_info.c
+ * DRM info file implementations
+ *
+ * \author Ben Gamari <bgamari@gmail.com>
+ */
+
+/*
+ * Created: Sun Dec 21 13:09:50 2008 by bgamari@gmail.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright 2008 Ben Gamari <bgamari@gmail.com>
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/seq_file.h>
+#include "drmP.h"
+
+/**
+ * Called when "/proc/dri/.../name" is read.
+ *
+ * Prints the device name together with the bus id if available.
+ */
+int drm_name_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_minor *minor = node->minor;
+       struct drm_device *dev = minor->dev;
+       struct drm_master *master = minor->master;
+
+       if (!master)
+               return 0;
+
+       if (drm_core_check_feature(dev, DRIVER_USE_PLATFORM_DEVICE)) {
+               if (master->unique) {
+                       seq_printf(m, "%s %s %s\n",
+                                       dev->driver->platform_device->name,
+                                       dev_name(dev->dev), master->unique);
+               } else {
+                       seq_printf(m, "%s\n",
+                               dev->driver->platform_device->name);
+               }
+       } else {
+               if (master->unique) {
+                       seq_printf(m, "%s %s %s\n",
+                               dev->driver->pci_driver.name,
+                               dev_name(dev->dev), master->unique);
+               } else {
+                       seq_printf(m, "%s %s\n", dev->driver->pci_driver.name,
+                               dev_name(dev->dev));
+               }
+       }
+
+       return 0;
+}
+
+/**
+ * Called when "/proc/dri/.../vm" is read.
+ *
+ * Prints information about all mappings in drm_device::maplist.
+ */
+int drm_vm_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct drm_local_map *map;
+       struct drm_map_list *r_list;
+
+       /* Hardcoded from _DRM_FRAME_BUFFER,
+          _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
+          _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
+       const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
+       const char *type;
+       int i;
+
+       mutex_lock(&dev->struct_mutex);
+       seq_printf(m, "slot      offset       size type flags    address mtrr\n\n");
+       i = 0;
+       list_for_each_entry(r_list, &dev->maplist, head) {
+               map = r_list->map;
+               if (!map)
+                       continue;
+               if (map->type < 0 || map->type > 5)
+                       type = "??";
+               else
+                       type = types[map->type];
+
+               seq_printf(m, "%4d 0x%016llx 0x%08lx %4.4s  0x%02x 0x%08lx ",
+                          i,
+                          (unsigned long long)map->offset,
+                          map->size, type, map->flags,
+                          (unsigned long) r_list->user_token);
+               if (map->mtrr < 0)
+                       seq_printf(m, "none\n");
+               else
+                       seq_printf(m, "%4d\n", map->mtrr);
+               i++;
+       }
+       mutex_unlock(&dev->struct_mutex);
+       return 0;
+}
+
+/**
+ * Called when "/proc/dri/.../queues" is read.
+ */
+int drm_queues_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       int i;
+       struct drm_queue *q;
+
+       mutex_lock(&dev->struct_mutex);
+       seq_printf(m, "  ctx/flags   use   fin"
+                  "   blk/rw/rwf  wait    flushed         queued"
+                  "      locks\n\n");
+       for (i = 0; i < dev->queue_count; i++) {
+               q = dev->queuelist[i];
+               atomic_inc(&q->use_count);
+               seq_printf(m,   "%5d/0x%03x %5d %5d"
+                          " %5d/%c%c/%c%c%c %5Zd\n",
+                          i,
+                          q->flags,
+                          atomic_read(&q->use_count),
+                          atomic_read(&q->finalization),
+                          atomic_read(&q->block_count),
+                          atomic_read(&q->block_read) ? 'r' : '-',
+                          atomic_read(&q->block_write) ? 'w' : '-',
+                          waitqueue_active(&q->read_queue) ? 'r' : '-',
+                          waitqueue_active(&q->write_queue) ? 'w' : '-',
+                          waitqueue_active(&q->flush_queue) ? 'f' : '-',
+                          DRM_BUFCOUNT(&q->waitlist));
+               atomic_dec(&q->use_count);
+       }
+       mutex_unlock(&dev->struct_mutex);
+       return 0;
+}
+
+/**
+ * Called when "/proc/dri/.../bufs" is read.
+ */
+int drm_bufs_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct drm_device_dma *dma;
+       int i, seg_pages;
+
+       mutex_lock(&dev->struct_mutex);
+       dma = dev->dma;
+       if (!dma) {
+               mutex_unlock(&dev->struct_mutex);
+               return 0;
+       }
+
+       seq_printf(m, " o     size count  free   segs pages    kB\n\n");
+       for (i = 0; i <= DRM_MAX_ORDER; i++) {
+               if (dma->bufs[i].buf_count) {
+                       seg_pages = dma->bufs[i].seg_count * (1 << dma->bufs[i].page_order);
+                       seq_printf(m, "%2d %8d %5d %5d %5d %5d %5ld\n",
+                                  i,
+                                  dma->bufs[i].buf_size,
+                                  dma->bufs[i].buf_count,
+                                  atomic_read(&dma->bufs[i].freelist.count),
+                                  dma->bufs[i].seg_count,
+                                  seg_pages,
+                                  seg_pages * PAGE_SIZE / 1024);
+               }
+       }
+       seq_printf(m, "\n");
+       for (i = 0; i < dma->buf_count; i++) {
+               if (i && !(i % 32))
+                       seq_printf(m, "\n");
+               seq_printf(m, " %d", dma->buflist[i]->list);
+       }
+       seq_printf(m, "\n");
+       mutex_unlock(&dev->struct_mutex);
+       return 0;
+}
+
+/**
+ * Called when "/proc/dri/.../vblank" is read.
+ */
+int drm_vblank_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       int crtc;
+
+       mutex_lock(&dev->struct_mutex);
+       for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
+               seq_printf(m, "CRTC %d enable:     %d\n",
+                          crtc, atomic_read(&dev->vblank_refcount[crtc]));
+               seq_printf(m, "CRTC %d counter:    %d\n",
+                          crtc, drm_vblank_count(dev, crtc));
+               seq_printf(m, "CRTC %d last wait:  %d\n",
+                          crtc, dev->last_vblank_wait[crtc]);
+               seq_printf(m, "CRTC %d in modeset: %d\n",
+                          crtc, dev->vblank_inmodeset[crtc]);
+       }
+       mutex_unlock(&dev->struct_mutex);
+       return 0;
+}
+
+/**
+ * Called when "/proc/dri/.../clients" is read.
+ *
+ */
+int drm_clients_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct drm_file *priv;
+
+       mutex_lock(&dev->struct_mutex);
+       seq_printf(m, "a dev    pid    uid      magic     ioctls\n\n");
+       list_for_each_entry(priv, &dev->filelist, lhead) {
+               seq_printf(m, "%c %3d %5d %5d %10u %10lu\n",
+                          priv->authenticated ? 'y' : 'n',
+                          priv->minor->index,
+                          priv->pid,
+                          priv->uid, priv->magic, priv->ioctl_count);
+       }
+       mutex_unlock(&dev->struct_mutex);
+       return 0;
+}
+
+
/*
 * idr_for_each() callback used by drm_gem_name_info(): print one entry
 * for the GEM object @ptr registered under global name @id.
 *
 * NOTE(review): the first seq_printf() emits a free-form "name/size"
 * line in addition to the aligned table row below; it does not match the
 * column header printed by drm_gem_name_info() and looks like leftover
 * debug output — confirm before removing.
 */
int drm_gem_one_name_info(int id, void *ptr, void *data)
{
	struct drm_gem_object *obj = ptr;
	struct seq_file *m = data;

	seq_printf(m, "name %d size %zd\n", obj->name, obj->size);

	seq_printf(m, "%6d %8zd %7d %8d\n",
		   obj->name, obj->size,
		   atomic_read(&obj->handle_count),
		   atomic_read(&obj->refcount.refcount));
	return 0;
}
+
+int drm_gem_name_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+
+       seq_printf(m, "  name     size handles refcount\n");
+       idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, m);
+       return 0;
+}
+
+#if DRM_DEBUG_CODE
+
/*
 * Debug-only (/proc/dri/.../vma): dump every VMA currently mapping the
 * device — pid, address range, access flags in ls-l style, and pgoff.
 * On i386 the raw page-protection bits are decoded as well, since their
 * layout is arch-specific.
 */
int drm_vma_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_vma_entry *pt;
	struct vm_area_struct *vma;
#if defined(__i386__)
	unsigned int pgprot;
#endif

	mutex_lock(&dev->struct_mutex);
	seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
		   atomic_read(&dev->vma_count),
		   high_memory, (u64)virt_to_phys(high_memory));

	list_for_each_entry(pt, &dev->vmalist, head) {
		vma = pt->vma;
		if (!vma)	/* entry may outlive its vma; skip stale ones */
			continue;
		seq_printf(m,
			   "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
			   pt->pid, vma->vm_start, vma->vm_end,
			   vma->vm_flags & VM_READ ? 'r' : '-',
			   vma->vm_flags & VM_WRITE ? 'w' : '-',
			   vma->vm_flags & VM_EXEC ? 'x' : '-',
			   vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
			   vma->vm_flags & VM_LOCKED ? 'l' : '-',
			   vma->vm_flags & VM_IO ? 'i' : '-',
			   vma->vm_pgoff);

#if defined(__i386__)
		/* Decode the x86 PTE protection bits into a flag string. */
		pgprot = pgprot_val(vma->vm_page_prot);
		seq_printf(m, " %c%c%c%c%c%c%c%c%c",
			   pgprot & _PAGE_PRESENT ? 'p' : '-',
			   pgprot & _PAGE_RW ? 'w' : 'r',
			   pgprot & _PAGE_USER ? 'u' : 's',
			   pgprot & _PAGE_PWT ? 't' : 'b',
			   pgprot & _PAGE_PCD ? 'u' : 'c',
			   pgprot & _PAGE_ACCESSED ? 'a' : '-',
			   pgprot & _PAGE_DIRTY ? 'd' : '-',
			   pgprot & _PAGE_PSE ? 'm' : 'k',
			   pgprot & _PAGE_GLOBAL ? 'g' : 'l');
#endif
		seq_printf(m, "\n");
	}
	mutex_unlock(&dev->struct_mutex);
	return 0;
}
+
+#endif
+
diff --git a/services4/3rdparty/linux_drm/drm_ioc32.c b/services4/3rdparty/linux_drm/drm_ioc32.c
new file mode 100644 (file)
index 0000000..d61d185
--- /dev/null
@@ -0,0 +1,1082 @@
+/**
+ * \file drm_ioc32.c
+ *
+ * 32-bit ioctl compatibility routines for the DRM.
+ *
+ * \author Paul Mackerras <paulus@samba.org>
+ *
+ * Copyright (C) Paul Mackerras 2005.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#include <linux/compat.h>
+
+#include "drmP.h"
+#include "drm_core.h"
+
/*
 * 32-bit ioctl numbers.  These differ from the native DRM_IOCTL_* codes
 * because the _IOC size bits encode the (smaller) 32-bit struct layouts
 * defined below; each maps onto the corresponding native ioctl via
 * drm_compat_ioctls[].
 */
#define DRM_IOCTL_VERSION32		DRM_IOWR(0x00, drm_version32_t)
#define DRM_IOCTL_GET_UNIQUE32		DRM_IOWR(0x01, drm_unique32_t)
#define DRM_IOCTL_GET_MAP32		DRM_IOWR(0x04, drm_map32_t)
#define DRM_IOCTL_GET_CLIENT32		DRM_IOWR(0x05, drm_client32_t)
#define DRM_IOCTL_GET_STATS32		DRM_IOR( 0x06, drm_stats32_t)

#define DRM_IOCTL_SET_UNIQUE32		DRM_IOW( 0x10, drm_unique32_t)
#define DRM_IOCTL_ADD_MAP32		DRM_IOWR(0x15, drm_map32_t)
#define DRM_IOCTL_ADD_BUFS32		DRM_IOWR(0x16, drm_buf_desc32_t)
#define DRM_IOCTL_MARK_BUFS32		DRM_IOW( 0x17, drm_buf_desc32_t)
#define DRM_IOCTL_INFO_BUFS32		DRM_IOWR(0x18, drm_buf_info32_t)
#define DRM_IOCTL_MAP_BUFS32		DRM_IOWR(0x19, drm_buf_map32_t)
#define DRM_IOCTL_FREE_BUFS32		DRM_IOW( 0x1a, drm_buf_free32_t)

#define DRM_IOCTL_RM_MAP32		DRM_IOW( 0x1b, drm_map32_t)

#define DRM_IOCTL_SET_SAREA_CTX32	DRM_IOW( 0x1c, drm_ctx_priv_map32_t)
#define DRM_IOCTL_GET_SAREA_CTX32	DRM_IOWR(0x1d, drm_ctx_priv_map32_t)

#define DRM_IOCTL_RES_CTX32		DRM_IOWR(0x26, drm_ctx_res32_t)
#define DRM_IOCTL_DMA32			DRM_IOWR(0x29, drm_dma32_t)

#define DRM_IOCTL_AGP_ENABLE32		DRM_IOW( 0x32, drm_agp_mode32_t)
#define DRM_IOCTL_AGP_INFO32		DRM_IOR( 0x33, drm_agp_info32_t)
#define DRM_IOCTL_AGP_ALLOC32		DRM_IOWR(0x34, drm_agp_buffer32_t)
#define DRM_IOCTL_AGP_FREE32		DRM_IOW( 0x35, drm_agp_buffer32_t)
#define DRM_IOCTL_AGP_BIND32		DRM_IOW( 0x36, drm_agp_binding32_t)
#define DRM_IOCTL_AGP_UNBIND32		DRM_IOW( 0x37, drm_agp_binding32_t)

#define DRM_IOCTL_SG_ALLOC32		DRM_IOW( 0x38, drm_scatter_gather32_t)
#define DRM_IOCTL_SG_FREE32		DRM_IOW( 0x39, drm_scatter_gather32_t)

#define DRM_IOCTL_UPDATE_DRAW32		DRM_IOW( 0x3f, drm_update_draw32_t)

#define DRM_IOCTL_WAIT_VBLANK32		DRM_IOWR(0x3a, drm_wait_vblank32_t)
+
/* 32-bit layout of struct drm_version: userspace pointers shrink to u32
 * handles, everything else matches the native struct. */
typedef struct drm_version_32 {
	int version_major;	  /**< Major version */
	int version_minor;	  /**< Minor version */
	int version_patchlevel;    /**< Patch level */
	u32 name_len;		  /**< Length of name buffer */
	u32 name;		  /**< Name of driver */
	u32 date_len;		  /**< Length of date buffer */
	u32 date;		  /**< User-space buffer to hold date */
	u32 desc_len;		  /**< Length of desc buffer */
	u32 desc;		  /**< User-space buffer to hold desc */
} drm_version32_t;
+
/*
 * 32-bit shim for DRM_IOCTL_VERSION: widen the u32 string handles into
 * real pointers inside a native struct drm_version built on the compat
 * user stack, forward the ioctl, then copy the returned version numbers
 * and string lengths back into the 32-bit layout.
 */
static int compat_drm_version(struct file *file, unsigned int cmd,
			      unsigned long arg)
{
	drm_version32_t v32;
	struct drm_version __user *version;
	int err;

	if (copy_from_user(&v32, (void __user *)arg, sizeof(v32)))
		return -EFAULT;

	/* Scratch area in the 32-bit task's userspace stack. */
	version = compat_alloc_user_space(sizeof(*version));
	if (!access_ok(VERIFY_WRITE, version, sizeof(*version)))
		return -EFAULT;
	/* __put_user is safe: access_ok() validated the whole range. */
	if (__put_user(v32.name_len, &version->name_len)
	    || __put_user((void __user *)(unsigned long)v32.name,
			  &version->name)
	    || __put_user(v32.date_len, &version->date_len)
	    || __put_user((void __user *)(unsigned long)v32.date,
			  &version->date)
	    || __put_user(v32.desc_len, &version->desc_len)
	    || __put_user((void __user *)(unsigned long)v32.desc,
			  &version->desc))
		return -EFAULT;

	err = drm_ioctl(file,
			DRM_IOCTL_VERSION, (unsigned long)version);
	if (err)
		return err;

	/* The strings were written through the pointers directly; only the
	 * scalar results need copying back into the 32-bit struct. */
	if (__get_user(v32.version_major, &version->version_major)
	    || __get_user(v32.version_minor, &version->version_minor)
	    || __get_user(v32.version_patchlevel, &version->version_patchlevel)
	    || __get_user(v32.name_len, &version->name_len)
	    || __get_user(v32.date_len, &version->date_len)
	    || __get_user(v32.desc_len, &version->desc_len))
		return -EFAULT;

	if (copy_to_user((void __user *)arg, &v32, sizeof(v32)))
		return -EFAULT;
	return 0;
}
+
/* 32-bit layout of struct drm_unique (bus-id string + length). */
typedef struct drm_unique32 {
	u32 unique_len;	/**< Length of unique */
	u32 unique;	/**< Unique name for driver instantiation */
} drm_unique32_t;
+
/*
 * 32-bit shim for DRM_IOCTL_GET_UNIQUE: the kernel writes the bus-id
 * string through the widened pointer, so only the updated length needs
 * copying back to the 32-bit struct.
 */
static int compat_drm_getunique(struct file *file, unsigned int cmd,
				unsigned long arg)
{
	drm_unique32_t uq32;
	struct drm_unique __user *u;
	int err;

	if (copy_from_user(&uq32, (void __user *)arg, sizeof(uq32)))
		return -EFAULT;

	u = compat_alloc_user_space(sizeof(*u));
	if (!access_ok(VERIFY_WRITE, u, sizeof(*u)))
		return -EFAULT;
	if (__put_user(uq32.unique_len, &u->unique_len)
	    || __put_user((void __user *)(unsigned long)uq32.unique,
			  &u->unique))
		return -EFAULT;

	err = drm_ioctl(file, DRM_IOCTL_GET_UNIQUE, (unsigned long)u);
	if (err)
		return err;

	if (__get_user(uq32.unique_len, &u->unique_len))
		return -EFAULT;
	if (copy_to_user((void __user *)arg, &uq32, sizeof(uq32)))
		return -EFAULT;
	return 0;
}
+
+static int compat_drm_setunique(struct file *file, unsigned int cmd,
+                               unsigned long arg)
+{
+       drm_unique32_t uq32;
+       struct drm_unique __user *u;
+
+       if (copy_from_user(&uq32, (void __user *)arg, sizeof(uq32)))
+               return -EFAULT;
+
+       u = compat_alloc_user_space(sizeof(*u));
+       if (!access_ok(VERIFY_WRITE, u, sizeof(*u)))
+               return -EFAULT;
+       if (__put_user(uq32.unique_len, &u->unique_len)
+           || __put_user((void __user *)(unsigned long)uq32.unique,
+                         &u->unique))
+               return -EFAULT;
+
+       return drm_ioctl(file, DRM_IOCTL_SET_UNIQUE, (unsigned long)u);
+}
+
/* 32-bit layout of struct drm_map; offset/size/handle shrink to u32. */
typedef struct drm_map32 {
	u32 offset;		/**< Requested physical address (0 for SAREA)*/
	u32 size;		/**< Requested physical size (bytes) */
	enum drm_map_type type;	/**< Type of memory to map */
	enum drm_map_flags flags;	/**< Flags */
	u32 handle;		/**< User-space: "Handle" to pass to mmap() */
	int mtrr;		/**< MTRR slot used */
} drm_map32_t;
+
/*
 * 32-bit shim for DRM_IOCTL_GET_MAP.  The GET_MAP ABI passes the map
 * *index* in the 'offset' field — presumably why the local is named
 * 'idx' (TODO confirm against native drm_getmap).  The returned handle
 * is narrowed back to a u32.
 */
static int compat_drm_getmap(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	drm_map32_t __user *argp = (void __user *)arg;
	drm_map32_t m32;
	struct drm_map __user *map;
	int idx, err;
	void *handle;

	if (get_user(idx, &argp->offset))
		return -EFAULT;

	map = compat_alloc_user_space(sizeof(*map));
	if (!access_ok(VERIFY_WRITE, map, sizeof(*map)))
		return -EFAULT;
	if (__put_user(idx, &map->offset))
		return -EFAULT;

	err = drm_ioctl(file, DRM_IOCTL_GET_MAP, (unsigned long)map);
	if (err)
		return err;

	if (__get_user(m32.offset, &map->offset)
	    || __get_user(m32.size, &map->size)
	    || __get_user(m32.type, &map->type)
	    || __get_user(m32.flags, &map->flags)
	    || __get_user(handle, &map->handle)
	    || __get_user(m32.mtrr, &map->mtrr))
		return -EFAULT;

	/* Narrow the kernel handle to the 32-bit field. */
	m32.handle = (unsigned long)handle;
	if (copy_to_user(argp, &m32, sizeof(m32)))
		return -EFAULT;
	return 0;

}
+
/*
 * 32-bit shim for DRM_IOCTL_ADD_MAP: forward the request through a
 * native struct drm_map, then narrow the returned handle to u32 —
 * warning (rate-limited) if the narrowing loses bits.
 */
static int compat_drm_addmap(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	drm_map32_t __user *argp = (void __user *)arg;
	drm_map32_t m32;
	struct drm_map __user *map;
	int err;
	void *handle;

	if (copy_from_user(&m32, argp, sizeof(m32)))
		return -EFAULT;

	map = compat_alloc_user_space(sizeof(*map));
	if (!access_ok(VERIFY_WRITE, map, sizeof(*map)))
		return -EFAULT;
	if (__put_user(m32.offset, &map->offset)
	    || __put_user(m32.size, &map->size)
	    || __put_user(m32.type, &map->type)
	    || __put_user(m32.flags, &map->flags))
		return -EFAULT;

	err = drm_ioctl(file, DRM_IOCTL_ADD_MAP, (unsigned long)map);
	if (err)
		return err;

	if (__get_user(m32.offset, &map->offset)
	    || __get_user(m32.mtrr, &map->mtrr)
	    || __get_user(handle, &map->handle))
		return -EFAULT;

	m32.handle = (unsigned long)handle;
	/* m32.handle is u32: comparing the widened copy against the
	 * original pointer detects truncation of a 64-bit handle. */
	if (m32.handle != (unsigned long)handle && printk_ratelimit())
		printk(KERN_ERR "compat_drm_addmap truncated handle"
		       " %p for type %d offset %x\n",
		       handle, m32.type, m32.offset);

	if (copy_to_user(argp, &m32, sizeof(m32)))
		return -EFAULT;

	return 0;
}
+
+static int compat_drm_rmmap(struct file *file, unsigned int cmd,
+                           unsigned long arg)
+{
+       drm_map32_t __user *argp = (void __user *)arg;
+       struct drm_map __user *map;
+       u32 handle;
+
+       if (get_user(handle, &argp->handle))
+               return -EFAULT;
+
+       map = compat_alloc_user_space(sizeof(*map));
+       if (!access_ok(VERIFY_WRITE, map, sizeof(*map)))
+               return -EFAULT;
+       if (__put_user((void *)(unsigned long)handle, &map->handle))
+               return -EFAULT;
+
+       return drm_ioctl(file, DRM_IOCTL_RM_MAP, (unsigned long)map);
+}
+
/* 32-bit layout of struct drm_client. */
typedef struct drm_client32 {
	int idx;	/**< Which client desired? */
	int auth;	/**< Is client authenticated? */
	u32 pid;	/**< Process ID */
	u32 uid;	/**< User ID */
	u32 magic;	/**< Magic */
	u32 iocs;	/**< Ioctl count */
} drm_client32_t;
+
/*
 * 32-bit shim for DRM_IOCTL_GET_CLIENT: pass the requested index through
 * a native struct drm_client and copy every result field back into the
 * 32-bit layout.
 */
static int compat_drm_getclient(struct file *file, unsigned int cmd,
				unsigned long arg)
{
	drm_client32_t c32;
	drm_client32_t __user *argp = (void __user *)arg;
	struct drm_client __user *client;
	int idx, err;

	if (get_user(idx, &argp->idx))
		return -EFAULT;

	client = compat_alloc_user_space(sizeof(*client));
	if (!access_ok(VERIFY_WRITE, client, sizeof(*client)))
		return -EFAULT;
	if (__put_user(idx, &client->idx))
		return -EFAULT;

	err = drm_ioctl(file, DRM_IOCTL_GET_CLIENT, (unsigned long)client);
	if (err)
		return err;

	if (__get_user(c32.auth, &client->auth)
	    || __get_user(c32.pid, &client->pid)
	    || __get_user(c32.uid, &client->uid)
	    || __get_user(c32.magic, &client->magic)
	    || __get_user(c32.iocs, &client->iocs))
		return -EFAULT;

	if (copy_to_user(argp, &c32, sizeof(c32)))
		return -EFAULT;
	return 0;
}
+
/* 32-bit layout of struct drm_stats; 15 matches the native data[] size. */
typedef struct drm_stats32 {
	u32 count;
	struct {
		u32 value;
		enum drm_stat_type type;
	} data[15];
} drm_stats32_t;
+
/*
 * 32-bit shim for DRM_IOCTL_GET_STATS (read-only ioctl, nothing to copy
 * in): run the native ioctl, then narrow all 15 counter slots back into
 * the 32-bit layout.
 */
static int compat_drm_getstats(struct file *file, unsigned int cmd,
			       unsigned long arg)
{
	drm_stats32_t s32;
	drm_stats32_t __user *argp = (void __user *)arg;
	struct drm_stats __user *stats;
	int i, err;

	stats = compat_alloc_user_space(sizeof(*stats));
	if (!access_ok(VERIFY_WRITE, stats, sizeof(*stats)))
		return -EFAULT;

	err = drm_ioctl(file, DRM_IOCTL_GET_STATS, (unsigned long)stats);
	if (err)
		return err;

	if (__get_user(s32.count, &stats->count))
		return -EFAULT;
	/* 15 == ARRAY_SIZE of both the native and 32-bit data[] fields. */
	for (i = 0; i < 15; ++i)
		if (__get_user(s32.data[i].value, &stats->data[i].value)
		    || __get_user(s32.data[i].type, &stats->data[i].type))
			return -EFAULT;

	if (copy_to_user(argp, &s32, sizeof(s32)))
		return -EFAULT;
	return 0;
}
+
/* 32-bit layout of struct drm_buf_desc; only agp_start narrows to u32,
 * the leading int fields are layout-identical to the native struct. */
typedef struct drm_buf_desc32 {
	int count;		 /**< Number of buffers of this size */
	int size;		 /**< Size in bytes */
	int low_mark;		 /**< Low water mark */
	int high_mark;		 /**< High water mark */
	int flags;
	u32 agp_start;		 /**< Start address in the AGP aperture */
} drm_buf_desc32_t;
+
/*
 * 32-bit shim for DRM_IOCTL_ADD_BUFS.  The int fields up to agp_start
 * are layout-identical in both structs, so they are block-copied with
 * __copy_in_user; only agp_start needs widening (in) and narrowing
 * (out).
 */
static int compat_drm_addbufs(struct file *file, unsigned int cmd,
			      unsigned long arg)
{
	drm_buf_desc32_t __user *argp = (void __user *)arg;
	struct drm_buf_desc __user *buf;
	int err;
	unsigned long agp_start;

	buf = compat_alloc_user_space(sizeof(*buf));
	if (!access_ok(VERIFY_WRITE, buf, sizeof(*buf))
	    || !access_ok(VERIFY_WRITE, argp, sizeof(*argp)))
		return -EFAULT;

	/* Copy the common int prefix, then widen agp_start. */
	if (__copy_in_user(buf, argp, offsetof(drm_buf_desc32_t, agp_start))
	    || __get_user(agp_start, &argp->agp_start)
	    || __put_user(agp_start, &buf->agp_start))
		return -EFAULT;

	err = drm_ioctl(file, DRM_IOCTL_ADD_BUFS, (unsigned long)buf);
	if (err)
		return err;

	/* Mirror the results back, narrowing agp_start again. */
	if (__copy_in_user(argp, buf, offsetof(drm_buf_desc32_t, agp_start))
	    || __get_user(agp_start, &buf->agp_start)
	    || __put_user(agp_start, &argp->agp_start))
		return -EFAULT;

	return 0;
}
+
+static int compat_drm_markbufs(struct file *file, unsigned int cmd,
+                              unsigned long arg)
+{
+       drm_buf_desc32_t b32;
+       drm_buf_desc32_t __user *argp = (void __user *)arg;
+       struct drm_buf_desc __user *buf;
+
+       if (copy_from_user(&b32, argp, sizeof(b32)))
+               return -EFAULT;
+
+       buf = compat_alloc_user_space(sizeof(*buf));
+       if (!access_ok(VERIFY_WRITE, buf, sizeof(*buf)))
+               return -EFAULT;
+
+       if (__put_user(b32.size, &buf->size)
+           || __put_user(b32.low_mark, &buf->low_mark)
+           || __put_user(b32.high_mark, &buf->high_mark))
+               return -EFAULT;
+
+       return drm_ioctl(file, DRM_IOCTL_MARK_BUFS, (unsigned long)buf);
+}
+
/* 32-bit layout of struct drm_buf_info; 'list' is a u32 handle to a
 * userspace array of drm_buf_desc32_t. */
typedef struct drm_buf_info32 {
	int count;		/**< Entries in list */
	u32 list;
} drm_buf_info32_t;
+
/*
 * 32-bit shim for DRM_IOCTL_INFO_BUFS.  A native request plus a
 * 'count'-sized native descriptor array is staged on the compat user
 * stack; on success the common prefix of each descriptor (everything
 * before 'flags') is copied back into the caller's 32-bit array.
 *
 * NOTE(review): 'count' comes from userspace and feeds
 * count * sizeof(struct drm_buf_desc); on 32-bit this multiply could
 * wrap before compat_alloc_user_space() — confirm that the allocator's
 * own bound check covers this.
 */
static int compat_drm_infobufs(struct file *file, unsigned int cmd,
			       unsigned long arg)
{
	drm_buf_info32_t req32;
	drm_buf_info32_t __user *argp = (void __user *)arg;
	drm_buf_desc32_t __user *to;
	struct drm_buf_info __user *request;
	struct drm_buf_desc __user *list;
	size_t nbytes;
	int i, err;
	int count, actual;

	if (copy_from_user(&req32, argp, sizeof(req32)))
		return -EFAULT;

	count = req32.count;
	to = (drm_buf_desc32_t __user *) (unsigned long)req32.list;
	if (count < 0)
		count = 0;	/* query mode: only report 'actual' */
	if (count > 0
	    && !access_ok(VERIFY_WRITE, to, count * sizeof(drm_buf_desc32_t)))
		return -EFAULT;

	/* Request header immediately followed by the descriptor array. */
	nbytes = sizeof(*request) + count * sizeof(struct drm_buf_desc);
	request = compat_alloc_user_space(nbytes);
	if (!access_ok(VERIFY_WRITE, request, nbytes))
		return -EFAULT;
	list = (struct drm_buf_desc *) (request + 1);

	if (__put_user(count, &request->count)
	    || __put_user(list, &request->list))
		return -EFAULT;

	err = drm_ioctl(file, DRM_IOCTL_INFO_BUFS, (unsigned long)request);
	if (err)
		return err;

	if (__get_user(actual, &request->count))
		return -EFAULT;
	/* Only copy entries back when the caller's buffer was big enough. */
	if (count >= actual)
		for (i = 0; i < actual; ++i)
			if (__copy_in_user(&to[i], &list[i],
					   offsetof(struct drm_buf_desc, flags)))
				return -EFAULT;

	if (__put_user(actual, &argp->count))
		return -EFAULT;

	return 0;
}
+
/* 32-bit layout of struct drm_buf_pub; 'address' narrows to u32. */
typedef struct drm_buf_pub32 {
	int idx;		/**< Index into the master buffer list */
	int total;		/**< Buffer size */
	int used;		/**< Amount of buffer in use (for DMA) */
	u32 address;		/**< Address of buffer */
} drm_buf_pub32_t;
+
/* 32-bit layout of struct drm_buf_map; both pointers become u32s. */
typedef struct drm_buf_map32 {
	int count;		/**< Length of the buffer list */
	u32 virtual;		/**< Mmap'd area in user-virtual */
	u32 list;		/**< Buffer information */
} drm_buf_map32_t;
+
/*
 * 32-bit shim for DRM_IOCTL_MAP_BUFS.  Stages a native request plus a
 * 'count'-sized drm_buf_pub array on the compat user stack, then copies
 * each entry back, narrowing the per-buffer address and the mmap'd
 * 'virtual' pointer to u32.
 *
 * NOTE(review): as in compat_drm_infobufs(), count * sizeof(...) is
 * computed from a userspace int — confirm compat_alloc_user_space()
 * bounds the result before relying on it.
 */
static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
			      unsigned long arg)
{
	drm_buf_map32_t __user *argp = (void __user *)arg;
	drm_buf_map32_t req32;
	drm_buf_pub32_t __user *list32;
	struct drm_buf_map __user *request;
	struct drm_buf_pub __user *list;
	int i, err;
	int count, actual;
	size_t nbytes;
	void __user *addr;

	if (copy_from_user(&req32, argp, sizeof(req32)))
		return -EFAULT;
	count = req32.count;
	list32 = (void __user *)(unsigned long)req32.list;

	if (count < 0)
		return -EINVAL;
	/* Request header immediately followed by the buf_pub array. */
	nbytes = sizeof(*request) + count * sizeof(struct drm_buf_pub);
	request = compat_alloc_user_space(nbytes);
	if (!access_ok(VERIFY_WRITE, request, nbytes))
		return -EFAULT;
	list = (struct drm_buf_pub *) (request + 1);

	if (__put_user(count, &request->count)
	    || __put_user(list, &request->list))
		return -EFAULT;

	err = drm_ioctl(file, DRM_IOCTL_MAP_BUFS, (unsigned long)request);
	if (err)
		return err;

	if (__get_user(actual, &request->count))
		return -EFAULT;
	/* Copy entries back only if the caller's array could hold them. */
	if (count >= actual)
		for (i = 0; i < actual; ++i)
			if (__copy_in_user(&list32[i], &list[i],
					   offsetof(struct drm_buf_pub, address))
			    || __get_user(addr, &list[i].address)
			    || __put_user((unsigned long)addr,
					  &list32[i].address))
				return -EFAULT;

	if (__put_user(actual, &argp->count)
	    || __get_user(addr, &request->virtual)
	    || __put_user((unsigned long)addr, &argp->virtual))
		return -EFAULT;

	return 0;
}
+
+typedef struct drm_buf_free32 {
+       int count;
+       u32 list;
+} drm_buf_free32_t;
+
+static int compat_drm_freebufs(struct file *file, unsigned int cmd,
+                              unsigned long arg)
+{
+       drm_buf_free32_t req32;
+       struct drm_buf_free __user *request;
+       drm_buf_free32_t __user *argp = (void __user *)arg;
+
+       if (copy_from_user(&req32, argp, sizeof(req32)))
+               return -EFAULT;
+
+       request = compat_alloc_user_space(sizeof(*request));
+       if (!access_ok(VERIFY_WRITE, request, sizeof(*request)))
+               return -EFAULT;
+       if (__put_user(req32.count, &request->count)
+           || __put_user((int __user *)(unsigned long)req32.list,
+                         &request->list))
+               return -EFAULT;
+
+       return drm_ioctl(file, DRM_IOCTL_FREE_BUFS, (unsigned long)request);
+}
+
+typedef struct drm_ctx_priv_map32 {
+       unsigned int ctx_id;     /**< Context requesting private mapping */
+       u32 handle;             /**< Handle of map */
+} drm_ctx_priv_map32_t;
+
+static int compat_drm_setsareactx(struct file *file, unsigned int cmd,
+                                 unsigned long arg)
+{
+       drm_ctx_priv_map32_t req32;
+       struct drm_ctx_priv_map __user *request;
+       drm_ctx_priv_map32_t __user *argp = (void __user *)arg;
+
+       if (copy_from_user(&req32, argp, sizeof(req32)))
+               return -EFAULT;
+
+       request = compat_alloc_user_space(sizeof(*request));
+       if (!access_ok(VERIFY_WRITE, request, sizeof(*request)))
+               return -EFAULT;
+       if (__put_user(req32.ctx_id, &request->ctx_id)
+           || __put_user((void *)(unsigned long)req32.handle,
+                         &request->handle))
+               return -EFAULT;
+
+       return drm_ioctl(file, DRM_IOCTL_SET_SAREA_CTX, (unsigned long)request);
+}
+
/*
 * Thunk DRM_IOCTL_GET_SAREA_CTX for 32-bit callers: forward the context
 * id, then narrow the returned SAREA map handle into the 32-bit field.
 */
static int compat_drm_getsareactx(struct file *file, unsigned int cmd,
				  unsigned long arg)
{
	struct drm_ctx_priv_map __user *request;
	drm_ctx_priv_map32_t __user *argp = (void __user *)arg;
	int err;
	unsigned int ctx_id;
	void *handle;

	/* Verify the whole 32-bit struct up front; handle is written after
	 * the ioctl using the cheaper __put_user. */
	if (!access_ok(VERIFY_WRITE, argp, sizeof(*argp))
	    || __get_user(ctx_id, &argp->ctx_id))
		return -EFAULT;

	request = compat_alloc_user_space(sizeof(*request));
	if (!access_ok(VERIFY_WRITE, request, sizeof(*request)))
		return -EFAULT;
	if (__put_user(ctx_id, &request->ctx_id))
		return -EFAULT;

	err = drm_ioctl(file, DRM_IOCTL_GET_SAREA_CTX, (unsigned long)request);
	if (err)
		return err;

	/* The handle is a user-space map token, so truncating to 32 bits
	 * is safe for a 32-bit caller. */
	if (__get_user(handle, &request->handle)
	    || __put_user((unsigned long)handle, &argp->handle))
		return -EFAULT;

	return 0;
}
+
+typedef struct drm_ctx_res32 {
+       int count;
+       u32 contexts;
+} drm_ctx_res32_t;
+
+static int compat_drm_resctx(struct file *file, unsigned int cmd,
+                            unsigned long arg)
+{
+       drm_ctx_res32_t __user *argp = (void __user *)arg;
+       drm_ctx_res32_t res32;
+       struct drm_ctx_res __user *res;
+       int err;
+
+       if (copy_from_user(&res32, argp, sizeof(res32)))
+               return -EFAULT;
+
+       res = compat_alloc_user_space(sizeof(*res));
+       if (!access_ok(VERIFY_WRITE, res, sizeof(*res)))
+               return -EFAULT;
+       if (__put_user(res32.count, &res->count)
+           || __put_user((struct drm_ctx __user *) (unsigned long)res32.contexts,
+                         &res->contexts))
+               return -EFAULT;
+
+       err = drm_ioctl(file, DRM_IOCTL_RES_CTX, (unsigned long)res);
+       if (err)
+               return err;
+
+       if (__get_user(res32.count, &res->count)
+           || __put_user(res32.count, &argp->count))
+               return -EFAULT;
+
+       return 0;
+}
+
/* 32-bit layout of struct drm_dma: all four user array pointers narrow to u32. */
typedef struct drm_dma32 {
	int context;		  /**< Context handle */
	int send_count;		  /**< Number of buffers to send */
	u32 send_indices;	  /**< List of handles to buffers */
	u32 send_sizes;		  /**< Lengths of data to send */
	enum drm_dma_flags flags;		  /**< Flags */
	int request_count;	  /**< Number of buffers requested */
	int request_size;	  /**< Desired size for buffers */
	u32 request_indices;	  /**< Buffer information */
	u32 request_sizes;
	int granted_count;	  /**< Number of buffers granted */
} drm_dma32_t;

/*
 * Thunk DRM_IOCTL_DMA for 32-bit callers: widen the request (including
 * the four index/size array pointers) into a native struct drm_dma,
 * forward it, then copy the request_size/granted_count results back.
 *
 * NOTE(review): request_size is never copied into the native struct
 * before the ioctl, only copied back afterwards — this assumes the
 * kernel treats it as output-only; confirm against drm_dma() usage.
 */
static int compat_drm_dma(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	drm_dma32_t d32;
	drm_dma32_t __user *argp = (void __user *)arg;
	struct drm_dma __user *d;
	int err;

	if (copy_from_user(&d32, argp, sizeof(d32)))
		return -EFAULT;

	d = compat_alloc_user_space(sizeof(*d));
	if (!access_ok(VERIFY_WRITE, d, sizeof(*d)))
		return -EFAULT;

	if (__put_user(d32.context, &d->context)
	    || __put_user(d32.send_count, &d->send_count)
	    || __put_user((int __user *)(unsigned long)d32.send_indices,
			  &d->send_indices)
	    || __put_user((int __user *)(unsigned long)d32.send_sizes,
			  &d->send_sizes)
	    || __put_user(d32.flags, &d->flags)
	    || __put_user(d32.request_count, &d->request_count)
	    || __put_user((int __user *)(unsigned long)d32.request_indices,
			  &d->request_indices)
	    || __put_user((int __user *)(unsigned long)d32.request_sizes,
			  &d->request_sizes))
		return -EFAULT;

	err = drm_ioctl(file, DRM_IOCTL_DMA, (unsigned long)d);
	if (err)
		return err;

	/* Copy only the out-parameters back to the 32-bit struct. */
	if (__get_user(d32.request_size, &d->request_size)
	    || __get_user(d32.granted_count, &d->granted_count)
	    || __put_user(d32.request_size, &argp->request_size)
	    || __put_user(d32.granted_count, &argp->granted_count))
		return -EFAULT;

	return 0;
}
+
+#if __OS_HAS_AGP
+typedef struct drm_agp_mode32 {
+       u32 mode;       /**< AGP mode */
+} drm_agp_mode32_t;
+
+static int compat_drm_agp_enable(struct file *file, unsigned int cmd,
+                                unsigned long arg)
+{
+       drm_agp_mode32_t __user *argp = (void __user *)arg;
+       drm_agp_mode32_t m32;
+       struct drm_agp_mode __user *mode;
+
+       if (get_user(m32.mode, &argp->mode))
+               return -EFAULT;
+
+       mode = compat_alloc_user_space(sizeof(*mode));
+       if (put_user(m32.mode, &mode->mode))
+               return -EFAULT;
+
+       return drm_ioctl(file, DRM_IOCTL_AGP_ENABLE, (unsigned long)mode);
+}
+
/* 32-bit layout of struct drm_agp_info: sizes and addresses narrow to u32. */
typedef struct drm_agp_info32 {
	int agp_version_major;
	int agp_version_minor;
	u32 mode;
	u32 aperture_base;	/* physical address */
	u32 aperture_size;	/* bytes */
	u32 memory_allowed;	/* bytes */
	u32 memory_used;

	/* PCI information */
	unsigned short id_vendor;
	unsigned short id_device;
} drm_agp_info32_t;

/*
 * Thunk DRM_IOCTL_AGP_INFO for 32-bit callers: the ioctl takes no input,
 * so forward it into a native scratch struct and then narrow every
 * result field into the 32-bit layout before copying it out.
 */
static int compat_drm_agp_info(struct file *file, unsigned int cmd,
			       unsigned long arg)
{
	drm_agp_info32_t __user *argp = (void __user *)arg;
	drm_agp_info32_t i32;
	struct drm_agp_info __user *info;
	int err;

	info = compat_alloc_user_space(sizeof(*info));
	if (!access_ok(VERIFY_WRITE, info, sizeof(*info)))
		return -EFAULT;

	err = drm_ioctl(file, DRM_IOCTL_AGP_INFO, (unsigned long)info);
	if (err)
		return err;

	/* Field-by-field narrowing copy from the native result. */
	if (__get_user(i32.agp_version_major, &info->agp_version_major)
	    || __get_user(i32.agp_version_minor, &info->agp_version_minor)
	    || __get_user(i32.mode, &info->mode)
	    || __get_user(i32.aperture_base, &info->aperture_base)
	    || __get_user(i32.aperture_size, &info->aperture_size)
	    || __get_user(i32.memory_allowed, &info->memory_allowed)
	    || __get_user(i32.memory_used, &info->memory_used)
	    || __get_user(i32.id_vendor, &info->id_vendor)
	    || __get_user(i32.id_device, &info->id_device))
		return -EFAULT;

	if (copy_to_user(argp, &i32, sizeof(i32)))
		return -EFAULT;

	return 0;
}
+
/* 32-bit layout of struct drm_agp_buffer. */
typedef struct drm_agp_buffer32 {
	u32 size;	/**< In bytes -- will round to page boundary */
	u32 handle;	/**< Used for binding / unbinding */
	u32 type;	/**< Type of memory to allocate */
	u32 physical;	/**< Physical used by i810 */
} drm_agp_buffer32_t;

/*
 * Thunk DRM_IOCTL_AGP_ALLOC for 32-bit callers: widen size/type, forward
 * the allocation, then narrow handle/physical back to the caller.
 */
static int compat_drm_agp_alloc(struct file *file, unsigned int cmd,
				unsigned long arg)
{
	drm_agp_buffer32_t __user *argp = (void __user *)arg;
	drm_agp_buffer32_t req32;
	struct drm_agp_buffer __user *request;
	int err;

	if (copy_from_user(&req32, argp, sizeof(req32)))
		return -EFAULT;

	request = compat_alloc_user_space(sizeof(*request));
	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
	    || __put_user(req32.size, &request->size)
	    || __put_user(req32.type, &request->type))
		return -EFAULT;

	err = drm_ioctl(file, DRM_IOCTL_AGP_ALLOC, (unsigned long)request);
	if (err)
		return err;

	/* If the results can't be returned, roll back the allocation so the
	 * AGP memory isn't leaked with a handle the caller never saw. */
	if (__get_user(req32.handle, &request->handle)
	    || __get_user(req32.physical, &request->physical)
	    || copy_to_user(argp, &req32, sizeof(req32))) {
		drm_ioctl(file, DRM_IOCTL_AGP_FREE, (unsigned long)request);
		return -EFAULT;
	}

	return 0;
}
+
+static int compat_drm_agp_free(struct file *file, unsigned int cmd,
+                              unsigned long arg)
+{
+       drm_agp_buffer32_t __user *argp = (void __user *)arg;
+       struct drm_agp_buffer __user *request;
+       u32 handle;
+
+       request = compat_alloc_user_space(sizeof(*request));
+       if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+           || get_user(handle, &argp->handle)
+           || __put_user(handle, &request->handle))
+               return -EFAULT;
+
+       return drm_ioctl(file, DRM_IOCTL_AGP_FREE, (unsigned long)request);
+}
+
+typedef struct drm_agp_binding32 {
+       u32 handle;     /**< From drm_agp_buffer */
+       u32 offset;     /**< In bytes -- will round to page boundary */
+} drm_agp_binding32_t;
+
+static int compat_drm_agp_bind(struct file *file, unsigned int cmd,
+                              unsigned long arg)
+{
+       drm_agp_binding32_t __user *argp = (void __user *)arg;
+       drm_agp_binding32_t req32;
+       struct drm_agp_binding __user *request;
+
+       if (copy_from_user(&req32, argp, sizeof(req32)))
+               return -EFAULT;
+
+       request = compat_alloc_user_space(sizeof(*request));
+       if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+           || __put_user(req32.handle, &request->handle)
+           || __put_user(req32.offset, &request->offset))
+               return -EFAULT;
+
+       return drm_ioctl(file, DRM_IOCTL_AGP_BIND, (unsigned long)request);
+}
+
+static int compat_drm_agp_unbind(struct file *file, unsigned int cmd,
+                                unsigned long arg)
+{
+       drm_agp_binding32_t __user *argp = (void __user *)arg;
+       struct drm_agp_binding __user *request;
+       u32 handle;
+
+       request = compat_alloc_user_space(sizeof(*request));
+       if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+           || get_user(handle, &argp->handle)
+           || __put_user(handle, &request->handle))
+               return -EFAULT;
+
+       return drm_ioctl(file, DRM_IOCTL_AGP_UNBIND, (unsigned long)request);
+}
+#endif                         /* __OS_HAS_AGP */
+
/* 32-bit layout of struct drm_scatter_gather. */
typedef struct drm_scatter_gather32 {
	u32 size;	/**< In bytes -- will round to page boundary */
	u32 handle;	/**< Used for mapping / unmapping */
} drm_scatter_gather32_t;

/*
 * Thunk DRM_IOCTL_SG_ALLOC for 32-bit callers: widen the size, forward
 * the allocation, then return the handle shifted down to page units so
 * it fits in the caller's 32-bit field (compat_drm_sg_free shifts it
 * back up).
 */
static int compat_drm_sg_alloc(struct file *file, unsigned int cmd,
			       unsigned long arg)
{
	drm_scatter_gather32_t __user *argp = (void __user *)arg;
	struct drm_scatter_gather __user *request;
	int err;
	unsigned long x;

	request = compat_alloc_user_space(sizeof(*request));
	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
	    || !access_ok(VERIFY_WRITE, argp, sizeof(*argp))
	    || __get_user(x, &argp->size)
	    || __put_user(x, &request->size))
		return -EFAULT;

	err = drm_ioctl(file, DRM_IOCTL_SG_ALLOC, (unsigned long)request);
	if (err)
		return err;

	/* XXX not sure about the handle conversion here... */
	if (__get_user(x, &request->handle)
	    || __put_user(x >> PAGE_SHIFT, &argp->handle))
		return -EFAULT;

	return 0;
}
+
+static int compat_drm_sg_free(struct file *file, unsigned int cmd,
+                             unsigned long arg)
+{
+       drm_scatter_gather32_t __user *argp = (void __user *)arg;
+       struct drm_scatter_gather __user *request;
+       unsigned long x;
+
+       request = compat_alloc_user_space(sizeof(*request));
+       if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+           || !access_ok(VERIFY_WRITE, argp, sizeof(*argp))
+           || __get_user(x, &argp->handle)
+           || __put_user(x << PAGE_SHIFT, &request->handle))
+               return -EFAULT;
+
+       return drm_ioctl(file, DRM_IOCTL_SG_FREE, (unsigned long)request);
+}
+
+#if defined(CONFIG_X86) || defined(CONFIG_IA64)
/* 32-bit layout of struct drm_update_draw; packed because the 64-bit
 * struct has 4 bytes of padding before 'data' that the 32-bit ABI lacks. */
typedef struct drm_update_draw32 {
	drm_drawable_t handle;
	unsigned int type;
	unsigned int num;
	/* 64-bit version has a 32-bit pad here */
	u64 data;	/**< Pointer */
} __attribute__((packed)) drm_update_draw32_t;

/*
 * Thunk DRM_IOCTL_UPDATE_DRAW for 32-bit callers: re-lay-out the packed
 * 32-bit struct into the padded native one and forward the ioctl.
 */
static int compat_drm_update_draw(struct file *file, unsigned int cmd,
				  unsigned long arg)
{
	drm_update_draw32_t update32;
	struct drm_update_draw __user *request;
	int err;

	if (copy_from_user(&update32, (void __user *)arg, sizeof(update32)))
		return -EFAULT;

	request = compat_alloc_user_space(sizeof(*request));
	if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) ||
	    __put_user(update32.handle, &request->handle) ||
	    __put_user(update32.type, &request->type) ||
	    __put_user(update32.num, &request->num) ||
	    __put_user(update32.data, &request->data))
		return -EFAULT;

	err = drm_ioctl(file, DRM_IOCTL_UPDATE_DRAW, (unsigned long)request);
	return err;
}
+#endif
+
/* 32-bit request half of union drm_wait_vblank; signal narrows to u32. */
struct drm_wait_vblank_request32 {
	enum drm_vblank_seq_type type;
	unsigned int sequence;
	u32 signal;
};

/* 32-bit reply half; timeval fields are explicit s32 on the 32-bit ABI. */
struct drm_wait_vblank_reply32 {
	enum drm_vblank_seq_type type;
	unsigned int sequence;
	s32 tval_sec;
	s32 tval_usec;
};

typedef union drm_wait_vblank32 {
	struct drm_wait_vblank_request32 request;
	struct drm_wait_vblank_reply32 reply;
} drm_wait_vblank32_t;

/*
 * Thunk DRM_IOCTL_WAIT_VBLANK for 32-bit callers: widen the request
 * union, forward the wait, then narrow the reply (including the
 * timestamp) back into the 32-bit union.
 */
static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
				  unsigned long arg)
{
	drm_wait_vblank32_t __user *argp = (void __user *)arg;
	drm_wait_vblank32_t req32;
	union drm_wait_vblank __user *request;
	int err;

	if (copy_from_user(&req32, argp, sizeof(req32)))
		return -EFAULT;

	request = compat_alloc_user_space(sizeof(*request));
	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
	    || __put_user(req32.request.type, &request->request.type)
	    || __put_user(req32.request.sequence, &request->request.sequence)
	    || __put_user(req32.request.signal, &request->request.signal))
		return -EFAULT;

	err = drm_ioctl(file, DRM_IOCTL_WAIT_VBLANK, (unsigned long)request);
	if (err)
		return err;

	/* Copy the reply fields out of the native union. */
	if (__get_user(req32.reply.type, &request->reply.type)
	    || __get_user(req32.reply.sequence, &request->reply.sequence)
	    || __get_user(req32.reply.tval_sec, &request->reply.tval_sec)
	    || __get_user(req32.reply.tval_usec, &request->reply.tval_usec))
		return -EFAULT;

	if (copy_to_user(argp, &req32, sizeof(req32)))
		return -EFAULT;

	return 0;
}
+
/*
 * Compat handler table, indexed by ioctl number.  Slots left NULL (and
 * any nr past the end of the table) fall through to the native
 * drm_ioctl() in drm_compat_ioctl() below.
 */
drm_ioctl_compat_t *drm_compat_ioctls[] = {
	[DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
	[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
	[DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
	[DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT32)] = compat_drm_getclient,
	[DRM_IOCTL_NR(DRM_IOCTL_GET_STATS32)] = compat_drm_getstats,
	[DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE32)] = compat_drm_setunique,
	[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP32)] = compat_drm_addmap,
	[DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS32)] = compat_drm_addbufs,
	[DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS32)] = compat_drm_markbufs,
	[DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS32)] = compat_drm_infobufs,
	[DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS32)] = compat_drm_mapbufs,
	[DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS32)] = compat_drm_freebufs,
	[DRM_IOCTL_NR(DRM_IOCTL_RM_MAP32)] = compat_drm_rmmap,
	[DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX32)] = compat_drm_setsareactx,
	[DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX32)] = compat_drm_getsareactx,
	[DRM_IOCTL_NR(DRM_IOCTL_RES_CTX32)] = compat_drm_resctx,
	[DRM_IOCTL_NR(DRM_IOCTL_DMA32)] = compat_drm_dma,
#if __OS_HAS_AGP
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE32)] = compat_drm_agp_enable,
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO32)] = compat_drm_agp_info,
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC32)] = compat_drm_agp_alloc,
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE32)] = compat_drm_agp_free,
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND32)] = compat_drm_agp_bind,
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND32)] = compat_drm_agp_unbind,
#endif
	[DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC32)] = compat_drm_sg_alloc,
	[DRM_IOCTL_NR(DRM_IOCTL_SG_FREE32)] = compat_drm_sg_free,
#if defined(CONFIG_X86) || defined(CONFIG_IA64)
	[DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW32)] = compat_drm_update_draw,
#endif
	[DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK32)] = compat_drm_wait_vblank,
};
+
+/**
+ * Called whenever a 32-bit process running under a 64-bit kernel
+ * performs an ioctl on /dev/drm.
+ *
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument.
+ * \return zero on success or negative number on failure.
+ */
+long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+       unsigned int nr = DRM_IOCTL_NR(cmd);
+       drm_ioctl_compat_t *fn;
+       int ret;
+
+       /* Assume that ioctls without an explicit compat routine will just
+        * work.  This may not always be a good assumption, but it's better
+        * than always failing.
+        */
+       if (nr >= ARRAY_SIZE(drm_compat_ioctls))
+               return drm_ioctl(filp, cmd, arg);
+
+       fn = drm_compat_ioctls[nr];
+
+       if (fn != NULL)
+               ret = (*fn) (filp, cmd, arg);
+       else
+               ret = drm_ioctl(filp, cmd, arg);
+
+       return ret;
+}
+
+EXPORT_SYMBOL(drm_compat_ioctl);
diff --git a/services4/3rdparty/linux_drm/drm_ioctl.c b/services4/3rdparty/linux_drm/drm_ioctl.c
new file mode 100644 (file)
index 0000000..47db4df
--- /dev/null
@@ -0,0 +1,430 @@
+/**
+ * \file drm_ioctl.c
+ * IOCTL processing for DRM
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Fri Jan  8 09:01:26 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm_core.h"
+
+#include "linux/pci.h"
+
+/**
+ * Get the bus id.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_unique structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Copies the bus id from drm_device::unique into user space.
+ */
+int drm_getunique(struct drm_device *dev, void *data,
+                 struct drm_file *file_priv)
+{
+       struct drm_unique *u = data;
+       struct drm_master *master = file_priv->master;
+
+       if (u->unique_len >= master->unique_len) {
+               if (copy_to_user(u->unique, master->unique, master->unique_len))
+                       return -EFAULT;
+       }
+       u->unique_len = master->unique_len;
+
+       return 0;
+}
+
+static void
+drm_unset_busid(struct drm_device *dev,
+               struct drm_master *master)
+{
+       kfree(dev->devname);
+       dev->devname = NULL;
+
+       kfree(master->unique);
+       master->unique = NULL;
+       master->unique_len = 0;
+       master->unique_size = 0;
+}
+
/**
 * Set the bus id.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg user argument, pointing to a drm_unique structure.
 * \return zero on success or a negative number on failure.
 *
 * Copies the bus id from userspace into drm_device::unique, and verifies that
 * it matches the device this DRM is attached to (EINVAL otherwise).  Deprecated
 * in interface version 1.1 and will return EBUSY when setversion has requested
 * version 1.1 or greater.
 */
int drm_setunique(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	struct drm_unique *u = data;
	struct drm_master *master = file_priv->master;
	int domain, bus, slot, func, ret;

	/* A busid may only be set once per master. */
	if (master->unique_len || master->unique)
		return -EBUSY;

	/* Reject empty or unreasonably long strings. */
	if (!u->unique_len || u->unique_len > 1024)
		return -EINVAL;

	master->unique_len = u->unique_len;
	master->unique_size = u->unique_len + 1;
	master->unique = kmalloc(master->unique_size, GFP_KERNEL);
	if (!master->unique) {
		ret = -ENOMEM;
		goto err;
	}

	if (copy_from_user(master->unique, u->unique, master->unique_len)) {
		ret = -EFAULT;
		goto err;
	}

	master->unique[master->unique_len] = '\0';

	/* devname = "<driver>@<busid>", e.g. for proc/debug output. */
	dev->devname = kmalloc(strlen(dev->driver->pci_driver.name) +
			       strlen(master->unique) + 2, GFP_KERNEL);
	if (!dev->devname) {
		ret = -ENOMEM;
		goto err;
	}

	sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name,
		master->unique);

	/* Return error if the busid submitted doesn't match the device's actual
	 * busid.  Note the "PCI:%d:%d:%d" format packs the PCI domain into
	 * the high bits of the bus number.
	 */
	ret = sscanf(master->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
	if (ret != 3) {
		ret = -EINVAL;
		goto err;
	}

	domain = bus >> 8;
	bus &= 0xff;

	if ((domain != drm_get_pci_domain(dev)) ||
	    (bus != dev->pdev->bus->number) ||
	    (slot != PCI_SLOT(dev->pdev->devfn)) ||
	    (func != PCI_FUNC(dev->pdev->devfn))) {
		ret = -EINVAL;
		goto err;
	}

	return 0;

err:
	/* All failure paths roll back to "no busid set". */
	drm_unset_busid(dev, master);
	return ret;
}
+
/*
 * Derive and store the master's busid string from the underlying device:
 * "platform:<name>" for platform devices, "pci:DDDD:BB:SS.F" for PCI.
 * Called from drm_setversion() for interface version >= 1.1.
 */
static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_master *master = file_priv->master;
	int len, ret;

	if (master->unique != NULL)
		drm_unset_busid(dev, master);

	if (drm_core_check_feature(dev, DRIVER_USE_PLATFORM_DEVICE)) {
		/* NOTE(review): unlike the PCI branch below, this branch
		 * never sets master->unique_size — confirm nothing relies
		 * on it after a platform busid is set. */
		master->unique_len = 10 + strlen(dev->platformdev->name);
		master->unique = kmalloc(master->unique_len + 1, GFP_KERNEL);

		if (master->unique == NULL)
			return -ENOMEM;

		len = snprintf(master->unique, master->unique_len,
			"platform:%s", dev->platformdev->name);

		if (len > master->unique_len) {
			DRM_ERROR("Unique buffer overflowed\n");
			ret = -EINVAL;
			goto err;
		}

		dev->devname =
			kmalloc(strlen(dev->platformdev->name) +
				master->unique_len + 2, GFP_KERNEL);

		if (dev->devname == NULL) {
			ret = -ENOMEM;
			goto err;
		}

		sprintf(dev->devname, "%s@%s", dev->platformdev->name,
			master->unique);

	} else {
		/* Fixed-size buffer: "pci:DDDD:BB:SS.F" always fits in 40. */
		master->unique_len = 40;
		master->unique_size = master->unique_len;
		master->unique = kmalloc(master->unique_size, GFP_KERNEL);
		if (master->unique == NULL)
			return -ENOMEM;

		len = snprintf(master->unique, master->unique_len,
			"pci:%04x:%02x:%02x.%d",
			drm_get_pci_domain(dev),
			dev->pdev->bus->number,
			PCI_SLOT(dev->pdev->devfn),
			PCI_FUNC(dev->pdev->devfn));
		if (len >= master->unique_len) {
			DRM_ERROR("buffer overflow");
			ret = -EINVAL;
			goto err;
		} else
			master->unique_len = len;

		dev->devname =
			kmalloc(strlen(dev->driver->pci_driver.name) +
				master->unique_len + 2, GFP_KERNEL);

		if (dev->devname == NULL) {
			ret = -ENOMEM;
			goto err;
		}

		sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name,
			master->unique);
	}

	return 0;

err:
	/* Roll back to "no busid set" on any failure. */
	drm_unset_busid(dev, master);
	return ret;
}
+
+/**
+ * Get a mapping information.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_map structure.
+ *
+ * \return zero on success or a negative number on failure.
+ *
+ * Searches for the mapping with the specified offset and copies its information
+ * into userspace
+ */
+int drm_getmap(struct drm_device *dev, void *data,
+              struct drm_file *file_priv)
+{
+       struct drm_map *map = data;
+       struct drm_map_list *r_list = NULL;
+       struct list_head *list;
+       int idx;
+       int i;
+
+       idx = map->offset;
+
+       mutex_lock(&dev->struct_mutex);
+       if (idx < 0) {
+               mutex_unlock(&dev->struct_mutex);
+               return -EINVAL;
+       }
+
+       i = 0;
+       list_for_each(list, &dev->maplist) {
+               if (i == idx) {
+                       r_list = list_entry(list, struct drm_map_list, head);
+                       break;
+               }
+               i++;
+       }
+       if (!r_list || !r_list->map) {
+               mutex_unlock(&dev->struct_mutex);
+               return -EINVAL;
+       }
+
+       map->offset = r_list->map->offset;
+       map->size = r_list->map->size;
+       map->type = r_list->map->type;
+       map->flags = r_list->map->flags;
+       map->handle = (void *)(unsigned long) r_list->user_token;
+       map->mtrr = r_list->map->mtrr;
+       mutex_unlock(&dev->struct_mutex);
+
+       return 0;
+}
+
+/**
+ * Get client information.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_client structure.
+ *
+ * \return zero on success or a negative number on failure.
+ *
+ * Searches for the client with the specified index and copies its information
+ * into userspace
+ */
+int drm_getclient(struct drm_device *dev, void *data,
+                 struct drm_file *file_priv)
+{
+       struct drm_client *client = data;
+       struct drm_file *pt;
+       int idx;
+       int i;
+
+       idx = client->idx;
+       mutex_lock(&dev->struct_mutex);
+
+       i = 0;
+       list_for_each_entry(pt, &dev->filelist, lhead) {
+               if (i++ >= idx) {
+                       client->auth = pt->authenticated;
+                       client->pid = pt->pid;
+                       client->uid = pt->uid;
+                       client->magic = pt->magic;
+                       client->iocs = pt->ioctl_count;
+                       mutex_unlock(&dev->struct_mutex);
+
+                       return 0;
+               }
+       }
+       mutex_unlock(&dev->struct_mutex);
+
+       return -EINVAL;
+}
+
+/**
+ * Get statistics information.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_stats structure.
+ *
+ * \return zero on success or a negative number on failure.
+ */
+int drm_getstats(struct drm_device *dev, void *data,
+                struct drm_file *file_priv)
+{
+       struct drm_stats *stats = data;
+       int i;
+
+       memset(stats, 0, sizeof(*stats));
+
+       mutex_lock(&dev->struct_mutex);
+
+       for (i = 0; i < dev->counters; i++) {
+               if (dev->types[i] == _DRM_STAT_LOCK)
+                       stats->data[i].value =
+                           (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
+               else
+                       stats->data[i].value = atomic_read(&dev->counts[i]);
+               stats->data[i].type = dev->types[i];
+       }
+
+       stats->count = dev->counters;
+
+       mutex_unlock(&dev->struct_mutex);
+
+       return 0;
+}
+
/**
 * Setversion ioctl.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg user argument, pointing to a drm_lock structure.
 * \return zero on success or negative number on failure.
 *
 * Sets the requested interface version.  On ANY exit (including errors)
 * the struct is overwritten with the versions actually supported, so
 * userspace can probe by passing -1.
 */
int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_set_version *sv = data;
	int if_version, retcode = 0;

	/* -1 means "don't change the DRM interface version". */
	if (sv->drm_di_major != -1) {
		if (sv->drm_di_major != DRM_IF_MAJOR ||
		    sv->drm_di_minor < 0 || sv->drm_di_minor > DRM_IF_MINOR) {
			retcode = -EINVAL;
			goto done;
		}
		if_version = DRM_IF_VERSION(sv->drm_di_major,
					    sv->drm_di_minor);
		dev->if_version = max(if_version, dev->if_version);
		if (sv->drm_di_minor >= 1) {
			/*
			 * Version 1.1 includes tying of DRM to specific device
			 * Version 1.4 has proper PCI domain support
			 */
			retcode = drm_set_busid(dev, file_priv);
			if (retcode)
				goto done;
		}
	}

	/* -1 means "don't change the driver version". */
	if (sv->drm_dd_major != -1) {
		if (sv->drm_dd_major != dev->driver->major ||
		    sv->drm_dd_minor < 0 || sv->drm_dd_minor >
		    dev->driver->minor) {
			retcode = -EINVAL;
			goto done;
		}

		if (dev->driver->set_version)
			dev->driver->set_version(dev, sv);
	}

done:
	sv->drm_di_major = DRM_IF_MAJOR;
	sv->drm_di_minor = DRM_IF_MINOR;
	sv->drm_dd_major = dev->driver->major;
	sv->drm_dd_minor = dev->driver->minor;

	return retcode;
}
+
/** No-op ioctl: accepted and ignored, kept for ABI compatibility. */
int drm_noop(struct drm_device *dev, void *data,
	     struct drm_file *file_priv)
{
	DRM_DEBUG("\n");
	return 0;
}
diff --git a/services4/3rdparty/linux_drm/drm_irq.c b/services4/3rdparty/linux_drm/drm_irq.c
new file mode 100644 (file)
index 0000000..16d5155
--- /dev/null
@@ -0,0 +1,799 @@
+/**
+ * \file drm_irq.c
+ * IRQ support
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm_trace.h"
+
+#include <linux/interrupt.h>   /* For task queue support */
+#include <linux/slab.h>
+
+#include <linux/vgaarb.h>
+/**
+ * Get interrupt from bus id.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_irq_busid structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Finds the PCI device with the specified bus id and gets its IRQ number.
+ * This IOCTL is deprecated, and will now return EINVAL for any busid not equal
+ * to that of the device that this DRM instance attached to.
+ */
+int drm_irq_by_busid(struct drm_device *dev, void *data,
+                    struct drm_file *file_priv)
+{
+       struct drm_irq_busid *p = data;
+
+       /* Platform devices have no PCI bus id to match against. */
+       if (drm_core_check_feature(dev, DRIVER_USE_PLATFORM_DEVICE))
+               return -EINVAL;
+
+       if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+               return -EINVAL;
+
+       /* Only the busid of the device this instance is bound to is accepted. */
+       if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
+           (p->busnum & 0xff) != dev->pdev->bus->number ||
+           p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
+               return -EINVAL;
+
+       p->irq = dev->pdev->irq;
+
+       DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
+                 p->irq);
+
+       return 0;
+}
+
+/*
+ * Callback of dev->vblank_disable_timer: turn off the vblank interrupt on
+ * every CRTC whose refcount has dropped to zero.  The hardware counter is
+ * sampled into last_vblank[] before disabling, so that
+ * drm_update_vblank_count() can account for frames missed while off.
+ * Runs in timer (softirq) context; vbl_lock is taken per CRTC.
+ */
+static void vblank_disable_fn(unsigned long arg)
+{
+       struct drm_device *dev = (struct drm_device *)arg;
+       unsigned long irqflags;
+       int i;
+
+       if (!dev->vblank_disable_allowed)
+               return;
+
+       for (i = 0; i < dev->num_crtcs; i++) {
+               spin_lock_irqsave(&dev->vbl_lock, irqflags);
+               if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
+                   dev->vblank_enabled[i]) {
+                       DRM_DEBUG("disabling vblank on crtc %d\n", i);
+                       dev->last_vblank[i] =
+                               dev->driver->get_vblank_counter(dev, i);
+                       dev->driver->disable_vblank(dev, i);
+                       dev->vblank_enabled[i] = 0;
+               }
+               spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+       }
+}
+
+/*
+ * Tear down all per-CRTC vblank state allocated by drm_vblank_init().
+ * Safe to call on a partially initialized device (kfree(NULL) is a no-op),
+ * which is why drm_vblank_init() uses it as its error path.
+ */
+void drm_vblank_cleanup(struct drm_device *dev)
+{
+       /* Bail if the driver didn't call drm_vblank_init() */
+       if (dev->num_crtcs == 0)
+               return;
+
+       del_timer(&dev->vblank_disable_timer);
+
+       /* Force vblank interrupts off before freeing the bookkeeping. */
+       vblank_disable_fn((unsigned long)dev);
+
+       kfree(dev->vbl_queue);
+       kfree(dev->_vblank_count);
+       kfree(dev->vblank_refcount);
+       kfree(dev->vblank_enabled);
+       kfree(dev->last_vblank);
+       kfree(dev->last_vblank_wait);
+       kfree(dev->vblank_inmodeset);
+
+       dev->num_crtcs = 0;
+}
+EXPORT_SYMBOL(drm_vblank_cleanup);
+
+/**
+ * drm_vblank_init - initialize per-CRTC vblank tracking
+ * @dev: DRM device
+ * @num_crtcs: number of CRTCs to track
+ *
+ * Allocates the per-CRTC wait queues, counters and flags, and arms the
+ * vblank disable timer.  Returns 0 on success or -ENOMEM; on failure all
+ * partially allocated state is released via drm_vblank_cleanup().
+ *
+ * All arrays are allocated with kcalloc() so the element-count
+ * multiplication is overflow-checked and the memory starts zeroed
+ * (the original mixed unchecked kmalloc(size * n) with kcalloc).
+ */
+int drm_vblank_init(struct drm_device *dev, int num_crtcs)
+{
+       int i, ret = -ENOMEM;
+
+       setup_timer(&dev->vblank_disable_timer, vblank_disable_fn,
+                   (unsigned long)dev);
+       spin_lock_init(&dev->vbl_lock);
+       dev->num_crtcs = num_crtcs;
+
+       dev->vbl_queue = kcalloc(num_crtcs, sizeof(wait_queue_head_t),
+                                GFP_KERNEL);
+       if (!dev->vbl_queue)
+               goto err;
+
+       dev->_vblank_count = kcalloc(num_crtcs, sizeof(atomic_t), GFP_KERNEL);
+       if (!dev->_vblank_count)
+               goto err;
+
+       dev->vblank_refcount = kcalloc(num_crtcs, sizeof(atomic_t),
+                                      GFP_KERNEL);
+       if (!dev->vblank_refcount)
+               goto err;
+
+       dev->vblank_enabled = kcalloc(num_crtcs, sizeof(int), GFP_KERNEL);
+       if (!dev->vblank_enabled)
+               goto err;
+
+       dev->last_vblank = kcalloc(num_crtcs, sizeof(u32), GFP_KERNEL);
+       if (!dev->last_vblank)
+               goto err;
+
+       dev->last_vblank_wait = kcalloc(num_crtcs, sizeof(u32), GFP_KERNEL);
+       if (!dev->last_vblank_wait)
+               goto err;
+
+       dev->vblank_inmodeset = kcalloc(num_crtcs, sizeof(int), GFP_KERNEL);
+       if (!dev->vblank_inmodeset)
+               goto err;
+
+       /* Zero per-crtc vblank stuff */
+       for (i = 0; i < num_crtcs; i++) {
+               init_waitqueue_head(&dev->vbl_queue[i]);
+               atomic_set(&dev->_vblank_count[i], 0);
+               atomic_set(&dev->vblank_refcount[i], 0);
+       }
+
+       dev->vblank_disable_allowed = 0;
+       return 0;
+
+err:
+       drm_vblank_cleanup(dev);
+       return ret;
+}
+EXPORT_SYMBOL(drm_vblank_init);
+
+/*
+ * VGA arbiter callback for non-KMS drivers: when another VGA client takes
+ * the legacy resources (state == true) this device's IRQ handling is
+ * uninstalled, and re-installed when they are released.  A driver can
+ * override the whole sequence via its vgaarb_irq hook.
+ */
+static void drm_irq_vgaarb_nokms(void *cookie, bool state)
+{
+       struct drm_device *dev = cookie;
+
+       if (dev->driver->vgaarb_irq) {
+               dev->driver->vgaarb_irq(dev, state);
+               return;
+       }
+
+       if (!dev->irq_enabled)
+               return;
+
+       if (state)
+               dev->driver->irq_uninstall(dev);
+       else {
+               dev->driver->irq_preinstall(dev);
+               dev->driver->irq_postinstall(dev);
+       }
+}
+
+/**
+ * Install IRQ handler.
+ *
+ * \param dev DRM device.
+ *
+ * Initializes the IRQ related data. Installs the handler, calling the driver
+ * \c drm_driver_irq_preinstall() and \c drm_driver_irq_postinstall() functions
+ * before and after the installation.
+ */
+int drm_irq_install(struct drm_device *dev)
+{
+       int ret = 0;
+       unsigned long sh_flags = 0;
+       char *irqname;
+
+       if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+               return -EINVAL;
+
+       if (drm_dev_to_irq(dev) == 0)
+               return -EINVAL;
+
+       mutex_lock(&dev->struct_mutex);
+
+       /* Driver must have been initialized */
+       if (!dev->dev_private) {
+               mutex_unlock(&dev->struct_mutex);
+               return -EINVAL;
+       }
+
+       if (dev->irq_enabled) {
+               mutex_unlock(&dev->struct_mutex);
+               return -EBUSY;
+       }
+       /*
+        * Claim the IRQ before dropping the lock so a concurrent install
+        * attempt fails with -EBUSY instead of racing request_irq().
+        */
+       dev->irq_enabled = 1;
+       mutex_unlock(&dev->struct_mutex);
+
+       DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev));
+
+       /* Before installing handler */
+       dev->driver->irq_preinstall(dev);
+
+       /* Install handler */
+       if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
+               sh_flags = IRQF_SHARED;
+
+       if (dev->devname)
+               irqname = dev->devname;
+       else
+               irqname = dev->driver->name;
+
+       ret = request_irq(drm_dev_to_irq(dev), dev->driver->irq_handler,
+                         sh_flags, irqname, dev);
+
+       if (ret < 0) {
+               /* Roll back the claim made above. */
+               mutex_lock(&dev->struct_mutex);
+               dev->irq_enabled = 0;
+               mutex_unlock(&dev->struct_mutex);
+               return ret;
+       }
+
+       /* Non-KMS drivers get default VGA-arbiter IRQ on/off handling. */
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               vga_client_register(dev->pdev, (void *)dev, drm_irq_vgaarb_nokms, NULL);
+
+       /* After installing handler */
+       ret = dev->driver->irq_postinstall(dev);
+       if (ret < 0) {
+               /* NOTE(review): the IRQ is left requested here; only the
+                * enabled flag is rolled back - confirm against upstream. */
+               mutex_lock(&dev->struct_mutex);
+               dev->irq_enabled = 0;
+               mutex_unlock(&dev->struct_mutex);
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL(drm_irq_install);
+
+/**
+ * Uninstall the IRQ handler.
+ *
+ * \param dev DRM device.
+ *
+ * Calls the driver's \c drm_driver_irq_uninstall() function, and stops the irq.
+ */
+int drm_irq_uninstall(struct drm_device * dev)
+{
+       unsigned long irqflags;
+       int irq_enabled, i;
+
+       if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+               return -EINVAL;
+
+       /* Atomically take and clear the enabled flag under struct_mutex. */
+       mutex_lock(&dev->struct_mutex);
+       irq_enabled = dev->irq_enabled;
+       dev->irq_enabled = 0;
+       mutex_unlock(&dev->struct_mutex);
+
+       /*
+        * Wake up any waiters so they don't hang.
+        */
+       spin_lock_irqsave(&dev->vbl_lock, irqflags);
+       for (i = 0; i < dev->num_crtcs; i++) {
+               DRM_WAKEUP(&dev->vbl_queue[i]);
+               dev->vblank_enabled[i] = 0;
+               dev->last_vblank[i] = dev->driver->get_vblank_counter(dev, i);
+       }
+       spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+
+       /* Nothing was installed; waiters are already woken above. */
+       if (!irq_enabled)
+               return -EINVAL;
+
+       DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev));
+
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               vga_client_register(dev->pdev, NULL, NULL, NULL);
+
+       dev->driver->irq_uninstall(dev);
+
+       free_irq(drm_dev_to_irq(dev), dev);
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_irq_uninstall);
+
+/**
+ * IRQ control ioctl.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_control structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Calls irq_install() or irq_uninstall() according to \p arg.
+ */
+int drm_control(struct drm_device *dev, void *data,
+               struct drm_file *file_priv)
+{
+       struct drm_control *ctl = data;
+
+       /*
+        * If the driver has no IRQ support we still return success for
+        * compatibility - this used to be a separate function in drm_dma.h.
+        */
+
+       switch (ctl->func) {
+       case DRM_INST_HANDLER:
+               if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+                       return 0;
+               /* KMS drivers manage their IRQ themselves. */
+               if (drm_core_check_feature(dev, DRIVER_MODESET))
+                       return 0;
+               /* Pre-1.2 interface required userspace to pass the IRQ number. */
+               if (dev->if_version < DRM_IF_VERSION(1, 2) &&
+                   ctl->irq != drm_dev_to_irq(dev))
+                       return -EINVAL;
+               return drm_irq_install(dev);
+       case DRM_UNINST_HANDLER:
+               if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+                       return 0;
+               if (drm_core_check_feature(dev, DRIVER_MODESET))
+                       return 0;
+               return drm_irq_uninstall(dev);
+       default:
+               return -EINVAL;
+       }
+}
+
+/**
+ * drm_vblank_count - retrieve "cooked" vblank counter value
+ * @dev: DRM device
+ * @crtc: which counter to retrieve
+ *
+ * Fetches the "cooked" vblank count value that represents the number of
+ * vblank events since the system was booted, including lost events due to
+ * modesetting activity.
+ */
+u32 drm_vblank_count(struct drm_device *dev, int crtc)
+{
+       /* Software counter; may wrap - callers compare with modular arithmetic. */
+       return atomic_read(&dev->_vblank_count[crtc]);
+}
+EXPORT_SYMBOL(drm_vblank_count);
+
+/**
+ * drm_update_vblank_count - update the master vblank counter
+ * @dev: DRM device
+ * @crtc: counter to update
+ *
+ * Call back into the driver to update the appropriate vblank counter
+ * (specified by @crtc).  Deal with wraparound, if it occurred, and
+ * update the last read value so we can deal with wraparound on the next
+ * call if necessary.
+ *
+ * Only necessary when going from off->on, to account for frames we
+ * didn't get an interrupt for.
+ *
+ * Note: caller must hold dev->vbl_lock since this reads & writes
+ * device vblank fields.
+ */
+static void drm_update_vblank_count(struct drm_device *dev, int crtc)
+{
+       u32 cur_vblank, diff;
+
+       /*
+        * Interrupts were disabled prior to this call, so deal with counter
+        * wrap if needed.
+        * NOTE!  It's possible we lost a full dev->max_vblank_count events
+        * here if the register is small or we had vblank interrupts off for
+        * a long time.
+        */
+       cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
+       diff = cur_vblank - dev->last_vblank[crtc];
+       if (cur_vblank < dev->last_vblank[crtc]) {
+               /* Hardware counter wrapped once since last_vblank was taken. */
+               diff += dev->max_vblank_count;
+
+               DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
+                         crtc, dev->last_vblank[crtc], cur_vblank, diff);
+       }
+
+       DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
+                 crtc, diff);
+
+       /* Fold the missed frames into the software "cooked" counter. */
+       atomic_add(diff, &dev->_vblank_count[crtc]);
+}
+
+/**
+ * drm_vblank_get - get a reference count on vblank events
+ * @dev: DRM device
+ * @crtc: which CRTC to own
+ *
+ * Acquire a reference count on vblank events to avoid having them disabled
+ * while in use.
+ *
+ * RETURNS
+ * Zero on success, nonzero on failure.
+ */
+int drm_vblank_get(struct drm_device *dev, int crtc)
+{
+       unsigned long irqflags;
+       int ret = 0;
+
+       spin_lock_irqsave(&dev->vbl_lock, irqflags);
+       /* Going from 0->1 means we have to enable interrupts again */
+       if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) {
+               if (!dev->vblank_enabled[crtc]) {
+                       ret = dev->driver->enable_vblank(dev, crtc);
+                       DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
+                       if (ret)
+                               /* Enable failed: undo our reference. */
+                               atomic_dec(&dev->vblank_refcount[crtc]);
+                       else {
+                               dev->vblank_enabled[crtc] = 1;
+                               drm_update_vblank_count(dev, crtc);
+                       }
+               }
+       } else {
+               /*
+                * Someone else took the first reference but enabling failed;
+                * drop ours and report the failure too.
+                */
+               if (!dev->vblank_enabled[crtc]) {
+                       atomic_dec(&dev->vblank_refcount[crtc]);
+                       ret = -EINVAL;
+               }
+       }
+       spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+
+       return ret;
+}
+EXPORT_SYMBOL(drm_vblank_get);
+
+/**
+ * drm_vblank_put - give up ownership of vblank events
+ * @dev: DRM device
+ * @crtc: which counter to give up
+ *
+ * Release ownership of a given vblank counter, turning off interrupts
+ * if possible.
+ */
+void drm_vblank_put(struct drm_device *dev, int crtc)
+{
+       BUG_ON (atomic_read (&dev->vblank_refcount[crtc]) == 0);
+
+       /* Last user schedules interrupt disable */
+       /* Deferred 5 seconds so back-to-back users avoid an enable/disable cycle. */
+       if (atomic_dec_and_test(&dev->vblank_refcount[crtc]))
+               mod_timer(&dev->vblank_disable_timer, jiffies + 5*DRM_HZ);
+}
+EXPORT_SYMBOL(drm_vblank_put);
+
+/*
+ * Immediately disable the vblank interrupt on @crtc, waking any waiters
+ * and saving the hardware counter so the missed-frame accounting in
+ * drm_update_vblank_count() stays correct when it is re-enabled.
+ */
+void drm_vblank_off(struct drm_device *dev, int crtc)
+{
+       unsigned long irqflags;
+
+       spin_lock_irqsave(&dev->vbl_lock, irqflags);
+       dev->driver->disable_vblank(dev, crtc);
+       DRM_WAKEUP(&dev->vbl_queue[crtc]);
+       dev->vblank_enabled[crtc] = 0;
+       dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
+       spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+}
+EXPORT_SYMBOL(drm_vblank_off);
+
+/**
+ * drm_vblank_pre_modeset - account for vblanks across mode sets
+ * @dev: DRM device
+ * @crtc: CRTC in question
+ * @post: post or pre mode set?
+ *
+ * Account for vblank events across mode setting events, which will likely
+ * reset the hardware frame counter.
+ */
+void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
+{
+       /* vblank is not initialized (IRQ not installed ?) */
+       if (!dev->num_crtcs)
+               return;
+       /*
+        * To avoid all the problems that might happen if interrupts
+        * were enabled/disabled around or between these calls, we just
+        * have the kernel take a reference on the CRTC (just once though
+        * to avoid corrupting the count if multiple, mismatch calls occur),
+        * so that interrupts remain enabled in the interim.
+        */
+       if (!dev->vblank_inmodeset[crtc]) {
+               /* Bit 0x1: modeset in progress; bit 0x2: we hold a vblank ref. */
+               dev->vblank_inmodeset[crtc] = 0x1;
+               if (drm_vblank_get(dev, crtc) == 0)
+                       dev->vblank_inmodeset[crtc] |= 0x2;
+       }
+}
+EXPORT_SYMBOL(drm_vblank_pre_modeset);
+
+/**
+ * drm_vblank_post_modeset - undo drm_vblank_pre_modeset() accounting
+ * @dev: DRM device
+ * @crtc: CRTC in question
+ *
+ * Drops the vblank reference taken in drm_vblank_pre_modeset() (if one
+ * was taken, i.e. bit 0x2 is set) and re-allows automatic disabling of
+ * vblank interrupts via the disable timer.
+ */
+void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
+{
+       unsigned long irqflags;
+
+       if (dev->vblank_inmodeset[crtc]) {
+               spin_lock_irqsave(&dev->vbl_lock, irqflags);
+               dev->vblank_disable_allowed = 1;
+               spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+
+               if (dev->vblank_inmodeset[crtc] & 0x2)
+                       drm_vblank_put(dev, crtc);
+
+               dev->vblank_inmodeset[crtc] = 0;
+       }
+}
+EXPORT_SYMBOL(drm_vblank_post_modeset);
+
+/**
+ * drm_modeset_ctl - handle vblank event counter changes across mode switch
+ * @DRM_IOCTL_ARGS: standard ioctl arguments
+ *
+ * Applications should call the %_DRM_PRE_MODESET and %_DRM_POST_MODESET
+ * ioctls around modesetting so that any lost vblank events are accounted for.
+ *
+ * Generally the counter will reset across mode sets.  If interrupts are
+ * enabled around this call, we don't have to do anything since the counter
+ * will have already been incremented.
+ */
+/**
+ * drm_modeset_ctl - handle vblank event counter changes across mode switch
+ * @dev: DRM device
+ * @data: struct drm_modeset_ctl from userspace
+ * @file_priv: DRM file private
+ *
+ * Applications bracket modesets with %_DRM_PRE_MODESET/%_DRM_POST_MODESET
+ * so vblank events lost while the hardware counter resets are accounted
+ * for.  Returns 0 on success or -EINVAL for a bad crtc or command.
+ */
+int drm_modeset_ctl(struct drm_device *dev, void *data,
+                   struct drm_file *file_priv)
+{
+       struct drm_modeset_ctl *modeset = data;
+       unsigned int crtc;
+       int ret = 0;
+
+       /* If drm_vblank_init() hasn't been called yet, just no-op */
+       if (!dev->num_crtcs)
+               goto out;
+
+       /*
+        * crtc must stay unsigned: modeset->crtc comes from userspace, and
+        * a huge value stored into a signed int would turn negative and
+        * slip past the range check below (out-of-bounds array access).
+        */
+       crtc = modeset->crtc;
+       if (crtc >= dev->num_crtcs) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       switch (modeset->cmd) {
+       case _DRM_PRE_MODESET:
+               drm_vblank_pre_modeset(dev, crtc);
+               break;
+       case _DRM_POST_MODESET:
+               drm_vblank_post_modeset(dev, crtc);
+               break;
+       default:
+               ret = -EINVAL;
+               break;
+       }
+
+out:
+       return ret;
+}
+
+/*
+ * Queue an asynchronous vblank event for @pipe, or deliver it immediately
+ * if the requested sequence has already passed.
+ *
+ * Ownership contract: the caller (drm_wait_vblank) holds a vblank
+ * reference for @pipe.  On the queued path that reference is kept until
+ * drm_handle_vblank_events() delivers the event; on the immediate-delivery
+ * and error paths it is dropped here (drm_vblank_put).
+ */
+static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
+                                 union drm_wait_vblank *vblwait,
+                                 struct drm_file *file_priv)
+{
+       struct drm_pending_vblank_event *e;
+       struct timeval now;
+       unsigned long flags;
+       unsigned int seq;
+       int ret;
+
+       e = kzalloc(sizeof *e, GFP_KERNEL);
+       if (e == NULL) {
+               ret = -ENOMEM;
+               goto err_put;
+       }
+
+       e->pipe = pipe;
+       e->base.pid = current->pid;
+       e->event.base.type = DRM_EVENT_VBLANK;
+       e->event.base.length = sizeof e->event;
+       e->event.user_data = vblwait->request.signal;
+       e->base.event = &e->event.base;
+       e->base.file_priv = file_priv;
+       e->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
+
+       do_gettimeofday(&now);
+       spin_lock_irqsave(&dev->event_lock, flags);
+
+       /* Per-file event buffer accounting; reject if the client is full. */
+       if (file_priv->event_space < sizeof e->event) {
+               ret = -EBUSY;
+               goto err_unlock;
+       }
+
+       file_priv->event_space -= sizeof e->event;
+       seq = drm_vblank_count(dev, pipe);
+       /* (seq - requested) <= 2^23 means "requested is in the past" (wrap-safe). */
+       if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) &&
+           (seq - vblwait->request.sequence) <= (1 << 23)) {
+               vblwait->request.sequence = seq + 1;
+               vblwait->reply.sequence = vblwait->request.sequence;
+       }
+
+       DRM_DEBUG("event on vblank count %d, current %d, crtc %d\n",
+                 vblwait->request.sequence, seq, pipe);
+
+       trace_drm_vblank_event_queued(current->pid, pipe,
+                                     vblwait->request.sequence);
+
+       e->event.sequence = vblwait->request.sequence;
+       if ((seq - vblwait->request.sequence) <= (1 << 23)) {
+               /* Already happened: deliver right away and drop the ref. */
+               e->event.tv_sec = now.tv_sec;
+               e->event.tv_usec = now.tv_usec;
+               drm_vblank_put(dev, pipe);
+               list_add_tail(&e->base.link, &e->base.file_priv->event_list);
+               wake_up_interruptible(&e->base.file_priv->event_wait);
+               trace_drm_vblank_event_delivered(current->pid, pipe,
+                                                vblwait->request.sequence);
+       } else {
+               list_add_tail(&e->base.link, &dev->vblank_event_list);
+       }
+
+       spin_unlock_irqrestore(&dev->event_lock, flags);
+
+       return 0;
+
+err_unlock:
+       spin_unlock_irqrestore(&dev->event_lock, flags);
+       kfree(e);
+err_put:
+       drm_vblank_put(dev, pipe);
+       return ret;
+}
+
+/**
+ * Wait for VBLANK.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param data user argument, pointing to a drm_wait_vblank structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * This function enables the vblank interrupt on the pipe requested, then
+ * sleeps waiting for the requested sequence number to occur, and drops
+ * the vblank interrupt refcount afterwards. (vblank irq disable follows that
+ * after a timeout with no further vblank waits scheduled).
+ */
+int drm_wait_vblank(struct drm_device *dev, void *data,
+                   struct drm_file *file_priv)
+{
+       union drm_wait_vblank *vblwait = data;
+       int ret = 0;
+       unsigned int flags, seq, crtc;
+
+       if ((!drm_dev_to_irq(dev)) || (!dev->irq_enabled))
+               return -EINVAL;
+
+       /* Signal-on-vblank is not supported by this implementation. */
+       if (vblwait->request.type & _DRM_VBLANK_SIGNAL)
+               return -EINVAL;
+
+       if (vblwait->request.type &
+           ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) {
+               DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n",
+                         vblwait->request.type,
+                         (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK));
+               return -EINVAL;
+       }
+
+       flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
+       /* This interface only knows primary (0) and secondary (1) CRTCs. */
+       crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
+
+       if (crtc >= dev->num_crtcs)
+               return -EINVAL;
+
+       ret = drm_vblank_get(dev, crtc);
+       if (ret) {
+               DRM_DEBUG("failed to acquire vblank counter, %d\n", ret);
+               return ret;
+       }
+       seq = drm_vblank_count(dev, crtc);
+
+       switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
+       case _DRM_VBLANK_RELATIVE:
+               vblwait->request.sequence += seq;
+               vblwait->request.type &= ~_DRM_VBLANK_RELATIVE;
+               /* fallthrough - relative is now absolute */
+       case _DRM_VBLANK_ABSOLUTE:
+               break;
+       default:
+               ret = -EINVAL;
+               goto done;
+       }
+
+       /* Event path: the vblank reference is handed off to the queue helper. */
+       if (flags & _DRM_VBLANK_EVENT)
+               return drm_queue_vblank_event(dev, crtc, vblwait, file_priv);
+
+       /* (seq - requested) <= 2^23 means "requested already passed" (wrap-safe). */
+       if ((flags & _DRM_VBLANK_NEXTONMISS) &&
+           (seq - vblwait->request.sequence) <= (1<<23)) {
+               vblwait->request.sequence = seq + 1;
+       }
+
+       DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
+                 vblwait->request.sequence, crtc);
+       dev->last_vblank_wait[crtc] = vblwait->request.sequence;
+       /* Sleep up to 3 s; also bail if the IRQ is torn down under us. */
+       DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
+                   (((drm_vblank_count(dev, crtc) -
+                      vblwait->request.sequence) <= (1 << 23)) ||
+                    !dev->irq_enabled));
+
+       if (ret != -EINTR) {
+               struct timeval now;
+
+               do_gettimeofday(&now);
+
+               vblwait->reply.tval_sec = now.tv_sec;
+               vblwait->reply.tval_usec = now.tv_usec;
+               vblwait->reply.sequence = drm_vblank_count(dev, crtc);
+               DRM_DEBUG("returning %d to client\n",
+                         vblwait->reply.sequence);
+       } else {
+               DRM_DEBUG("vblank wait interrupted by signal\n");
+       }
+
+done:
+       drm_vblank_put(dev, crtc);
+       return ret;
+}
+
+/*
+ * Deliver every queued vblank event for @crtc whose requested sequence has
+ * been reached, moving it to its owner's per-file event list and dropping
+ * the vblank reference held since drm_queue_vblank_event().  Called from
+ * drm_handle_vblank(), i.e. interrupt context.
+ */
+void drm_handle_vblank_events(struct drm_device *dev, int crtc)
+{
+       struct drm_pending_vblank_event *e, *t;
+       struct timeval now;
+       unsigned long flags;
+       unsigned int seq;
+
+       do_gettimeofday(&now);
+       seq = drm_vblank_count(dev, crtc);
+
+       spin_lock_irqsave(&dev->event_lock, flags);
+
+       list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
+               if (e->pipe != crtc)
+                       continue;
+               /* Requested sequence still in the future (wrap-safe compare). */
+               if ((seq - e->event.sequence) > (1<<23))
+                       continue;
+
+               DRM_DEBUG("vblank event on %d, current %d\n",
+                         e->event.sequence, seq);
+
+               e->event.sequence = seq;
+               e->event.tv_sec = now.tv_sec;
+               e->event.tv_usec = now.tv_usec;
+               drm_vblank_put(dev, e->pipe);
+               list_move_tail(&e->base.link, &e->base.file_priv->event_list);
+               wake_up_interruptible(&e->base.file_priv->event_wait);
+               trace_drm_vblank_event_delivered(e->base.pid, e->pipe,
+                                                e->event.sequence);
+       }
+
+       spin_unlock_irqrestore(&dev->event_lock, flags);
+
+       trace_drm_vblank_event(crtc, seq);
+}
+
+/**
+ * drm_handle_vblank - handle a vblank event
+ * @dev: DRM device
+ * @crtc: where this event occurred
+ *
+ * Drivers should call this routine in their vblank interrupt handlers to
+ * update the vblank counter and send any signals that may be pending.
+ */
+void drm_handle_vblank(struct drm_device *dev, int crtc)
+{
+       /* Vblank tracking not initialized (drm_vblank_init() not called). */
+       if (!dev->num_crtcs)
+               return;
+
+       atomic_inc(&dev->_vblank_count[crtc]);
+       DRM_WAKEUP(&dev->vbl_queue[crtc]);
+       drm_handle_vblank_events(dev, crtc);
+}
+EXPORT_SYMBOL(drm_handle_vblank);
diff --git a/services4/3rdparty/linux_drm/drm_lock.c b/services4/3rdparty/linux_drm/drm_lock.c
new file mode 100644 (file)
index 0000000..632ae24
--- /dev/null
@@ -0,0 +1,374 @@
+/**
+ * \file drm_lock.c
+ * IOCTLs for locking
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Tue Feb  2 08:37:54 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+
+static int drm_notifier(void *priv);
+
+static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
+
+/**
+ * Lock ioctl.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_lock structure.
+ * \return zero on success or negative number on failure.
+ *
+ * Add the current task to the lock wait queue, and attempt to take the lock.
+ */
+int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       DECLARE_WAITQUEUE(entry, current);
+       struct drm_lock *lock = data;
+       struct drm_master *master = file_priv->master;
+       int ret = 0;
+
+       ++file_priv->lock_count;
+
+       if (lock->context == DRM_KERNEL_CONTEXT) {
+               DRM_ERROR("Process %d using kernel context %d\n",
+                         task_pid_nr(current), lock->context);
+               return -EINVAL;
+       }
+
+       DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
+                 lock->context, task_pid_nr(current),
+                 master->lock.hw_lock->lock, lock->flags);
+
+       if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE))
+               if (lock->context < 0)
+                       return -EINVAL;
+
+       add_wait_queue(&master->lock.lock_queue, &entry);
+       spin_lock_bh(&master->lock.spinlock);
+       master->lock.user_waiters++;
+       spin_unlock_bh(&master->lock.spinlock);
+
+       for (;;) {
+               __set_current_state(TASK_INTERRUPTIBLE);
+               if (!master->lock.hw_lock) {
+                       /* Device has been unregistered */
+                       send_sig(SIGTERM, current, 0);
+                       ret = -EINTR;
+                       break;
+               }
+               if (drm_lock_take(&master->lock, lock->context)) {
+                       master->lock.file_priv = file_priv;
+                       master->lock.lock_time = jiffies;
+                       atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
+                       break;  /* Got lock */
+               }
+
+               /* Contention */
+               mutex_unlock(&drm_global_mutex);
+               schedule();
+               mutex_lock(&drm_global_mutex);
+               if (signal_pending(current)) {
+                       ret = -EINTR;
+                       break;
+               }
+       }
+       spin_lock_bh(&master->lock.spinlock);
+       master->lock.user_waiters--;
+       spin_unlock_bh(&master->lock.spinlock);
+       __set_current_state(TASK_RUNNING);
+       remove_wait_queue(&master->lock.lock_queue, &entry);
+
+       DRM_DEBUG("%d %s\n", lock->context,
+                 ret ? "interrupted" : "has lock");
+       if (ret) return ret;
+
+       /* don't block all signals on the master process for now
+        * really probably not the correct answer but lets us debug xkb
+        * xserver for now */
+       if (!file_priv->is_master) {
+               sigemptyset(&dev->sigmask);
+               sigaddset(&dev->sigmask, SIGSTOP);
+               sigaddset(&dev->sigmask, SIGTSTP);
+               sigaddset(&dev->sigmask, SIGTTIN);
+               sigaddset(&dev->sigmask, SIGTTOU);
+               dev->sigdata.context = lock->context;
+               dev->sigdata.lock = master->lock.hw_lock;
+               block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);
+       }
+
+       if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT))
+       {
+               if (dev->driver->dma_quiescent(dev)) {
+                       DRM_DEBUG("%d waiting for DMA quiescent\n",
+                                 lock->context);
+                       return -EBUSY;
+               }
+       }
+
+       return 0;
+}
+
+/**
+ * Unlock ioctl.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_lock structure.
+ * \return zero on success or negative number on failure.
+ *
+ * Transfer and free the lock.
+ */
+int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       struct drm_lock *lock = data;
+       struct drm_master *master = file_priv->master;
+
+       if (lock->context == DRM_KERNEL_CONTEXT) {
+               DRM_ERROR("Process %d using kernel context %d\n",
+                         task_pid_nr(current), lock->context);
+               return -EINVAL;
+       }
+
+       atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
+
+       if (drm_lock_free(&master->lock, lock->context)) {
+               /* FIXME: Should really bail out here. */
+       }
+
+       unblock_all_signals();
+       return 0;
+}
+
+/**
+ * Take the heavyweight lock.
+ *
+ * \param lock lock pointer.
+ * \param context locking context.
+ * \return one if the lock is held, or zero otherwise.
+ *
+ * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
+ */
+static
+int drm_lock_take(struct drm_lock_data *lock_data,
+                 unsigned int context)
+{
+       unsigned int old, new, prev;
+       volatile unsigned int *lock = &lock_data->hw_lock->lock;
+
+       spin_lock_bh(&lock_data->spinlock);
+       do {
+               old = *lock;
+               if (old & _DRM_LOCK_HELD)
+                       new = old | _DRM_LOCK_CONT;
+               else {
+                       new = context | _DRM_LOCK_HELD |
+                               ((lock_data->user_waiters + lock_data->kernel_waiters > 1) ?
+                                _DRM_LOCK_CONT : 0);
+               }
+               prev = cmpxchg(lock, old, new);
+       } while (prev != old);
+       spin_unlock_bh(&lock_data->spinlock);
+
+       if (_DRM_LOCKING_CONTEXT(old) == context) {
+               if (old & _DRM_LOCK_HELD) {
+                       if (context != DRM_KERNEL_CONTEXT) {
+                               DRM_ERROR("%d holds heavyweight lock\n",
+                                         context);
+                       }
+                       return 0;
+               }
+       }
+
+       if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) {
+               /* Have lock */
+               return 1;
+       }
+       return 0;
+}
+
+/**
+ * This takes a lock forcibly and hands it to context. Should ONLY be used
+ * inside *_unlock to give lock to kernel before calling *_dma_schedule.
+ *
+ * \param dev DRM device.
+ * \param lock lock pointer.
+ * \param context locking context.
+ * \return always one.
+ *
+ * Resets the lock file pointer.
+ * Marks the lock as held by the given context, via the \p cmpxchg instruction.
+ */
+static int drm_lock_transfer(struct drm_lock_data *lock_data,
+                            unsigned int context)
+{
+       unsigned int old, new, prev;
+       volatile unsigned int *lock = &lock_data->hw_lock->lock;
+
+       lock_data->file_priv = NULL;
+       do {
+               old = *lock;
+               new = context | _DRM_LOCK_HELD;
+               prev = cmpxchg(lock, old, new);
+       } while (prev != old);
+       return 1;
+}
+
+/**
+ * Free lock.
+ *
+ * \param dev DRM device.
+ * \param lock lock.
+ * \param context context.
+ *
+ * Resets the lock file pointer.
+ * Marks the lock as not held, via the \p cmpxchg instruction. Wakes any task
+ * waiting on the lock queue.
+ */
+int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context)
+{
+       unsigned int old, new, prev;
+       volatile unsigned int *lock = &lock_data->hw_lock->lock;
+
+       spin_lock_bh(&lock_data->spinlock);
+       if (lock_data->kernel_waiters != 0) {
+               drm_lock_transfer(lock_data, 0);
+               lock_data->idle_has_lock = 1;
+               spin_unlock_bh(&lock_data->spinlock);
+               return 1;
+       }
+       spin_unlock_bh(&lock_data->spinlock);
+
+       do {
+               old = *lock;
+               new = _DRM_LOCKING_CONTEXT(old);
+               prev = cmpxchg(lock, old, new);
+       } while (prev != old);
+
+       if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
+               DRM_ERROR("%d freed heavyweight lock held by %d\n",
+                         context, _DRM_LOCKING_CONTEXT(old));
+               return 1;
+       }
+       wake_up_interruptible(&lock_data->lock_queue);
+       return 0;
+}
+
+/**
+ * If we get here, it means that the process has called DRM_IOCTL_LOCK
+ * without calling DRM_IOCTL_UNLOCK.
+ *
+ * If the lock is not held, then let the signal proceed as usual.  If the lock
+ * is held, then set the contended flag and keep the signal blocked.
+ *
+ * \param priv pointer to a drm_sigdata structure.
+ * \return one if the signal should be delivered normally, or zero if the
+ * signal should be blocked.
+ */
+static int drm_notifier(void *priv)
+{
+       struct drm_sigdata *s = (struct drm_sigdata *) priv;
+       unsigned int old, new, prev;
+
+       /* Allow signal delivery if lock isn't held */
+       if (!s->lock || !_DRM_LOCK_IS_HELD(s->lock->lock)
+           || _DRM_LOCKING_CONTEXT(s->lock->lock) != s->context)
+               return 1;
+
+       /* Otherwise, set flag to force call to
+          drmUnlock */
+       do {
+               old = s->lock->lock;
+               new = old | _DRM_LOCK_CONT;
+               prev = cmpxchg(&s->lock->lock, old, new);
+       } while (prev != old);
+       return 0;
+}
+
+/**
+ * This function returns immediately and takes the hw lock
+ * with the kernel context if it is free, otherwise it gets the highest priority when and if
+ * it is eventually released.
+ *
+ * This guarantees that the kernel will _eventually_ have the lock _unless_ it is held
+ * by a blocked process. (In the latter case an explicit wait for the hardware lock would cause
+ * a deadlock, which is why the "idlelock" was invented).
+ *
+ * This should be sufficient to wait for GPU idle without
+ * having to worry about starvation.
+ */
+
+void drm_idlelock_take(struct drm_lock_data *lock_data)
+{
+       int ret = 0;
+
+       spin_lock_bh(&lock_data->spinlock);
+       lock_data->kernel_waiters++;
+       if (!lock_data->idle_has_lock) {
+
+               spin_unlock_bh(&lock_data->spinlock);
+               ret = drm_lock_take(lock_data, DRM_KERNEL_CONTEXT);
+               spin_lock_bh(&lock_data->spinlock);
+
+               if (ret == 1)
+                       lock_data->idle_has_lock = 1;
+       }
+       spin_unlock_bh(&lock_data->spinlock);
+}
+
+void drm_idlelock_release(struct drm_lock_data *lock_data)
+{
+       unsigned int old, prev;
+       volatile unsigned int *lock = &lock_data->hw_lock->lock;
+
+       spin_lock_bh(&lock_data->spinlock);
+       if (--lock_data->kernel_waiters == 0) {
+               if (lock_data->idle_has_lock) {
+                       do {
+                               old = *lock;
+                               prev = cmpxchg(lock, old, DRM_KERNEL_CONTEXT);
+                       } while (prev != old);
+                       wake_up_interruptible(&lock_data->lock_queue);
+                       lock_data->idle_has_lock = 0;
+               }
+       }
+       spin_unlock_bh(&lock_data->spinlock);
+}
+
+int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
+{
+       struct drm_master *master = file_priv->master;
+       return (file_priv->lock_count && master->lock.hw_lock &&
+               _DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) &&
+               master->lock.file_priv == file_priv);
+}
diff --git a/services4/3rdparty/linux_drm/drm_memory.c b/services4/3rdparty/linux_drm/drm_memory.c
new file mode 100644 (file)
index 0000000..c9b8050
--- /dev/null
@@ -0,0 +1,162 @@
+/**
+ * \file drm_memory.c
+ * Memory management wrappers for DRM
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Thu Feb  4 14:00:34 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/highmem.h>
+#include "drmP.h"
+
+/**
+ * Called when "/proc/dri/%dev%/mem" is read.
+ *
+ * \param buf output buffer.
+ * \param start start of output data.
+ * \param offset requested start offset.
+ * \param len requested number of bytes.
+ * \param eof whether there is no more data to return.
+ * \param data private data.
+ * \return number of written bytes.
+ *
+ * No-op.
+ */
+int drm_mem_info(char *buf, char **start, off_t offset,
+                int len, int *eof, void *data)
+{
+       return 0;
+}
+
+#if __OS_HAS_AGP
+static void *agp_remap(unsigned long offset, unsigned long size,
+                      struct drm_device * dev)
+{
+       unsigned long i, num_pages =
+           PAGE_ALIGN(size) / PAGE_SIZE;
+       struct drm_agp_mem *agpmem;
+       struct page **page_map;
+       struct page **phys_page_map;
+       void *addr;
+
+       size = PAGE_ALIGN(size);
+
+#ifdef __alpha__
+       offset -= dev->hose->mem_space->start;
+#endif
+
+       list_for_each_entry(agpmem, &dev->agp->memory, head)
+               if (agpmem->bound <= offset
+                   && (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >=
+                   (offset + size))
+                       break;
+       if (&agpmem->head == &dev->agp->memory)
+               return NULL;
+
+       /*
+        * OK, we're mapping AGP space on a chipset/platform on which memory accesses by
+        * the CPU do not get remapped by the GART.  We fix this by using the kernel's
+        * page-table instead (that's probably faster anyhow...).
+        */
+       /* note: use vmalloc() because num_pages could be large... */
+       page_map = vmalloc(num_pages * sizeof(struct page *));
+       if (!page_map)
+               return NULL;
+
+       phys_page_map = (agpmem->memory->pages + (offset - agpmem->bound) / PAGE_SIZE);
+       for (i = 0; i < num_pages; ++i)
+               page_map[i] = phys_page_map[i];
+       addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP);
+       vfree(page_map);
+
+       return addr;
+}
+
+/** Wrapper around agp_free_memory() */
+void drm_free_agp(DRM_AGP_MEM * handle, int pages)
+{
+       agp_free_memory(handle);
+}
+EXPORT_SYMBOL(drm_free_agp);
+
+/** Wrapper around agp_bind_memory() */
+int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
+{
+       return agp_bind_memory(handle, start);
+}
+
+/** Wrapper around agp_unbind_memory() */
+int drm_unbind_agp(DRM_AGP_MEM * handle)
+{
+       return agp_unbind_memory(handle);
+}
+EXPORT_SYMBOL(drm_unbind_agp);
+
+#else  /*  __OS_HAS_AGP  */
+static inline void *agp_remap(unsigned long offset, unsigned long size,
+                             struct drm_device * dev)
+{
+       return NULL;
+}
+
+#endif                         /* agp */
+
+void drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev)
+{
+       if (drm_core_has_AGP(dev) &&
+           dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
+               map->handle = agp_remap(map->offset, map->size, dev);
+       else
+               map->handle = ioremap(map->offset, map->size);
+}
+EXPORT_SYMBOL(drm_core_ioremap);
+
+void drm_core_ioremap_wc(struct drm_local_map *map, struct drm_device *dev)
+{
+       if (drm_core_has_AGP(dev) &&
+           dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
+               map->handle = agp_remap(map->offset, map->size, dev);
+       else
+               map->handle = ioremap_wc(map->offset, map->size);
+}
+EXPORT_SYMBOL(drm_core_ioremap_wc);
+
+void drm_core_ioremapfree(struct drm_local_map *map, struct drm_device *dev)
+{
+       if (!map->handle || !map->size)
+               return;
+
+       if (drm_core_has_AGP(dev) &&
+           dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
+               vunmap(map->handle);
+       else
+               iounmap(map->handle);
+}
+EXPORT_SYMBOL(drm_core_ioremapfree);
diff --git a/services4/3rdparty/linux_drm/drm_mm.c b/services4/3rdparty/linux_drm/drm_mm.c
new file mode 100644 (file)
index 0000000..a6bfc30
--- /dev/null
@@ -0,0 +1,620 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ **************************************************************************/
+
+/*
+ * Generic simple memory manager implementation. Intended to be used as a base
+ * class implementation for more advanced memory managers.
+ *
+ * Note that the algorithm used is quite simple and there might be substantial
+ * performance gains if a smarter free list is implemented. Currently it is just an
+ * unordered stack of free regions. This could easily be improved if an RB-tree
+ * is used instead. At least if we expect heavy fragmentation.
+ *
+ * Aligned allocations can also see improvement.
+ *
+ * Authors:
+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include "drmP.h"
+#include "drm_mm.h"
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+
+#define MM_UNUSED_TARGET 4
+
+static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
+{
+       struct drm_mm_node *child;
+
+       if (atomic)
+               child = kzalloc(sizeof(*child), GFP_ATOMIC);
+       else
+               child = kzalloc(sizeof(*child), GFP_KERNEL);
+
+       if (unlikely(child == NULL)) {
+               spin_lock(&mm->unused_lock);
+               if (list_empty(&mm->unused_nodes))
+                       child = NULL;
+               else {
+                       child =
+                           list_entry(mm->unused_nodes.next,
+                                      struct drm_mm_node, free_stack);
+                       list_del(&child->free_stack);
+                       --mm->num_unused;
+               }
+               spin_unlock(&mm->unused_lock);
+       }
+       return child;
+}
+
+/* drm_mm_pre_get() - pre allocate drm_mm_node structure
+ * drm_mm:     memory manager struct we are pre-allocating for
+ *
+ * Returns 0 on success or -ENOMEM if allocation fails.
+ */
+int drm_mm_pre_get(struct drm_mm *mm)
+{
+       struct drm_mm_node *node;
+
+       spin_lock(&mm->unused_lock);
+       while (mm->num_unused < MM_UNUSED_TARGET) {
+               spin_unlock(&mm->unused_lock);
+               node = kzalloc(sizeof(*node), GFP_KERNEL);
+               spin_lock(&mm->unused_lock);
+
+               if (unlikely(node == NULL)) {
+                       int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
+                       spin_unlock(&mm->unused_lock);
+                       return ret;
+               }
+               ++mm->num_unused;
+               list_add_tail(&node->free_stack, &mm->unused_nodes);
+       }
+       spin_unlock(&mm->unused_lock);
+       return 0;
+}
+EXPORT_SYMBOL(drm_mm_pre_get);
+
+static int drm_mm_create_tail_node(struct drm_mm *mm,
+                                  unsigned long start,
+                                  unsigned long size, int atomic)
+{
+       struct drm_mm_node *child;
+
+       child = drm_mm_kmalloc(mm, atomic);
+       if (unlikely(child == NULL))
+               return -ENOMEM;
+
+       child->free = 1;
+       child->size = size;
+       child->start = start;
+       child->mm = mm;
+
+       list_add_tail(&child->node_list, &mm->node_list);
+       list_add_tail(&child->free_stack, &mm->free_stack);
+
+       return 0;
+}
+
+static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
+                                                unsigned long size,
+                                                int atomic)
+{
+       struct drm_mm_node *child;
+
+       child = drm_mm_kmalloc(parent->mm, atomic);
+       if (unlikely(child == NULL))
+               return NULL;
+
+       INIT_LIST_HEAD(&child->free_stack);
+
+       child->size = size;
+       child->start = parent->start;
+       child->mm = parent->mm;
+
+       list_add_tail(&child->node_list, &parent->node_list);
+       INIT_LIST_HEAD(&child->free_stack);
+
+       parent->size -= size;
+       parent->start += size;
+       return child;
+}
+
+
+struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
+                                            unsigned long size,
+                                            unsigned alignment,
+                                            int atomic)
+{
+
+       struct drm_mm_node *align_splitoff = NULL;
+       unsigned tmp = 0;
+
+       if (alignment)
+               tmp = node->start % alignment;
+
+       if (tmp) {
+               align_splitoff =
+                   drm_mm_split_at_start(node, alignment - tmp, atomic);
+               if (unlikely(align_splitoff == NULL))
+                       return NULL;
+       }
+
+       if (node->size == size) {
+               list_del_init(&node->free_stack);
+               node->free = 0;
+       } else {
+               node = drm_mm_split_at_start(node, size, atomic);
+       }
+
+       if (align_splitoff)
+               drm_mm_put_block(align_splitoff);
+
+       return node;
+}
+EXPORT_SYMBOL(drm_mm_get_block_generic);
+
+struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node,
+                                               unsigned long size,
+                                               unsigned alignment,
+                                               unsigned long start,
+                                               unsigned long end,
+                                               int atomic)
+{
+       struct drm_mm_node *align_splitoff = NULL;
+       unsigned tmp = 0;
+       unsigned wasted = 0;
+
+       if (node->start < start)
+               wasted += start - node->start;
+       if (alignment)
+               tmp = ((node->start + wasted) % alignment);
+
+       if (tmp)
+               wasted += alignment - tmp;
+       if (wasted) {
+               align_splitoff = drm_mm_split_at_start(node, wasted, atomic);
+               if (unlikely(align_splitoff == NULL))
+                       return NULL;
+       }
+
+       if (node->size == size) {
+               list_del_init(&node->free_stack);
+               node->free = 0;
+       } else {
+               node = drm_mm_split_at_start(node, size, atomic);
+       }
+
+       if (align_splitoff)
+               drm_mm_put_block(align_splitoff);
+
+       return node;
+}
+EXPORT_SYMBOL(drm_mm_get_block_range_generic);
+
+/*
+ * Put a block. Merge with the previous and / or next block if they are free.
+ * Otherwise add to the free stack.
+ */
+
+void drm_mm_put_block(struct drm_mm_node *cur)
+{
+
+       struct drm_mm *mm = cur->mm;
+       struct list_head *cur_head = &cur->node_list;
+       struct list_head *root_head = &mm->node_list;
+       struct drm_mm_node *prev_node = NULL;
+       struct drm_mm_node *next_node;
+
+       int merged = 0;
+
+       BUG_ON(cur->scanned_block || cur->scanned_prev_free
+                                 || cur->scanned_next_free);
+
+       if (cur_head->prev != root_head) {
+               prev_node =
+                   list_entry(cur_head->prev, struct drm_mm_node, node_list);
+               if (prev_node->free) {
+                       prev_node->size += cur->size;
+                       merged = 1;
+               }
+       }
+       if (cur_head->next != root_head) {
+               next_node =
+                   list_entry(cur_head->next, struct drm_mm_node, node_list);
+               if (next_node->free) {
+                       if (merged) {
+                               prev_node->size += next_node->size;
+                               list_del(&next_node->node_list);
+                               list_del(&next_node->free_stack);
+                               spin_lock(&mm->unused_lock);
+                               if (mm->num_unused < MM_UNUSED_TARGET) {
+                                       list_add(&next_node->free_stack,
+                                                &mm->unused_nodes);
+                                       ++mm->num_unused;
+                               } else
+                                       kfree(next_node);
+                               spin_unlock(&mm->unused_lock);
+                       } else {
+                               next_node->size += cur->size;
+                               next_node->start = cur->start;
+                               merged = 1;
+                       }
+               }
+       }
+       if (!merged) {
+               cur->free = 1;
+               list_add(&cur->free_stack, &mm->free_stack);
+       } else {
+               list_del(&cur->node_list);
+               spin_lock(&mm->unused_lock);
+               if (mm->num_unused < MM_UNUSED_TARGET) {
+                       list_add(&cur->free_stack, &mm->unused_nodes);
+                       ++mm->num_unused;
+               } else
+                       kfree(cur);
+               spin_unlock(&mm->unused_lock);
+       }
+}
+
+EXPORT_SYMBOL(drm_mm_put_block);
+
+static int check_free_hole(unsigned long start, unsigned long end,
+                          unsigned long size, unsigned alignment)
+{
+       unsigned wasted = 0;
+
+       if (end - start < size)
+               return 0;
+
+       if (alignment) {
+               unsigned tmp = start % alignment;
+               if (tmp)
+                       wasted = alignment - tmp;
+       }
+
+       if (end >= start + size + wasted) {
+               return 1;
+       }
+
+       return 0;
+}
+
+struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
+                                      unsigned long size,
+                                      unsigned alignment, int best_match)
+{
+       struct drm_mm_node *entry;
+       struct drm_mm_node *best;
+       unsigned long best_size;
+
+       BUG_ON(mm->scanned_blocks);
+
+       best = NULL;
+       best_size = ~0UL;
+
+       list_for_each_entry(entry, &mm->free_stack, free_stack) {
+               if (!check_free_hole(entry->start, entry->start + entry->size,
+                                    size, alignment))
+                       continue;
+
+               if (!best_match)
+                       return entry;
+
+               if (entry->size < best_size) {
+                       best = entry;
+                       best_size = entry->size;
+               }
+       }
+
+       return best;
+}
+EXPORT_SYMBOL(drm_mm_search_free);
+
+struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
+                                               unsigned long size,
+                                               unsigned alignment,
+                                               unsigned long start,
+                                               unsigned long end,
+                                               int best_match)
+{
+       struct drm_mm_node *entry;
+       struct drm_mm_node *best;
+       unsigned long best_size;
+
+       BUG_ON(mm->scanned_blocks);
+
+       best = NULL;
+       best_size = ~0UL;
+
+       list_for_each_entry(entry, &mm->free_stack, free_stack) {
+               unsigned long adj_start = entry->start < start ?
+                       start : entry->start;
+               unsigned long adj_end = entry->start + entry->size > end ?
+                       end : entry->start + entry->size;
+
+               if (!check_free_hole(adj_start, adj_end, size, alignment))
+                       continue;
+
+               if (!best_match)
+                       return entry;
+
+               if (entry->size < best_size) {
+                       best = entry;
+                       best_size = entry->size;
+               }
+       }
+
+       return best;
+}
+EXPORT_SYMBOL(drm_mm_search_free_in_range);
+
+/**
+ * Initialize LRU scanning.
+ *
+ * This simply sets up the scanning routines with the parameters for the desired
+ * hole.
+ *
+ * Warning: As long as the scan list is non-empty, no other operations than
+ * adding/removing nodes to/from the scan list are allowed.
+ */
+void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
+                     unsigned alignment)
+{
+       mm->scan_alignment = alignment;
+       mm->scan_size = size;
+       mm->scanned_blocks = 0;
+       mm->scan_hit_start = 0;
+       mm->scan_hit_size = 0;
+}
+EXPORT_SYMBOL(drm_mm_init_scan);
+
+/**
+ * Add a node to the scan list that might be freed to make space for the desired
+ * hole.
+ *
+ * Returns non-zero, if a hole has been found, zero otherwise.
+ */
+int drm_mm_scan_add_block(struct drm_mm_node *node)
+{
+       struct drm_mm *mm = node->mm;
+       struct list_head *prev_free, *next_free;
+       struct drm_mm_node *prev_node, *next_node;
+
+       mm->scanned_blocks++;
+
+       prev_free = next_free = NULL;
+
+       BUG_ON(node->free);
+       node->scanned_block = 1;
+       node->free = 1;
+
+       if (node->node_list.prev != &mm->node_list) {
+               prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
+                                      node_list);
+
+               if (prev_node->free) {
+                       list_del(&prev_node->node_list);
+
+                       node->start = prev_node->start;
+                       node->size += prev_node->size;
+
+                       prev_node->scanned_prev_free = 1;
+
+                       prev_free = &prev_node->free_stack;
+               }
+       }
+
+       if (node->node_list.next != &mm->node_list) {
+               next_node = list_entry(node->node_list.next, struct drm_mm_node,
+                                      node_list);
+
+               if (next_node->free) {
+                       list_del(&next_node->node_list);
+
+                       node->size += next_node->size;
+
+                       next_node->scanned_next_free = 1;
+
+                       next_free = &next_node->free_stack;
+               }
+       }
+
+       /* The free_stack list is not used for allocated objects, so these two
+        * pointers can be abused (as long as no allocations in this memory
+        * manager happens). */
+       node->free_stack.prev = prev_free;
+       node->free_stack.next = next_free;
+
+       if (check_free_hole(node->start, node->start + node->size,
+                           mm->scan_size, mm->scan_alignment)) {
+               mm->scan_hit_start = node->start;
+               mm->scan_hit_size = node->size;
+
+               return 1;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_mm_scan_add_block);
+
+/**
+ * Remove a node from the scan list.
+ *
+ * Nodes _must_ be removed in the exact same order from the scan list as they
+ * have been added, otherwise the internal state of the memory manager will be
+ * corrupted.
+ *
+ * When the scan list is empty, the selected memory nodes can be freed. An
+ * immediately following drm_mm_search_free with best_match = 0 will then return
+ * the just freed block (because it's at the top of the free_stack list).
+ *
+ * Returns one if this block should be evicted, zero otherwise. Will always
+ * return zero when no hole has been found.
+ */
+int drm_mm_scan_remove_block(struct drm_mm_node *node)
+{
+       struct drm_mm *mm = node->mm;
+       struct drm_mm_node *prev_node, *next_node;
+
+       mm->scanned_blocks--;
+
+       BUG_ON(!node->scanned_block);
+       node->scanned_block = 0;
+       node->free = 0;
+
+       prev_node = list_entry(node->free_stack.prev, struct drm_mm_node,
+                              free_stack);
+       next_node = list_entry(node->free_stack.next, struct drm_mm_node,
+                              free_stack);
+
+       if (prev_node) {
+               BUG_ON(!prev_node->scanned_prev_free);
+               prev_node->scanned_prev_free = 0;
+
+               list_add_tail(&prev_node->node_list, &node->node_list);
+
+               node->start = prev_node->start + prev_node->size;
+               node->size -= prev_node->size;
+       }
+
+       if (next_node) {
+               BUG_ON(!next_node->scanned_next_free);
+               next_node->scanned_next_free = 0;
+
+               list_add(&next_node->node_list, &node->node_list);
+
+               node->size -= next_node->size;
+       }
+
+       INIT_LIST_HEAD(&node->free_stack);
+
+       /* Only need to check for containment because start&size for the
+        * complete resulting free block (not just the desired part) is
+        * stored. */
+       if (node->start >= mm->scan_hit_start &&
+           node->start + node->size
+                       <= mm->scan_hit_start + mm->scan_hit_size) {
+               return 1;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_mm_scan_remove_block);
+
+int drm_mm_clean(struct drm_mm * mm)
+{
+       struct list_head *head = &mm->node_list;
+
+       return (head->next->next == head);
+}
+EXPORT_SYMBOL(drm_mm_clean);
+
+int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
+{
+       INIT_LIST_HEAD(&mm->node_list);
+       INIT_LIST_HEAD(&mm->free_stack);
+       INIT_LIST_HEAD(&mm->unused_nodes);
+       mm->num_unused = 0;
+       mm->scanned_blocks = 0;
+       spin_lock_init(&mm->unused_lock);
+
+       return drm_mm_create_tail_node(mm, start, size, 0);
+}
+EXPORT_SYMBOL(drm_mm_init);
+
+void drm_mm_takedown(struct drm_mm * mm)
+{
+       struct list_head *bnode = mm->free_stack.next;
+       struct drm_mm_node *entry;
+       struct drm_mm_node *next;
+
+       entry = list_entry(bnode, struct drm_mm_node, free_stack);
+
+       if (entry->node_list.next != &mm->node_list ||
+           entry->free_stack.next != &mm->free_stack) {
+               DRM_ERROR("Memory manager not clean. Delaying takedown\n");
+               return;
+       }
+
+       list_del(&entry->free_stack);
+       list_del(&entry->node_list);
+       kfree(entry);
+
+       spin_lock(&mm->unused_lock);
+       list_for_each_entry_safe(entry, next, &mm->unused_nodes, free_stack) {
+               list_del(&entry->free_stack);
+               kfree(entry);
+               --mm->num_unused;
+       }
+       spin_unlock(&mm->unused_lock);
+
+       BUG_ON(mm->num_unused != 0);
+}
+EXPORT_SYMBOL(drm_mm_takedown);
+
+void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
+{
+       struct drm_mm_node *entry;
+       int total_used = 0, total_free = 0, total = 0;
+
+       list_for_each_entry(entry, &mm->node_list, node_list) {
+               printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8ld: %s\n",
+                       prefix, entry->start, entry->start + entry->size,
+                       entry->size, entry->free ? "free" : "used");
+               total += entry->size;
+               if (entry->free)
+                       total_free += entry->size;
+               else
+                       total_used += entry->size;
+       }
+       printk(KERN_DEBUG "%s total: %d, used %d free %d\n", prefix, total,
+               total_used, total_free);
+}
+EXPORT_SYMBOL(drm_mm_debug_table);
+
+#if defined(CONFIG_DEBUG_FS)
+int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
+{
+       struct drm_mm_node *entry;
+       int total_used = 0, total_free = 0, total = 0;
+
+       list_for_each_entry(entry, &mm->node_list, node_list) {
+               seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: %s\n", entry->start, entry->start + entry->size, entry->size, entry->free ? "free" : "used");
+               total += entry->size;
+               if (entry->free)
+                       total_free += entry->size;
+               else
+                       total_used += entry->size;
+       }
+       seq_printf(m, "total: %d, used %d free %d\n", total, total_used, total_free);
+       return 0;
+}
+EXPORT_SYMBOL(drm_mm_dump_table);
+#endif
diff --git a/services4/3rdparty/linux_drm/drm_modes.c b/services4/3rdparty/linux_drm/drm_modes.c
new file mode 100644 (file)
index 0000000..58e65f9
--- /dev/null
@@ -0,0 +1,976 @@
+/*
+ * Copyright © 1997-2003 by The XFree86 Project, Inc.
+ * Copyright © 2007 Dave Airlie
+ * Copyright © 2007-2008 Intel Corporation
+ *   Jesse Barnes <jesse.barnes@intel.com>
+ * Copyright 2005-2006 Luc Verhaegen
+ * Copyright (c) 2001, Andy Ritger  aritger@nvidia.com
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Except as contained in this notice, the name of the copyright holder(s)
+ * and author(s) shall not be used in advertising or otherwise to promote
+ * the sale, use or other dealings in this Software without prior written
+ * authorization from the copyright holder(s) and author(s).
+ */
+
+#include <linux/list.h>
+#include <linux/list_sort.h>
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+
+/**
+ * drm_mode_debug_printmodeline - debug print a mode
+ * @dev: DRM device
+ * @mode: mode to print
+ *
+ * LOCKING:
+ * None.
+ *
+ * Describe @mode using DRM_DEBUG.
+ */
+void drm_mode_debug_printmodeline(struct drm_display_mode *mode)
+{
+       DRM_DEBUG_KMS("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d "
+                       "0x%x 0x%x\n",
+               mode->base.id, mode->name, mode->vrefresh, mode->clock,
+               mode->hdisplay, mode->hsync_start,
+               mode->hsync_end, mode->htotal,
+               mode->vdisplay, mode->vsync_start,
+               mode->vsync_end, mode->vtotal, mode->type, mode->flags);
+}
+EXPORT_SYMBOL(drm_mode_debug_printmodeline);
+
+/**
+ * drm_cvt_mode -create a modeline based on CVT algorithm
+ * @dev: DRM device
+ * @hdisplay: hdisplay size
+ * @vdisplay: vdisplay size
+ * @vrefresh  : vrefresh rate
+ * @reduced : Whether the GTF calculation is simplified
+ * @interlaced:Whether the interlace is supported
+ *
+ * LOCKING:
+ * none.
+ *
+ * return the modeline based on CVT algorithm
+ *
+ * This function is called to generate the modeline based on CVT algorithm
+ * according to the hdisplay, vdisplay, vrefresh.
+ * It is based from the VESA(TM) Coordinated Video Timing Generator by
+ * Graham Loveridge April 9, 2003 available at
+ * http://www.elo.utfsm.cl/~elo212/docs/CVTd6r1.xls 
+ *
+ * And it is copied from xf86CVTmode in xserver/hw/xfree86/modes/xf86cvt.c.
+ * What I have done is to translate it by using integer calculation.
+ */
+#define HV_FACTOR                      1000
+struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
+                                     int vdisplay, int vrefresh,
+                                     bool reduced, bool interlaced, bool margins)
+{
+       /* 1) top/bottom margin size (% of height) - default: 1.8, */
+#define        CVT_MARGIN_PERCENTAGE           18
+       /* 2) character cell horizontal granularity (pixels) - default 8 */
+#define        CVT_H_GRANULARITY               8
+       /* 3) Minimum vertical porch (lines) - default 3 */
+#define        CVT_MIN_V_PORCH                 3
+       /* 4) Minimum number of vertical back porch lines - default 6 */
+#define        CVT_MIN_V_BPORCH                6
+       /* Pixel Clock step (kHz) */
+#define CVT_CLOCK_STEP                 250
+       struct drm_display_mode *drm_mode;
+       unsigned int vfieldrate, hperiod;
+       int hdisplay_rnd, hmargin, vdisplay_rnd, vmargin, vsync;
+       int interlace;
+
+       /* allocate the drm_display_mode structure. If failure, we will
+        * return directly
+        */
+       drm_mode = drm_mode_create(dev);
+       if (!drm_mode)
+               return NULL;
+
+       /* the CVT default refresh rate is 60Hz */
+       if (!vrefresh)
+               vrefresh = 60;
+
+       /* the required field fresh rate */
+       if (interlaced)
+               vfieldrate = vrefresh * 2;
+       else
+               vfieldrate = vrefresh;
+
+       /* horizontal pixels */
+       hdisplay_rnd = hdisplay - (hdisplay % CVT_H_GRANULARITY);
+
+       /* determine the left&right borders */
+       hmargin = 0;
+       if (margins) {
+               hmargin = hdisplay_rnd * CVT_MARGIN_PERCENTAGE / 1000;
+               hmargin -= hmargin % CVT_H_GRANULARITY;
+       }
+       /* find the total active pixels */
+       drm_mode->hdisplay = hdisplay_rnd + 2 * hmargin;
+
+       /* find the number of lines per field */
+       if (interlaced)
+               vdisplay_rnd = vdisplay / 2;
+       else
+               vdisplay_rnd = vdisplay;
+
+       /* find the top & bottom borders */
+       vmargin = 0;
+       if (margins)
+               vmargin = vdisplay_rnd * CVT_MARGIN_PERCENTAGE / 1000;
+
+       drm_mode->vdisplay = vdisplay + 2 * vmargin;
+
+       /* Interlaced */
+       if (interlaced)
+               interlace = 1;
+       else
+               interlace = 0;
+
+       /* Determine VSync Width from aspect ratio */
+       if (!(vdisplay % 3) && ((vdisplay * 4 / 3) == hdisplay))
+               vsync = 4;
+       else if (!(vdisplay % 9) && ((vdisplay * 16 / 9) == hdisplay))
+               vsync = 5;
+       else if (!(vdisplay % 10) && ((vdisplay * 16 / 10) == hdisplay))
+               vsync = 6;
+       else if (!(vdisplay % 4) && ((vdisplay * 5 / 4) == hdisplay))
+               vsync = 7;
+       else if (!(vdisplay % 9) && ((vdisplay * 15 / 9) == hdisplay))
+               vsync = 7;
+       else /* custom */
+               vsync = 10;
+
+       if (!reduced) {
+               /* simplify the GTF calculation */
+               /* 4) Minimum time of vertical sync + back porch interval (µs)
+                * default 550.0
+                */
+               int tmp1, tmp2;
+#define CVT_MIN_VSYNC_BP       550
+               /* 3) Nominal HSync width (% of line period) - default 8 */
+#define CVT_HSYNC_PERCENTAGE   8
+               unsigned int hblank_percentage;
+               int vsyncandback_porch, vback_porch, hblank;
+
+               /* estimated the horizontal period */
+               tmp1 = HV_FACTOR * 1000000  -
+                               CVT_MIN_VSYNC_BP * HV_FACTOR * vfieldrate;
+               tmp2 = (vdisplay_rnd + 2 * vmargin + CVT_MIN_V_PORCH) * 2 +
+                               interlace;
+               hperiod = tmp1 * 2 / (tmp2 * vfieldrate);
+
+               tmp1 = CVT_MIN_VSYNC_BP * HV_FACTOR / hperiod + 1;
+               /* 9. Find number of lines in sync + backporch */
+               if (tmp1 < (vsync + CVT_MIN_V_PORCH))
+                       vsyncandback_porch = vsync + CVT_MIN_V_PORCH;
+               else
+                       vsyncandback_porch = tmp1;
+               /* 10. Find number of lines in back porch */
+               vback_porch = vsyncandback_porch - vsync;
+               drm_mode->vtotal = vdisplay_rnd + 2 * vmargin +
+                               vsyncandback_porch + CVT_MIN_V_PORCH;
+               /* 5) Definition of Horizontal blanking time limitation */
+               /* Gradient (%/kHz) - default 600 */
+#define CVT_M_FACTOR   600
+               /* Offset (%) - default 40 */
+#define CVT_C_FACTOR   40
+               /* Blanking time scaling factor - default 128 */
+#define CVT_K_FACTOR   128
+               /* Scaling factor weighting - default 20 */
+#define CVT_J_FACTOR   20
+#define CVT_M_PRIME    (CVT_M_FACTOR * CVT_K_FACTOR / 256)
+#define CVT_C_PRIME    ((CVT_C_FACTOR - CVT_J_FACTOR) * CVT_K_FACTOR / 256 + \
+                        CVT_J_FACTOR)
+               /* 12. Find ideal blanking duty cycle from formula */
+               hblank_percentage = CVT_C_PRIME * HV_FACTOR - CVT_M_PRIME *
+                                       hperiod / 1000;
+               /* 13. Blanking time */
+               if (hblank_percentage < 20 * HV_FACTOR)
+                       hblank_percentage = 20 * HV_FACTOR;
+               hblank = drm_mode->hdisplay * hblank_percentage /
+                        (100 * HV_FACTOR - hblank_percentage);
+               hblank -= hblank % (2 * CVT_H_GRANULARITY);
+               /* 14. find the total pixels per line */
+               drm_mode->htotal = drm_mode->hdisplay + hblank;
+               drm_mode->hsync_end = drm_mode->hdisplay + hblank / 2;
+               drm_mode->hsync_start = drm_mode->hsync_end -
+                       (drm_mode->htotal * CVT_HSYNC_PERCENTAGE) / 100;
+               drm_mode->hsync_start += CVT_H_GRANULARITY -
+                       drm_mode->hsync_start % CVT_H_GRANULARITY;
+               /* fill the Vsync values */
+               drm_mode->vsync_start = drm_mode->vdisplay + CVT_MIN_V_PORCH;
+               drm_mode->vsync_end = drm_mode->vsync_start + vsync;
+       } else {
+               /* Reduced blanking */
+               /* Minimum vertical blanking interval time (µs)- default 460 */
+#define CVT_RB_MIN_VBLANK      460
+               /* Fixed number of clocks for horizontal sync */
+#define CVT_RB_H_SYNC          32
+               /* Fixed number of clocks for horizontal blanking */
+#define CVT_RB_H_BLANK         160
+               /* Fixed number of lines for vertical front porch - default 3*/
+#define CVT_RB_VFPORCH         3
+               int vbilines;
+               int tmp1, tmp2;
+               /* 8. Estimate Horizontal period. */
+               tmp1 = HV_FACTOR * 1000000 -
+                       CVT_RB_MIN_VBLANK * HV_FACTOR * vfieldrate;
+               tmp2 = vdisplay_rnd + 2 * vmargin;
+               hperiod = tmp1 / (tmp2 * vfieldrate);
+               /* 9. Find number of lines in vertical blanking */
+               vbilines = CVT_RB_MIN_VBLANK * HV_FACTOR / hperiod + 1;
+               /* 10. Check if vertical blanking is sufficient */
+               if (vbilines < (CVT_RB_VFPORCH + vsync + CVT_MIN_V_BPORCH))
+                       vbilines = CVT_RB_VFPORCH + vsync + CVT_MIN_V_BPORCH;
+               /* 11. Find total number of lines in vertical field */
+               drm_mode->vtotal = vdisplay_rnd + 2 * vmargin + vbilines;
+               /* 12. Find total number of pixels in a line */
+               drm_mode->htotal = drm_mode->hdisplay + CVT_RB_H_BLANK;
+               /* Fill in HSync values */
+               drm_mode->hsync_end = drm_mode->hdisplay + CVT_RB_H_BLANK / 2;
+               drm_mode->hsync_start = drm_mode->hsync_end - CVT_RB_H_SYNC;
+               /* Fill in VSync values */
+               drm_mode->vsync_start = drm_mode->vdisplay + CVT_RB_VFPORCH;
+               drm_mode->vsync_end = drm_mode->vsync_start + vsync;
+       }
+       /* 15/13. Find pixel clock frequency (kHz for xf86) */
+       drm_mode->clock = drm_mode->htotal * HV_FACTOR * 1000 / hperiod;
+       drm_mode->clock -= drm_mode->clock % CVT_CLOCK_STEP;
+       /* 18/16. Find actual vertical frame frequency */
+       /* ignore - just set the mode flag for interlaced */
+       if (interlaced) {
+               drm_mode->vtotal *= 2;
+               drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
+       }
+       /* Fill the mode line name */
+       drm_mode_set_name(drm_mode);
+       if (reduced)
+               drm_mode->flags |= (DRM_MODE_FLAG_PHSYNC |
+                                       DRM_MODE_FLAG_NVSYNC);
+       else
+               drm_mode->flags |= (DRM_MODE_FLAG_PVSYNC |
+                                       DRM_MODE_FLAG_NHSYNC);
+
+       return drm_mode;
+}
+EXPORT_SYMBOL(drm_cvt_mode);
+
+/**
+ * drm_gtf_mode_complex - create the modeline based on full GTF algorithm
+ *
+ * @dev                :drm device
+ * @hdisplay   :hdisplay size
+ * @vdisplay   :vdisplay size
+ * @vrefresh   :vrefresh rate.
+ * @interlaced :whether the interlace is supported
+ * @margins    :desired margin size
+ * @GTF_[MCKJ]  :extended GTF formula parameters
+ *
+ * LOCKING.
+ * none.
+ *
+ * return the modeline based on full GTF algorithm.
+ *
+ * GTF feature blocks specify C and J in multiples of 0.5, so we pass them
+ * in here multiplied by two.  For a C of 40, pass in 80.
+ */
+struct drm_display_mode *
+drm_gtf_mode_complex(struct drm_device *dev, int hdisplay, int vdisplay,
+                    int vrefresh, bool interlaced, int margins,
+                    int GTF_M, int GTF_2C, int GTF_K, int GTF_2J)
+{      /* 1) top/bottom margin size (% of height) - default: 1.8, */
+#define        GTF_MARGIN_PERCENTAGE           18
+       /* 2) character cell horizontal granularity (pixels) - default 8 */
+#define        GTF_CELL_GRAN                   8
+       /* 3) Minimum vertical porch (lines) - default 3 */
+#define        GTF_MIN_V_PORCH                 1
+       /* width of vsync in lines */
+#define V_SYNC_RQD                     3
+       /* width of hsync as % of total line */
+#define H_SYNC_PERCENT                 8
+       /* min time of vsync + back porch (microsec) */
+#define MIN_VSYNC_PLUS_BP              550
+       /* C' and M' are part of the Blanking Duty Cycle computation */
+#define GTF_C_PRIME    ((((GTF_2C - GTF_2J) * GTF_K / 256) + GTF_2J) / 2)
+#define GTF_M_PRIME    (GTF_K * GTF_M / 256)
+       struct drm_display_mode *drm_mode;
+       unsigned int hdisplay_rnd, vdisplay_rnd, vfieldrate_rqd;
+       int top_margin, bottom_margin;
+       int interlace;
+       unsigned int hfreq_est;
+       int vsync_plus_bp, vback_porch;
+       unsigned int vtotal_lines, vfieldrate_est, hperiod;
+       unsigned int vfield_rate, vframe_rate;
+       int left_margin, right_margin;
+       unsigned int total_active_pixels, ideal_duty_cycle;
+       unsigned int hblank, total_pixels, pixel_freq;
+       int hsync, hfront_porch, vodd_front_porch_lines;
+       unsigned int tmp1, tmp2;
+
+       drm_mode = drm_mode_create(dev);
+       if (!drm_mode)
+               return NULL;
+
+       /* 1. In order to give correct results, the number of horizontal
+        * pixels requested is first processed to ensure that it is divisible
+        * by the character size, by rounding it to the nearest character
+        * cell boundary:
+        */
+       hdisplay_rnd = (hdisplay + GTF_CELL_GRAN / 2) / GTF_CELL_GRAN;
+       hdisplay_rnd = hdisplay_rnd * GTF_CELL_GRAN;
+
+       /* 2. If interlace is requested, the number of vertical lines assumed
+        * by the calculation must be halved, as the computation calculates
+        * the number of vertical lines per field.
+        */
+       if (interlaced)
+               vdisplay_rnd = vdisplay / 2;
+       else
+               vdisplay_rnd = vdisplay;
+
+       /* 3. Find the frame rate required: */
+       if (interlaced)
+               vfieldrate_rqd = vrefresh * 2;
+       else
+               vfieldrate_rqd = vrefresh;
+
+       /* 4. Find number of lines in Top margin: */
+       top_margin = 0;
+       if (margins)
+               top_margin = (vdisplay_rnd * GTF_MARGIN_PERCENTAGE + 500) /
+                               1000;
+       /* 5. Find number of lines in bottom margin: */
+       bottom_margin = top_margin;
+
+       /* 6. If interlace is required, then set variable interlace: */
+       if (interlaced)
+               interlace = 1;
+       else
+               interlace = 0;
+
+       /* 7. Estimate the Horizontal frequency */
+       {
+               tmp1 = (1000000  - MIN_VSYNC_PLUS_BP * vfieldrate_rqd) / 500;
+               tmp2 = (vdisplay_rnd + 2 * top_margin + GTF_MIN_V_PORCH) *
+                               2 + interlace;
+               hfreq_est = (tmp2 * 1000 * vfieldrate_rqd) / tmp1;
+       }
+
+       /* 8. Find the number of lines in V sync + back porch */
+       /* [V SYNC+BP] = RINT(([MIN VSYNC+BP] * hfreq_est / 1000000)) */
+       vsync_plus_bp = MIN_VSYNC_PLUS_BP * hfreq_est / 1000;
+       vsync_plus_bp = (vsync_plus_bp + 500) / 1000;
+       /*  9. Find the number of lines in V back porch alone: */
+       vback_porch = vsync_plus_bp - V_SYNC_RQD;
+       /*  10. Find the total number of lines in Vertical field period: */
+       vtotal_lines = vdisplay_rnd + top_margin + bottom_margin +
+                       vsync_plus_bp + GTF_MIN_V_PORCH;
+       /*  11. Estimate the Vertical field frequency: */
+       vfieldrate_est = hfreq_est / vtotal_lines;
+       /*  12. Find the actual horizontal period: */
+       hperiod = 1000000 / (vfieldrate_rqd * vtotal_lines);
+
+       /*  13. Find the actual Vertical field frequency: */
+       vfield_rate = hfreq_est / vtotal_lines;
+       /*  14. Find the Vertical frame frequency: */
+       if (interlaced)
+               vframe_rate = vfield_rate / 2;
+       else
+               vframe_rate = vfield_rate;
+       /*  15. Find number of pixels in left margin: */
+       if (margins)
+               left_margin = (hdisplay_rnd * GTF_MARGIN_PERCENTAGE + 500) /
+                               1000;
+       else
+               left_margin = 0;
+
+       /* 16.Find number of pixels in right margin: */
+       right_margin = left_margin;
+       /* 17.Find total number of active pixels in image and left and right */
+       total_active_pixels = hdisplay_rnd + left_margin + right_margin;
+       /* 18.Find the ideal blanking duty cycle from blanking duty cycle */
+       ideal_duty_cycle = GTF_C_PRIME * 1000 -
+                               (GTF_M_PRIME * 1000000 / hfreq_est);
+       /* 19.Find the number of pixels in the blanking time to the nearest
+        * double character cell: */
+       hblank = total_active_pixels * ideal_duty_cycle /
+                       (100000 - ideal_duty_cycle);
+       hblank = (hblank + GTF_CELL_GRAN) / (2 * GTF_CELL_GRAN);
+       hblank = hblank * 2 * GTF_CELL_GRAN;
+       /* 20.Find total number of pixels: */
+       total_pixels = total_active_pixels + hblank;
+       /* 21.Find pixel clock frequency: */
+       pixel_freq = total_pixels * hfreq_est / 1000;
+       /* Stage 1 computations are now complete; I should really pass
+        * the results to another function and do the Stage 2 computations,
+        * but I only need a few more values so I'll just append the
+        * computations here for now */
+       /* 17. Find the number of pixels in the horizontal sync period: */
+       hsync = H_SYNC_PERCENT * total_pixels / 100;
+       hsync = (hsync + GTF_CELL_GRAN / 2) / GTF_CELL_GRAN;
+       hsync = hsync * GTF_CELL_GRAN;
+       /* 18. Find the number of pixels in horizontal front porch period */
+       hfront_porch = hblank / 2 - hsync;
+       /*  36. Find the number of lines in the odd front porch period: */
+       vodd_front_porch_lines = GTF_MIN_V_PORCH ;
+
+       /* finally, pack the results in the mode struct */
+       drm_mode->hdisplay = hdisplay_rnd;
+       drm_mode->hsync_start = hdisplay_rnd + hfront_porch;
+       drm_mode->hsync_end = drm_mode->hsync_start + hsync;
+       drm_mode->htotal = total_pixels;
+       drm_mode->vdisplay = vdisplay_rnd;
+       drm_mode->vsync_start = vdisplay_rnd + vodd_front_porch_lines;
+       drm_mode->vsync_end = drm_mode->vsync_start + V_SYNC_RQD;
+       drm_mode->vtotal = vtotal_lines;
+
+       drm_mode->clock = pixel_freq;
+
+       if (interlaced) {
+               drm_mode->vtotal *= 2;
+               drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
+       }
+
+       drm_mode_set_name(drm_mode);
+       if (GTF_M == 600 && GTF_2C == 80 && GTF_K == 128 && GTF_2J == 40)
+               drm_mode->flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC;
+       else
+               drm_mode->flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC;
+
+       return drm_mode;
+}
+EXPORT_SYMBOL(drm_gtf_mode_complex);
+
+/**
+ * drm_gtf_mode - create the modeline based on GTF algorithm
+ *
+ * @dev                :drm device
+ * @hdisplay   :hdisplay size
+ * @vdisplay   :vdisplay size
+ * @vrefresh   :vrefresh rate.
+ * @interlaced :whether the interlace is supported
+ * @margins    :whether the margin is supported
+ *
+ * LOCKING.
+ * none.
+ *
+ * return the modeline based on GTF algorithm
+ *
+ * This function is to create the modeline based on the GTF algorithm.
+ * Generalized Timing Formula is derived from:
+ *     GTF Spreadsheet by Andy Morrish (1/5/97)
+ *     available at http://www.vesa.org
+ *
+ * And it is copied from the file of xserver/hw/xfree86/modes/xf86gtf.c.
+ * What I have done is to translate it by using integer calculation.
+ * I also refer to the function of fb_get_mode in the file of
+ * drivers/video/fbmon.c
+ *
+ * Standard GTF parameters:
+ * M = 600
+ * C = 40
+ * K = 128
+ * J = 20
+ */
+struct drm_display_mode *
+drm_gtf_mode(struct drm_device *dev, int hdisplay, int vdisplay, int vrefresh,
+            bool lace, int margins)
+{
+       return drm_gtf_mode_complex(dev, hdisplay, vdisplay, vrefresh, lace,
+                                   margins, 600, 40 * 2, 128, 20 * 2);
+}
+EXPORT_SYMBOL(drm_gtf_mode);
+
+/**
+ * drm_mode_set_name - set the name on a mode
+ * @mode: name will be set in this mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Set the name of @mode to a standard format.
+ */
+void drm_mode_set_name(struct drm_display_mode *mode)
+{
+       bool interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
+
+       snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d%s",
+                mode->hdisplay, mode->vdisplay,
+                interlaced ? "i" : "");
+}
+EXPORT_SYMBOL(drm_mode_set_name);
+
+/**
+ * drm_mode_list_concat - move modes from one list to another
+ * @head: source list
+ * @new: dst list
+ *
+ * LOCKING:
+ * Caller must ensure both lists are locked.
+ *
+ * Move all the modes from @head to @new.
+ */
+void drm_mode_list_concat(struct list_head *head, struct list_head *new)
+{
+
+       struct list_head *entry, *tmp;
+
+       list_for_each_safe(entry, tmp, head) {
+               list_move_tail(entry, new);
+       }
+}
+EXPORT_SYMBOL(drm_mode_list_concat);
+
+/**
+ * drm_mode_width - get the width of a mode
+ * @mode: mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Return @mode's width (hdisplay) value.
+ *
+ * FIXME: is this needed?
+ *
+ * RETURNS:
+ * @mode->hdisplay
+ */
+int drm_mode_width(struct drm_display_mode *mode)
+{
+       return mode->hdisplay;
+
+}
+EXPORT_SYMBOL(drm_mode_width);
+
+/**
+ * drm_mode_height - get the height of a mode
+ * @mode: mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Return @mode's height (vdisplay) value.
+ *
+ * FIXME: is this needed?
+ *
+ * RETURNS:
+ * @mode->vdisplay
+ */
+int drm_mode_height(struct drm_display_mode *mode)
+{
+       return mode->vdisplay;
+}
+EXPORT_SYMBOL(drm_mode_height);
+
+/** drm_mode_hsync - get the hsync of a mode
+ * @mode: mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Return @mode's hsync rate in kHz, rounded to the nearest int.
+ */
+int drm_mode_hsync(struct drm_display_mode *mode)
+{
+       unsigned int calc_val;
+
+       if (mode->hsync)
+               return mode->hsync;
+
+       if (mode->htotal < 0)
+               return 0;
+
+       calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */
+       calc_val += 500;                                /* round to 1000Hz */
+       calc_val /= 1000;                               /* truncate to kHz */
+
+       return calc_val;
+}
+EXPORT_SYMBOL(drm_mode_hsync);
+
+/**
+ * drm_mode_vrefresh - get the vrefresh of a mode
+ * @mode: mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Return @mode's vrefresh rate in Hz or calculate it if necessary.
+ *
+ * FIXME: why is this needed?  shouldn't vrefresh be set already?
+ *
+ * RETURNS:
+ * Vertical refresh rate. It will be the result of actual value plus 0.5.
+ * If it is 70.288, it will return 70Hz.
+ * If it is 59.6, it will return 60Hz.
+ */
+int drm_mode_vrefresh(struct drm_display_mode *mode)
+{
+       int refresh = 0;
+       unsigned int calc_val;
+
+       if (mode->vrefresh > 0)
+               refresh = mode->vrefresh;
+       else if (mode->htotal > 0 && mode->vtotal > 0) {
+               int vtotal;
+               vtotal = mode->vtotal;
+               /* work out vrefresh the value will be x1000 */
+               calc_val = (mode->clock * 1000);
+               calc_val /= mode->htotal;
+               /* (calc_val + vtotal/2) rounds to nearest instead of truncating */
+               refresh = (calc_val + vtotal / 2) / vtotal;
+
+               /* interlace doubles, doublescan halves, vscan divides the rate */
+               if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+                       refresh *= 2;
+               if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+                       refresh /= 2;
+               if (mode->vscan > 1)
+                       refresh /= mode->vscan;
+       }
+       return refresh;
+}
+EXPORT_SYMBOL(drm_mode_vrefresh);
+
+/**
+ * drm_mode_set_crtcinfo - set CRTC modesetting parameters
+ * @p: mode
+ * @adjust_flags: CRTC_* adjustment flags (e.g. CRTC_INTERLACE_HALVE_V)
+ *
+ * LOCKING:
+ * None.
+ *
+ * Setup the CRTC modesetting parameters for @p, adjusting if necessary.
+ */
+void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
+{
+       /* NOTE(review): masks with DRM_MODE_TYPE_CRTC_C but compares against
+        * DRM_MODE_TYPE_BUILTIN — presumably BUILTIN is a bit inside CRTC_C so
+        * this skips only pure builtin modes; confirm against drm_mode.h. */
+       if ((p == NULL) || ((p->type & DRM_MODE_TYPE_CRTC_C) == DRM_MODE_TYPE_BUILTIN))
+               return;
+
+       /* start from the user-visible timings, then adjust below */
+       p->crtc_hdisplay = p->hdisplay;
+       p->crtc_hsync_start = p->hsync_start;
+       p->crtc_hsync_end = p->hsync_end;
+       p->crtc_htotal = p->htotal;
+       p->crtc_hskew = p->hskew;
+       p->crtc_vdisplay = p->vdisplay;
+       p->crtc_vsync_start = p->vsync_start;
+       p->crtc_vsync_end = p->vsync_end;
+       p->crtc_vtotal = p->vtotal;
+
+       if (p->flags & DRM_MODE_FLAG_INTERLACE) {
+               if (adjust_flags & CRTC_INTERLACE_HALVE_V) {
+                       p->crtc_vdisplay /= 2;
+                       p->crtc_vsync_start /= 2;
+                       p->crtc_vsync_end /= 2;
+                       p->crtc_vtotal /= 2;
+               }
+
+               /* force an odd vtotal for interlaced modes */
+               p->crtc_vtotal |= 1;
+       }
+
+       if (p->flags & DRM_MODE_FLAG_DBLSCAN) {
+               p->crtc_vdisplay *= 2;
+               p->crtc_vsync_start *= 2;
+               p->crtc_vsync_end *= 2;
+               p->crtc_vtotal *= 2;
+       }
+
+       if (p->vscan > 1) {
+               p->crtc_vdisplay *= p->vscan;
+               p->crtc_vsync_start *= p->vscan;
+               p->crtc_vsync_end *= p->vscan;
+               p->crtc_vtotal *= p->vscan;
+       }
+
+       /* blanking spans from end of active area to end of total period */
+       p->crtc_vblank_start = min(p->crtc_vsync_start, p->crtc_vdisplay);
+       p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal);
+       p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay);
+       p->crtc_hblank_end = max(p->crtc_hsync_end, p->crtc_htotal);
+
+       p->crtc_hadjusted = false;
+       p->crtc_vadjusted = false;
+}
+EXPORT_SYMBOL(drm_mode_set_crtcinfo);
+
+
+/**
+ * drm_mode_duplicate - allocate and duplicate an existing mode
+ * @dev: DRM device
+ * @mode: mode to duplicate
+ *
+ * LOCKING:
+ * None.
+ *
+ * Just allocate a new mode, copy the existing mode into it, and return
+ * a pointer to it.  Used to create new instances of established modes.
+ * Returns NULL if mode allocation fails.
+ */
+struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
+                                           struct drm_display_mode *mode)
+{
+       struct drm_display_mode *nmode;
+       int new_id;
+
+       nmode = drm_mode_create(dev);
+       if (!nmode)
+               return NULL;
+
+       /* the struct copy clobbers base.id and head; restore a fresh id and
+        * an empty list link so the copy is an independent object */
+       new_id = nmode->base.id;
+       *nmode = *mode;
+       nmode->base.id = new_id;
+       INIT_LIST_HEAD(&nmode->head);
+       return nmode;
+}
+EXPORT_SYMBOL(drm_mode_duplicate);
+
+/**
+ * drm_mode_equal - test modes for equality
+ * @mode1: first mode
+ * @mode2: second mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Check to see if @mode1 and @mode2 are equivalent.  Object id and name
+ * are not compared, only the timing fields and flags.
+ *
+ * RETURNS:
+ * True if the modes are equal, false otherwise.
+ */
+bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2)
+{
+       /* do clock check convert to PICOS so fb modes get matched
+        * the same */
+       if (mode1->clock && mode2->clock) {
+               if (KHZ2PICOS(mode1->clock) != KHZ2PICOS(mode2->clock))
+                       return false;
+       } else if (mode1->clock != mode2->clock)
+               return false;
+
+       if (mode1->hdisplay == mode2->hdisplay &&
+           mode1->hsync_start == mode2->hsync_start &&
+           mode1->hsync_end == mode2->hsync_end &&
+           mode1->htotal == mode2->htotal &&
+           mode1->hskew == mode2->hskew &&
+           mode1->vdisplay == mode2->vdisplay &&
+           mode1->vsync_start == mode2->vsync_start &&
+           mode1->vsync_end == mode2->vsync_end &&
+           mode1->vtotal == mode2->vtotal &&
+           mode1->vscan == mode2->vscan &&
+           mode1->flags == mode2->flags)
+               return true;
+
+       return false;
+}
+EXPORT_SYMBOL(drm_mode_equal);
+
+/**
+ * drm_mode_validate_size - make sure modes adhere to size constraints
+ * @dev: DRM device
+ * @mode_list: list of modes to check
+ * @maxX: maximum width
+ * @maxY: maximum height
+ * @maxPitch: max pitch
+ *
+ * LOCKING:
+ * Caller must hold a lock protecting @mode_list.
+ *
+ * The DRM device (@dev) has size and pitch limits.  Here we validate the
+ * modes we probed for @dev against those limits and set their status as
+ * necessary.  A limit of 0 or less disables that particular check.
+ */
+void drm_mode_validate_size(struct drm_device *dev,
+                           struct list_head *mode_list,
+                           int maxX, int maxY, int maxPitch)
+{
+       struct drm_display_mode *mode;
+
+       /* checks are not exclusive: a later failing check overwrites the
+        * status set by an earlier one (only the last failure is recorded) */
+       list_for_each_entry(mode, mode_list, head) {
+               if (maxPitch > 0 && mode->hdisplay > maxPitch)
+                       mode->status = MODE_BAD_WIDTH;
+
+               if (maxX > 0 && mode->hdisplay > maxX)
+                       mode->status = MODE_VIRTUAL_X;
+
+               if (maxY > 0 && mode->vdisplay > maxY)
+                       mode->status = MODE_VIRTUAL_Y;
+       }
+}
+EXPORT_SYMBOL(drm_mode_validate_size);
+
+/**
+ * drm_mode_validate_clocks - validate modes against clock limits
+ * @dev: DRM device
+ * @mode_list: list of modes to check
+ * @min: minimum clock rate array
+ * @max: maximum clock rate array
+ * @n_ranges: number of clock ranges (size of arrays)
+ *
+ * LOCKING:
+ * Caller must hold a lock protecting @mode_list.
+ *
+ * Some code may need to check a mode list against the clock limits of the
+ * device in question.  This function walks the mode list, testing to make
+ * sure each mode falls within a given range (defined by @min and @max
+ * arrays) and sets @mode->status as needed.
+ */
+void drm_mode_validate_clocks(struct drm_device *dev,
+                             struct list_head *mode_list,
+                             int *min, int *max, int n_ranges)
+{
+       struct drm_display_mode *mode;
+       int i;
+
+       list_for_each_entry(mode, mode_list, head) {
+               bool good = false;
+               /* a mode is valid if it fits any one of the n_ranges windows */
+               for (i = 0; i < n_ranges; i++) {
+                       if (mode->clock >= min[i] && mode->clock <= max[i]) {
+                               good = true;
+                               break;
+                       }
+               }
+               if (!good)
+                       mode->status = MODE_CLOCK_RANGE;
+       }
+}
+EXPORT_SYMBOL(drm_mode_validate_clocks);
+
+/**
+ * drm_mode_prune_invalid - remove invalid modes from mode list
+ * @dev: DRM device
+ * @mode_list: list of modes to check
+ * @verbose: be verbose about it
+ *
+ * LOCKING:
+ * Caller must hold a lock protecting @mode_list.
+ *
+ * Once mode list generation is complete, a caller can use this routine to
+ * remove invalid modes from a mode list.  If any of the modes have a
+ * status other than %MODE_OK, they are removed from @mode_list and freed.
+ */
+void drm_mode_prune_invalid(struct drm_device *dev,
+                           struct list_head *mode_list, bool verbose)
+{
+       struct drm_display_mode *mode, *t;
+
+       /* _safe walk: entries are deleted and destroyed while iterating */
+       list_for_each_entry_safe(mode, t, mode_list, head) {
+               if (mode->status != MODE_OK) {
+                       list_del(&mode->head);
+                       if (verbose) {
+                               drm_mode_debug_printmodeline(mode);
+                               DRM_DEBUG_KMS("Not using %s mode %d\n",
+                                       mode->name, mode->status);
+                       }
+                       drm_mode_destroy(dev, mode);
+               }
+       }
+}
+EXPORT_SYMBOL(drm_mode_prune_invalid);
+
+/**
+ * drm_mode_compare - compare modes for favorability
+ * @priv: unused
+ * @lh_a: list_head for first mode
+ * @lh_b: list_head for second mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Compare two modes, given by @lh_a and @lh_b, returning a value indicating
+ * which is better.  Sort keys, in order: preferred flag, display area,
+ * pixel clock — all descending (better modes sort earlier).
+ *
+ * RETURNS:
+ * Negative if @lh_a is better than @lh_b, zero if they're equivalent, or
+ * positive if @lh_b is better than @lh_a.
+ */
+static int drm_mode_compare(void *priv, struct list_head *lh_a, struct list_head *lh_b)
+{
+       struct drm_display_mode *a = list_entry(lh_a, struct drm_display_mode, head);
+       struct drm_display_mode *b = list_entry(lh_b, struct drm_display_mode, head);
+       int diff;
+
+       /* (b - a) ordering makes larger/preferred values sort first */
+       diff = ((b->type & DRM_MODE_TYPE_PREFERRED) != 0) -
+               ((a->type & DRM_MODE_TYPE_PREFERRED) != 0);
+       if (diff)
+               return diff;
+       diff = b->hdisplay * b->vdisplay - a->hdisplay * a->vdisplay;
+       if (diff)
+               return diff;
+       diff = b->clock - a->clock;
+       return diff;
+}
+
+/**
+ * drm_mode_sort - sort mode list
+ * @mode_list: list to sort
+ *
+ * LOCKING:
+ * Caller must hold a lock protecting @mode_list.
+ *
+ * Sort @mode_list by favorability, putting good modes first.
+ * Delegates to list_sort() with drm_mode_compare() as the key.
+ */
+void drm_mode_sort(struct list_head *mode_list)
+{
+       list_sort(NULL, mode_list, drm_mode_compare);
+}
+EXPORT_SYMBOL(drm_mode_sort);
+
+/**
+ * drm_mode_connector_list_update - update the mode list for the connector
+ * @connector: the connector to update
+ *
+ * LOCKING:
+ * Caller must hold a lock protecting @mode_list.
+ *
+ * This moves the modes from the @connector probed_modes list
+ * to the actual mode list. It compares the probed mode against the current
+ * list and only adds different modes. All modes unverified after this point
+ * will be removed by the prune invalid modes.
+ */
+void drm_mode_connector_list_update(struct drm_connector *connector)
+{
+       struct drm_display_mode *mode;
+       struct drm_display_mode *pmode, *pt;
+       int found_it;
+
+       list_for_each_entry_safe(pmode, pt, &connector->probed_modes,
+                                head) {
+               found_it = 0;
+               /* go through current modes checking for the new probed mode */
+               list_for_each_entry(mode, &connector->modes, head) {
+                       if (drm_mode_equal(pmode, mode)) {
+                               found_it = 1;
+                               /* if equal delete the probed mode */
+                               mode->status = pmode->status;
+                               /* Merge type bits together */
+                               mode->type |= pmode->type;
+                               list_del(&pmode->head);
+                               drm_mode_destroy(connector->dev, pmode);
+                               break;
+                       }
+               }
+
+               /* genuinely new mode: move it onto the connector's mode list */
+               if (!found_it) {
+                       list_move_tail(&pmode->head, &connector->modes);
+               }
+       }
+}
+EXPORT_SYMBOL(drm_mode_connector_list_update);
diff --git a/services4/3rdparty/linux_drm/drm_pci.c b/services4/3rdparty/linux_drm/drm_pci.c
new file mode 100644 (file)
index 0000000..f5bd9e5
--- /dev/null
@@ -0,0 +1,274 @@
+/* drm_pci.h -- PCI DMA memory management wrappers for DRM -*- linux-c -*- */
+/**
+ * \file drm_pci.c
+ * \brief Functions and ioctls to manage PCI memory
+ *
+ * \warning These interfaces aren't stable yet.
+ *
+ * \todo Implement the remaining ioctl's for the PCI pools.
+ * \todo The wrappers here are so thin that they would be better off inlined..
+ *
+ * \author José Fonseca <jrfonseca@tungstengraphics.com>
+ * \author Leif Delgass <ldelgass@retinalburn.net>
+ */
+
+/*
+ * Copyright 2003 José Fonseca.
+ * Copyright 2003 Leif Delgass.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
+ * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include "drmP.h"
+
+/**********************************************************************/
+/** \name PCI memory */
+/*@{*/
+
+/**
+ * \brief Allocate a PCI consistent memory block, for DMA.
+ *
+ * Returns a descriptor holding the kernel virtual address, bus address
+ * and size, or NULL on failure.  The caller owns the descriptor and must
+ * release it with drm_pci_free().
+ */
+drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align)
+{
+       drm_dma_handle_t *dmah;
+#if 1
+       /* always-enabled leftover conditional; these locals are in use */
+       unsigned long addr;
+       size_t sz;
+#endif
+
+       /* pci_alloc_consistent only guarantees alignment to the smallest
+        * PAGE_SIZE order which is greater than or equal to the requested size.
+        * Return NULL here for now to make sure nobody tries for larger alignment
+        */
+       if (align > size)
+               return NULL;
+
+       dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
+       if (!dmah)
+               return NULL;
+
+       dmah->size = size;
+       dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, &dmah->busaddr, GFP_KERNEL | __GFP_COMP);
+
+       if (dmah->vaddr == NULL) {
+               kfree(dmah);
+               return NULL;
+       }
+
+       memset(dmah->vaddr, 0, size);
+
+       /* XXX - Is virt_to_page() legal for consistent mem? */
+       /* Reserve */
+       /* NOTE(review): sz is size_t, so a size that is not a PAGE_SIZE
+        * multiple would wrap below zero and keep the loop running; assumes
+        * callers pass page-multiple sizes — TODO confirm */
+       for (addr = (unsigned long)dmah->vaddr, sz = size;
+            sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
+               SetPageReserved(virt_to_page(addr));
+       }
+
+       return dmah;
+}
+
+EXPORT_SYMBOL(drm_pci_alloc);
+
+/**
+ * \brief Free a PCI consistent memory block without freeing its descriptor.
+ *
+ * This function is for internal use in the Linux-specific DRM core code.
+ * The descriptor itself is left intact; drm_pci_free() frees both.
+ */
+void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
+{
+#if 1
+       /* always-enabled leftover conditional; these locals are in use */
+       unsigned long addr;
+       size_t sz;
+#endif
+
+       if (dmah->vaddr) {
+               /* XXX - Is virt_to_page() legal for consistent mem? */
+               /* Unreserve: mirrors the SetPageReserved loop in drm_pci_alloc() */
+               for (addr = (unsigned long)dmah->vaddr, sz = dmah->size;
+                    sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
+                       ClearPageReserved(virt_to_page(addr));
+               }
+               dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
+                                 dmah->busaddr);
+       }
+}
+
+/**
+ * \brief Free a PCI consistent memory block
+ *
+ * Releases both the DMA memory and the descriptor allocated by
+ * drm_pci_alloc(); \p dmah is invalid afterwards.
+ */
+void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
+{
+       __drm_pci_free(dev, dmah);
+       kfree(dmah);
+}
+
+EXPORT_SYMBOL(drm_pci_free);
+
+#ifdef CONFIG_PCI
+/**
+ * Register.
+ *
+ * \param pdev - PCI device structure
+ * \param ent entry from the PCI ID table with device type flags
+ * \return zero on success or a negative number on failure.
+ *
+ * Attempt to gets inter module "drm" information. If we are first
+ * then register the character device and inter module information.
+ * Try and register, if we fail to register, backout previous work.
+ */
+int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
+                   struct drm_driver *driver)
+{
+       struct drm_device *dev;
+       int ret;
+
+       DRM_DEBUG("\n");
+
+       dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+       if (!dev)
+               return -ENOMEM;
+
+       ret = pci_enable_device(pdev);
+       if (ret)
+               goto err_g1;
+
+       pci_set_master(pdev);
+
+       dev->pdev = pdev;
+       dev->dev = &pdev->dev;
+
+       dev->pci_device = pdev->device;
+       dev->pci_vendor = pdev->vendor;
+
+#ifdef __alpha__
+       dev->hose = pdev->sysdata;
+#endif
+
+       mutex_lock(&drm_global_mutex);
+
+       if ((ret = drm_fill_in_dev(dev, ent, driver))) {
+               printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
+               goto err_g2;
+       }
+
+       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+               pci_set_drvdata(pdev, dev);
+               ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
+               if (ret)
+                       goto err_g2;
+       }
+
+       if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY)))
+               goto err_g3;
+
+       if (dev->driver->load) {
+               ret = dev->driver->load(dev, ent->driver_data);
+               if (ret)
+                       goto err_g4;
+       }
+
+       /* setup the grouping for the legacy output */
+       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+               ret = drm_mode_group_init_legacy_group(dev,
+                                               &dev->primary->mode_group);
+               if (ret)
+                       goto err_g4;
+       }
+
+       list_add_tail(&dev->driver_item, &driver->device_list);
+
+       DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
+                driver->name, driver->major, driver->minor, driver->patchlevel,
+                driver->date, pci_name(pdev), dev->primary->index);
+
+       mutex_unlock(&drm_global_mutex);
+       return 0;
+
+err_g4:
+       drm_put_minor(&dev->primary);
+err_g3:
+       if (drm_core_check_feature(dev, DRIVER_MODESET))
+               drm_put_minor(&dev->control);
+err_g2:
+       pci_disable_device(pdev);
+err_g1:
+       kfree(dev);
+       mutex_unlock(&drm_global_mutex);
+       return ret;
+}
+EXPORT_SYMBOL(drm_get_pci_dev);
+
+/**
+ * PCI device initialization. Called via drm_init at module load time,
+ *
+ * \param driver DRM driver whose pci_driver/id_table describe the hardware
+ * \return zero on success or a negative number on failure.
+ *
+ * Initializes a drm_device structures,registering the
+ * stubs and initializing the AGP device.
+ *
+ * Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
+ * after the initialization for driver customization.
+ */
+int drm_pci_init(struct drm_driver *driver)
+{
+       struct pci_dev *pdev = NULL;
+       const struct pci_device_id *pid;
+       int i;
+
+       /* KMS drivers register normally and let the PCI core probe them */
+       if (driver->driver_features & DRIVER_MODESET)
+               return pci_register_driver(&driver->pci_driver);
+
+       /* If not using KMS, fall back to stealth mode manual scanning. */
+       for (i = 0; driver->pci_driver.id_table[i].vendor != 0; i++) {
+               pid = &driver->pci_driver.id_table[i];
+
+               /* Loop around setting up a DRM device for each PCI device
+                * matching our ID and device class.  If we had the internal
+                * function that pci_get_subsys and pci_get_class used, we'd
+                * be able to just pass pid in instead of doing a two-stage
+                * thing.
+                */
+               pdev = NULL;
+               while ((pdev =
+                       pci_get_subsys(pid->vendor, pid->device, pid->subvendor,
+                                      pid->subdevice, pdev)) != NULL) {
+                       if ((pdev->class & pid->class_mask) != pid->class)
+                               continue;
+
+                       /* stealth mode requires a manual probe */
+                       /* extra reference is held on behalf of the DRM device */
+                       pci_dev_get(pdev);
+                       drm_get_pci_dev(pdev, pid, driver);
+               }
+       }
+       return 0;
+}
+
+#else
+
+int drm_pci_init(struct drm_driver *driver)
+{
+       /* PCI support compiled out (!CONFIG_PCI): always fail */
+       return -1;
+}
+
+#endif
+/*@}*/
diff --git a/services4/3rdparty/linux_drm/drm_platform.c b/services4/3rdparty/linux_drm/drm_platform.c
new file mode 100644 (file)
index 0000000..92d1d0f
--- /dev/null
@@ -0,0 +1,127 @@
+/*
+ * Derived from drm_pci.c
+ *
+ * Copyright 2003 José Fonseca.
+ * Copyright 2003 Leif Delgass.
+ * Copyright (c) 2009, Code Aurora Forum.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
+ * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+
+/**
+ * Register.
+ *
+ * \param platdev - Platform device struture
+ * \param driver DRM driver to bind to the device
+ * \return zero on success or a negative number on failure.
+ *
+ * Attempt to gets inter module "drm" information. If we are first
+ * then register the character device and inter module information.
+ * Try and register, if we fail to register, backout previous work.
+ */
+
+int drm_get_platform_dev(struct platform_device *platdev,
+                        struct drm_driver *driver)
+{
+       struct drm_device *dev;
+       int ret;
+
+       DRM_DEBUG("\n");
+
+       dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+       if (!dev)
+               return -ENOMEM;
+
+       dev->platformdev = platdev;
+       dev->dev = &platdev->dev;
+
+       mutex_lock(&drm_global_mutex);
+
+       ret = drm_fill_in_dev(dev, NULL, driver);
+
+       if (ret) {
+               printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
+               goto err_g1;
+       }
+
+       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+               dev_set_drvdata(&platdev->dev, dev);
+               ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
+               if (ret)
+                       goto err_g1;
+       }
+
+       ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
+       if (ret)
+               goto err_g2;
+
+       if (dev->driver->load) {
+               ret = dev->driver->load(dev, 0);
+               if (ret)
+                       goto err_g3;
+       }
+
+       /* setup the grouping for the legacy output */
+       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+               ret = drm_mode_group_init_legacy_group(dev,
+                               &dev->primary->mode_group);
+               if (ret)
+                       goto err_g3;
+       }
+
+       list_add_tail(&dev->driver_item, &driver->device_list);
+
+       mutex_unlock(&drm_global_mutex);
+
+       DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
+                driver->name, driver->major, driver->minor, driver->patchlevel,
+                driver->date, dev->primary->index);
+
+       return 0;
+
+       /* all error gotos occur after mutex_lock, so the single unlock at
+        * err_g1 is balanced on every failure path */
+err_g3:
+       drm_put_minor(&dev->primary);
+err_g2:
+       if (drm_core_check_feature(dev, DRIVER_MODESET))
+               drm_put_minor(&dev->control);
+err_g1:
+       kfree(dev);
+       mutex_unlock(&drm_global_mutex);
+       return ret;
+}
+EXPORT_SYMBOL(drm_get_platform_dev);
+
+/**
+ * Platform device initialization. Called via drm_init at module load time,
+ *
+ * \param driver DRM driver; its platform_device field names the one device
+ * \return zero on success or a negative number on failure.
+ *
+ * Initializes a drm_device structures,registering the
+ * stubs
+ *
+ * Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
+ * after the initialization for driver customization.
+ */
+
+int drm_platform_init(struct drm_driver *driver)
+{
+       /* a platform driver binds exactly one pre-assigned device */
+       return drm_get_platform_dev(driver->platform_device, driver);
+}
diff --git a/services4/3rdparty/linux_drm/drm_proc.c b/services4/3rdparty/linux_drm/drm_proc.c
new file mode 100644 (file)
index 0000000..9e5b07e
--- /dev/null
@@ -0,0 +1,221 @@
+/**
+ * \file drm_proc.c
+ * /proc support for DRM
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ *
+ * \par Acknowledgements:
+ *    Matthew J Sottek <matthew.j.sottek@intel.com> sent in a patch to fix
+ *    the problem with the proc files not outputting all their information.
+ */
+
+/*
+ * Created: Mon Jan 11 09:48:47 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include "drmP.h"
+
+/***************************************************
+ * Initialization, etc.
+ **************************************************/
+
+/**
+ * Proc file list.
+ *
+ * Third field is a driver-feature mask: an entry is created only if the
+ * driver advertises that feature (0 means always created).
+ */
+static struct drm_info_list drm_proc_list[] = {
+       {"name", drm_name_info, 0},
+       {"vm", drm_vm_info, 0},
+       {"clients", drm_clients_info, 0},
+       {"queues", drm_queues_info, 0},
+       {"bufs", drm_bufs_info, 0},
+       {"gem_names", drm_gem_name_info, DRIVER_GEM},
+#if DRM_DEBUG_CODE
+       {"vma", drm_vma_info, 0},
+#endif
+};
+#define DRM_PROC_ENTRIES ARRAY_SIZE(drm_proc_list)
+
+/* open handler: PDE(inode)->data carries the drm_info_node registered by
+ * drm_proc_create_files(); its info_ent->show renders the seq_file */
+static int drm_proc_open(struct inode *inode, struct file *file)
+{
+       struct drm_info_node* node = PDE(inode)->data;
+
+       return single_open(file, node->info_ent->show, node);
+}
+
+/* all DRM proc entries are read-only single-shot seq files */
+static const struct file_operations drm_proc_fops = {
+       .owner = THIS_MODULE,
+       .open = drm_proc_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
+
+
+/**
+ * Initialize a given set of proc files for a device
+ *
+ * \param files The array of files to create
+ * \param count The number of files given
+ * \param root DRI proc dir entry.
+ * \param minor device minor number
+ * \return Zero on success, non-zero on failure
+ *
+ * Create a given set of proc files represented by an array of
+ * gdm_proc_lists in the given root directory.  On failure, every entry
+ * named in \p files is removed from \p minor's proc directory.
+ */
+int drm_proc_create_files(struct drm_info_list *files, int count,
+                         struct proc_dir_entry *root, struct drm_minor *minor)
+{
+       struct drm_device *dev = minor->dev;
+       struct proc_dir_entry *ent;
+       struct drm_info_node *tmp;
+       int i, ret;
+
+       for (i = 0; i < count; i++) {
+               u32 features = files[i].driver_features;
+
+               /* skip entries whose feature mask the driver doesn't meet */
+               if (features != 0 &&
+                   (dev->driver->driver_features & features) != features)
+                       continue;
+
+               tmp = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
+               if (tmp == NULL) {
+                       ret = -1;
+                       goto fail;
+               }
+               tmp->minor = minor;
+               tmp->info_ent = &files[i];
+               list_add(&tmp->list, &minor->proc_nodes.list);
+
+               ent = proc_create_data(files[i].name, S_IRUGO, root,
+                                      &drm_proc_fops, tmp);
+               if (!ent) {
+                       /* was: printed a never-initialized local name buffer;
+                        * use the minor index instead */
+                       DRM_ERROR("Cannot create /proc/dri/%d/%s\n",
+                                 minor->index, files[i].name);
+                       list_del(&tmp->list);
+                       kfree(tmp);
+                       ret = -1;
+                       goto fail;
+               }
+
+       }
+       return 0;
+
+fail:
+       /* remove the entries we were asked to create (was: always walked
+        * drm_proc_list, removing the wrong names for driver file sets) */
+       for (i = 0; i < count; i++)
+               remove_proc_entry(files[i].name, minor->proc_root);
+       return ret;
+}
+
+/**
+ * Initialize the DRI proc filesystem for a device
+ *
+ * \param minor device minor number
+ * \param minor_id numeric id used as the directory name
+ * \param root DRI proc dir entry.
+ * \return zero on success, or a negative number on failure.
+ *
+ * Create the DRI proc root entry "/proc/dri", the device proc root entry
+ * "/proc/dri/%minor%/", and each entry in proc_list as
+ * "/proc/dri/%minor%/%name%".
+ */
+int drm_proc_init(struct drm_minor *minor, int minor_id,
+                 struct proc_dir_entry *root)
+{
+       char name[64];
+       int ret;
+
+       INIT_LIST_HEAD(&minor->proc_nodes.list);
+       sprintf(name, "%d", minor_id);
+       minor->proc_root = proc_mkdir(name, root);
+       if (!minor->proc_root) {
+               DRM_ERROR("Cannot create /proc/dri/%s\n", name);
+               return -1;
+       }
+
+       ret = drm_proc_create_files(drm_proc_list, DRM_PROC_ENTRIES,
+                                   minor->proc_root, minor);
+       if (ret) {
+               /* roll back the directory so a half-set-up minor leaves no
+                * residue under /proc/dri */
+               remove_proc_entry(name, root);
+               minor->proc_root = NULL;
+               DRM_ERROR("Failed to create core drm proc files\n");
+               return ret;
+       }
+
+       return 0;
+}
+
+/* Remove the proc entries for @files and free their tracking nodes.
+ * Only nodes whose info_ent matches an entry in @files are touched,
+ * so core and driver file sets can be removed independently.
+ * Always returns zero. */
+int drm_proc_remove_files(struct drm_info_list *files, int count,
+                         struct drm_minor *minor)
+{
+       struct list_head *pos, *q;
+       struct drm_info_node *tmp;
+       int i;
+
+       for (i = 0; i < count; i++) {
+               list_for_each_safe(pos, q, &minor->proc_nodes.list) {
+                       tmp = list_entry(pos, struct drm_info_node, list);
+                       if (tmp->info_ent == &files[i]) {
+                               remove_proc_entry(files[i].name,
+                                                 minor->proc_root);
+                               list_del(pos);
+                               kfree(tmp);
+                       }
+               }
+       }
+       return 0;
+}
+
+/**
+ * Cleanup the proc filesystem resources.
+ *
+ * \param minor device minor number.
+ * \param root DRI proc dir entry.
+ * \return always zero.
+ *
+ * Remove all proc entries created by proc_init().  A NULL \p root or an
+ * unset proc_root (init never ran or already cleaned) is a no-op.
+ */
+int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root)
+{
+       char name[64];
+
+       if (!root || !minor->proc_root)
+               return 0;
+
+       drm_proc_remove_files(drm_proc_list, DRM_PROC_ENTRIES, minor);
+
+       /* directory name mirrors the "%d" chosen in drm_proc_init() */
+       sprintf(name, "%d", minor->index);
+       remove_proc_entry(name, root);
+
+       return 0;
+}
+
diff --git a/services4/3rdparty/linux_drm/drm_scatter.c b/services4/3rdparty/linux_drm/drm_scatter.c
new file mode 100644 (file)
index 0000000..d15e09b
--- /dev/null
@@ -0,0 +1,217 @@
+/**
+ * \file drm_scatter.c
+ * IOCTLs to manage scatter/gather memory
+ *
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Mon Dec 18 23:20:54 2000 by gareth@valinux.com
+ *
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include "drmP.h"
+
+#define DEBUG_SCATTER 0
+
+static inline void *drm_vmalloc_dma(unsigned long size)
+{
+#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
+       return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL | _PAGE_NO_CACHE);
+#else
+       return vmalloc_32(size);
+#endif
+}
+
+void drm_sg_cleanup(struct drm_sg_mem * entry)
+{
+       struct page *page;
+       int i;
+
+       for (i = 0; i < entry->pages; i++) {
+               page = entry->pagelist[i];
+               if (page)
+                       ClearPageReserved(page);
+       }
+
+       vfree(entry->virtual);
+
+       kfree(entry->busaddr);
+       kfree(entry->pagelist);
+       kfree(entry);
+}
+
+#ifdef _LP64
+# define ScatterHandle(x) (unsigned int)((x >> 32) + (x & ((1L << 32) - 1)))
+#else
+# define ScatterHandle(x) (unsigned int)(x)
+#endif
+
+int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
+{
+       struct drm_sg_mem *entry;
+       unsigned long pages, i, j;
+
+       DRM_DEBUG("\n");
+
+       if (!drm_core_check_feature(dev, DRIVER_SG))
+               return -EINVAL;
+
+       if (dev->sg)
+               return -EINVAL;
+
+       entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+       if (!entry)
+               return -ENOMEM;
+
+       memset(entry, 0, sizeof(*entry));
+       pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
+       DRM_DEBUG("size=%ld pages=%ld\n", request->size, pages);
+
+       entry->pages = pages;
+       entry->pagelist = kmalloc(pages * sizeof(*entry->pagelist), GFP_KERNEL);
+       if (!entry->pagelist) {
+               kfree(entry);
+               return -ENOMEM;
+       }
+
+       memset(entry->pagelist, 0, pages * sizeof(*entry->pagelist));
+
+       entry->busaddr = kmalloc(pages * sizeof(*entry->busaddr), GFP_KERNEL);
+       if (!entry->busaddr) {
+               kfree(entry->pagelist);
+               kfree(entry);
+               return -ENOMEM;
+       }
+       memset((void *)entry->busaddr, 0, pages * sizeof(*entry->busaddr));
+
+       entry->virtual = drm_vmalloc_dma(pages << PAGE_SHIFT);
+       if (!entry->virtual) {
+               kfree(entry->busaddr);
+               kfree(entry->pagelist);
+               kfree(entry);
+               return -ENOMEM;
+       }
+
+       /* This also forces the mapping of COW pages, so our page list
+        * will be valid.  Please don't remove it...
+        */
+       memset(entry->virtual, 0, pages << PAGE_SHIFT);
+
+       entry->handle = ScatterHandle((unsigned long)entry->virtual);
+
+       DRM_DEBUG("handle  = %08lx\n", entry->handle);
+       DRM_DEBUG("virtual = %p\n", entry->virtual);
+
+       for (i = (unsigned long)entry->virtual, j = 0; j < pages;
+            i += PAGE_SIZE, j++) {
+               entry->pagelist[j] = vmalloc_to_page((void *)i);
+               if (!entry->pagelist[j])
+                       goto failed;
+               SetPageReserved(entry->pagelist[j]);
+       }
+
+       request->handle = entry->handle;
+
+       dev->sg = entry;
+
+#if DEBUG_SCATTER
+       /* Verify that each page points to its virtual address, and vice
+        * versa.
+        */
+       {
+               int error = 0;
+
+               for (i = 0; i < pages; i++) {
+                       unsigned long *tmp;
+
+                       tmp = page_address(entry->pagelist[i]);
+                       for (j = 0;
+                            j < PAGE_SIZE / sizeof(unsigned long);
+                            j++, tmp++) {
+                               *tmp = 0xcafebabe;
+                       }
+                       tmp = (unsigned long *)((u8 *) entry->virtual +
+                                               (PAGE_SIZE * i));
+                       for (j = 0;
+                            j < PAGE_SIZE / sizeof(unsigned long);
+                            j++, tmp++) {
+                               if (*tmp != 0xcafebabe && error == 0) {
+                                       error = 1;
+                                       DRM_ERROR("Scatter allocation error, "
+                                                 "pagelist does not match "
+                                                 "virtual mapping\n");
+                               }
+                       }
+                       tmp = page_address(entry->pagelist[i]);
+                       for (j = 0;
+                            j < PAGE_SIZE / sizeof(unsigned long);
+                            j++, tmp++) {
+                               *tmp = 0;
+                       }
+               }
+               if (error == 0)
+                       DRM_ERROR("Scatter allocation matches pagelist\n");
+       }
+#endif
+
+       return 0;
+
+      failed:
+       drm_sg_cleanup(entry);
+       return -ENOMEM;
+}
+
+int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv)
+{
+       struct drm_scatter_gather *request = data;
+
+       return drm_sg_alloc(dev, request);
+
+}
+
+int drm_sg_free(struct drm_device *dev, void *data,
+               struct drm_file *file_priv)
+{
+       struct drm_scatter_gather *request = data;
+       struct drm_sg_mem *entry;
+
+       if (!drm_core_check_feature(dev, DRIVER_SG))
+               return -EINVAL;
+
+       entry = dev->sg;
+       dev->sg = NULL;
+
+       if (!entry || entry->handle != request->handle)
+               return -EINVAL;
+
+       DRM_DEBUG("virtual  = %p\n", entry->virtual);
+
+       drm_sg_cleanup(entry);
+
+       return 0;
+}
diff --git a/services4/3rdparty/linux_drm/drm_sman.c b/services4/3rdparty/linux_drm/drm_sman.c
new file mode 100644 (file)
index 0000000..463aed9
--- /dev/null
@@ -0,0 +1,352 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck., ND., USA.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Simple memory manager interface that keeps track on allocate regions on a
+ * per "owner" basis. All regions associated with an "owner" can be released
+ * with a simple call. Typically if the "owner" exists. The owner is any
+ * "unsigned long" identifier. Can typically be a pointer to a file private
+ * struct or a context identifier.
+ *
+ * Authors:
+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include "drm_sman.h"
+
+struct drm_owner_item {
+       struct drm_hash_item owner_hash;
+       struct list_head sman_list;
+       struct list_head mem_blocks;
+};
+
+void drm_sman_takedown(struct drm_sman * sman)
+{
+       drm_ht_remove(&sman->user_hash_tab);
+       drm_ht_remove(&sman->owner_hash_tab);
+       kfree(sman->mm);
+}
+
+EXPORT_SYMBOL(drm_sman_takedown);
+
+int
+drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
+             unsigned int user_order, unsigned int owner_order)
+{
+       int ret = 0;
+
+       sman->mm = (struct drm_sman_mm *) kcalloc(num_managers,
+                                                 sizeof(*sman->mm),
+                                                 GFP_KERNEL);
+       if (!sman->mm) {
+               ret = -ENOMEM;
+               goto out;
+       }
+       sman->num_managers = num_managers;
+       INIT_LIST_HEAD(&sman->owner_items);
+       ret = drm_ht_create(&sman->owner_hash_tab, owner_order);
+       if (ret)
+               goto out1;
+       ret = drm_ht_create(&sman->user_hash_tab, user_order);
+       if (!ret)
+               goto out;
+
+       drm_ht_remove(&sman->owner_hash_tab);
+out1:
+       kfree(sman->mm);
+out:
+       return ret;
+}
+
+EXPORT_SYMBOL(drm_sman_init);
+
+static void *drm_sman_mm_allocate(void *private, unsigned long size,
+                                 unsigned alignment)
+{
+       struct drm_mm *mm = (struct drm_mm *) private;
+       struct drm_mm_node *tmp;
+
+       tmp = drm_mm_search_free(mm, size, alignment, 1);
+       if (!tmp) {
+               return NULL;
+       }
+       tmp = drm_mm_get_block(tmp, size, alignment);
+       return tmp;
+}
+
+static void drm_sman_mm_free(void *private, void *ref)
+{
+       struct drm_mm_node *node = (struct drm_mm_node *) ref;
+
+       drm_mm_put_block(node);
+}
+
+static void drm_sman_mm_destroy(void *private)
+{
+       struct drm_mm *mm = (struct drm_mm *) private;
+       drm_mm_takedown(mm);
+       kfree(mm);
+}
+
+static unsigned long drm_sman_mm_offset(void *private, void *ref)
+{
+       struct drm_mm_node *node = (struct drm_mm_node *) ref;
+       return node->start;
+}
+
+int
+drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
+                  unsigned long start, unsigned long size)
+{
+       struct drm_sman_mm *sman_mm;
+       struct drm_mm *mm;
+       int ret;
+
+       BUG_ON(manager >= sman->num_managers);
+
+       sman_mm = &sman->mm[manager];
+       mm = kzalloc(sizeof(*mm), GFP_KERNEL);
+       if (!mm) {
+               return -ENOMEM;
+       }
+       sman_mm->private = mm;
+       ret = drm_mm_init(mm, start, size);
+
+       if (ret) {
+               kfree(mm);
+               return ret;
+       }
+
+       sman_mm->allocate = drm_sman_mm_allocate;
+       sman_mm->free = drm_sman_mm_free;
+       sman_mm->destroy = drm_sman_mm_destroy;
+       sman_mm->offset = drm_sman_mm_offset;
+
+       return 0;
+}
+
+EXPORT_SYMBOL(drm_sman_set_range);
+
+int
+drm_sman_set_manager(struct drm_sman * sman, unsigned int manager,
+                    struct drm_sman_mm * allocator)
+{
+       BUG_ON(manager >= sman->num_managers);
+       sman->mm[manager] = *allocator;
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_sman_set_manager);
+
+static struct drm_owner_item *drm_sman_get_owner_item(struct drm_sman * sman,
+                                                unsigned long owner)
+{
+       int ret;
+       struct drm_hash_item *owner_hash_item;
+       struct drm_owner_item *owner_item;
+
+       ret = drm_ht_find_item(&sman->owner_hash_tab, owner, &owner_hash_item);
+       if (!ret) {
+               return drm_hash_entry(owner_hash_item, struct drm_owner_item,
+                                     owner_hash);
+       }
+
+       owner_item = kzalloc(sizeof(*owner_item), GFP_KERNEL);
+       if (!owner_item)
+               goto out;
+
+       INIT_LIST_HEAD(&owner_item->mem_blocks);
+       owner_item->owner_hash.key = owner;
+       if (drm_ht_insert_item(&sman->owner_hash_tab, &owner_item->owner_hash))
+               goto out1;
+
+       list_add_tail(&owner_item->sman_list, &sman->owner_items);
+       return owner_item;
+
+out1:
+       kfree(owner_item);
+out:
+       return NULL;
+}
+
+struct drm_memblock_item *drm_sman_alloc(struct drm_sman *sman, unsigned int manager,
+                                   unsigned long size, unsigned alignment,
+                                   unsigned long owner)
+{
+       void *tmp;
+       struct drm_sman_mm *sman_mm;
+       struct drm_owner_item *owner_item;
+       struct drm_memblock_item *memblock;
+
+       BUG_ON(manager >= sman->num_managers);
+
+       sman_mm = &sman->mm[manager];
+       tmp = sman_mm->allocate(sman_mm->private, size, alignment);
+
+       if (!tmp) {
+               return NULL;
+       }
+
+       memblock = kzalloc(sizeof(*memblock), GFP_KERNEL);
+
+       if (!memblock)
+               goto out;
+
+       memblock->mm_info = tmp;
+       memblock->mm = sman_mm;
+       memblock->sman = sman;
+
+       if (drm_ht_just_insert_please
+           (&sman->user_hash_tab, &memblock->user_hash,
+            (unsigned long)memblock, 32, 0, 0))
+               goto out1;
+
+       owner_item = drm_sman_get_owner_item(sman, owner);
+       if (!owner_item)
+               goto out2;
+
+       list_add_tail(&memblock->owner_list, &owner_item->mem_blocks);
+
+       return memblock;
+
+out2:
+       drm_ht_remove_item(&sman->user_hash_tab, &memblock->user_hash);
+out1:
+       kfree(memblock);
+out:
+       sman_mm->free(sman_mm->private, tmp);
+
+       return NULL;
+}
+
+EXPORT_SYMBOL(drm_sman_alloc);
+
+static void drm_sman_free(struct drm_memblock_item *item)
+{
+       struct drm_sman *sman = item->sman;
+
+       list_del(&item->owner_list);
+       drm_ht_remove_item(&sman->user_hash_tab, &item->user_hash);
+       item->mm->free(item->mm->private, item->mm_info);
+       kfree(item);
+}
+
+int drm_sman_free_key(struct drm_sman *sman, unsigned int key)
+{
+       struct drm_hash_item *hash_item;
+       struct drm_memblock_item *memblock_item;
+
+       if (drm_ht_find_item(&sman->user_hash_tab, key, &hash_item))
+               return -EINVAL;
+
+       memblock_item = drm_hash_entry(hash_item, struct drm_memblock_item,
+                                      user_hash);
+       drm_sman_free(memblock_item);
+       return 0;
+}
+
+EXPORT_SYMBOL(drm_sman_free_key);
+
+static void drm_sman_remove_owner(struct drm_sman *sman,
+                                 struct drm_owner_item *owner_item)
+{
+       list_del(&owner_item->sman_list);
+       drm_ht_remove_item(&sman->owner_hash_tab, &owner_item->owner_hash);
+       kfree(owner_item);
+}
+
+int drm_sman_owner_clean(struct drm_sman *sman, unsigned long owner)
+{
+
+       struct drm_hash_item *hash_item;
+       struct drm_owner_item *owner_item;
+
+       if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) {
+               return -1;
+       }
+
+       owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash);
+       if (owner_item->mem_blocks.next == &owner_item->mem_blocks) {
+               drm_sman_remove_owner(sman, owner_item);
+               return -1;
+       }
+
+       return 0;
+}
+
+EXPORT_SYMBOL(drm_sman_owner_clean);
+
+static void drm_sman_do_owner_cleanup(struct drm_sman *sman,
+                                     struct drm_owner_item *owner_item)
+{
+       struct drm_memblock_item *entry, *next;
+
+       list_for_each_entry_safe(entry, next, &owner_item->mem_blocks,
+                                owner_list) {
+               drm_sman_free(entry);
+       }
+       drm_sman_remove_owner(sman, owner_item);
+}
+
+void drm_sman_owner_cleanup(struct drm_sman *sman, unsigned long owner)
+{
+
+       struct drm_hash_item *hash_item;
+       struct drm_owner_item *owner_item;
+
+       if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) {
+
+               return;
+       }
+
+       owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash);
+       drm_sman_do_owner_cleanup(sman, owner_item);
+}
+
+EXPORT_SYMBOL(drm_sman_owner_cleanup);
+
+void drm_sman_cleanup(struct drm_sman *sman)
+{
+       struct drm_owner_item *entry, *next;
+       unsigned int i;
+       struct drm_sman_mm *sman_mm;
+
+       list_for_each_entry_safe(entry, next, &sman->owner_items, sman_list) {
+               drm_sman_do_owner_cleanup(sman, entry);
+       }
+       if (sman->mm) {
+               for (i = 0; i < sman->num_managers; ++i) {
+                       sman_mm = &sman->mm[i];
+                       if (sman_mm->private) {
+                               sman_mm->destroy(sman_mm->private);
+                               sman_mm->private = NULL;
+                       }
+               }
+       }
+}
+
+EXPORT_SYMBOL(drm_sman_cleanup);
diff --git a/services4/3rdparty/linux_drm/drm_stub.c b/services4/3rdparty/linux_drm/drm_stub.c
new file mode 100644 (file)
index 0000000..cdc89ee
--- /dev/null
@@ -0,0 +1,474 @@
+/**
+ * \file drm_stub.h
+ * Stub support
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ */
+
+/*
+ * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
+ *
+ * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include "drmP.h"
+#include "drm_core.h"
+
+unsigned int drm_debug = 0;    /* 1 to enable debug output */
+EXPORT_SYMBOL(drm_debug);
+
+MODULE_AUTHOR(CORE_AUTHOR);
+MODULE_DESCRIPTION(CORE_DESC);
+MODULE_LICENSE("GPL and additional rights");
+MODULE_PARM_DESC(debug, "Enable debug output");
+
+module_param_named(debug, drm_debug, int, 0600);
+
+struct idr drm_minors_idr;
+
+struct class *drm_class;
+struct proc_dir_entry *drm_proc_root;
+struct dentry *drm_debugfs_root;
+void drm_ut_debug_printk(unsigned int request_level,
+                        const char *prefix,
+                        const char *function_name,
+                        const char *format, ...)
+{
+       va_list args;
+
+       if (drm_debug & request_level) {
+               if (function_name)
+                       printk(KERN_DEBUG "[%s:%s], ", prefix, function_name);
+               va_start(args, format);
+               vprintk(format, args);
+               va_end(args);
+       }
+}
+EXPORT_SYMBOL(drm_ut_debug_printk);
+static int drm_minor_get_id(struct drm_device *dev, int type)
+{
+       int new_id;
+       int ret;
+       int base = 0, limit = 63;
+
+       if (type == DRM_MINOR_CONTROL) {
+                base += 64;
+                limit = base + 127;
+        } else if (type == DRM_MINOR_RENDER) {
+                base += 128;
+                limit = base + 255;
+        }
+
+again:
+       if (idr_pre_get(&drm_minors_idr, GFP_KERNEL) == 0) {
+               DRM_ERROR("Out of memory expanding drawable idr\n");
+               return -ENOMEM;
+       }
+       mutex_lock(&dev->struct_mutex);
+       ret = idr_get_new_above(&drm_minors_idr, NULL,
+                               base, &new_id);
+       mutex_unlock(&dev->struct_mutex);
+       if (ret == -EAGAIN) {
+               goto again;
+       } else if (ret) {
+               return ret;
+       }
+
+       if (new_id >= limit) {
+               idr_remove(&drm_minors_idr, new_id);
+               return -EINVAL;
+       }
+       return new_id;
+}
+
+struct drm_master *drm_master_create(struct drm_minor *minor)
+{
+       struct drm_master *master;
+
+       master = kzalloc(sizeof(*master), GFP_KERNEL);
+       if (!master)
+               return NULL;
+
+       kref_init(&master->refcount);
+       spin_lock_init(&master->lock.spinlock);
+       init_waitqueue_head(&master->lock.lock_queue);
+       drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER);
+       INIT_LIST_HEAD(&master->magicfree);
+       master->minor = minor;
+
+       list_add_tail(&master->head, &minor->master_list);
+
+       return master;
+}
+
+struct drm_master *drm_master_get(struct drm_master *master)
+{
+       kref_get(&master->refcount);
+       return master;
+}
+EXPORT_SYMBOL(drm_master_get);
+
+static void drm_master_destroy(struct kref *kref)
+{
+       struct drm_master *master = container_of(kref, struct drm_master, refcount);
+       struct drm_magic_entry *pt, *next;
+       struct drm_device *dev = master->minor->dev;
+       struct drm_map_list *r_list, *list_temp;
+
+       list_del(&master->head);
+
+       if (dev->driver->master_destroy)
+               dev->driver->master_destroy(dev, master);
+
+       list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
+               if (r_list->master == master) {
+                       drm_rmmap_locked(dev, r_list->map);
+                       r_list = NULL;
+               }
+       }
+
+       if (master->unique) {
+               kfree(master->unique);
+               master->unique = NULL;
+               master->unique_len = 0;
+       }
+
+       kfree(dev->devname);
+       dev->devname = NULL;
+
+       list_for_each_entry_safe(pt, next, &master->magicfree, head) {
+               list_del(&pt->head);
+               drm_ht_remove_item(&master->magiclist, &pt->hash_item);
+               kfree(pt);
+       }
+
+       drm_ht_remove(&master->magiclist);
+
+       kfree(master);
+}
+
+void drm_master_put(struct drm_master **master)
+{
+       kref_put(&(*master)->refcount, drm_master_destroy);
+       *master = NULL;
+}
+EXPORT_SYMBOL(drm_master_put);
+
+int drm_setmaster_ioctl(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv)
+{
+       int ret = 0;
+
+       if (file_priv->is_master)
+               return 0;
+
+       if (file_priv->minor->master && file_priv->minor->master != file_priv->master)
+               return -EINVAL;
+
+       if (!file_priv->master)
+               return -EINVAL;
+
+       if (!file_priv->minor->master &&
+           file_priv->minor->master != file_priv->master) {
+               mutex_lock(&dev->struct_mutex);
+               file_priv->minor->master = drm_master_get(file_priv->master);
+               file_priv->is_master = 1;
+               if (dev->driver->master_set) {
+                       ret = dev->driver->master_set(dev, file_priv, false);
+                       if (unlikely(ret != 0)) {
+                               file_priv->is_master = 0;
+                               drm_master_put(&file_priv->minor->master);
+                       }
+               }
+               mutex_unlock(&dev->struct_mutex);
+       }
+
+       return 0;
+}
+
+int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv)
+{
+       if (!file_priv->is_master)
+               return -EINVAL;
+
+       if (!file_priv->minor->master)
+               return -EINVAL;
+
+       mutex_lock(&dev->struct_mutex);
+       if (dev->driver->master_drop)
+               dev->driver->master_drop(dev, file_priv, false);
+       drm_master_put(&file_priv->minor->master);
+       file_priv->is_master = 0;
+       mutex_unlock(&dev->struct_mutex);
+       return 0;
+}
+
+int drm_fill_in_dev(struct drm_device *dev,
+                          const struct pci_device_id *ent,
+                          struct drm_driver *driver)
+{
+       int retcode;
+
+       INIT_LIST_HEAD(&dev->filelist);
+       INIT_LIST_HEAD(&dev->ctxlist);
+       INIT_LIST_HEAD(&dev->vmalist);
+       INIT_LIST_HEAD(&dev->maplist);
+       INIT_LIST_HEAD(&dev->vblank_event_list);
+
+       spin_lock_init(&dev->count_lock);
+       spin_lock_init(&dev->event_lock);
+       mutex_init(&dev->struct_mutex);
+       mutex_init(&dev->ctxlist_mutex);
+
+       if (drm_ht_create(&dev->map_hash, 12)) {
+               return -ENOMEM;
+       }
+
+       /* the DRM has 6 basic counters */
+       dev->counters = 6;
+       dev->types[0] = _DRM_STAT_LOCK;
+       dev->types[1] = _DRM_STAT_OPENS;
+       dev->types[2] = _DRM_STAT_CLOSES;
+       dev->types[3] = _DRM_STAT_IOCTLS;
+       dev->types[4] = _DRM_STAT_LOCKS;
+       dev->types[5] = _DRM_STAT_UNLOCKS;
+
+       dev->driver = driver;
+
+       if (drm_core_has_AGP(dev)) {
+               if (drm_device_is_agp(dev))
+                       dev->agp = drm_agp_init(dev);
+               if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP)
+                   && (dev->agp == NULL)) {
+                       DRM_ERROR("Cannot initialize the agpgart module.\n");
+                       retcode = -EINVAL;
+                       goto error_out_unreg;
+               }
+               if (drm_core_has_MTRR(dev)) {
+                       if (dev->agp)
+                               dev->agp->agp_mtrr =
+                                   mtrr_add(dev->agp->agp_info.aper_base,
+                                            dev->agp->agp_info.aper_size *
+                                            1024 * 1024, MTRR_TYPE_WRCOMB, 1);
+               }
+       }
+
+
+       retcode = drm_ctxbitmap_init(dev);
+       if (retcode) {
+               DRM_ERROR("Cannot allocate memory for context bitmap.\n");
+               goto error_out_unreg;
+       }
+
+       if (driver->driver_features & DRIVER_GEM) {
+               retcode = drm_gem_init(dev);
+               if (retcode) {
+                       DRM_ERROR("Cannot initialize graphics execution "
+                                 "manager (GEM)\n");
+                       goto error_out_unreg;
+               }
+       }
+
+       return 0;
+
+      error_out_unreg:
+       drm_lastclose(dev);
+       return retcode;
+}
+
+
+/**
+ * Get a secondary minor number.
+ *
+ * \param dev device data structure
+ * \param sec-minor structure to hold the assigned minor
+ * \return negative number on failure.
+ *
+ * Search an empty entry and initialize it to the given parameters, and
+ * create the proc init entry via proc_init(). This routines assigns
+ * minor numbers to secondary heads of multi-headed cards
+ */
+int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
+{
+       struct drm_minor *new_minor;
+       int ret;
+       int minor_id;
+
+       DRM_DEBUG("\n");
+
+       minor_id = drm_minor_get_id(dev, type);
+       if (minor_id < 0)
+               return minor_id;
+
+       new_minor = kzalloc(sizeof(struct drm_minor), GFP_KERNEL);
+       if (!new_minor) {
+               ret = -ENOMEM;
+               goto err_idr;
+       }
+
+       new_minor->type = type;
+       new_minor->device = MKDEV(DRM_MAJOR, minor_id);
+       new_minor->dev = dev;
+       new_minor->index = minor_id;
+       INIT_LIST_HEAD(&new_minor->master_list);
+
+       idr_replace(&drm_minors_idr, new_minor, minor_id);
+
+       if (type == DRM_MINOR_LEGACY) {
+               ret = drm_proc_init(new_minor, minor_id, drm_proc_root);
+               if (ret) {
+                       DRM_ERROR("DRM: Failed to initialize /proc/dri.\n");
+                       goto err_mem;
+               }
+       } else
+               new_minor->proc_root = NULL;
+
+#if defined(CONFIG_DEBUG_FS)
+       ret = drm_debugfs_init(new_minor, minor_id, drm_debugfs_root);
+       if (ret) {
+               DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
+               goto err_g2;
+       }
+#endif
+
+       ret = drm_sysfs_device_add(new_minor);
+       if (ret) {
+               printk(KERN_ERR
+                      "DRM: Error sysfs_device_add.\n");
+               goto err_g2;
+       }
+       *minor = new_minor;
+
+       DRM_DEBUG("new minor assigned %d\n", minor_id);
+       return 0;
+
+
+err_g2:
+       if (new_minor->type == DRM_MINOR_LEGACY)
+               drm_proc_cleanup(new_minor, drm_proc_root);
+err_mem:
+       kfree(new_minor);
+err_idr:
+       idr_remove(&drm_minors_idr, minor_id);
+       *minor = NULL;
+       return ret;
+}
+
+/**
+ * Put a secondary minor number.
+ *
+ * \param sec_minor - structure to be released
+ * \return always zero
+ *
+ * Cleans up the proc resources. Not legal for this to be the
+ * last minor released.
+ *
+ */
+int drm_put_minor(struct drm_minor **minor_p)
+{
+       struct drm_minor *minor = *minor_p;
+
+       DRM_DEBUG("release secondary minor %d\n", minor->index);
+
+       if (minor->type == DRM_MINOR_LEGACY)
+               drm_proc_cleanup(minor, drm_proc_root);
+#if defined(CONFIG_DEBUG_FS)
+       drm_debugfs_cleanup(minor);
+#endif
+
+       drm_sysfs_device_remove(minor);
+
+       idr_remove(&drm_minors_idr, minor->index);
+
+       kfree(minor);
+       *minor_p = NULL;
+       return 0;
+}
+
+/**
+ * Called via drm_exit() at module unload time or when pci device is
+ * unplugged.
+ *
+ * Cleans up all DRM device, calling drm_lastclose().
+ *
+ * \sa drm_init
+ */
+void drm_put_dev(struct drm_device *dev)
+{
+       struct drm_driver *driver;
+       struct drm_map_list *r_list, *list_temp;
+
+       DRM_DEBUG("\n");
+
+       if (!dev) {
+               DRM_ERROR("cleanup called no dev\n");
+               return;
+       }
+       driver = dev->driver;
+
+       drm_lastclose(dev);
+
+       if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) &&
+           dev->agp && dev->agp->agp_mtrr >= 0) {
+               int retval;
+               retval = mtrr_del(dev->agp->agp_mtrr,
+                                 dev->agp->agp_info.aper_base,
+                                 dev->agp->agp_info.aper_size * 1024 * 1024);
+               DRM_DEBUG("mtrr_del=%d\n", retval);
+       }
+
+       if (dev->driver->unload)
+               dev->driver->unload(dev);
+
+       if (drm_core_has_AGP(dev) && dev->agp) {
+               kfree(dev->agp);
+               dev->agp = NULL;
+       }
+
+       drm_vblank_cleanup(dev);
+
+       list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
+               drm_rmmap(dev, r_list->map);
+       drm_ht_remove(&dev->map_hash);
+
+       drm_ctxbitmap_cleanup(dev);
+
+       if (drm_core_check_feature(dev, DRIVER_MODESET))
+               drm_put_minor(&dev->control);
+
+       if (driver->driver_features & DRIVER_GEM)
+               drm_gem_destroy(dev);
+
+       drm_put_minor(&dev->primary);
+
+       if (dev->devname) {
+               kfree(dev->devname);
+               dev->devname = NULL;
+       }
+       kfree(dev);
+}
+EXPORT_SYMBOL(drm_put_dev);
diff --git a/services4/3rdparty/linux_drm/drm_sysfs.c b/services4/3rdparty/linux_drm/drm_sysfs.c
new file mode 100644 (file)
index 0000000..85da4c4
--- /dev/null
@@ -0,0 +1,553 @@
+
+/*
+ * drm_sysfs.c - Modifications to drm_sysfs_class.c to support
+ *               extra sysfs attribute from DRM. Normal drm_sysfs_class
+ *               does not allow adding attributes.
+ *
+ * Copyright (c) 2004 Jon Smirl <jonsmirl@gmail.com>
+ * Copyright (c) 2003-2004 Greg Kroah-Hartman <greg@kroah.com>
+ * Copyright (c) 2003-2004 IBM Corp.
+ *
+ * This file is released under the GPLv2
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/kdev_t.h>
+#include <linux/gfp.h>
+#include <linux/err.h>
+
+#include "drm_sysfs.h"
+#include "drm_core.h"
+#include "drmP.h"
+
+/* Map an embedded struct device back to the DRM object that contains it. */
+#define to_drm_minor(d) container_of(d, struct drm_minor, kdev)
+#define to_drm_connector(d) container_of(d, struct drm_connector, kdev)
+
+/* Type tag used by the class PM hooks below to recognise DRM minor devices. */
+static struct device_type drm_sysfs_device_minor = {
+       .name = "drm_minor"
+};
+
+/**
+ * drm_class_suspend - DRM class suspend hook
+ * @dev: Linux device to suspend
+ * @state: power state to enter
+ *
+ * Just figures out what the actual struct drm_device associated with
+ * @dev is and calls its suspend hook, if present.
+ */
+static int drm_class_suspend(struct device *dev, pm_message_t state)
+{
+       if (dev->type == &drm_sysfs_device_minor) {
+               struct drm_minor *drm_minor = to_drm_minor(dev);
+               struct drm_device *drm_dev = drm_minor->dev;
+
+               /* Only legacy (non-KMS) minors route suspend to the driver. */
+               if (drm_minor->type == DRM_MINOR_LEGACY &&
+                   !drm_core_check_feature(drm_dev, DRIVER_MODESET) &&
+                   drm_dev->driver->suspend)
+                       return drm_dev->driver->suspend(drm_dev, state);
+       }
+       return 0;
+}
+
+/**
+ * drm_class_resume - DRM class resume hook
+ * @dev: Linux device to resume
+ *
+ * Just figures out what the actual struct drm_device associated with
+ * @dev is and calls its resume hook, if present.
+ */
+static int drm_class_resume(struct device *dev)
+{
+       if (dev->type == &drm_sysfs_device_minor) {
+               struct drm_minor *drm_minor = to_drm_minor(dev);
+               struct drm_device *drm_dev = drm_minor->dev;
+
+               /* Mirror of drm_class_suspend(): legacy non-KMS minors only. */
+               if (drm_minor->type == DRM_MINOR_LEGACY &&
+                   !drm_core_check_feature(drm_dev, DRIVER_MODESET) &&
+                   drm_dev->driver->resume)
+                       return drm_dev->driver->resume(drm_dev);
+       }
+       return 0;
+}
+
+/*
+ * devnode callback: place DRM device nodes under the "dri/" subdirectory
+ * (e.g. /dev/dri/card0). @mode is left untouched, keeping the default.
+ */
+static char *drm_devnode(struct device *dev, mode_t *mode)
+{
+       return kasprintf(GFP_KERNEL, "dri/%s", dev_name(dev));
+}
+
+/* Read-only /sys/class/drm/version attribute: "<name> <maj>.<min>.<patch> <date>". */
+static CLASS_ATTR_STRING(version, S_IRUGO,
+               CORE_NAME " "
+               __stringify(CORE_MAJOR) "."
+               __stringify(CORE_MINOR) "."
+               __stringify(CORE_PATCHLEVEL) " "
+               CORE_DATE);
+
+/**
+ * drm_sysfs_create - create a struct drm_sysfs_class structure
+ * @owner: pointer to the module that is to "own" this struct drm_sysfs_class
+ * @name: pointer to a string for the name of this class.
+ *
+ * This is used to create DRM class pointer that can then be used
+ * in calls to drm_sysfs_device_add().
+ *
+ * Note, the pointer created here is to be destroyed when finished by making a
+ * call to drm_sysfs_destroy().
+ *
+ * Returns the new class on success, or an ERR_PTR() on failure.
+ */
+struct class *drm_sysfs_create(struct module *owner, char *name)
+{
+       struct class *class;
+       int err;
+
+       class = class_create(owner, name);
+       if (IS_ERR(class)) {
+               err = PTR_ERR(class);
+               goto err_out;
+       }
+
+       /* Wire up class-wide PM hooks before any device is registered. */
+       class->suspend = drm_class_suspend;
+       class->resume = drm_class_resume;
+
+       err = class_create_file(class, &class_attr_version.attr);
+       if (err)
+               goto err_out_class;
+
+       class->devnode = drm_devnode;
+
+       return class;
+
+err_out_class:
+       class_destroy(class);
+err_out:
+       return ERR_PTR(err);
+}
+
+/**
+ * drm_sysfs_destroy - destroys DRM class
+ *
+ * Destroy the DRM device class.
+ */
+void drm_sysfs_destroy(void)
+{
+       /* Tolerate being called when the class was never successfully created. */
+       if ((drm_class == NULL) || (IS_ERR(drm_class)))
+               return;
+       class_remove_file(drm_class, &class_attr_version.attr);
+       class_destroy(drm_class);
+}
+
+/**
+ * drm_sysfs_device_release - do nothing
+ * @dev: Linux device
+ *
+ * Normally, this would free the DRM device associated with @dev, along
+ * with cleaning up any other stuff.  But we do that in the DRM core, so
+ * this function can just return and hope that the core does its job.
+ */
+static void drm_sysfs_device_release(struct device *dev)
+{
+       /* Only scrub the embedded struct device; its container is freed by the core. */
+       memset(dev, 0, sizeof(struct device));
+       return;
+}
+
+/*
+ * Connector properties
+ */
+
+/* sysfs "status" attribute: re-run connector detection and print the result. */
+static ssize_t status_show(struct device *device,
+                          struct device_attribute *attr,
+                          char *buf)
+{
+       struct drm_connector *connector = to_drm_connector(device);
+       enum drm_connector_status status;
+
+       status = connector->funcs->detect(connector, true);
+       return snprintf(buf, PAGE_SIZE, "%s\n",
+                       drm_get_connector_status_name(status));
+}
+
+/* sysfs "dpms" attribute: print the connector's current DPMS property value. */
+static ssize_t dpms_show(struct device *device,
+                          struct device_attribute *attr,
+                          char *buf)
+{
+       struct drm_connector *connector = to_drm_connector(device);
+       struct drm_device *dev = connector->dev;
+       uint64_t dpms_status;
+       int ret;
+
+       ret = drm_connector_property_get_value(connector,
+                                           dev->mode_config.dpms_property,
+                                           &dpms_status);
+       /* On lookup failure, show an empty attribute rather than an error. */
+       if (ret)
+               return 0;
+
+       return snprintf(buf, PAGE_SIZE, "%s\n",
+                       drm_get_dpms_name((int)dpms_status));
+}
+
+/* sysfs "enabled" attribute: a connector is enabled iff it has an encoder. */
+static ssize_t enabled_show(struct device *device,
+                           struct device_attribute *attr,
+                          char *buf)
+{
+       struct drm_connector *connector = to_drm_connector(device);
+
+       return snprintf(buf, PAGE_SIZE, "%s\n", connector->encoder ? "enabled" :
+                       "disabled");
+}
+
+/*
+ * sysfs binary "edid" attribute: expose the connector's cached EDID blob.
+ * Supports partial reads at arbitrary offsets; returns 0 when no EDID
+ * is available or the offset is past the end.
+ */
+static ssize_t edid_show(struct file *filp, struct kobject *kobj,
+                        struct bin_attribute *attr, char *buf, loff_t off,
+                        size_t count)
+{
+       struct device *connector_dev = container_of(kobj, struct device, kobj);
+       struct drm_connector *connector = to_drm_connector(connector_dev);
+       unsigned char *edid;
+       size_t size;
+
+       if (!connector->edid_blob_ptr)
+               return 0;
+
+       edid = connector->edid_blob_ptr->data;
+       size = connector->edid_blob_ptr->length;
+       if (!edid)
+               return 0;
+
+       if (off >= size)
+               return 0;
+
+       /* Clamp the read so it never runs past the end of the blob. */
+       if (off + count > size)
+               count = size - off;
+       memcpy(buf, edid + off, count);
+
+       return count;
+}
+
+/* sysfs "modes" attribute: one probed display mode name per line. */
+static ssize_t modes_show(struct device *device,
+                          struct device_attribute *attr,
+                          char *buf)
+{
+       struct drm_connector *connector = to_drm_connector(device);
+       struct drm_display_mode *mode;
+       int written = 0;
+
+       list_for_each_entry(mode, &connector->modes, head) {
+               written += snprintf(buf + written, PAGE_SIZE - written, "%s\n",
+                                   mode->name);
+       }
+
+       return written;
+}
+
+/*
+ * sysfs "subconnector" attribute: report the detected sub-connector for
+ * DVI-I and TV-out connector types (the only types carrying this property).
+ */
+static ssize_t subconnector_show(struct device *device,
+                          struct device_attribute *attr,
+                          char *buf)
+{
+       struct drm_connector *connector = to_drm_connector(device);
+       struct drm_device *dev = connector->dev;
+       struct drm_property *prop = NULL;
+       uint64_t subconnector;
+       int is_tv = 0;
+       int ret;
+
+       /* Pick the property matching the connector type; is_tv selects the name table. */
+       switch (connector->connector_type) {
+               case DRM_MODE_CONNECTOR_DVII:
+                       prop = dev->mode_config.dvi_i_subconnector_property;
+                       break;
+               case DRM_MODE_CONNECTOR_Composite:
+               case DRM_MODE_CONNECTOR_SVIDEO:
+               case DRM_MODE_CONNECTOR_Component:
+               case DRM_MODE_CONNECTOR_TV:
+                       prop = dev->mode_config.tv_subconnector_property;
+                       is_tv = 1;
+                       break;
+               default:
+                       DRM_ERROR("Wrong connector type for this property\n");
+                       return 0;
+       }
+
+       if (!prop) {
+               DRM_ERROR("Unable to find subconnector property\n");
+               return 0;
+       }
+
+       ret = drm_connector_property_get_value(connector, prop, &subconnector);
+       if (ret)
+               return 0;
+
+       return snprintf(buf, PAGE_SIZE, "%s", is_tv ?
+                       drm_get_tv_subconnector_name((int)subconnector) :
+                       drm_get_dvi_i_subconnector_name((int)subconnector));
+}
+
+/*
+ * sysfs "select_subconnector" attribute: report the user-selected
+ * sub-connector; same type handling as subconnector_show() above.
+ */
+static ssize_t select_subconnector_show(struct device *device,
+                          struct device_attribute *attr,
+                          char *buf)
+{
+       struct drm_connector *connector = to_drm_connector(device);
+       struct drm_device *dev = connector->dev;
+       struct drm_property *prop = NULL;
+       uint64_t subconnector;
+       int is_tv = 0;
+       int ret;
+
+       switch (connector->connector_type) {
+               case DRM_MODE_CONNECTOR_DVII:
+                       prop = dev->mode_config.dvi_i_select_subconnector_property;
+                       break;
+               case DRM_MODE_CONNECTOR_Composite:
+               case DRM_MODE_CONNECTOR_SVIDEO:
+               case DRM_MODE_CONNECTOR_Component:
+               case DRM_MODE_CONNECTOR_TV:
+                       prop = dev->mode_config.tv_select_subconnector_property;
+                       is_tv = 1;
+                       break;
+               default:
+                       DRM_ERROR("Wrong connector type for this property\n");
+                       return 0;
+       }
+
+       if (!prop) {
+               DRM_ERROR("Unable to find select subconnector property\n");
+               return 0;
+       }
+
+       ret = drm_connector_property_get_value(connector, prop, &subconnector);
+       if (ret)
+               return 0;
+
+       return snprintf(buf, PAGE_SIZE, "%s", is_tv ?
+                       drm_get_tv_select_name((int)subconnector) :
+                       drm_get_dvi_i_select_name((int)subconnector));
+}
+
+/* Attributes created for every connector. */
+static struct device_attribute connector_attrs[] = {
+       __ATTR_RO(status),
+       __ATTR_RO(enabled),
+       __ATTR_RO(dpms),
+       __ATTR_RO(modes),
+};
+
+/* These attributes are for both DVI-I connectors and all types of tv-out. */
+static struct device_attribute connector_attrs_opt1[] = {
+       __ATTR_RO(subconnector),
+       __ATTR_RO(select_subconnector),
+};
+
+/* Binary attribute backing the per-connector "edid" file (see edid_show). */
+static struct bin_attribute edid_attr = {
+       .attr.name = "edid",
+       .attr.mode = 0444,
+       .size = 0,
+       .read = edid_show,
+};
+
+/**
+ * drm_sysfs_connector_add - add an connector to sysfs
+ * @connector: connector to add
+ *
+ * Create an connector device in sysfs, along with its associated connector
+ * properties (so far, connection status, dpms, mode list & edid) and
+ * generate a hotplug event so userspace knows there's a new connector
+ * available.
+ *
+ * Note:
+ * This routine should only be called *once* for each DRM minor registered.
+ * A second call for an already registered device will trigger the BUG_ON
+ * below.
+ *
+ * Returns 0 on success or a negative errno; on failure all attributes
+ * created so far are removed and the device is unregistered again.
+ */
+int drm_sysfs_connector_add(struct drm_connector *connector)
+{
+       struct drm_device *dev = connector->dev;
+       int attr_cnt = 0;
+       int opt_cnt = 0;
+       int i;
+       int ret = 0;
+
+       /* We shouldn't get called more than once for the same connector */
+       BUG_ON(device_is_registered(&connector->kdev));
+
+       connector->kdev.parent = &dev->primary->kdev;
+       connector->kdev.class = drm_class;
+       connector->kdev.release = drm_sysfs_device_release;
+
+       DRM_DEBUG("adding \"%s\" to sysfs\n",
+                 drm_get_connector_name(connector));
+
+       dev_set_name(&connector->kdev, "card%d-%s",
+                    dev->primary->index, drm_get_connector_name(connector));
+       ret = device_register(&connector->kdev);
+
+       if (ret) {
+               DRM_ERROR("failed to register connector device: %d\n", ret);
+               goto out;
+       }
+
+       /* Standard attributes */
+
+       for (attr_cnt = 0; attr_cnt < ARRAY_SIZE(connector_attrs); attr_cnt++) {
+               ret = device_create_file(&connector->kdev, &connector_attrs[attr_cnt]);
+               if (ret)
+                       goto err_out_files;
+       }
+
+       /* Optional attributes */
+       /*
+        * In the long run it maybe a good idea to make one set of
+        * optionals per connector type.
+        */
+       switch (connector->connector_type) {
+               case DRM_MODE_CONNECTOR_DVII:
+               case DRM_MODE_CONNECTOR_Composite:
+               case DRM_MODE_CONNECTOR_SVIDEO:
+               case DRM_MODE_CONNECTOR_Component:
+               case DRM_MODE_CONNECTOR_TV:
+                       for (opt_cnt = 0; opt_cnt < ARRAY_SIZE(connector_attrs_opt1); opt_cnt++) {
+                               ret = device_create_file(&connector->kdev, &connector_attrs_opt1[opt_cnt]);
+                               if (ret)
+                                       goto err_out_files;
+                       }
+                       break;
+               default:
+                       break;
+       }
+
+       ret = sysfs_create_bin_file(&connector->kdev.kobj, &edid_attr);
+       if (ret)
+               goto err_out_files;
+
+       /* Let userspace know we have a new connector */
+       drm_sysfs_hotplug_event(dev);
+
+       return 0;
+
+err_out_files:
+       /* attr_cnt/opt_cnt hold how many files were created; remove exactly those. */
+       for (i = 0; i < opt_cnt; i++)
+               device_remove_file(&connector->kdev, &connector_attrs_opt1[i]);
+       for (i = 0; i < attr_cnt; i++)
+               device_remove_file(&connector->kdev, &connector_attrs[i]);
+       device_unregister(&connector->kdev);
+
+out:
+       return ret;
+}
+
+/**
+ * drm_sysfs_connector_remove - remove an connector device from sysfs
+ * @connector: connector to remove
+ *
+ * Remove @connector and its associated attributes from sysfs.  Note that
+ * the device model core will take care of sending the "remove" uevent
+ * at this time, so we don't need to do it.
+ *
+ * Note:
+ * This routine should only be called if the connector was previously
+ * successfully registered.  If @connector hasn't been registered yet,
+ * you'll likely see a panic somewhere deep in sysfs code when called.
+ */
+void drm_sysfs_connector_remove(struct drm_connector *connector)
+{
+       int i;
+
+       DRM_DEBUG("removing \"%s\" from sysfs\n",
+                 drm_get_connector_name(connector));
+
+       for (i = 0; i < ARRAY_SIZE(connector_attrs); i++)
+               device_remove_file(&connector->kdev, &connector_attrs[i]);
+       sysfs_remove_bin_file(&connector->kdev.kobj, &edid_attr);
+       device_unregister(&connector->kdev);
+}
+EXPORT_SYMBOL(drm_sysfs_connector_remove);
+
+/**
+ * drm_sysfs_hotplug_event - generate a DRM uevent
+ * @dev: DRM device
+ *
+ * Send a uevent for the DRM device specified by @dev.  Currently we only
+ * set HOTPLUG=1 in the uevent environment, but this could be expanded to
+ * deal with other types of events.
+ */
+void drm_sysfs_hotplug_event(struct drm_device *dev)
+{
+       char *event_string = "HOTPLUG=1";
+       char *envp[] = { event_string, NULL };
+
+       DRM_DEBUG("generating hotplug event\n");
+
+       kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, envp);
+}
+EXPORT_SYMBOL(drm_sysfs_hotplug_event);
+
+/**
+ * drm_sysfs_device_add - adds a class device to sysfs for a character driver
+ * @dev: DRM device to be added
+ * @head: DRM head in question
+ *
+ * Add a DRM device to the DRM's device model class.  We use @dev's PCI device
+ * as the parent for the Linux device, and make sure it has a file containing
+ * the driver we're using (for userspace compatibility).
+ */
+int drm_sysfs_device_add(struct drm_minor *minor)
+{
+       int err;
+       char *minor_str;
+
+       minor->kdev.parent = minor->dev->dev;
+
+       minor->kdev.class = drm_class;
+       minor->kdev.release = drm_sysfs_device_release;
+       minor->kdev.devt = minor->device;
+       minor->kdev.type = &drm_sysfs_device_minor;
+       if (minor->type == DRM_MINOR_CONTROL)
+               minor_str = "controlD%d";
+        else if (minor->type == DRM_MINOR_RENDER)
+                minor_str = "renderD%d";
+        else
+                minor_str = "card%d";
+
+       dev_set_name(&minor->kdev, minor_str, minor->index);
+
+       err = device_register(&minor->kdev);
+       if (err) {
+               DRM_ERROR("device add failed: %d\n", err);
+               goto err_out;
+       }
+
+       return 0;
+
+err_out:
+       return err;
+}
+
+/**
+ * drm_sysfs_device_remove - remove DRM device
+ * @minor: DRM minor to remove
+ *
+ * This call unregisters and cleans up a class device that was created with a
+ * call to drm_sysfs_device_add()
+ */
+void drm_sysfs_device_remove(struct drm_minor *minor)
+{
+       device_unregister(&minor->kdev);
+}
+
+
+/**
+ * drm_class_device_register - Register a struct device in the drm class.
+ *
+ * @dev: pointer to struct device to register.
+ *
+ * @dev should have all relevant members pre-filled with the exception
+ * of the class member. In particular, the device_type member must
+ * be set.
+ */
+
+int drm_class_device_register(struct device *dev)
+{
+       dev->class = drm_class;
+       return device_register(dev);
+}
+EXPORT_SYMBOL_GPL(drm_class_device_register);
+
+/* Counterpart of drm_class_device_register(); drops the device again. */
+void drm_class_device_unregister(struct device *dev)
+{
+       return device_unregister(dev);
+}
+EXPORT_SYMBOL_GPL(drm_class_device_unregister);
diff --git a/services4/3rdparty/linux_drm/drm_trace_points.c b/services4/3rdparty/linux_drm/drm_trace_points.c
new file mode 100644 (file)
index 0000000..0d0eb90
--- /dev/null
@@ -0,0 +1,4 @@
+#include "drmP.h"
+
+/* Instantiate the DRM tracepoints declared in drm_trace.h; this must be
+ * defined in exactly one translation unit. */
+#define CREATE_TRACE_POINTS
+#include "drm_trace.h"
diff --git a/services4/3rdparty/linux_drm/drm_vm.c b/services4/3rdparty/linux_drm/drm_vm.c
new file mode 100644 (file)
index 0000000..2c3fcbd
--- /dev/null
@@ -0,0 +1,690 @@
+/**
+ * \file drm_vm.c
+ * Memory mapping for DRM
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#if defined(__ia64__)
+#include <linux/efi.h>
+#include <linux/slab.h>
+#endif
+
+static void drm_vm_open(struct vm_area_struct *vma);
+static void drm_vm_close(struct vm_area_struct *vma);
+
+/*
+ * Compute page protection bits for an I/O mapping of type @map_type,
+ * applying per-architecture caching attributes on top of the vma's
+ * default protection.
+ */
+static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
+{
+       pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
+
+#if defined(__i386__) || defined(__x86_64__)
+       /* Uncached (PCD set, PWT clear) for everything but AGP maps. */
+       if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
+               pgprot_val(tmp) |= _PAGE_PCD;
+               pgprot_val(tmp) &= ~_PAGE_PWT;
+       }
+#elif defined(__powerpc__)
+       pgprot_val(tmp) |= _PAGE_NO_CACHE;
+       if (map_type == _DRM_REGISTERS)
+               pgprot_val(tmp) |= _PAGE_GUARDED;
+#elif defined(__ia64__)
+       /* Write-combine only if EFI says the range supports it. */
+       if (efi_range_is_wc(vma->vm_start, vma->vm_end -
+                                   vma->vm_start))
+               tmp = pgprot_writecombine(tmp);
+       else
+               tmp = pgprot_noncached(tmp);
+#elif defined(__sparc__) || defined(__arm__)
+       tmp = pgprot_noncached(tmp);
+#endif
+       return tmp;
+}
+
+/*
+ * Page protection for DMA mappings: the vma default, plus no-cache on
+ * PowerPC systems with non-coherent caches. @map_type is currently unused.
+ */
+static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
+{
+       pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
+
+#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
+       tmp |= _PAGE_NO_CACHE;
+#endif
+       return tmp;
+}
+
+/**
+ * \c fault method for AGP virtual memory.
+ *
+ * \param vma virtual memory area.
+ * \param vmf fault information (faulting address, result page).
+ * \return 0 on success with vmf->page set, or VM_FAULT_SIGBUS.
+ *
+ * Find the right map and if it's AGP memory find the real physical page to
+ * map, get the page, increment the use count and return it.
+ */
+#if __OS_HAS_AGP
+static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       struct drm_file *priv = vma->vm_file->private_data;
+       struct drm_device *dev = priv->minor->dev;
+       struct drm_local_map *map = NULL;
+       struct drm_map_list *r_list;
+       struct drm_hash_item *hash;
+
+       /*
+        * Find the right map
+        */
+       if (!drm_core_has_AGP(dev))
+               goto vm_fault_error;
+
+       /* Only needed when the CPU cannot access the aperture directly. */
+       if (!dev->agp || !dev->agp->cant_use_aperture)
+               goto vm_fault_error;
+
+       if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
+               goto vm_fault_error;
+
+       r_list = drm_hash_entry(hash, struct drm_map_list, hash);
+       map = r_list->map;
+
+       if (map && map->type == _DRM_AGP) {
+               /*
+                * Using vm_pgoff as a selector forces us to use this unusual
+                * addressing scheme.
+                */
+               resource_size_t offset = (unsigned long)vmf->virtual_address -
+                       vma->vm_start;
+               resource_size_t baddr = map->offset + offset;
+               struct drm_agp_mem *agpmem;
+               struct page *page;
+
+#ifdef __alpha__
+               /*
+                * Adjust to a bus-relative address
+                */
+               baddr -= dev->hose->mem_space->start;
+#endif
+
+               /*
+                * It's AGP memory - find the real physical page to map
+                */
+               list_for_each_entry(agpmem, &dev->agp->memory, head) {
+                       if (agpmem->bound <= baddr &&
+                           agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
+                               break;
+               }
+
+               /* Loop ran to completion: no bound region covers baddr. */
+               if (&agpmem->head == &dev->agp->memory)
+                       goto vm_fault_error;
+
+               /*
+                * Get the page, inc the use count, and return it
+                */
+               offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
+               page = agpmem->memory->pages[offset];
+               get_page(page);
+               vmf->page = page;
+
+               DRM_DEBUG
+                   ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
+                    (unsigned long long)baddr,
+                    agpmem->memory->pages[offset],
+                    (unsigned long long)offset,
+                    page_count(page));
+               return 0;
+       }
+vm_fault_error:
+       return VM_FAULT_SIGBUS; /* Disallow mremap */
+}
+#else                          /* __OS_HAS_AGP */
+/* Stub for kernels built without AGP support: every fault is a bus error. */
+static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       return VM_FAULT_SIGBUS;
+}
+#endif                         /* __OS_HAS_AGP */
+
+/**
+ * \c fault method for shared virtual memory.
+ *
+ * \param vma virtual memory area.
+ * \param vmf fault information (faulting address, result page).
+ * \return 0 on success with vmf->page set, or VM_FAULT_SIGBUS.
+ *
+ * Get the mapping, find the real physical page to map, get the page, and
+ * return it.
+ */
+static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       struct drm_local_map *map = vma->vm_private_data;
+       unsigned long offset;
+       unsigned long i;
+       struct page *page;
+
+       if (!map)
+               return VM_FAULT_SIGBUS; /* Nothing allocated */
+
+       /* map->handle is a vmalloc area; translate the fault offset into it. */
+       offset = (unsigned long)vmf->virtual_address - vma->vm_start;
+       i = (unsigned long)map->handle + offset;
+       page = vmalloc_to_page((void *)i);
+       if (!page)
+               return VM_FAULT_SIGBUS;
+       get_page(page);
+       vmf->page = page;
+
+       DRM_DEBUG("shm_fault 0x%lx\n", offset);
+       return 0;
+}
+
+/**
+ * \c close method for shared virtual memory.
+ *
+ * \param vma virtual memory area.
+ *
+ * Deletes map information if we are the last
+ * person to close a mapping and it's not in the global maplist.
+ */
+static void drm_vm_shm_close(struct vm_area_struct *vma)
+{
+       struct drm_file *priv = vma->vm_file->private_data;
+       struct drm_device *dev = priv->minor->dev;
+       struct drm_vma_entry *pt, *temp;
+       struct drm_local_map *map;
+       struct drm_map_list *r_list;
+       int found_maps = 0;
+
+       DRM_DEBUG("0x%08lx,0x%08lx\n",
+                 vma->vm_start, vma->vm_end - vma->vm_start);
+       atomic_dec(&dev->vma_count);
+
+       map = vma->vm_private_data;
+
+       mutex_lock(&dev->struct_mutex);
+       /* Count remaining vmas using this map while unlinking our own entry. */
+       list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
+               if (pt->vma->vm_private_data == map)
+                       found_maps++;
+               if (pt->vma == vma) {
+                       list_del(&pt->head);
+                       kfree(pt);
+               }
+       }
+
+       /* We were the only map that was found */
+       if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
+               /* Check to see if we are in the maplist, if we are not, then
+                * we delete this mappings information.
+                */
+               found_maps = 0;
+               list_for_each_entry(r_list, &dev->maplist, head) {
+                       if (r_list->map == map)
+                               found_maps++;
+               }
+
+               if (!found_maps) {
+                       drm_dma_handle_t dmah;
+
+                       /* Release the backing storage according to map type. */
+                       switch (map->type) {
+                       case _DRM_REGISTERS:
+                       case _DRM_FRAME_BUFFER:
+                               if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
+                                       int retcode;
+                                       retcode = mtrr_del(map->mtrr,
+                                                          map->offset,
+                                                          map->size);
+                                       DRM_DEBUG("mtrr_del = %d\n", retcode);
+                               }
+                               iounmap(map->handle);
+                               break;
+                       case _DRM_SHM:
+                               vfree(map->handle);
+                               break;
+                       case _DRM_AGP:
+                       case _DRM_SCATTER_GATHER:
+                               /* Backing storage owned elsewhere; nothing to free. */
+                               break;
+                       case _DRM_CONSISTENT:
+                               dmah.vaddr = map->handle;
+                               dmah.busaddr = map->offset;
+                               dmah.size = map->size;
+                               __drm_pci_free(dev, &dmah);
+                               break;
+                       case _DRM_GEM:
+                               DRM_ERROR("tried to rmmap GEM object\n");
+                               break;
+                       }
+                       kfree(map);
+               }
+       }
+       mutex_unlock(&dev->struct_mutex);
+}
+
+/**
+ * \c fault method for DMA virtual memory.
+ *
+ * \param vma virtual memory area.
+ * \param vmf fault information (faulting address, result page).
+ * \return 0 on success with vmf->page set, or VM_FAULT_SIGBUS.
+ *
+ * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
+ */
+static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       struct drm_file *priv = vma->vm_file->private_data;
+       struct drm_device *dev = priv->minor->dev;
+       struct drm_device_dma *dma = dev->dma;
+       unsigned long offset;
+       unsigned long page_nr;
+       struct page *page;
+
+       if (!dma)
+               return VM_FAULT_SIGBUS; /* Error */
+       if (!dma->pagelist)
+               return VM_FAULT_SIGBUS; /* Nothing allocated */
+
+       offset = (unsigned long)vmf->virtual_address - vma->vm_start;   /* vm_[pg]off[set] should be 0 */
+       page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
+       page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));
+
+       get_page(page);
+       vmf->page = page;
+
+       DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
+       return 0;
+}
+
+/**
+ * \c fault method for scatter-gather virtual memory.
+ *
+ * \param vma virtual memory area.
+ * \param vmf fault information (faulting address, result page).
+ * \return 0 on success with vmf->page set, or VM_FAULT_SIGBUS.
+ *
+ * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
+ */
+static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       struct drm_local_map *map = vma->vm_private_data;
+       struct drm_file *priv = vma->vm_file->private_data;
+       struct drm_device *dev = priv->minor->dev;
+       struct drm_sg_mem *entry = dev->sg;
+       unsigned long offset;
+       unsigned long map_offset;
+       unsigned long page_offset;
+       struct page *page;
+
+       if (!entry)
+               return VM_FAULT_SIGBUS; /* Error */
+       if (!entry->pagelist)
+               return VM_FAULT_SIGBUS; /* Nothing allocated */
+
+       /* Combine fault offset with the map's offset into the SG area. */
+       offset = (unsigned long)vmf->virtual_address - vma->vm_start;
+       map_offset = map->offset - (unsigned long)dev->sg->virtual;
+       page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
+       page = entry->pagelist[page_offset];
+       get_page(page);
+       vmf->page = page;
+
+       return 0;
+}
+
+/* Thin adapters binding the drm_do_vm_*_fault() helpers to vm_operations. */
+static int drm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       return drm_do_vm_fault(vma, vmf);
+}
+
+static int drm_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       return drm_do_vm_shm_fault(vma, vmf);
+}
+
+static int drm_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       return drm_do_vm_dma_fault(vma, vmf);
+}
+
+static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       return drm_do_vm_sg_fault(vma, vmf);
+}
+
+/** AGP virtual memory operations */
+static const struct vm_operations_struct drm_vm_ops = {
+       .fault = drm_vm_fault,
+       .open = drm_vm_open,
+       .close = drm_vm_close,
+};
+
+/** Shared virtual memory operations (note the map-freeing close handler) */
+static const struct vm_operations_struct drm_vm_shm_ops = {
+       .fault = drm_vm_shm_fault,
+       .open = drm_vm_open,
+       .close = drm_vm_shm_close,
+};
+
+/** DMA virtual memory operations */
+static const struct vm_operations_struct drm_vm_dma_ops = {
+       .fault = drm_vm_dma_fault,
+       .open = drm_vm_open,
+       .close = drm_vm_close,
+};
+
+/** Scatter-gather virtual memory operations */
+static const struct vm_operations_struct drm_vm_sg_ops = {
+       .fault = drm_vm_sg_fault,
+       .open = drm_vm_open,
+       .close = drm_vm_close,
+};
+
+/**
+ * \c open helper shared by all map types; caller holds dev->struct_mutex.
+ *
+ * \param vma virtual memory area.
+ *
+ * Create a new drm_vma_entry structure as the \p vma private data entry and
+ * add it to drm_device::vmalist.
+ */
+void drm_vm_open_locked(struct vm_area_struct *vma)
+{
+       struct drm_file *priv = vma->vm_file->private_data;
+       struct drm_device *dev = priv->minor->dev;
+       struct drm_vma_entry *vma_entry;
+
+       DRM_DEBUG("0x%08lx,0x%08lx\n",
+                 vma->vm_start, vma->vm_end - vma->vm_start);
+       atomic_inc(&dev->vma_count);
+
+       vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
+       if (vma_entry) {        /* allocation failure tolerated: vma just goes untracked */
+               vma_entry->vma = vma;
+               vma_entry->pid = current->pid;
+               list_add(&vma_entry->head, &dev->vmalist);
+       }
+}
+
+static void drm_vm_open(struct vm_area_struct *vma)
+{
+       struct drm_file *priv = vma->vm_file->private_data;
+       struct drm_device *dev = priv->minor->dev;
+
+       mutex_lock(&dev->struct_mutex); /* dev->vmalist is protected by struct_mutex */
+       drm_vm_open_locked(vma);
+       mutex_unlock(&dev->struct_mutex);
+}
+
+void drm_vm_close_locked(struct vm_area_struct *vma)
+{
+       struct drm_file *priv = vma->vm_file->private_data;
+       struct drm_device *dev = priv->minor->dev;
+       struct drm_vma_entry *pt, *temp;
+
+       DRM_DEBUG("0x%08lx,0x%08lx\n",
+                 vma->vm_start, vma->vm_end - vma->vm_start);
+       atomic_dec(&dev->vma_count);
+
+       list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
+               if (pt->vma == vma) {   /* unlink and free the tracking entry */
+                       list_del(&pt->head);
+                       kfree(pt);
+                       break;
+               }
+       }
+}
+
+/**
+ * \c close method for all virtual memory types.
+ *
+ * \param vma virtual memory area.
+ *
+ * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
+ * free it.
+ */
+static void drm_vm_close(struct vm_area_struct *vma)
+{
+       struct drm_file *priv = vma->vm_file->private_data;
+       struct drm_device *dev = priv->minor->dev;
+
+       mutex_lock(&dev->struct_mutex); /* vmalist is protected by struct_mutex */
+       drm_vm_close_locked(vma);
+       mutex_unlock(&dev->struct_mutex);
+}
+
+/**
+ * mmap DMA memory.
+ *
+ * \param filp file pointer.
+ * \param vma virtual memory area.
+ * \return zero on success or a negative number on failure.
+ *
+ * Sets the virtual memory area operations structure to vm_dma_ops, the file
+ * pointer, and calls vm_open().  Caller must hold dev->struct_mutex.
+ */
+static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
+{
+       struct drm_file *priv = filp->private_data;
+       struct drm_device *dev;
+       struct drm_device_dma *dma;
+       unsigned long length = vma->vm_end - vma->vm_start;
+
+       dev = priv->minor->dev;
+       dma = dev->dma;
+       DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
+                 vma->vm_start, vma->vm_end, vma->vm_pgoff);
+
+       /* Length must match exact page count */
+       if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
+               return -EINVAL;
+       }
+
+       if (!capable(CAP_SYS_ADMIN) &&
+           (dma->flags & _DRM_DMA_USE_PCI_RO)) {
+               vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
+#if defined(__i386__) || defined(__x86_64__)
+               pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
+#else
+               /* Ye gads this is ugly.  With more thought
+                  we could move this up higher and use
+                  `protection_map' instead.  */
+               vma->vm_page_prot =
+                   __pgprot(pte_val
+                            (pte_wrprotect
+                             (__pte(pgprot_val(vma->vm_page_prot)))));
+#endif
+       }
+
+       vma->vm_ops = &drm_vm_dma_ops;
+
+       vma->vm_flags |= VM_RESERVED;   /* Don't swap */
+       vma->vm_flags |= VM_DONTEXPAND;
+
+       vma->vm_file = filp;    /* Needed for drm_vm_open() */
+       drm_vm_open_locked(vma);
+       return 0;
+}
+
+static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
+{
+#ifdef __alpha__
+       return dev->hose->dense_mem_base - dev->hose->mem_space->start;
+#else
+       return 0;       /* no extra register offset needed on non-Alpha */
+#endif
+}
+
+/**
+ * mmap a DRM map.  Called with dev->struct_mutex held.
+ *
+ * \param filp file pointer.
+ * \param vma virtual memory area.
+ * \return zero on success or a negative number on failure.
+ *
+ * If the virtual memory area has no offset associated with it then it's a DMA
+ * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist,
+ * checks that the restricted flag is not set, sets the virtual memory operations
+ * according to the mapping type and remaps the pages. Finally sets the file
+ * pointer and calls vm_open().
+ */
+int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
+{
+       struct drm_file *priv = filp->private_data;
+       struct drm_device *dev = priv->minor->dev;
+       struct drm_local_map *map = NULL;
+       resource_size_t offset = 0;
+       struct drm_hash_item *hash;
+
+       DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
+                 vma->vm_start, vma->vm_end, vma->vm_pgoff);
+
+       if (!priv->authenticated)
+               return -EACCES;
+
+       /* We check for "dma". On Apple's UniNorth, it's valid to have
+        * the AGP mapped at physical address 0
+        * --BenH.
+        */
+       if (!vma->vm_pgoff
+#if __OS_HAS_AGP
+           && (!dev->agp
+               || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
+#endif
+           )
+               return drm_mmap_dma(filp, vma);
+
+       if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
+               DRM_ERROR("Could not find map\n");
+               return -EINVAL;
+       }
+
+       map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
+       if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
+               return -EPERM;
+
+       /* Check for valid size. */
+       if (map->size < vma->vm_end - vma->vm_start)
+               return -EINVAL;
+
+       if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
+               vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
+#if defined(__i386__) || defined(__x86_64__)
+               pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
+#else
+               /* Ye gads this is ugly.  With more thought
+                  we could move this up higher and use
+                  `protection_map' instead.  */
+               vma->vm_page_prot =
+                   __pgprot(pte_val
+                            (pte_wrprotect
+                             (__pte(pgprot_val(vma->vm_page_prot)))));
+#endif
+       }
+
+       switch (map->type) {
+#if !defined(__arm__)
+       case _DRM_AGP:
+               if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
+                       /*
+                        * On some platforms we can't talk to bus dma address from the CPU, so for
+                        * memory of type DRM_AGP, we'll deal with sorting out the real physical
+                        * pages and mappings in fault()
+                        */
+#if defined(__powerpc__)
+                       pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
+#endif
+                       vma->vm_ops = &drm_vm_ops;
+                       break;
+               }
+               /* fall through to _DRM_FRAME_BUFFER... */
+#endif
+       case _DRM_FRAME_BUFFER:
+       case _DRM_REGISTERS:
+               offset = drm_core_get_reg_ofs(dev);
+               vma->vm_flags |= VM_IO; /* not in core dump */
+               vma->vm_page_prot = drm_io_prot(map->type, vma);
+#if !defined(__arm__)
+               if (io_remap_pfn_range(vma, vma->vm_start,
+                                      (map->offset + offset) >> PAGE_SHIFT,
+                                      vma->vm_end - vma->vm_start,
+                                      vma->vm_page_prot))
+                       return -EAGAIN;
+#else
+               if (remap_pfn_range(vma, vma->vm_start,
+                                       (map->offset + offset) >> PAGE_SHIFT,
+                                       vma->vm_end - vma->vm_start,
+                                       vma->vm_page_prot))
+                       return -EAGAIN;
+#endif
+
+               DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
+                         " offset = 0x%llx\n",
+                         map->type,
+                         vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset));
+
+               vma->vm_ops = &drm_vm_ops;
+               break;
+       case _DRM_CONSISTENT:
+               /* Consistent memory is really like shared memory. But
+                * it's allocated in a different way, so avoid fault */
+               if (remap_pfn_range(vma, vma->vm_start,
+                   page_to_pfn(virt_to_page(map->handle)),
+                   vma->vm_end - vma->vm_start, vma->vm_page_prot))
+                       return -EAGAIN;
+               vma->vm_page_prot = drm_dma_prot(map->type, vma);       /* NOTE(review): prot set after remap; matches old upstream */
+       /* fall through to _DRM_SHM */
+       case _DRM_SHM:
+               vma->vm_ops = &drm_vm_shm_ops;
+               vma->vm_private_data = (void *)map;
+               /* Don't let this area swap.  Change when
+                  DRM_KERNEL advisory is supported. */
+               vma->vm_flags |= VM_RESERVED;
+               break;
+       case _DRM_SCATTER_GATHER:
+               vma->vm_ops = &drm_vm_sg_ops;
+               vma->vm_private_data = (void *)map;
+               vma->vm_flags |= VM_RESERVED;
+               vma->vm_page_prot = drm_dma_prot(map->type, vma);
+               break;
+       default:
+               return -EINVAL; /* This should never happen. */
+       }
+       vma->vm_flags |= VM_RESERVED;   /* Don't swap */
+       vma->vm_flags |= VM_DONTEXPAND;
+
+       vma->vm_file = filp;    /* Needed for drm_vm_open() */
+       drm_vm_open_locked(vma);
+       return 0;
+}
+
+int drm_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+       struct drm_file *priv = filp->private_data;
+       struct drm_device *dev = priv->minor->dev;
+       int ret;
+
+       mutex_lock(&dev->struct_mutex); /* hold across map lookup and vma tracking */
+       ret = drm_mmap_locked(filp, vma);
+       mutex_unlock(&dev->struct_mutex);
+
+       return ret;
+}
+EXPORT_SYMBOL(drm_mmap);
diff --git a/services4/3rdparty/linux_drm/kbuild/Makefile b/services4/3rdparty/linux_drm/kbuild/Makefile
new file mode 100644 (file)
index 0000000..6d0ce96
--- /dev/null
@@ -0,0 +1,73 @@
+#
+# Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+# 
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+# 
+# This program is distributed in the hope it will be useful but, except 
+# as otherwise stated in writing, without any warranty; without even the 
+# implied warranty of merchantability or fitness for a particular purpose. 
+# See the GNU General Public License for more details.
+# 
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+# 
+# The full GNU General Public License is included in this distribution in
+# the file called "COPYING".
+#
+# Contact Information:
+# Imagination Technologies Ltd. <gpl-support@imgtec.com>
+# Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
+# 
+#
+
+MODULE         = drm
+
+INCLUDES =
+
+SOURCES        =
+
+ifneq ($(SUPPORT_DRI_DRM),1)
+$(error "SUPPORT_DRI_DRM must be set")
+endif
+
+SOURCES_ROOT = ..
+
+INT_SOURCE_LIST += \
+       pvr_drm_stubs.c
+
+INT_SOURCES := $(addprefix $(SOURCES_ROOT)/,$(INT_SOURCE_LIST))
+SOURCES += $(INT_SOURCES)
+
+# Location of the upstream DRM sources inside the kernel tree.
+EXT_SOURCE_DIR := $(KERNELDIR)/drivers/gpu/drm
+
+EXT_BUILD_DIR := $(SOURCES_ROOT)/kbuild/tmp_$(PVR_BUILD_DIR)_$(BUILD)_$(MODULE)
+# Pattern rule: copy each needed upstream DRM file into the per-build temp dir.
+$(src)/$(EXT_BUILD_DIR)/%.c: $(EXT_SOURCE_DIR)/%.c
+       $(SILENT)$(MKDIR) $(dir $@)
+       $(SILENT)$(CP) $< $@
+
+clean-dirs += $(EXT_BUILD_DIR)
+# Upstream DRM files copied from the kernel tree and built into this module.
+EXT_SOURCE_LIST := \
+       drm_auth.c drm_bufs.c drm_cache.c \
+       drm_context.c drm_dma.c drm_drawable.c \
+       drm_drv.c drm_fops.c drm_gem.c drm_ioctl.c drm_irq.c \
+       drm_lock.c drm_memory.c drm_proc.c drm_stub.c drm_vm.c \
+       drm_agpsupport.c drm_scatter.c ati_pcigart.c drm_pci.c \
+       drm_sysfs.c drm_hashtab.c drm_sman.c drm_mm.c \
+       drm_crtc.c drm_modes.c drm_edid.c \
+       drm_info.c drm_debugfs.c drm_encoder_slave.c
+
+EXT_SOURCES := $(addprefix $(EXT_BUILD_DIR)/,$(EXT_SOURCE_LIST))
+SOURCES += $(EXT_SOURCES)
+
+INCLUDES += \
+               -I$(KERNELDIR)/include/drm \
+               -I$(EXT_SOURCE_DIR)
+
+MODULE_CFLAGS += -DCONFIG_PCI -Wno-error
+
+include $(EURASIAROOT)/eurasiacon/build/linux/kbuild/Makefile.kbuild_subdir_common
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/ati_pcigart.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/ati_pcigart.c
new file mode 100644 (file)
index 0000000..628eae3
--- /dev/null
@@ -0,0 +1,195 @@
+/**
+ * \file ati_pcigart.c
+ * ATI PCI GART support
+ *
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Wed Dec 13 21:52:19 2000 by gareth@valinux.com
+ *
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+
+# define ATI_PCIGART_PAGE_SIZE         4096    /**< PCI GART page size */
+
+static int drm_ati_alloc_pcigart_table(struct drm_device *dev,
+                                      struct drm_ati_pcigart_info *gart_info)
+{
+       gart_info->table_handle = drm_pci_alloc(dev, gart_info->table_size,
+                                               PAGE_SIZE,
+                                               gart_info->table_mask);
+       if (gart_info->table_handle == NULL)    /* DMA-coherent allocation failed */
+               return -ENOMEM;
+
+       return 0;
+}
+
+static void drm_ati_free_pcigart_table(struct drm_device *dev,
+                                      struct drm_ati_pcigart_info *gart_info)
+{
+       drm_pci_free(dev, gart_info->table_handle);
+       gart_info->table_handle = NULL; /* guard against double free */
+}
+
+int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
+{
+       struct drm_sg_mem *entry = dev->sg;
+       unsigned long pages;
+       int i;
+       int max_pages;
+
+       /* we need to support large memory configurations */
+       if (!entry) {
+               DRM_ERROR("no scatter/gather memory!\n");
+               return 0;
+       }
+
+       if (gart_info->bus_addr) {
+
+               max_pages = (gart_info->table_size / sizeof(u32));
+               pages = (entry->pages <= max_pages)
+                 ? entry->pages : max_pages;
+
+               for (i = 0; i < pages; i++) {
+                       if (!entry->busaddr[i])
+                               break;  /* first hole marks end of mapped pages */
+                       pci_unmap_page(dev->pdev, entry->busaddr[i],
+                                        PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+               }
+
+               if (gart_info->gart_table_location == DRM_ATI_GART_MAIN)
+                       gart_info->bus_addr = 0;
+       }
+
+       if (gart_info->gart_table_location == DRM_ATI_GART_MAIN &&
+           gart_info->table_handle) {
+               drm_ati_free_pcigart_table(dev, gart_info);
+       }
+
+       return 1;       /* note: this API returns 1 on success, 0 on failure */
+}
+EXPORT_SYMBOL(drm_ati_pcigart_cleanup);
+
+int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
+{
+       struct drm_local_map *map = &gart_info->mapping;
+       struct drm_sg_mem *entry = dev->sg;
+       void *address = NULL;
+       unsigned long pages;
+       u32 *pci_gart = NULL, page_base, gart_idx;
+       dma_addr_t bus_address = 0;
+       int i, j, ret = 0;
+       int max_ati_pages, max_real_pages;
+
+       if (!entry) {
+               DRM_ERROR("no scatter/gather memory!\n");
+               goto done;
+       }
+
+       if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
+               DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n");
+
+               ret = drm_ati_alloc_pcigart_table(dev, gart_info);
+               if (ret) {
+                       DRM_ERROR("cannot allocate PCI GART page!\n");
+                       goto done;
+               }
+
+               pci_gart = gart_info->table_handle->vaddr;
+               address = gart_info->table_handle->vaddr;
+               bus_address = gart_info->table_handle->busaddr;
+       } else {
+               address = gart_info->addr;
+               bus_address = gart_info->bus_addr;
+               DRM_DEBUG("PCI: Gart Table: VRAM %08LX mapped at %08lX\n",
+                         (unsigned long long)bus_address,
+                         (unsigned long)address);
+       }
+
+
+       max_ati_pages = (gart_info->table_size / sizeof(u32));
+       max_real_pages = max_ati_pages / (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE);
+       pages = (entry->pages <= max_real_pages)
+           ? entry->pages : max_real_pages;
+
+       if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
+               memset(pci_gart, 0, max_ati_pages * sizeof(u32));
+       } else {
+               memset_io((void __iomem *)map->handle, 0, max_ati_pages * sizeof(u32));
+       }
+
+       gart_idx = 0;
+       for (i = 0; i < pages; i++) {
+               /* we need to support large memory configurations */
+               entry->busaddr[i] = pci_map_page(dev->pdev, entry->pagelist[i],
+                                                0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+               if (entry->busaddr[i] == 0) {   /* NOTE(review): 0 used as mapping-failure sentinel */
+                       DRM_ERROR("unable to map PCIGART pages!\n");
+                       drm_ati_pcigart_cleanup(dev, gart_info);
+                       address = NULL;
+                       bus_address = 0;
+                       goto done;
+               }
+               page_base = (u32) entry->busaddr[i];
+
+               for (j = 0; j < (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); j++) {
+                       u32 val;
+
+                       switch(gart_info->gart_reg_if) {
+                       case DRM_ATI_GART_IGP:
+                               val = page_base | 0xc;
+                               break;
+                       case DRM_ATI_GART_PCIE:
+                               val = (page_base >> 8) | 0xc;
+                               break;
+                       default:
+                       case DRM_ATI_GART_PCI:
+                               val = page_base;
+                               break;
+                       }
+                       if (gart_info->gart_table_location ==
+                           DRM_ATI_GART_MAIN)
+                               pci_gart[gart_idx] = cpu_to_le32(val);
+                       else
+                               DRM_WRITE32(map, gart_idx * sizeof(u32), val);
+                       gart_idx++;
+                       page_base += ATI_PCIGART_PAGE_SIZE;
+               }
+       }
+       ret = 1;        /* success (1 = success for this API) */
+
+#if defined(__i386__) || defined(__x86_64__)
+       wbinvd();
+#else
+       mb();
+#endif
+
+      done:
+       gart_info->addr = address;
+       gart_info->bus_addr = bus_address;
+       return ret;
+}
+EXPORT_SYMBOL(drm_ati_pcigart_init);
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_agpsupport.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_agpsupport.c
new file mode 100644 (file)
index 0000000..d68888f
--- /dev/null
@@ -0,0 +1,506 @@
+/**
+ * \file drm_agpsupport.c
+ * DRM support for AGP/GART backend
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include <linux/module.h>
+
+#if __OS_HAS_AGP
+
+#include <asm/agp.h>
+
+/**
+ * Get AGP information.
+ *
+ * \param dev DRM device.
+ * \param info pointer to the (output) drm_agp_info structure, filled from
+ *             drm_agp_head::agp_info.
+ *
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device has been initialized and acquired and fills in the
+ * drm_agp_info structure with the information in drm_agp_head::agp_info.
+ */
+int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info)
+{
+       DRM_AGP_KERN *kern;
+
+       if (!dev->agp || !dev->agp->acquired)
+               return -EINVAL;
+
+       kern = &dev->agp->agp_info;
+       info->agp_version_major = kern->version.major;
+       info->agp_version_minor = kern->version.minor;
+       info->mode = kern->mode;
+       info->aperture_base = kern->aper_base;
+       info->aperture_size = kern->aper_size * 1024 * 1024;
+       info->memory_allowed = kern->max_memory << PAGE_SHIFT;
+       info->memory_used = kern->current_memory << PAGE_SHIFT;
+       info->id_vendor = kern->device->vendor;
+       info->id_device = kern->device->device;
+
+       return 0;
+}
+
+EXPORT_SYMBOL(drm_agp_info);
+
+int drm_agp_info_ioctl(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv)
+{
+       struct drm_agp_info *info = data;
+       int err;
+
+       err = drm_agp_info(dev, info);  /* fills *info in place */
+       if (err)
+               return err;
+
+       return 0;
+}
+
+/**
+ * Acquire the AGP device.
+ *
+ * \param dev DRM device that is to acquire AGP.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device hasn't been acquired before and calls
+ * \c agp_backend_acquire.
+ */
+int drm_agp_acquire(struct drm_device * dev)
+{
+       if (!dev->agp)
+               return -ENODEV;
+       if (dev->agp->acquired)
+               return -EBUSY;
+       if (!(dev->agp->bridge = agp_backend_acquire(dev->pdev)))
+               return -ENODEV;
+       dev->agp->acquired = 1; /* cleared again in drm_agp_release() */
+       return 0;
+}
+
+EXPORT_SYMBOL(drm_agp_acquire);
+
+/**
+ * Acquire the AGP device (ioctl).
+ *
+ * \param dev DRM device.
+ * \param file_priv DRM file private.
+ * \param data unused.
+ *
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device hasn't been acquired before and calls
+ * \c agp_backend_acquire.
+ */
+int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
+                         struct drm_file *file_priv)
+{
+       return drm_agp_acquire((struct drm_device *) file_priv->minor->dev);    /* same dev as the ioctl's; cast is redundant */
+}
+
+/**
+ * Release the AGP device.
+ *
+ * \param dev DRM device that is to release AGP.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device has been acquired and calls \c agp_backend_release.
+ */
+int drm_agp_release(struct drm_device * dev)
+{
+       if (!dev->agp || !dev->agp->acquired)
+               return -EINVAL;
+       agp_backend_release(dev->agp->bridge);
+       dev->agp->acquired = 0; /* paired with drm_agp_acquire() */
+       return 0;
+}
+EXPORT_SYMBOL(drm_agp_release);
+
+int drm_agp_release_ioctl(struct drm_device *dev, void *data,
+                         struct drm_file *file_priv)
+{
+       return drm_agp_release(dev);
+}
+
+/**
+ * Enable the AGP bus.
+ *
+ * \param dev DRM device that has previously acquired AGP.
+ * \param mode Requested AGP mode.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device has been acquired but not enabled, and calls
+ * \c agp_enable.
+ */
+int drm_agp_enable(struct drm_device * dev, struct drm_agp_mode mode)
+{
+       if (!dev->agp || !dev->agp->acquired)
+               return -EINVAL;
+
+       dev->agp->mode = mode.mode;
+       agp_enable(dev->agp->bridge, mode.mode);
+       dev->agp->enabled = 1;  /* note: no check that it was not already enabled */
+       return 0;
+}
+
+EXPORT_SYMBOL(drm_agp_enable);
+
+int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv)
+{
+       struct drm_agp_mode *mode = data;
+
+       return drm_agp_enable(dev, *mode);
+}
+
+/**
+ * Allocate AGP memory.
+ *
+ * \param dev DRM device.
+ * \param request pointer to a drm_agp_buffer structure; on success the
+ *                handle and physical address are filled in.
+ *
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device is present and has been acquired, allocates the
+ * memory via alloc_agp() and creates a drm_agp_mem entry for it.
+ */
+int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
+{
+       struct drm_agp_mem *entry;
+       DRM_AGP_MEM *memory;
+       unsigned long pages;
+       u32 type;
+
+       if (!dev->agp || !dev->agp->acquired)
+               return -EINVAL;
+       if (!(entry = kmalloc(sizeof(*entry), GFP_KERNEL)))
+               return -ENOMEM;
+
+       memset(entry, 0, sizeof(*entry));
+
+       pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;    /* round size up to pages */
+       type = (u32) request->type;
+       if (!(memory = drm_alloc_agp(dev, pages, type))) {
+               kfree(entry);
+               return -ENOMEM;
+       }
+
+       entry->handle = (unsigned long)memory->key + 1; /* +1 so a valid handle is never 0 */
+       entry->memory = memory;
+       entry->bound = 0;
+       entry->pages = pages;
+       list_add(&entry->head, &dev->agp->memory);
+
+       request->handle = entry->handle;
+       request->physical = memory->physical;
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_agp_alloc);
+
+
+int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv)
+{
+       struct drm_agp_buffer *request = data;
+
+       return drm_agp_alloc(dev, request);
+}
+
+/**
+ * Search for the AGP memory entry associated with a handle.
+ *
+ * \param dev DRM device structure.
+ * \param handle AGP memory handle.
+ * \return pointer to the drm_agp_mem structure associated with \p handle.
+ *
+ * Walks through drm_agp_head::memory until finding a matching handle.
+ */
+static struct drm_agp_mem *drm_agp_lookup_entry(struct drm_device * dev,
+                                          unsigned long handle)
+{
+       struct drm_agp_mem *entry;
+
+       list_for_each_entry(entry, &dev->agp->memory, head) {
+               if (entry->handle == handle)
+                       return entry;
+       }
+       return NULL;    /* no match */
+}
+
+/**
+ * Unbind AGP memory from the GATT (ioctl).
+ *
+ * \param dev DRM device.
+ * \param request binding request; handle identifies the memory.
+ *
+ * The memory must currently be bound.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device is present and acquired, looks-up the AGP memory
+ * entry and passes it to the unbind_agp() function.
+ */
+int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request)
+{
+       struct drm_agp_mem *entry;
+       int ret;
+
+       if (!dev->agp || !dev->agp->acquired)
+               return -EINVAL;
+       if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
+               return -EINVAL;
+       if (!entry->bound)
+               return -EINVAL;
+       ret = drm_unbind_agp(entry->memory);
+       if (ret == 0)
+               entry->bound = 0;
+       return ret;
+}
+EXPORT_SYMBOL(drm_agp_unbind);
+
+
+int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv)
+{
+       struct drm_agp_binding *request = data;
+
+       return drm_agp_unbind(dev, request);
+}
+
+/**
+ * Bind AGP memory into the GATT (ioctl)
+ *
+ * \param dev DRM device.
+ * \param request binding request: handle and byte offset into the aperture.
+ *
+ * The memory must not already be bound.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device is present and has been acquired and that no memory
+ * is currently bound into the GATT. Looks-up the AGP memory entry and passes
+ * it to bind_agp() function.
+ */
+int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request)
+{
+       struct drm_agp_mem *entry;
+       int retcode;
+       int page;
+
+       if (!dev->agp || !dev->agp->acquired)
+               return -EINVAL;
+       if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
+               return -EINVAL;
+       if (entry->bound)
+               return -EINVAL;
+       page = (request->offset + PAGE_SIZE - 1) / PAGE_SIZE;   /* offset rounded up to a page index */
+       if ((retcode = drm_bind_agp(entry->memory, page)))
+               return retcode;
+       entry->bound = dev->agp->base + (page << PAGE_SHIFT);   /* bound = bus address, doubles as "is bound" flag */
+       DRM_DEBUG("base = 0x%lx entry->bound = 0x%lx\n",
+                 dev->agp->base, entry->bound);
+       return 0;
+}
+EXPORT_SYMBOL(drm_agp_bind);
+
+
+int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv)
+{
+       struct drm_agp_binding *request = data;
+
+       return drm_agp_bind(dev, request);
+}
+
+/**
+ * Free AGP memory (ioctl).
+ *
+ * \param dev DRM device.
+ * \param request pointer to a drm_agp_buffer structure with the handle.
+ *
+ * Bound memory is unbound first.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device is present and has been acquired and looks up the
+ * AGP memory entry. If the memory it's currently bound, unbind it via
+ * unbind_agp(). Frees it via free_agp() as well as the entry itself
+ * and unlinks from the doubly linked list it's inserted in.
+ */
+int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request)
+{
+       struct drm_agp_mem *entry;
+
+       if (!dev->agp || !dev->agp->acquired)
+               return -EINVAL;
+       if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
+               return -EINVAL;
+       if (entry->bound)
+               drm_unbind_agp(entry->memory);  /* unbind result deliberately ignored */
+
+       list_del(&entry->head);
+
+       drm_free_agp(entry->memory, entry->pages);
+       kfree(entry);
+       return 0;
+}
+EXPORT_SYMBOL(drm_agp_free);
+
+
+
+int drm_agp_free_ioctl(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv)
+{
+       struct drm_agp_buffer *request = data;
+
+       return drm_agp_free(dev, request);
+}
+
+/**
+ * Initialize the AGP resources.
+ *
+ * \return pointer to a drm_agp_head structure.
+ *
+ * Gets the drm_agp_t structure which is made available by the agpgart module
+ * via the inter_module_* functions. Creates and initializes a drm_agp_head
+ * structure.
+ */
+struct drm_agp_head *drm_agp_init(struct drm_device *dev)
+{
+       struct drm_agp_head *head = NULL;
+
+       if (!(head = kmalloc(sizeof(*head), GFP_KERNEL)))
+               return NULL;
+       memset((void *)head, 0, sizeof(*head));
+       head->bridge = agp_find_bridge(dev->pdev);
+       if (!head->bridge) {
+               if (!(head->bridge = agp_backend_acquire(dev->pdev))) {
+                       kfree(head);
+                       return NULL;
+               }
+               agp_copy_info(head->bridge, &head->agp_info);
+               agp_backend_release(head->bridge);
+       } else {
+               agp_copy_info(head->bridge, &head->agp_info);
+       }
+       if (head->agp_info.chipset == NOT_SUPPORTED) {
+               kfree(head);
+               return NULL;
+       }
+       INIT_LIST_HEAD(&head->memory);
+       head->cant_use_aperture = head->agp_info.cant_use_aperture;
+       head->page_mask = head->agp_info.page_mask;
+       head->base = head->agp_info.aper_base;
+       return head;
+}
+
+/** Calls agp_allocate_memory() */
+DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data * bridge,
+                                    size_t pages, u32 type)
+{
+       return agp_allocate_memory(bridge, pages, type);
+}
+
+/** Calls agp_free_memory() */
+int drm_agp_free_memory(DRM_AGP_MEM * handle)
+{
+       if (!handle)
+               return 0;
+       agp_free_memory(handle);
+       return 1;
+}
+
+/** Calls agp_bind_memory() */
+int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start)
+{
+       if (!handle)
+               return -EINVAL;
+       return agp_bind_memory(handle, start);
+}
+
+/** Calls agp_unbind_memory() */
+int drm_agp_unbind_memory(DRM_AGP_MEM * handle)
+{
+       if (!handle)
+               return -EINVAL;
+       return agp_unbind_memory(handle);
+}
+
+/**
+ * Binds a collection of pages into AGP memory at the given offset, returning
+ * the AGP memory structure containing them.
+ *
+ * No reference is held on the pages during this time -- it is up to the
+ * caller to handle that.
+ */
+DRM_AGP_MEM *
+drm_agp_bind_pages(struct drm_device *dev,
+                  struct page **pages,
+                  unsigned long num_pages,
+                  uint32_t gtt_offset,
+                  u32 type)
+{
+       DRM_AGP_MEM *mem;
+       int ret, i;
+
+       DRM_DEBUG("\n");
+
+       mem = drm_agp_allocate_memory(dev->agp->bridge, num_pages,
+                                     type);
+       if (mem == NULL) {
+               DRM_ERROR("Failed to allocate memory for %ld pages\n",
+                         num_pages);
+               return NULL;
+       }
+
+       for (i = 0; i < num_pages; i++)
+               mem->pages[i] = pages[i];
+       mem->page_count = num_pages;
+
+       mem->is_flushed = true;
+       ret = drm_agp_bind_memory(mem, gtt_offset / PAGE_SIZE);
+       if (ret != 0) {
+               DRM_ERROR("Failed to bind AGP memory: %d\n", ret);
+               agp_free_memory(mem);
+               return NULL;
+       }
+
+       return mem;
+}
+EXPORT_SYMBOL(drm_agp_bind_pages);
+
+void drm_agp_chipset_flush(struct drm_device *dev)
+{
+       agp_flush_chipset(dev->agp->bridge);
+}
+EXPORT_SYMBOL(drm_agp_chipset_flush);
+
+#endif /* __OS_HAS_AGP */
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_auth.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_auth.c
new file mode 100644 (file)
index 0000000..932b5aa
--- /dev/null
@@ -0,0 +1,191 @@
+/**
+ * \file drm_auth.c
+ * IOCTLs for authentication
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Tue Feb  2 08:37:54 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+
+/**
+ * Find the file with the given magic number.
+ *
+ * \param dev DRM device.
+ * \param magic magic number.
+ *
+ * Searches in drm_device::magiclist within all files with the same hash key
+ * the one with matching magic number, while holding the drm_device::struct_mutex
+ * lock.
+ */
+static struct drm_file *drm_find_file(struct drm_master *master, drm_magic_t magic)
+{
+       struct drm_file *retval = NULL;
+       struct drm_magic_entry *pt;
+       struct drm_hash_item *hash;
+       struct drm_device *dev = master->minor->dev;
+
+       mutex_lock(&dev->struct_mutex);
+       if (!drm_ht_find_item(&master->magiclist, (unsigned long)magic, &hash)) {
+               pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item);
+               retval = pt->priv;
+       }
+       mutex_unlock(&dev->struct_mutex);
+       return retval;
+}
+
+/**
+ * Adds a magic number.
+ *
+ * \param dev DRM device.
+ * \param priv file private data.
+ * \param magic magic number.
+ *
+ * Creates a drm_magic_entry structure and appends to the linked list
+ * associated the magic number hash key in drm_device::magiclist, while holding
+ * the drm_device::struct_mutex lock.
+ */
+static int drm_add_magic(struct drm_master *master, struct drm_file *priv,
+                        drm_magic_t magic)
+{
+       struct drm_magic_entry *entry;
+       struct drm_device *dev = master->minor->dev;
+       DRM_DEBUG("%d\n", magic);
+
+       entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+       if (!entry)
+               return -ENOMEM;
+       memset(entry, 0, sizeof(*entry));
+       entry->priv = priv;
+       entry->hash_item.key = (unsigned long)magic;
+       mutex_lock(&dev->struct_mutex);
+       drm_ht_insert_item(&master->magiclist, &entry->hash_item);
+       list_add_tail(&entry->head, &master->magicfree);
+       mutex_unlock(&dev->struct_mutex);
+
+       return 0;
+}
+
+/**
+ * Remove a magic number.
+ *
+ * \param dev DRM device.
+ * \param magic magic number.
+ *
+ * Searches and unlinks the entry in drm_device::magiclist with the magic
+ * number hash key, while holding the drm_device::struct_mutex lock.
+ */
+static int drm_remove_magic(struct drm_master *master, drm_magic_t magic)
+{
+       struct drm_magic_entry *pt;
+       struct drm_hash_item *hash;
+       struct drm_device *dev = master->minor->dev;
+
+       DRM_DEBUG("%d\n", magic);
+
+       mutex_lock(&dev->struct_mutex);
+       if (drm_ht_find_item(&master->magiclist, (unsigned long)magic, &hash)) {
+               mutex_unlock(&dev->struct_mutex);
+               return -EINVAL;
+       }
+       pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item);
+       drm_ht_remove_item(&master->magiclist, hash);
+       list_del(&pt->head);
+       mutex_unlock(&dev->struct_mutex);
+
+       kfree(pt);
+
+       return 0;
+}
+
+/**
+ * Get a unique magic number (ioctl).
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a resulting drm_auth structure.
+ * \return zero on success, or a negative number on failure.
+ *
+ * If there is a magic number in drm_file::magic then use it, otherwise
+ * searches for a unique non-zero magic number and adds it, associating it with \p
+ * file_priv.
+ */
+int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       static drm_magic_t sequence = 0;
+       static DEFINE_SPINLOCK(lock);
+       struct drm_auth *auth = data;
+
+       /* Find unique magic */
+       if (file_priv->magic) {
+               auth->magic = file_priv->magic;
+       } else {
+               do {
+                       spin_lock(&lock);
+                       if (!sequence)
+                               ++sequence;     /* reserve 0 */
+                       auth->magic = sequence++;
+                       spin_unlock(&lock);
+               } while (drm_find_file(file_priv->master, auth->magic));
+               file_priv->magic = auth->magic;
+               drm_add_magic(file_priv->master, file_priv, auth->magic);
+       }
+
+       DRM_DEBUG("%u\n", auth->magic);
+
+       return 0;
+}
+
+/**
+ * Authenticate with a magic.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_auth structure.
+ * \return zero if authentication succeeded, or a negative number otherwise.
+ *
+ * Checks if \p file_priv is associated with the magic number passed in \p arg.
+ */
+int drm_authmagic(struct drm_device *dev, void *data,
+                 struct drm_file *file_priv)
+{
+       struct drm_auth *auth = data;
+       struct drm_file *file;
+
+       DRM_DEBUG("%u\n", auth->magic);
+       if ((file = drm_find_file(file_priv->master, auth->magic))) {
+               file->authenticated = 1;
+               drm_remove_magic(file_priv->master, auth->magic);
+               return 0;
+       }
+       return -EINVAL;
+}
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_bufs.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_bufs.c
new file mode 100644 (file)
index 0000000..3d09e30
--- /dev/null
@@ -0,0 +1,1651 @@
+/**
+ * \file drm_bufs.c
+ * Generic buffer template
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/log2.h>
+#include <asm/shmparam.h>
+#include "drmP.h"
+
+resource_size_t drm_get_resource_start(struct drm_device *dev, unsigned int resource)
+{
+       return pci_resource_start(dev->pdev, resource);
+}
+EXPORT_SYMBOL(drm_get_resource_start);
+
+resource_size_t drm_get_resource_len(struct drm_device *dev, unsigned int resource)
+{
+       return pci_resource_len(dev->pdev, resource);
+}
+
+EXPORT_SYMBOL(drm_get_resource_len);
+
+static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
+                                                 struct drm_local_map *map)
+{
+       struct drm_map_list *entry;
+       list_for_each_entry(entry, &dev->maplist, head) {
+               /*
+                * Because the kernel-userspace ABI is fixed at a 32-bit offset
+                * while PCI resources may live above that, we ignore the map
+                * offset for maps of type _DRM_FRAMEBUFFER or _DRM_REGISTERS.
+                * It is assumed that each driver will have only one resource of
+                * each type.
+                */
+               if (!entry->map ||
+                   map->type != entry->map->type ||
+                   entry->master != dev->primary->master)
+                       continue;
+               switch (map->type) {
+               case _DRM_SHM:
+                       if (map->flags != _DRM_CONTAINS_LOCK)
+                               break;
+               case _DRM_REGISTERS:
+               case _DRM_FRAME_BUFFER:
+                       return entry;
+               default: /* Make gcc happy */
+                       ;
+               }
+               if (entry->map->offset == map->offset)
+                       return entry;
+       }
+
+       return NULL;
+}
+
+static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
+                         unsigned long user_token, int hashed_handle, int shm)
+{
+       int use_hashed_handle, shift;
+       unsigned long add;
+
+#if (BITS_PER_LONG == 64)
+       use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
+#elif (BITS_PER_LONG == 32)
+       use_hashed_handle = hashed_handle;
+#else
+#error Unsupported long size. Neither 64 nor 32 bits.
+#endif
+
+       if (!use_hashed_handle) {
+               int ret;
+               hash->key = user_token >> PAGE_SHIFT;
+               ret = drm_ht_insert_item(&dev->map_hash, hash);
+               if (ret != -EINVAL)
+                       return ret;
+       }
+
+       shift = 0;
+       add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
+       if (shm && (SHMLBA > PAGE_SIZE)) {
+               int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;
+
+               /* For shared memory, we have to preserve the SHMLBA
+                * bits of the eventual vma->vm_pgoff value during
+                * mmap().  Otherwise we run into cache aliasing problems
+                * on some platforms.  On these platforms, the pgoff of
+                * a mmap() request is used to pick a suitable virtual
+                * address for the mmap() region such that it will not
+                * cause cache aliasing problems.
+                *
+                * Therefore, make sure the SHMLBA relevant bits of the
+                * hash value we use are equal to those in the original
+                * kernel virtual address.
+                */
+               shift = bits;
+               add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
+       }
+
+       return drm_ht_just_insert_please(&dev->map_hash, hash,
+                                        user_token, 32 - PAGE_SHIFT - 3,
+                                        shift, add);
+}
+
+/**
+ * Core function to create a range of memory available for mapping by a
+ * non-root process.
+ *
+ * Adjusts the memory offset to its absolute value according to the mapping
+ * type.  Adds the map to the map list drm_device::maplist. Adds MTRR's where
+ * applicable and if supported by the kernel.
+ */
+static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
+                          unsigned int size, enum drm_map_type type,
+                          enum drm_map_flags flags,
+                          struct drm_map_list ** maplist)
+{
+       struct drm_local_map *map;
+       struct drm_map_list *list;
+       drm_dma_handle_t *dmah;
+       unsigned long user_token;
+       int ret;
+
+       map = kmalloc(sizeof(*map), GFP_KERNEL);
+       if (!map)
+               return -ENOMEM;
+
+       map->offset = offset;
+       map->size = size;
+       map->flags = flags;
+       map->type = type;
+
+       /* Only allow shared memory to be removable since we only keep enough
+        * book keeping information about shared memory to allow for removal
+        * when processes fork.
+        */
+       if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
+               kfree(map);
+               return -EINVAL;
+       }
+       DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
+                 (unsigned long long)map->offset, map->size, map->type);
+
+       /* page-align _DRM_SHM maps. They are allocated here so there is no security
+        * hole created by that and it works around various broken drivers that use
+        * a non-aligned quantity to map the SAREA. --BenH
+        */
+       if (map->type == _DRM_SHM)
+               map->size = PAGE_ALIGN(map->size);
+
+       if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
+               kfree(map);
+               return -EINVAL;
+       }
+       map->mtrr = -1;
+       map->handle = NULL;
+
+       switch (map->type) {
+       case _DRM_REGISTERS:
+       case _DRM_FRAME_BUFFER:
+#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__)
+               if (map->offset + (map->size-1) < map->offset ||
+                   map->offset < virt_to_phys(high_memory)) {
+                       kfree(map);
+                       return -EINVAL;
+               }
+#endif
+#ifdef __alpha__
+               map->offset += dev->hose->mem_space->start;
+#endif
+               /* Some drivers preinitialize some maps, without the X Server
+                * needing to be aware of it.  Therefore, we just return success
+                * when the server tries to create a duplicate map.
+                */
+               list = drm_find_matching_map(dev, map);
+               if (list != NULL) {
+                       if (list->map->size != map->size) {
+                               DRM_DEBUG("Matching maps of type %d with "
+                                         "mismatched sizes, (%ld vs %ld)\n",
+                                         map->type, map->size,
+                                         list->map->size);
+                               list->map->size = map->size;
+                       }
+
+                       kfree(map);
+                       *maplist = list;
+                       return 0;
+               }
+
+               if (drm_core_has_MTRR(dev)) {
+                       if (map->type == _DRM_FRAME_BUFFER ||
+                           (map->flags & _DRM_WRITE_COMBINING)) {
+                               map->mtrr = mtrr_add(map->offset, map->size,
+                                                    MTRR_TYPE_WRCOMB, 1);
+                       }
+               }
+               if (map->type == _DRM_REGISTERS) {
+                       map->handle = ioremap(map->offset, map->size);
+                       if (!map->handle) {
+                               kfree(map);
+                               return -ENOMEM;
+                       }
+               }
+
+               break;
+       case _DRM_SHM:
+               list = drm_find_matching_map(dev, map);
+               if (list != NULL) {
+                       if(list->map->size != map->size) {
+                               DRM_DEBUG("Matching maps of type %d with "
+                                         "mismatched sizes, (%ld vs %ld)\n",
+                                         map->type, map->size, list->map->size);
+                               list->map->size = map->size;
+                       }
+
+                       kfree(map);
+                       *maplist = list;
+                       return 0;
+               }
+               map->handle = vmalloc_user(map->size);
+               DRM_DEBUG("%lu %d %p\n",
+                         map->size, drm_order(map->size), map->handle);
+               if (!map->handle) {
+                       kfree(map);
+                       return -ENOMEM;
+               }
+               map->offset = (unsigned long)map->handle;
+               if (map->flags & _DRM_CONTAINS_LOCK) {
+                       /* Prevent a 2nd X Server from creating a 2nd lock */
+                       if (dev->primary->master->lock.hw_lock != NULL) {
+                               vfree(map->handle);
+                               kfree(map);
+                               return -EBUSY;
+                       }
+                       dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle;   /* Pointer to lock */
+               }
+               break;
+       case _DRM_AGP: {
+               struct drm_agp_mem *entry;
+               int valid = 0;
+
+               if (!drm_core_has_AGP(dev)) {
+                       kfree(map);
+                       return -EINVAL;
+               }
+#ifdef __alpha__
+               map->offset += dev->hose->mem_space->start;
+#endif
+               /* In some cases (i810 driver), user space may have already
+                * added the AGP base itself, because dev->agp->base previously
+                * only got set during AGP enable.  So, only add the base
+                * address if the map's offset isn't already within the
+                * aperture.
+                */
+               if (map->offset < dev->agp->base ||
+                   map->offset > dev->agp->base +
+                   dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
+                       map->offset += dev->agp->base;
+               }
+               map->mtrr = dev->agp->agp_mtrr; /* for getmap */
+
+               /* This assumes the DRM is in total control of AGP space.
+                * It's not always the case as AGP can be in the control
+                * of user space (i.e. i810 driver). So this loop will get
+                * skipped and we double check that dev->agp->memory is
+                * actually set as well as being invalid before EPERM'ing
+                */
+               list_for_each_entry(entry, &dev->agp->memory, head) {
+                       if ((map->offset >= entry->bound) &&
+                           (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
+                               valid = 1;
+                               break;
+                       }
+               }
+               if (!list_empty(&dev->agp->memory) && !valid) {
+                       kfree(map);
+                       return -EPERM;
+               }
+               DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
+                         (unsigned long long)map->offset, map->size);
+
+               break;
+       }
+       case _DRM_GEM:
+               DRM_ERROR("tried to addmap GEM object\n");
+               break;
+       case _DRM_SCATTER_GATHER:
+               if (!dev->sg) {
+                       kfree(map);
+                       return -EINVAL;
+               }
+               map->offset += (unsigned long)dev->sg->virtual;
+               break;
+       case _DRM_CONSISTENT:
+               /* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G,
+                * As we're limiting the address to 2^32-1 (or less),
+                * casting it down to 32 bits is no problem, but we
+                * need to point to a 64bit variable first. */
+               dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
+               if (!dmah) {
+                       kfree(map);
+                       return -ENOMEM;
+               }
+               map->handle = dmah->vaddr;
+               map->offset = (unsigned long)dmah->busaddr;
+               kfree(dmah);
+               break;
+       default:
+               kfree(map);
+               return -EINVAL;
+       }
+
+       list = kmalloc(sizeof(*list), GFP_KERNEL);
+       if (!list) {
+               if (map->type == _DRM_REGISTERS)
+                       iounmap(map->handle);
+               kfree(map);
+               return -EINVAL;
+       }
+       memset(list, 0, sizeof(*list));
+       list->map = map;
+
+       mutex_lock(&dev->struct_mutex);
+       list_add(&list->head, &dev->maplist);
+
+       /* Assign a 32-bit handle */
+       /* We do it here so that dev->struct_mutex protects the increment */
+       user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
+               map->offset;
+       ret = drm_map_handle(dev, &list->hash, user_token, 0,
+                            (map->type == _DRM_SHM));
+       if (ret) {
+               if (map->type == _DRM_REGISTERS)
+                       iounmap(map->handle);
+               kfree(map);
+               kfree(list);
+               mutex_unlock(&dev->struct_mutex);
+               return ret;
+       }
+
+       list->user_token = list->hash.key << PAGE_SHIFT;
+       mutex_unlock(&dev->struct_mutex);
+
+       if (!(map->flags & _DRM_DRIVER))
+               list->master = dev->primary->master;
+       *maplist = list;
+       return 0;
+       }
+
+int drm_addmap(struct drm_device * dev, resource_size_t offset,
+              unsigned int size, enum drm_map_type type,
+              enum drm_map_flags flags, struct drm_local_map ** map_ptr)
+{
+       struct drm_map_list *list;
+       int rc;
+
+       rc = drm_addmap_core(dev, offset, size, type, flags, &list);
+       if (!rc)
+               *map_ptr = list->map;
+       return rc;
+}
+
+EXPORT_SYMBOL(drm_addmap);
+
+/**
+ * Ioctl to specify a range of memory that is available for mapping by a
+ * non-root process.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_map structure.
+ * \return zero on success or a negative value on error.
+ *
+ */
+int drm_addmap_ioctl(struct drm_device *dev, void *data,
+                    struct drm_file *file_priv)
+{
+       struct drm_map *map = data;
+       struct drm_map_list *maplist;
+       int err;
+
+       if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
+               return -EPERM;
+
+       err = drm_addmap_core(dev, map->offset, map->size, map->type,
+                             map->flags, &maplist);
+
+       if (err)
+               return err;
+
+       /* avoid a warning on 64-bit, this casting isn't very nice, but the API is set so too late */
+       map->handle = (void *)(unsigned long)maplist->user_token;
+       return 0;
+}
+
+/**
+ * Remove a map private from list and deallocate resources if the mapping
+ * isn't in use.
+ *
+ * Searches the map on drm_device::maplist, removes it from the list, sees if
+ * it is being used, and frees any associated resources (such as MTRR's) if it
+ * is not in use.
+ *
+ * \sa drm_addmap
+ */
+int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
+{
+       struct drm_map_list *r_list = NULL, *list_t;
+       drm_dma_handle_t dmah;
+       int found = 0;
+       struct drm_master *master;
+
+       /* Find the list entry for the map and remove it */
+       list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
+               if (r_list->map == map) {
+                       master = r_list->master;
+                       list_del(&r_list->head);
+                       drm_ht_remove_key(&dev->map_hash,
+                                         r_list->user_token >> PAGE_SHIFT);
+                       kfree(r_list);
+                       found = 1;
+                       break;
+               }
+       }
+
+       if (!found)
+               return -EINVAL;
+
+       switch (map->type) {
+       case _DRM_REGISTERS:
+               iounmap(map->handle);
+               /* FALLTHROUGH */
+       case _DRM_FRAME_BUFFER:
+               if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
+                       int retcode;
+                       retcode = mtrr_del(map->mtrr, map->offset, map->size);
+                       DRM_DEBUG("mtrr_del=%d\n", retcode);
+               }
+               break;
+       case _DRM_SHM:
+               vfree(map->handle);
+               if (master) {
+                       if (dev->sigdata.lock == master->lock.hw_lock)
+                               dev->sigdata.lock = NULL;
+                       master->lock.hw_lock = NULL;   /* SHM removed */
+                       master->lock.file_priv = NULL;
+                       wake_up_interruptible_all(&master->lock.lock_queue);
+               }
+               break;
+       case _DRM_AGP:
+       case _DRM_SCATTER_GATHER:
+               break;
+       case _DRM_CONSISTENT:
+               dmah.vaddr = map->handle;
+               dmah.busaddr = map->offset;
+               dmah.size = map->size;
+               __drm_pci_free(dev, &dmah);
+               break;
+       case _DRM_GEM:
+               DRM_ERROR("tried to rmmap GEM object\n");
+               break;
+       }
+       kfree(map);
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_rmmap_locked);
+
+int drm_rmmap(struct drm_device *dev, struct drm_local_map *map)
+{
+       int ret;
+
+       mutex_lock(&dev->struct_mutex);
+       ret = drm_rmmap_locked(dev, map);
+       mutex_unlock(&dev->struct_mutex);
+
+       return ret;
+}
+EXPORT_SYMBOL(drm_rmmap);
+
+/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
+ * the last close of the device, and this is necessary for cleanup when things
+ * exit uncleanly.  Therefore, having userland manually remove mappings seems
+ * like a pointless exercise since they're going away anyway.
+ *
+ * One use case might be after addmap is allowed for normal users for SHM and
+ * gets used by drivers that the server doesn't need to care about.  This seems
+ * unlikely.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a struct drm_map structure.
+ * \return zero on success or a negative value on error.
+ */
+int drm_rmmap_ioctl(struct drm_device *dev, void *data,
+                   struct drm_file *file_priv)
+{
+       struct drm_map *request = data;
+       struct drm_local_map *map = NULL;
+       struct drm_map_list *r_list;
+       int ret;
+
+       mutex_lock(&dev->struct_mutex);
+       /* Find the map whose user-space token matches the handle passed in,
+        * and which is flagged _DRM_REMOVABLE; other maps may not be removed
+        * through this ioctl.
+        */
+       list_for_each_entry(r_list, &dev->maplist, head) {
+               if (r_list->map &&
+                   r_list->user_token == (unsigned long)request->handle &&
+                   r_list->map->flags & _DRM_REMOVABLE) {
+                       map = r_list->map;
+                       break;
+               }
+       }
+
+       /* List has wrapped around to the head pointer, or its empty we didn't
+        * find anything.
+        */
+       if (list_empty(&dev->maplist) || !map) {
+               mutex_unlock(&dev->struct_mutex);
+               return -EINVAL;
+       }
+
+       /* Register and framebuffer maps are permanent */
+       if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
+               mutex_unlock(&dev->struct_mutex);
+               return 0;
+       }
+
+       ret = drm_rmmap_locked(dev, map);
+
+       mutex_unlock(&dev->struct_mutex);
+
+       return ret;
+}
+
+/**
+ * Cleanup after an error on one of the addbufs() functions.
+ *
+ * \param dev DRM device.
+ * \param entry buffer entry where the error occurred.
+ *
+ * Frees any pages and buffers associated with the given entry.
+ */
+static void drm_cleanup_buf_error(struct drm_device * dev,
+                                 struct drm_buf_entry * entry)
+{
+       int i;
+
+       /* First pass: release every DMA segment backing this entry. */
+       if (entry->seg_count) {
+               for (i = 0; i < entry->seg_count; i++) {
+                       if (entry->seglist[i]) {
+                               drm_pci_free(dev, entry->seglist[i]);
+                       }
+               }
+               kfree(entry->seglist);
+
+               entry->seg_count = 0;
+       }
+
+       /* Second pass: release per-buffer driver private data, then the
+        * buffer array itself.
+        */
+       if (entry->buf_count) {
+               for (i = 0; i < entry->buf_count; i++) {
+                       kfree(entry->buflist[i].dev_private);
+               }
+               kfree(entry->buflist);
+
+               entry->buf_count = 0;
+       }
+}
+
+#if __OS_HAS_AGP
+/**
+ * Add AGP buffers for DMA transfers.
+ *
+ * \param dev struct drm_device to which the buffers are to be added.
+ * \param request pointer to a struct drm_buf_desc describing the request.
+ * \return zero on success or a negative number on failure.
+ *
+ * After some sanity checks creates a drm_buf structure for each buffer and
+ * reallocates the buffer list of the same size order to accommodate the new
+ * buffers.
+ */
+int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
+{
+       struct drm_device_dma *dma = dev->dma;
+       struct drm_buf_entry *entry;
+       struct drm_agp_mem *agp_entry;
+       struct drm_buf *buf;
+       unsigned long offset;
+       unsigned long agp_offset;
+       int count;
+       int order;
+       int size;
+       int alignment;
+       int page_order;
+       int total;
+       int byte_count;
+       int i, valid;
+       struct drm_buf **temp_buflist;
+
+       if (!dma)
+               return -EINVAL;
+
+       count = request->count;
+       order = drm_order(request->size);
+       size = 1 << order;
+
+       /* Optionally round each buffer up to a page boundary. */
+       alignment = (request->flags & _DRM_PAGE_ALIGN)
+           ? PAGE_ALIGN(size) : size;
+       page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+       total = PAGE_SIZE << page_order;
+
+       byte_count = 0;
+       /* Buffers live at an offset within the AGP aperture. */
+       agp_offset = dev->agp->base + request->agp_start;
+
+       DRM_DEBUG("count:      %d\n", count);
+       DRM_DEBUG("order:      %d\n", order);
+       DRM_DEBUG("size:       %d\n", size);
+       DRM_DEBUG("agp_offset: %lx\n", agp_offset);
+       DRM_DEBUG("alignment:  %d\n", alignment);
+       DRM_DEBUG("page_order: %d\n", page_order);
+       DRM_DEBUG("total:      %d\n", total);
+
+       if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
+               return -EINVAL;
+       if (dev->queue_count)
+               return -EBUSY;  /* Not while in use */
+
+       /* Make sure buffers are located in AGP memory that we own */
+       valid = 0;
+       list_for_each_entry(agp_entry, &dev->agp->memory, head) {
+               if ((agp_offset >= agp_entry->bound) &&
+                   (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
+                       valid = 1;
+                       break;
+               }
+       }
+       if (!list_empty(&dev->agp->memory) && !valid) {
+               DRM_DEBUG("zone invalid\n");
+               return -EINVAL;
+       }
+       /* buf_use non-zero means userland is already using buffers, so no
+        * further allocation is allowed; buf_alloc marks an allocation in
+        * progress (dropped again on every exit path below).
+        */
+       spin_lock(&dev->count_lock);
+       if (dev->buf_use) {
+               spin_unlock(&dev->count_lock);
+               return -EBUSY;
+       }
+       atomic_inc(&dev->buf_alloc);
+       spin_unlock(&dev->count_lock);
+
+       mutex_lock(&dev->struct_mutex);
+       entry = &dma->bufs[order];
+       if (entry->buf_count) {
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -ENOMEM; /* May only call once for each order */
+       }
+
+       if (count < 0 || count > 4096) {
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -EINVAL;
+       }
+
+       entry->buflist = kmalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
+       if (!entry->buflist) {
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -ENOMEM;
+       }
+       memset(entry->buflist, 0, count * sizeof(*entry->buflist));
+
+       entry->buf_size = size;
+       entry->page_order = page_order;
+
+       offset = 0;
+
+       /* Carve the AGP region into count buffers of the aligned size. */
+       while (entry->buf_count < count) {
+               buf = &entry->buflist[entry->buf_count];
+               buf->idx = dma->buf_count + entry->buf_count;
+               buf->total = alignment;
+               buf->order = order;
+               buf->used = 0;
+
+               buf->offset = (dma->byte_count + offset);
+               buf->bus_address = agp_offset + offset;
+               buf->address = (void *)(agp_offset + offset);
+               buf->next = NULL;
+               buf->waiting = 0;
+               buf->pending = 0;
+               init_waitqueue_head(&buf->dma_wait);
+               buf->file_priv = NULL;
+
+               buf->dev_priv_size = dev->driver->dev_priv_size;
+               buf->dev_private = kmalloc(buf->dev_priv_size, GFP_KERNEL);
+               if (!buf->dev_private) {
+                       /* Set count correctly so we free the proper amount. */
+                       entry->buf_count = count;
+                       drm_cleanup_buf_error(dev, entry);
+                       mutex_unlock(&dev->struct_mutex);
+                       atomic_dec(&dev->buf_alloc);
+                       return -ENOMEM;
+               }
+               memset(buf->dev_private, 0, buf->dev_priv_size);
+
+               DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
+
+               offset += alignment;
+               entry->buf_count++;
+               byte_count += PAGE_SIZE << page_order;
+       }
+
+       DRM_DEBUG("byte_count: %d\n", byte_count);
+
+       /* Grow the device-wide buffer list to include the new entries. */
+       temp_buflist = krealloc(dma->buflist,
+                               (dma->buf_count + entry->buf_count) *
+                               sizeof(*dma->buflist), GFP_KERNEL);
+       if (!temp_buflist) {
+               /* Free the entry because it isn't valid */
+               drm_cleanup_buf_error(dev, entry);
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -ENOMEM;
+       }
+       dma->buflist = temp_buflist;
+
+       for (i = 0; i < entry->buf_count; i++) {
+               dma->buflist[i + dma->buf_count] = &entry->buflist[i];
+       }
+
+       dma->buf_count += entry->buf_count;
+       dma->seg_count += entry->seg_count;
+       dma->page_count += byte_count >> PAGE_SHIFT;
+       dma->byte_count += byte_count;
+
+       DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
+       DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
+
+       mutex_unlock(&dev->struct_mutex);
+
+       request->count = entry->buf_count;
+       request->size = size;
+
+       dma->flags = _DRM_DMA_USE_AGP;
+
+       atomic_dec(&dev->buf_alloc);
+       return 0;
+}
+EXPORT_SYMBOL(drm_addbufs_agp);
+#endif                         /* __OS_HAS_AGP */
+
+/**
+ * Add consistent PCI memory buffers for DMA transfers.
+ *
+ * \param dev struct drm_device to which the buffers are to be added.
+ * \param request pointer to a struct drm_buf_desc describing the request.
+ * \return zero on success or a negative number on failure.
+ *
+ * Allocates DMA-coherent segments of PAGE_SIZE << page_order bytes via
+ * drm_pci_alloc() and carves each segment into buffers of the requested
+ * size order; also extends the device page list with the new pages.
+ */
+int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
+{
+       struct drm_device_dma *dma = dev->dma;
+       int count;
+       int order;
+       int size;
+       int total;
+       int page_order;
+       struct drm_buf_entry *entry;
+       drm_dma_handle_t *dmah;
+       struct drm_buf *buf;
+       int alignment;
+       unsigned long offset;
+       int i;
+       int byte_count;
+       int page_count;
+       unsigned long *temp_pagelist;
+       struct drm_buf **temp_buflist;
+
+       if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
+               return -EINVAL;
+
+       if (!dma)
+               return -EINVAL;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       count = request->count;
+       order = drm_order(request->size);
+       size = 1 << order;
+
+       DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n",
+                 request->count, request->size, size, order, dev->queue_count);
+
+       if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
+               return -EINVAL;
+       if (dev->queue_count)
+               return -EBUSY;  /* Not while in use */
+
+       /* Optionally round each buffer up to a page boundary. */
+       alignment = (request->flags & _DRM_PAGE_ALIGN)
+           ? PAGE_ALIGN(size) : size;
+       page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+       total = PAGE_SIZE << page_order;
+
+       /* No new allocation once userland has started using buffers. */
+       spin_lock(&dev->count_lock);
+       if (dev->buf_use) {
+               spin_unlock(&dev->count_lock);
+               return -EBUSY;
+       }
+       atomic_inc(&dev->buf_alloc);
+       spin_unlock(&dev->count_lock);
+
+       mutex_lock(&dev->struct_mutex);
+       entry = &dma->bufs[order];
+       if (entry->buf_count) {
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -ENOMEM; /* May only call once for each order */
+       }
+
+       if (count < 0 || count > 4096) {
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -EINVAL;
+       }
+
+       entry->buflist = kmalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
+       if (!entry->buflist) {
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -ENOMEM;
+       }
+       memset(entry->buflist, 0, count * sizeof(*entry->buflist));
+
+       entry->seglist = kmalloc(count * sizeof(*entry->seglist), GFP_KERNEL);
+       if (!entry->seglist) {
+               kfree(entry->buflist);
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -ENOMEM;
+       }
+       memset(entry->seglist, 0, count * sizeof(*entry->seglist));
+
+       /* Keep the original pagelist until we know all the allocations
+        * have succeeded
+        */
+       temp_pagelist = kmalloc((dma->page_count + (count << page_order)) *
+                              sizeof(*dma->pagelist), GFP_KERNEL);
+       if (!temp_pagelist) {
+               kfree(entry->buflist);
+               kfree(entry->seglist);
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -ENOMEM;
+       }
+       memcpy(temp_pagelist,
+              dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
+       DRM_DEBUG("pagelist: %d entries\n",
+                 dma->page_count + (count << page_order));
+
+       entry->buf_size = size;
+       entry->page_order = page_order;
+       byte_count = 0;
+       page_count = 0;
+
+       /* Allocate one coherent segment per iteration and split it into as
+        * many aligned buffers as fit.
+        */
+       while (entry->buf_count < count) {
+
+               dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful);
+
+               if (!dmah) {
+                       /* Set count correctly so we free the proper amount. */
+                       entry->buf_count = count;
+                       entry->seg_count = count;
+                       drm_cleanup_buf_error(dev, entry);
+                       kfree(temp_pagelist);
+                       mutex_unlock(&dev->struct_mutex);
+                       atomic_dec(&dev->buf_alloc);
+                       return -ENOMEM;
+               }
+               entry->seglist[entry->seg_count++] = dmah;
+               /* Record the kernel virtual address of every page in the
+                * new segment.
+                */
+               for (i = 0; i < (1 << page_order); i++) {
+                       DRM_DEBUG("page %d @ 0x%08lx\n",
+                                 dma->page_count + page_count,
+                                 (unsigned long)dmah->vaddr + PAGE_SIZE * i);
+                       temp_pagelist[dma->page_count + page_count++]
+                               = (unsigned long)dmah->vaddr + PAGE_SIZE * i;
+               }
+               for (offset = 0;
+                    offset + size <= total && entry->buf_count < count;
+                    offset += alignment, ++entry->buf_count) {
+                       buf = &entry->buflist[entry->buf_count];
+                       buf->idx = dma->buf_count + entry->buf_count;
+                       buf->total = alignment;
+                       buf->order = order;
+                       buf->used = 0;
+                       buf->offset = (dma->byte_count + byte_count + offset);
+                       buf->address = (void *)(dmah->vaddr + offset);
+                       buf->bus_address = dmah->busaddr + offset;
+                       buf->next = NULL;
+                       buf->waiting = 0;
+                       buf->pending = 0;
+                       init_waitqueue_head(&buf->dma_wait);
+                       buf->file_priv = NULL;
+
+                       buf->dev_priv_size = dev->driver->dev_priv_size;
+                       buf->dev_private = kmalloc(buf->dev_priv_size,
+                                                 GFP_KERNEL);
+                       if (!buf->dev_private) {
+                               /* Set count correctly so we free the proper amount. */
+                               entry->buf_count = count;
+                               entry->seg_count = count;
+                               drm_cleanup_buf_error(dev, entry);
+                               kfree(temp_pagelist);
+                               mutex_unlock(&dev->struct_mutex);
+                               atomic_dec(&dev->buf_alloc);
+                               return -ENOMEM;
+                       }
+                       memset(buf->dev_private, 0, buf->dev_priv_size);
+
+                       DRM_DEBUG("buffer %d @ %p\n",
+                                 entry->buf_count, buf->address);
+               }
+               byte_count += PAGE_SIZE << page_order;
+       }
+
+       temp_buflist = krealloc(dma->buflist,
+                               (dma->buf_count + entry->buf_count) *
+                               sizeof(*dma->buflist), GFP_KERNEL);
+       if (!temp_buflist) {
+               /* Free the entry because it isn't valid */
+               drm_cleanup_buf_error(dev, entry);
+               kfree(temp_pagelist);
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -ENOMEM;
+       }
+       dma->buflist = temp_buflist;
+
+       for (i = 0; i < entry->buf_count; i++) {
+               dma->buflist[i + dma->buf_count] = &entry->buflist[i];
+       }
+
+       /* No allocations failed, so now we can replace the orginal pagelist
+        * with the new one.
+        */
+       if (dma->page_count) {
+               kfree(dma->pagelist);
+       }
+       dma->pagelist = temp_pagelist;
+
+       dma->buf_count += entry->buf_count;
+       dma->seg_count += entry->seg_count;
+       dma->page_count += entry->seg_count << page_order;
+       dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
+
+       mutex_unlock(&dev->struct_mutex);
+
+       request->count = entry->buf_count;
+       request->size = size;
+
+       if (request->flags & _DRM_PCI_BUFFER_RO)
+               dma->flags = _DRM_DMA_USE_PCI_RO;
+
+       atomic_dec(&dev->buf_alloc);
+       return 0;
+
+}
+EXPORT_SYMBOL(drm_addbufs_pci);
+
+/**
+ * Add scatter-gather buffers for DMA transfers.
+ *
+ * \param dev struct drm_device to which the buffers are to be added.
+ * \param request pointer to a struct drm_buf_desc describing the request.
+ * \return zero on success or a negative number on failure.
+ *
+ * No new backing memory is allocated here: buffer addresses are offsets
+ * from request->agp_start within the scatter-gather area mapped at
+ * dev->sg->virtual.
+ */
+static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request)
+{
+       struct drm_device_dma *dma = dev->dma;
+       struct drm_buf_entry *entry;
+       struct drm_buf *buf;
+       unsigned long offset;
+       unsigned long agp_offset;
+       int count;
+       int order;
+       int size;
+       int alignment;
+       int page_order;
+       int total;
+       int byte_count;
+       int i;
+       struct drm_buf **temp_buflist;
+
+       if (!drm_core_check_feature(dev, DRIVER_SG))
+               return -EINVAL;
+
+       if (!dma)
+               return -EINVAL;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       count = request->count;
+       order = drm_order(request->size);
+       size = 1 << order;
+
+       /* Optionally round each buffer up to a page boundary. */
+       alignment = (request->flags & _DRM_PAGE_ALIGN)
+           ? PAGE_ALIGN(size) : size;
+       page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+       total = PAGE_SIZE << page_order;
+
+       byte_count = 0;
+       agp_offset = request->agp_start;
+
+       DRM_DEBUG("count:      %d\n", count);
+       DRM_DEBUG("order:      %d\n", order);
+       DRM_DEBUG("size:       %d\n", size);
+       DRM_DEBUG("agp_offset: %lu\n", agp_offset);
+       DRM_DEBUG("alignment:  %d\n", alignment);
+       DRM_DEBUG("page_order: %d\n", page_order);
+       DRM_DEBUG("total:      %d\n", total);
+
+       if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
+               return -EINVAL;
+       if (dev->queue_count)
+               return -EBUSY;  /* Not while in use */
+
+       /* No new allocation once userland has started using buffers. */
+       spin_lock(&dev->count_lock);
+       if (dev->buf_use) {
+               spin_unlock(&dev->count_lock);
+               return -EBUSY;
+       }
+       atomic_inc(&dev->buf_alloc);
+       spin_unlock(&dev->count_lock);
+
+       mutex_lock(&dev->struct_mutex);
+       entry = &dma->bufs[order];
+       if (entry->buf_count) {
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -ENOMEM; /* May only call once for each order */
+       }
+
+       if (count < 0 || count > 4096) {
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -EINVAL;
+       }
+
+       entry->buflist = kmalloc(count * sizeof(*entry->buflist),
+                               GFP_KERNEL);
+       if (!entry->buflist) {
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -ENOMEM;
+       }
+       memset(entry->buflist, 0, count * sizeof(*entry->buflist));
+
+       entry->buf_size = size;
+       entry->page_order = page_order;
+
+       offset = 0;
+
+       /* Carve the SG area into count buffers of the aligned size. */
+       while (entry->buf_count < count) {
+               buf = &entry->buflist[entry->buf_count];
+               buf->idx = dma->buf_count + entry->buf_count;
+               buf->total = alignment;
+               buf->order = order;
+               buf->used = 0;
+
+               buf->offset = (dma->byte_count + offset);
+               buf->bus_address = agp_offset + offset;
+               buf->address = (void *)(agp_offset + offset
+                                       + (unsigned long)dev->sg->virtual);
+               buf->next = NULL;
+               buf->waiting = 0;
+               buf->pending = 0;
+               init_waitqueue_head(&buf->dma_wait);
+               buf->file_priv = NULL;
+
+               buf->dev_priv_size = dev->driver->dev_priv_size;
+               buf->dev_private = kmalloc(buf->dev_priv_size, GFP_KERNEL);
+               if (!buf->dev_private) {
+                       /* Set count correctly so we free the proper amount. */
+                       entry->buf_count = count;
+                       drm_cleanup_buf_error(dev, entry);
+                       mutex_unlock(&dev->struct_mutex);
+                       atomic_dec(&dev->buf_alloc);
+                       return -ENOMEM;
+               }
+
+               memset(buf->dev_private, 0, buf->dev_priv_size);
+
+               DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
+
+               offset += alignment;
+               entry->buf_count++;
+               byte_count += PAGE_SIZE << page_order;
+       }
+
+       DRM_DEBUG("byte_count: %d\n", byte_count);
+
+       temp_buflist = krealloc(dma->buflist,
+                               (dma->buf_count + entry->buf_count) *
+                               sizeof(*dma->buflist), GFP_KERNEL);
+       if (!temp_buflist) {
+               /* Free the entry because it isn't valid */
+               drm_cleanup_buf_error(dev, entry);
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -ENOMEM;
+       }
+       dma->buflist = temp_buflist;
+
+       for (i = 0; i < entry->buf_count; i++) {
+               dma->buflist[i + dma->buf_count] = &entry->buflist[i];
+       }
+
+       dma->buf_count += entry->buf_count;
+       dma->seg_count += entry->seg_count;
+       dma->page_count += byte_count >> PAGE_SHIFT;
+       dma->byte_count += byte_count;
+
+       DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
+       DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
+
+       mutex_unlock(&dev->struct_mutex);
+
+       request->count = entry->buf_count;
+       request->size = size;
+
+       dma->flags = _DRM_DMA_USE_SG;
+
+       atomic_dec(&dev->buf_alloc);
+       return 0;
+}
+
+/**
+ * Add framebuffer-backed buffers for DMA transfers.
+ *
+ * \param dev struct drm_device to which the buffers are to be added.
+ * \param request pointer to a struct drm_buf_desc describing the request.
+ * \return zero on success or a negative number on failure.
+ *
+ * Structurally identical to the SG variant, except buf->address is the
+ * bare offset from request->agp_start (no base pointer is added) and
+ * dma->flags is set to _DRM_DMA_USE_FB.
+ */
+static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request)
+{
+       struct drm_device_dma *dma = dev->dma;
+       struct drm_buf_entry *entry;
+       struct drm_buf *buf;
+       unsigned long offset;
+       unsigned long agp_offset;
+       int count;
+       int order;
+       int size;
+       int alignment;
+       int page_order;
+       int total;
+       int byte_count;
+       int i;
+       struct drm_buf **temp_buflist;
+
+       if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
+               return -EINVAL;
+
+       if (!dma)
+               return -EINVAL;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       count = request->count;
+       order = drm_order(request->size);
+       size = 1 << order;
+
+       /* Optionally round each buffer up to a page boundary. */
+       alignment = (request->flags & _DRM_PAGE_ALIGN)
+           ? PAGE_ALIGN(size) : size;
+       page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+       total = PAGE_SIZE << page_order;
+
+       byte_count = 0;
+       agp_offset = request->agp_start;
+
+       DRM_DEBUG("count:      %d\n", count);
+       DRM_DEBUG("order:      %d\n", order);
+       DRM_DEBUG("size:       %d\n", size);
+       DRM_DEBUG("agp_offset: %lu\n", agp_offset);
+       DRM_DEBUG("alignment:  %d\n", alignment);
+       DRM_DEBUG("page_order: %d\n", page_order);
+       DRM_DEBUG("total:      %d\n", total);
+
+       if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
+               return -EINVAL;
+       if (dev->queue_count)
+               return -EBUSY;  /* Not while in use */
+
+       /* No new allocation once userland has started using buffers. */
+       spin_lock(&dev->count_lock);
+       if (dev->buf_use) {
+               spin_unlock(&dev->count_lock);
+               return -EBUSY;
+       }
+       atomic_inc(&dev->buf_alloc);
+       spin_unlock(&dev->count_lock);
+
+       mutex_lock(&dev->struct_mutex);
+       entry = &dma->bufs[order];
+       if (entry->buf_count) {
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -ENOMEM; /* May only call once for each order */
+       }
+
+       if (count < 0 || count > 4096) {
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -EINVAL;
+       }
+
+       entry->buflist = kmalloc(count * sizeof(*entry->buflist),
+                               GFP_KERNEL);
+       if (!entry->buflist) {
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -ENOMEM;
+       }
+       memset(entry->buflist, 0, count * sizeof(*entry->buflist));
+
+       entry->buf_size = size;
+       entry->page_order = page_order;
+
+       offset = 0;
+
+       /* Carve the region into count buffers of the aligned size. */
+       while (entry->buf_count < count) {
+               buf = &entry->buflist[entry->buf_count];
+               buf->idx = dma->buf_count + entry->buf_count;
+               buf->total = alignment;
+               buf->order = order;
+               buf->used = 0;
+
+               buf->offset = (dma->byte_count + offset);
+               buf->bus_address = agp_offset + offset;
+               buf->address = (void *)(agp_offset + offset);
+               buf->next = NULL;
+               buf->waiting = 0;
+               buf->pending = 0;
+               init_waitqueue_head(&buf->dma_wait);
+               buf->file_priv = NULL;
+
+               buf->dev_priv_size = dev->driver->dev_priv_size;
+               buf->dev_private = kmalloc(buf->dev_priv_size, GFP_KERNEL);
+               if (!buf->dev_private) {
+                       /* Set count correctly so we free the proper amount. */
+                       entry->buf_count = count;
+                       drm_cleanup_buf_error(dev, entry);
+                       mutex_unlock(&dev->struct_mutex);
+                       atomic_dec(&dev->buf_alloc);
+                       return -ENOMEM;
+               }
+               memset(buf->dev_private, 0, buf->dev_priv_size);
+
+               DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
+
+               offset += alignment;
+               entry->buf_count++;
+               byte_count += PAGE_SIZE << page_order;
+       }
+
+       DRM_DEBUG("byte_count: %d\n", byte_count);
+
+       temp_buflist = krealloc(dma->buflist,
+                               (dma->buf_count + entry->buf_count) *
+                               sizeof(*dma->buflist), GFP_KERNEL);
+       if (!temp_buflist) {
+               /* Free the entry because it isn't valid */
+               drm_cleanup_buf_error(dev, entry);
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -ENOMEM;
+       }
+       dma->buflist = temp_buflist;
+
+       for (i = 0; i < entry->buf_count; i++) {
+               dma->buflist[i + dma->buf_count] = &entry->buflist[i];
+       }
+
+       dma->buf_count += entry->buf_count;
+       dma->seg_count += entry->seg_count;
+       dma->page_count += byte_count >> PAGE_SHIFT;
+       dma->byte_count += byte_count;
+
+       DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
+       DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
+
+       mutex_unlock(&dev->struct_mutex);
+
+       request->count = entry->buf_count;
+       request->size = size;
+
+       dma->flags = _DRM_DMA_USE_FB;
+
+       atomic_dec(&dev->buf_alloc);
+       return 0;
+}
+
+
+/**
+ * Add buffers for DMA transfers (ioctl).
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a struct drm_buf_desc request.
+ * \return zero on success or a negative number on failure.
+ *
+ * According with the memory type specified in drm_buf_desc::flags and the
+ * build options, it dispatches the call either to addbufs_agp(),
+ * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
+ * PCI memory respectively.
+ */
+int drm_addbufs(struct drm_device *dev, void *data,
+               struct drm_file *file_priv)
+{
+       struct drm_buf_desc *request = data;
+       int ret;
+
+       if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+               return -EINVAL;
+
+       /* AGP takes precedence when requested and compiled in; with AGP
+        * compiled out the flag falls through to the chain below.
+        */
+#if __OS_HAS_AGP
+       if (request->flags & _DRM_AGP_BUFFER)
+               ret = drm_addbufs_agp(dev, request);
+       else
+#endif
+       if (request->flags & _DRM_SG_BUFFER)
+               ret = drm_addbufs_sg(dev, request);
+       else if (request->flags & _DRM_FB_BUFFER)
+               ret = drm_addbufs_fb(dev, request);
+       else
+               ret = drm_addbufs_pci(dev, request);
+
+       return ret;
+}
+
+/**
+ * Get information about the buffer mappings.
+ *
+ * This was originally mean for debugging purposes, or by a sophisticated
+ * client library to determine how best to use the available buffers (e.g.,
+ * large buffers can be used for image transfer).
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_buf_info structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Increments drm_device::buf_use while holding the drm_device::count_lock
+ * lock, preventing of allocating more buffers after this call. Information
+ * about each requested buffer is then copied into user space.
+ */
+int drm_infobufs(struct drm_device *dev, void *data,
+                struct drm_file *file_priv)
+{
+       struct drm_device_dma *dma = dev->dma;
+       struct drm_buf_info *request = data;
+       int i;
+       int count;
+
+       if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+               return -EINVAL;
+
+       if (!dma)
+               return -EINVAL;
+
+       /* Refuse while an allocation is in flight (buf_alloc non-zero). */
+       spin_lock(&dev->count_lock);
+       if (atomic_read(&dev->buf_alloc)) {
+               spin_unlock(&dev->count_lock);
+               return -EBUSY;
+       }
+       ++dev->buf_use;         /* Can't allocate more after this call */
+       spin_unlock(&dev->count_lock);
+
+       /* First pass: count how many size orders have buffers allocated. */
+       for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
+               if (dma->bufs[i].buf_count)
+                       ++count;
+       }
+
+       DRM_DEBUG("count = %d\n", count);
+
+       /* Second pass: copy one drm_buf_desc per populated order out to
+        * user space, but only if the caller's array is large enough.
+        */
+       if (request->count >= count) {
+               for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
+                       if (dma->bufs[i].buf_count) {
+                               struct drm_buf_desc __user *to =
+                                   &request->list[count];
+                               struct drm_buf_entry *from = &dma->bufs[i];
+                               struct drm_freelist *list = &dma->bufs[i].freelist;
+                               if (copy_to_user(&to->count,
+                                                &from->buf_count,
+                                                sizeof(from->buf_count)) ||
+                                   copy_to_user(&to->size,
+                                                &from->buf_size,
+                                                sizeof(from->buf_size)) ||
+                                   copy_to_user(&to->low_mark,
+                                                &list->low_mark,
+                                                sizeof(list->low_mark)) ||
+                                   copy_to_user(&to->high_mark,
+                                                &list->high_mark,
+                                                sizeof(list->high_mark)))
+                                       return -EFAULT;
+
+                               DRM_DEBUG("%d %d %d %d %d\n",
+                                         i,
+                                         dma->bufs[i].buf_count,
+                                         dma->bufs[i].buf_size,
+                                         dma->bufs[i].freelist.low_mark,
+                                         dma->bufs[i].freelist.high_mark);
+                               ++count;
+                       }
+               }
+       }
+       request->count = count;
+
+       return 0;
+}
+
+/**
+ * Specifies a low and high water mark for buffer allocation
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg a pointer to a drm_buf_desc structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies that the size order is bounded between the admissible orders and
+ * updates the respective drm_device_dma::bufs entry low and high water mark.
+ *
+ * \note This ioctl is deprecated and mostly never used.
+ */
+int drm_markbufs(struct drm_device *dev, void *data,
+                struct drm_file *file_priv)
+{
+       struct drm_device_dma *dma = dev->dma;
+       struct drm_buf_desc *request = data;
+       int order;
+       struct drm_buf_entry *entry;
+
+       if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+               return -EINVAL;
+
+       if (!dma)
+               return -EINVAL;
+
+       DRM_DEBUG("%d, %d, %d\n",
+                 request->size, request->low_mark, request->high_mark);
+       order = drm_order(request->size);
+       if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
+               return -EINVAL;
+       entry = &dma->bufs[order];
+
+       /* Both marks must lie within [0, buf_count] for this order. */
+       if (request->low_mark < 0 || request->low_mark > entry->buf_count)
+               return -EINVAL;
+       if (request->high_mark < 0 || request->high_mark > entry->buf_count)
+               return -EINVAL;
+
+       entry->freelist.low_mark = request->low_mark;
+       entry->freelist.high_mark = request->high_mark;
+
+       return 0;
+}
+
+/**
+ * Unreserve the buffers in list, previously reserved using drmDMA.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_buf_free structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Calls free_buffer() for each used buffer.
+ * This function is primarily used for debugging.
+ */
+int drm_freebufs(struct drm_device *dev, void *data,
+                struct drm_file *file_priv)
+{
+       struct drm_device_dma *dma = dev->dma;
+       struct drm_buf_free *request = data;
+       int i;
+       int idx;
+       struct drm_buf *buf;
+
+       if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+               return -EINVAL;
+
+       if (!dma)
+               return -EINVAL;
+
+       DRM_DEBUG("%d\n", request->count);
+       for (i = 0; i < request->count; i++) {
+               if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
+                       return -EFAULT;
+               if (idx < 0 || idx >= dma->buf_count) {
+                       DRM_ERROR("Index %d (of %d max)\n",
+                                 idx, dma->buf_count - 1);
+                       return -EINVAL;
+               }
+               buf = dma->buflist[idx];
+               if (buf->file_priv != file_priv) {
+                       DRM_ERROR("Process %d freeing buffer not owned\n",
+                                 task_pid_nr(current));
+                       return -EINVAL;
+               }
+               drm_free_buffer(dev, buf);
+       }
+
+       return 0;
+}
+
+/**
+ * Maps all of the DMA buffers into client-virtual space (ioctl).
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_buf_map structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Maps the AGP, SG or PCI buffer region with do_mmap(), and copies information
+ * about each buffer into user space. For PCI buffers, it calls do_mmap() with
+ * offset equal to 0, which drm_mmap() interpretes as PCI buffers and calls
+ * drm_mmap_dma().
+ */
+int drm_mapbufs(struct drm_device *dev, void *data,
+               struct drm_file *file_priv)
+{
+       struct drm_device_dma *dma = dev->dma;
+       int retcode = 0;
+       const int zero = 0;
+       unsigned long virtual;
+       unsigned long address;
+       struct drm_buf_map *request = data;
+       int i;
+
+       if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+               return -EINVAL;
+
+       if (!dma)
+               return -EINVAL;
+
+       spin_lock(&dev->count_lock);
+       if (atomic_read(&dev->buf_alloc)) {
+               spin_unlock(&dev->count_lock);
+               return -EBUSY;
+       }
+       dev->buf_use++;         /* Can't allocate more after this call */
+       spin_unlock(&dev->count_lock);
+
+       if (request->count >= dma->buf_count) {
+               if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
+                   || (drm_core_check_feature(dev, DRIVER_SG)
+                       && (dma->flags & _DRM_DMA_USE_SG))
+                   || (drm_core_check_feature(dev, DRIVER_FB_DMA)
+                       && (dma->flags & _DRM_DMA_USE_FB))) {
+                       struct drm_local_map *map = dev->agp_buffer_map;
+                       unsigned long token = dev->agp_buffer_token;
+
+                       if (!map) {
+                               retcode = -EINVAL;
+                               goto done;
+                       }
+                       down_write(&current->mm->mmap_sem);
+                       virtual = do_mmap(file_priv->filp, 0, map->size,
+                                         PROT_READ | PROT_WRITE,
+                                         MAP_SHARED,
+                                         token);
+                       up_write(&current->mm->mmap_sem);
+               } else {
+                       down_write(&current->mm->mmap_sem);
+                       virtual = do_mmap(file_priv->filp, 0, dma->byte_count,
+                                         PROT_READ | PROT_WRITE,
+                                         MAP_SHARED, 0);
+                       up_write(&current->mm->mmap_sem);
+               }
+               if (virtual > -1024UL) {
+                       /* Real error */
+                       retcode = (signed long)virtual;
+                       goto done;
+               }
+               request->virtual = (void __user *)virtual;
+
+               for (i = 0; i < dma->buf_count; i++) {
+                       if (copy_to_user(&request->list[i].idx,
+                                        &dma->buflist[i]->idx,
+                                        sizeof(request->list[0].idx))) {
+                               retcode = -EFAULT;
+                               goto done;
+                       }
+                       if (copy_to_user(&request->list[i].total,
+                                        &dma->buflist[i]->total,
+                                        sizeof(request->list[0].total))) {
+                               retcode = -EFAULT;
+                               goto done;
+                       }
+                       if (copy_to_user(&request->list[i].used,
+                                        &zero, sizeof(zero))) {
+                               retcode = -EFAULT;
+                               goto done;
+                       }
+                       address = virtual + dma->buflist[i]->offset;    /* *** */
+                       if (copy_to_user(&request->list[i].address,
+                                        &address, sizeof(address))) {
+                               retcode = -EFAULT;
+                               goto done;
+                       }
+               }
+       }
+      done:
+       request->count = dma->buf_count;
+       DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);
+
+       return retcode;
+}
+
+/**
+ * Compute size order.  Returns the exponent of the smaller power of two which
+ * is greater or equal to given number.
+ *
+ * \param size size.
+ * \return order.
+ *
+ * \todo Can be made faster.
+ */
+int drm_order(unsigned long size)
+{
+       int order;
+       unsigned long tmp;
+
+       for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;
+
+       if (size & (size - 1))
+               ++order;
+
+       return order;
+}
+EXPORT_SYMBOL(drm_order);
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_cache.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_cache.c
new file mode 100644 (file)
index 0000000..0e3bd5b
--- /dev/null
@@ -0,0 +1,99 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include "drmP.h"
+
+#if defined(CONFIG_X86)
+static void
+drm_clflush_page(struct page *page)
+{
+       uint8_t *page_virtual;
+       unsigned int i;
+
+       if (unlikely(page == NULL))
+               return;
+
+       page_virtual = kmap_atomic(page, KM_USER0);
+       for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
+               clflush(page_virtual + i);
+       kunmap_atomic(page_virtual, KM_USER0);
+}
+
+static void drm_cache_flush_clflush(struct page *pages[],
+                                   unsigned long num_pages)
+{
+       unsigned long i;
+
+       mb();
+       for (i = 0; i < num_pages; i++)
+               drm_clflush_page(*pages++);
+       mb();
+}
+
+static void
+drm_clflush_ipi_handler(void *null)
+{
+       wbinvd();
+}
+#endif
+
+void
+drm_clflush_pages(struct page *pages[], unsigned long num_pages)
+{
+
+#if defined(CONFIG_X86)
+       if (cpu_has_clflush) {
+               drm_cache_flush_clflush(pages, num_pages);
+               return;
+       }
+
+       if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
+               printk(KERN_ERR "Timed out waiting for cache flush.\n");
+
+#elif defined(__powerpc__)
+       unsigned long i;
+       for (i = 0; i < num_pages; i++) {
+               struct page *page = pages[i];
+               void *page_virtual;
+
+               if (unlikely(page == NULL))
+                       continue;
+
+               page_virtual = kmap_atomic(page, KM_USER0);
+               flush_dcache_range((unsigned long)page_virtual,
+                                  (unsigned long)page_virtual + PAGE_SIZE);
+               kunmap_atomic(page_virtual, KM_USER0);
+       }
+#else
+       printk(KERN_ERR "Architecture has no drm_cache.c support\n");
+       WARN_ON_ONCE(1);
+#endif
+}
+EXPORT_SYMBOL(drm_clflush_pages);
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_context.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_context.c
new file mode 100644 (file)
index 0000000..2607753
--- /dev/null
@@ -0,0 +1,469 @@
+/**
+ * \file drm_context.c
+ * IOCTLs for generic contexts
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Fri Nov 24 18:31:37 2000 by gareth@valinux.com
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * ChangeLog:
+ *  2001-11-16 Torsten Duwe <duwe@caldera.de>
+ *             added context constructor/destructor hooks,
+ *             needed by SiS driver's memory management.
+ */
+
+#include "drmP.h"
+
+/******************************************************************/
+/** \name Context bitmap support */
+/*@{*/
+
+/**
+ * Free a handle from the context bitmap.
+ *
+ * \param dev DRM device.
+ * \param ctx_handle context handle.
+ *
+ * Clears the bit specified by \p ctx_handle in drm_device::ctx_bitmap and the entry
+ * in drm_device::ctx_idr, while holding the drm_device::struct_mutex
+ * lock.
+ */
+void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
+{
+       mutex_lock(&dev->struct_mutex);
+       idr_remove(&dev->ctx_idr, ctx_handle);
+       mutex_unlock(&dev->struct_mutex);
+}
+
+/**
+ * Context bitmap allocation.
+ *
+ * \param dev DRM device.
+ * \return (non-negative) context handle on success or a negative number on failure.
+ *
+ * Allocate a new idr from drm_device::ctx_idr while holding the
+ * drm_device::struct_mutex lock.
+ */
+static int drm_ctxbitmap_next(struct drm_device * dev)
+{
+       int new_id;
+       int ret;
+
+again:
+       if (idr_pre_get(&dev->ctx_idr, GFP_KERNEL) == 0) {
+               DRM_ERROR("Out of memory expanding drawable idr\n");
+               return -ENOMEM;
+       }
+       mutex_lock(&dev->struct_mutex);
+       ret = idr_get_new_above(&dev->ctx_idr, NULL,
+                               DRM_RESERVED_CONTEXTS, &new_id);
+       if (ret == -EAGAIN) {
+               mutex_unlock(&dev->struct_mutex);
+               goto again;
+       }
+       mutex_unlock(&dev->struct_mutex);
+       return new_id;
+}
+
+/**
+ * Context bitmap initialization.
+ *
+ * \param dev DRM device.
+ *
+ * Initialise the drm_device::ctx_idr
+ */
+int drm_ctxbitmap_init(struct drm_device * dev)
+{
+       idr_init(&dev->ctx_idr);
+       return 0;
+}
+
+/**
+ * Context bitmap cleanup.
+ *
+ * \param dev DRM device.
+ *
+ * Free all idr members using drm_ctx_sarea_free helper function
+ * while holding the drm_device::struct_mutex lock.
+ */
+void drm_ctxbitmap_cleanup(struct drm_device * dev)
+{
+       mutex_lock(&dev->struct_mutex);
+       idr_remove_all(&dev->ctx_idr);
+       mutex_unlock(&dev->struct_mutex);
+}
+
+/*@}*/
+
+/******************************************************************/
+/** \name Per Context SAREA Support */
+/*@{*/
+
+/**
+ * Get per-context SAREA.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx_priv_map structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Gets the map from drm_device::ctx_idr with the handle specified and
+ * returns its handle.
+ */
+int drm_getsareactx(struct drm_device *dev, void *data,
+                   struct drm_file *file_priv)
+{
+       struct drm_ctx_priv_map *request = data;
+       struct drm_local_map *map;
+       struct drm_map_list *_entry;
+
+       mutex_lock(&dev->struct_mutex);
+
+       map = idr_find(&dev->ctx_idr, request->ctx_id);
+       if (!map) {
+               mutex_unlock(&dev->struct_mutex);
+               return -EINVAL;
+       }
+
+       mutex_unlock(&dev->struct_mutex);
+
+       request->handle = NULL;
+       list_for_each_entry(_entry, &dev->maplist, head) {
+               if (_entry->map == map) {
+                       request->handle =
+                           (void *)(unsigned long)_entry->user_token;
+                       break;
+               }
+       }
+       if (request->handle == NULL)
+               return -EINVAL;
+
+       return 0;
+}
+
+/**
+ * Set per-context SAREA.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx_priv_map structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Searches the mapping specified in \p arg and update the entry in
+ * drm_device::ctx_idr with it.
+ */
+int drm_setsareactx(struct drm_device *dev, void *data,
+                   struct drm_file *file_priv)
+{
+       struct drm_ctx_priv_map *request = data;
+       struct drm_local_map *map = NULL;
+       struct drm_map_list *r_list = NULL;
+
+       mutex_lock(&dev->struct_mutex);
+       list_for_each_entry(r_list, &dev->maplist, head) {
+               if (r_list->map
+                   && r_list->user_token == (unsigned long) request->handle)
+                       goto found;
+       }
+      bad:
+       mutex_unlock(&dev->struct_mutex);
+       return -EINVAL;
+
+      found:
+       map = r_list->map;
+       if (!map)
+               goto bad;
+
+       if (IS_ERR(idr_replace(&dev->ctx_idr, map, request->ctx_id)))
+               goto bad;
+
+       mutex_unlock(&dev->struct_mutex);
+
+       return 0;
+}
+
+/*@}*/
+
+/******************************************************************/
+/** \name The actual DRM context handling routines */
+/*@{*/
+
+/**
+ * Switch context.
+ *
+ * \param dev DRM device.
+ * \param old old context handle.
+ * \param new new context handle.
+ * \return zero on success or a negative number on failure.
+ *
+ * Attempt to set drm_device::context_flag.
+ */
+static int drm_context_switch(struct drm_device * dev, int old, int new)
+{
+       if (test_and_set_bit(0, &dev->context_flag)) {
+               DRM_ERROR("Reentering -- FIXME\n");
+               return -EBUSY;
+       }
+
+       DRM_DEBUG("Context switch from %d to %d\n", old, new);
+
+       if (new == dev->last_context) {
+               clear_bit(0, &dev->context_flag);
+               return 0;
+       }
+
+       return 0;
+}
+
+/**
+ * Complete context switch.
+ *
+ * \param dev DRM device.
+ * \param new new context handle.
+ * \return zero on success or a negative number on failure.
+ *
+ * Updates drm_device::last_context and drm_device::last_switch. Verifies the
+ * hardware lock is held, clears the drm_device::context_flag and wakes up
+ * drm_device::context_wait.
+ */
+static int drm_context_switch_complete(struct drm_device *dev,
+                                      struct drm_file *file_priv, int new)
+{
+       dev->last_context = new;        /* PRE/POST: This is the _only_ writer. */
+       dev->last_switch = jiffies;
+
+       if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) {
+               DRM_ERROR("Lock isn't held after context switch\n");
+       }
+
+       /* If a context switch is ever initiated
+          when the kernel holds the lock, release
+          that lock here. */
+       clear_bit(0, &dev->context_flag);
+       wake_up(&dev->context_wait);
+
+       return 0;
+}
+
+/**
+ * Reserve contexts.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx_res structure.
+ * \return zero on success or a negative number on failure.
+ */
+int drm_resctx(struct drm_device *dev, void *data,
+              struct drm_file *file_priv)
+{
+       struct drm_ctx_res *res = data;
+       struct drm_ctx ctx;
+       int i;
+
+       if (res->count >= DRM_RESERVED_CONTEXTS) {
+               memset(&ctx, 0, sizeof(ctx));
+               for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
+                       ctx.handle = i;
+                       if (copy_to_user(&res->contexts[i], &ctx, sizeof(ctx)))
+                               return -EFAULT;
+               }
+       }
+       res->count = DRM_RESERVED_CONTEXTS;
+
+       return 0;
+}
+
+/**
+ * Add context.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Get a new handle for the context and copy to userspace.
+ */
+int drm_addctx(struct drm_device *dev, void *data,
+              struct drm_file *file_priv)
+{
+       struct drm_ctx_list *ctx_entry;
+       struct drm_ctx *ctx = data;
+
+       ctx->handle = drm_ctxbitmap_next(dev);
+       if (ctx->handle == DRM_KERNEL_CONTEXT) {
+               /* Skip kernel's context and get a new one. */
+               ctx->handle = drm_ctxbitmap_next(dev);
+       }
+       DRM_DEBUG("%d\n", ctx->handle);
+       if (ctx->handle == -1) {
+               DRM_DEBUG("Not enough free contexts.\n");
+               /* Should this return -EBUSY instead? */
+               return -ENOMEM;
+       }
+
+       if (ctx->handle != DRM_KERNEL_CONTEXT) {
+               if (dev->driver->context_ctor)
+                       if (!dev->driver->context_ctor(dev, ctx->handle)) {
+                               DRM_DEBUG("Running out of ctxs or memory.\n");
+                               return -ENOMEM;
+                       }
+       }
+
+       ctx_entry = kmalloc(sizeof(*ctx_entry), GFP_KERNEL);
+       if (!ctx_entry) {
+               DRM_DEBUG("out of memory\n");
+               return -ENOMEM;
+       }
+
+       INIT_LIST_HEAD(&ctx_entry->head);
+       ctx_entry->handle = ctx->handle;
+       ctx_entry->tag = file_priv;
+
+       mutex_lock(&dev->ctxlist_mutex);
+       list_add(&ctx_entry->head, &dev->ctxlist);
+       ++dev->ctx_count;
+       mutex_unlock(&dev->ctxlist_mutex);
+
+       return 0;
+}
+
+int drm_modctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       /* This does nothing */
+       return 0;
+}
+
+/**
+ * Get context.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx structure.
+ * \return zero on success or a negative number on failure.
+ */
+int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       struct drm_ctx *ctx = data;
+
+       /* This is 0, because we don't handle any context flags */
+       ctx->flags = 0;
+
+       return 0;
+}
+
+/**
+ * Switch context.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Calls context_switch().
+ */
+int drm_switchctx(struct drm_device *dev, void *data,
+                 struct drm_file *file_priv)
+{
+       struct drm_ctx *ctx = data;
+
+       DRM_DEBUG("%d\n", ctx->handle);
+       return drm_context_switch(dev, dev->last_context, ctx->handle);
+}
+
+/**
+ * New context.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Calls context_switch_complete().
+ */
+int drm_newctx(struct drm_device *dev, void *data,
+              struct drm_file *file_priv)
+{
+       struct drm_ctx *ctx = data;
+
+       DRM_DEBUG("%d\n", ctx->handle);
+       drm_context_switch_complete(dev, file_priv, ctx->handle);
+
+       return 0;
+}
+
+/**
+ * Remove context.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * If not the special kernel context, calls ctxbitmap_free() to free the specified context.
+ */
+int drm_rmctx(struct drm_device *dev, void *data,
+             struct drm_file *file_priv)
+{
+       struct drm_ctx *ctx = data;
+
+       DRM_DEBUG("%d\n", ctx->handle);
+       if (ctx->handle != DRM_KERNEL_CONTEXT) {
+               if (dev->driver->context_dtor)
+                       dev->driver->context_dtor(dev, ctx->handle);
+               drm_ctxbitmap_free(dev, ctx->handle);
+       }
+
+       mutex_lock(&dev->ctxlist_mutex);
+       if (!list_empty(&dev->ctxlist)) {
+               struct drm_ctx_list *pos, *n;
+
+               list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
+                       if (pos->handle == ctx->handle) {
+                               list_del(&pos->head);
+                               kfree(pos);
+                               --dev->ctx_count;
+                       }
+               }
+       }
+       mutex_unlock(&dev->ctxlist_mutex);
+
+       return 0;
+}
+
+/*@}*/
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_crtc.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_crtc.c
new file mode 100644 (file)
index 0000000..5124401
--- /dev/null
@@ -0,0 +1,2654 @@
+/*
+ * Copyright (c) 2006-2008 Intel Corporation
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2008 Red Hat Inc.
+ *
+ * DRM core CRTC related functions
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ *
+ * Authors:
+ *      Keith Packard
+ *     Eric Anholt <eric@anholt.net>
+ *      Dave Airlie <airlied@linux.ie>
+ *      Jesse Barnes <jesse.barnes@intel.com>
+ */
+#include <linux/list.h>
+#include "drm.h"
+#include "drmP.h"
+#include "drm_crtc.h"
+
+struct drm_prop_enum_list {
+       int type;   /* property enum value */
+       char *name; /* user-visible name for that value */
+};
+
+/* Generate a value -> name lookup function over a drm_prop_enum_list table. */
+#define DRM_ENUM_NAME_FN(fnname, list)                         \
+       char *fnname(int val)                                   \
+       {                                                       \
+               int i;                                          \
+               for (i = 0; i < ARRAY_SIZE(list); i++) {        \
+                       if (list[i].type == val)                \
+                               return list[i].name;            \
+               }                                               \
+               return "(unknown)";                             \
+       }
+
+/*
+ * Global properties (DPMS/EDID are attached to every connector at init time)
+ */
+static struct drm_prop_enum_list drm_dpms_enum_list[] =
+{      { DRM_MODE_DPMS_ON, "On" },
+       { DRM_MODE_DPMS_STANDBY, "Standby" },
+       { DRM_MODE_DPMS_SUSPEND, "Suspend" },
+       { DRM_MODE_DPMS_OFF, "Off" }
+};
+
+DRM_ENUM_NAME_FN(drm_get_dpms_name, drm_dpms_enum_list)
+
+/*
+ * Optional properties, created on demand by the create_*_property helpers
+ */
+static struct drm_prop_enum_list drm_scaling_mode_enum_list[] =
+{
+       { DRM_MODE_SCALE_NONE, "None" },
+       { DRM_MODE_SCALE_FULLSCREEN, "Full" },
+       { DRM_MODE_SCALE_CENTER, "Center" },
+       { DRM_MODE_SCALE_ASPECT, "Full aspect" },
+};
+
+static struct drm_prop_enum_list drm_dithering_mode_enum_list[] =
+{
+       { DRM_MODE_DITHERING_OFF, "Off" },
+       { DRM_MODE_DITHERING_ON, "On" },
+};
+
+/*
+ * Non-global properties, but "required" for certain connectors.
+ */
+static struct drm_prop_enum_list drm_dvi_i_select_enum_list[] =
+{
+       { DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
+       { DRM_MODE_SUBCONNECTOR_DVID,      "DVI-D"     }, /* DVI-I  */
+       { DRM_MODE_SUBCONNECTOR_DVIA,      "DVI-A"     }, /* DVI-I  */
+};
+
+DRM_ENUM_NAME_FN(drm_get_dvi_i_select_name, drm_dvi_i_select_enum_list)
+
+static struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] =
+{
+       { DRM_MODE_SUBCONNECTOR_Unknown,   "Unknown"   }, /* DVI-I and TV-out */
+       { DRM_MODE_SUBCONNECTOR_DVID,      "DVI-D"     }, /* DVI-I  */
+       { DRM_MODE_SUBCONNECTOR_DVIA,      "DVI-A"     }, /* DVI-I  */
+};
+
+DRM_ENUM_NAME_FN(drm_get_dvi_i_subconnector_name,
+                drm_dvi_i_subconnector_enum_list)
+
+static struct drm_prop_enum_list drm_tv_select_enum_list[] =
+{
+       { DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
+       { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
+       { DRM_MODE_SUBCONNECTOR_SVIDEO,    "SVIDEO"    }, /* TV-out */
+       { DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
+       { DRM_MODE_SUBCONNECTOR_SCART,     "SCART"     }, /* TV-out */
+};
+
+DRM_ENUM_NAME_FN(drm_get_tv_select_name, drm_tv_select_enum_list)
+
+static struct drm_prop_enum_list drm_tv_subconnector_enum_list[] =
+{
+       { DRM_MODE_SUBCONNECTOR_Unknown,   "Unknown"   }, /* DVI-I and TV-out */
+       { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
+       { DRM_MODE_SUBCONNECTOR_SVIDEO,    "SVIDEO"    }, /* TV-out */
+       { DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
+       { DRM_MODE_SUBCONNECTOR_SCART,     "SCART"     }, /* TV-out */
+};
+
+DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
+                drm_tv_subconnector_enum_list)
+
+static struct drm_prop_enum_list drm_dirty_info_enum_list[] = {
+       { DRM_MODE_DIRTY_OFF,      "Off"      },
+       { DRM_MODE_DIRTY_ON,       "On"       },
+       { DRM_MODE_DIRTY_ANNOTATE, "Annotate" },
+};
+
+DRM_ENUM_NAME_FN(drm_get_dirty_info_name,
+                drm_dirty_info_enum_list)
+
+struct drm_conn_prop_enum_list {
+       int type;   /* DRM_MODE_CONNECTOR_* value */
+       char *name; /* user-visible connector type name */
+       int count;  /* connectors of this type initialised so far; feeds connector_type_id */
+};
+
+/*
+ * Connector and encoder types.
+ */
+static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
+{      { DRM_MODE_CONNECTOR_Unknown, "Unknown", 0 },
+       { DRM_MODE_CONNECTOR_VGA, "VGA", 0 },
+       { DRM_MODE_CONNECTOR_DVII, "DVI-I", 0 },
+       { DRM_MODE_CONNECTOR_DVID, "DVI-D", 0 },
+       { DRM_MODE_CONNECTOR_DVIA, "DVI-A", 0 },
+       { DRM_MODE_CONNECTOR_Composite, "Composite", 0 },
+       { DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO", 0 },
+       { DRM_MODE_CONNECTOR_LVDS, "LVDS", 0 },
+       { DRM_MODE_CONNECTOR_Component, "Component", 0 },
+       { DRM_MODE_CONNECTOR_9PinDIN, "9-pin DIN", 0 },
+       { DRM_MODE_CONNECTOR_DisplayPort, "DisplayPort", 0 },
+       { DRM_MODE_CONNECTOR_HDMIA, "HDMI Type A", 0 },
+       { DRM_MODE_CONNECTOR_HDMIB, "HDMI Type B", 0 },
+       { DRM_MODE_CONNECTOR_TV, "TV", 0 },
+};
+
+static struct drm_prop_enum_list drm_encoder_enum_list[] =
+{      { DRM_MODE_ENCODER_NONE, "None" },
+       { DRM_MODE_ENCODER_DAC, "DAC" },
+       { DRM_MODE_ENCODER_TMDS, "TMDS" },
+       { DRM_MODE_ENCODER_LVDS, "LVDS" },
+       { DRM_MODE_ENCODER_TVDAC, "TV" },
+};
+
+char *drm_get_encoder_name(struct drm_encoder *encoder)
+{
+       static char buf[32]; /* shared static buffer: not reentrant / thread-safe */
+
+       snprintf(buf, 32, "%s-%d",
+                drm_encoder_enum_list[encoder->encoder_type].name,
+                encoder->base.id);
+       return buf; /* contents valid only until the next call */
+}
+EXPORT_SYMBOL(drm_get_encoder_name);
+
+char *drm_get_connector_name(struct drm_connector *connector)
+{
+       static char buf[32]; /* shared static buffer: not reentrant / thread-safe */
+
+       snprintf(buf, 32, "%s-%d",
+                drm_connector_enum_list[connector->connector_type].name,
+                connector->connector_type_id);
+       return buf; /* contents valid only until the next call */
+}
+EXPORT_SYMBOL(drm_get_connector_name);
+
+char *drm_get_connector_status_name(enum drm_connector_status status)
+{
+       if (status == connector_status_connected)
+               return "connected";
+       else if (status == connector_status_disconnected)
+               return "disconnected";
+       else
+               return "unknown"; /* connector_status_unknown or any other value */
+}
+
+/**
+ * drm_mode_object_get - allocate a new identifier
+ * @dev: DRM device
+ * @obj: mode object to assign the new identifier to
+ * @obj_type: object type (DRM_MODE_OBJECT_*)
+ *
+ * LOCKING:
+ * Takes and releases @dev's mode_config.idr_mutex internally.
+ *
+ * Create a unique identifier based on @obj in @dev's identifier space.  Used
+ * for tracking modes, CRTCs and connectors.
+ *
+ * RETURNS:
+ * Zero on success (obj->id/obj->type filled in), -ENOMEM when out of memory.
+ */
+static int drm_mode_object_get(struct drm_device *dev,
+                              struct drm_mode_object *obj, uint32_t obj_type)
+{
+       int new_id = 0;
+       int ret;
+
+again:
+       if (idr_pre_get(&dev->mode_config.crtc_idr, GFP_KERNEL) == 0) {
+               DRM_ERROR("Ran out of memory getting a mode number\n");
+               return -ENOMEM; /* was -EINVAL; allocation failure is -ENOMEM */
+       }
+
+       mutex_lock(&dev->mode_config.idr_mutex);
+       ret = idr_get_new_above(&dev->mode_config.crtc_idr, obj, 1, &new_id);
+       mutex_unlock(&dev->mode_config.idr_mutex);
+       if (ret == -EAGAIN)
+               goto again; /* lost a race against another allocator: preallocate and retry */
+
+       obj->id = new_id;
+       obj->type = obj_type;
+       return 0;
+}
+
+/**
+ * drm_mode_object_put - free an identifier
+ * @dev: DRM device
+ * @object: mode object whose identifier should be released
+ *
+ * LOCKING:
+ * Takes and releases @dev's mode_config.idr_mutex internally.
+ *
+ * Free @object's id from @dev's unique identifier pool.
+ */
+static void drm_mode_object_put(struct drm_device *dev,
+                               struct drm_mode_object *object)
+{
+       mutex_lock(&dev->mode_config.idr_mutex);
+       idr_remove(&dev->mode_config.crtc_idr, object->id);
+       mutex_unlock(&dev->mode_config.idr_mutex);
+}
+
+struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
+               uint32_t id, uint32_t type)
+{
+       struct drm_mode_object *obj = NULL;
+
+       mutex_lock(&dev->mode_config.idr_mutex);
+       obj = idr_find(&dev->mode_config.crtc_idr, id);
+       if (!obj || (obj->type != type) || (obj->id != id))
+               obj = NULL; /* wrong type or stale id: report as not found */
+       mutex_unlock(&dev->mode_config.idr_mutex);
+
+       return obj; /* NULL when no object of @type carries @id */
+}
+EXPORT_SYMBOL(drm_mode_object_find);
+
+/**
+ * drm_framebuffer_init - initialize a framebuffer
+ * @dev: DRM device
+ * @fb: framebuffer to initialize; @funcs: driver fb hooks (incl. destroy)
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Allocates an ID for the framebuffer's parent mode object, sets its mode
+ * functions & device and adds it to the device's fb list.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure.
+ */
+int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
+                        const struct drm_framebuffer_funcs *funcs)
+{
+       int ret;
+
+       ret = drm_mode_object_get(dev, &fb->base, DRM_MODE_OBJECT_FB);
+       if (ret) {
+               return ret; /* could not allocate an object ID */
+       }
+
+       fb->dev = dev;
+       fb->funcs = funcs;
+       dev->mode_config.num_fb++;
+       list_add(&fb->head, &dev->mode_config.fb_list);
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_framebuffer_init);
+
+/**
+ * drm_framebuffer_cleanup - remove a framebuffer object
+ * @fb: framebuffer to remove
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Scans all the CRTCs in @fb's device; any CRTC scanning out @fb is shut
+ * down via its set_config hook.  Then @fb's ID is freed and it is unlinked.
+ */
+void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
+{
+       struct drm_device *dev = fb->dev;
+       struct drm_crtc *crtc;
+       struct drm_mode_set set;
+       int ret;
+
+       /* remove from any CRTC */
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               if (crtc->fb == fb) {
+                       /* should turn off the crtc */
+                       memset(&set, 0, sizeof(struct drm_mode_set));
+                       set.crtc = crtc;
+                       set.fb = NULL; /* NULL fb in an otherwise empty set disables the crtc */
+                       ret = crtc->funcs->set_config(&set);
+                       if (ret)
+                               DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc);
+               }
+       }
+
+       drm_mode_object_put(dev, &fb->base); /* release the fb's object ID */
+       list_del(&fb->head);
+       dev->mode_config.num_fb--;
+}
+EXPORT_SYMBOL(drm_framebuffer_cleanup);
+
+/**
+ * drm_crtc_init - Initialise a new CRTC object
+ * @dev: DRM device
+ * @crtc: CRTC object to init
+ * @funcs: callbacks for the new CRTC
+ *
+ * LOCKING:
+ * Takes the mode_config mutex internally; the caller must not hold it.
+ *
+ * Inits a new object created as base part of a driver crtc object.
+ */
+void drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
+                  const struct drm_crtc_funcs *funcs)
+{
+       crtc->dev = dev;
+       crtc->funcs = funcs;
+
+       mutex_lock(&dev->mode_config.mutex);
+       drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC); /* NOTE(review): return value ignored -- confirm failure is impossible here */
+
+       list_add_tail(&crtc->head, &dev->mode_config.crtc_list);
+       dev->mode_config.num_crtc++;
+       mutex_unlock(&dev->mode_config.mutex);
+}
+EXPORT_SYMBOL(drm_crtc_init);
+
+/**
+ * drm_crtc_cleanup - Cleans up the core crtc usage.
+ * @crtc: CRTC to cleanup
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Cleanup @crtc: frees the gamma table, drops the CRTC's object ID and
+ * unlinks it.  Does NOT free the @crtc object itself; the caller does that.
+ */
+void drm_crtc_cleanup(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+
+       if (crtc->gamma_store) { /* kfree(NULL) would be a no-op; guard only skips the pointer reset */
+               kfree(crtc->gamma_store);
+               crtc->gamma_store = NULL;
+       }
+
+       drm_mode_object_put(dev, &crtc->base);
+       list_del(&crtc->head);
+       dev->mode_config.num_crtc--;
+}
+EXPORT_SYMBOL(drm_crtc_cleanup);
+
+/**
+ * drm_mode_probed_add - add a mode to a connector's probed mode list
+ * @connector: connector the new mode belongs to
+ * @mode: mode data
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Add @mode to @connector's probed mode list for later use.
+ */
+void drm_mode_probed_add(struct drm_connector *connector,
+                        struct drm_display_mode *mode)
+{
+       list_add(&mode->head, &connector->probed_modes);
+}
+EXPORT_SYMBOL(drm_mode_probed_add);
+
+/**
+ * drm_mode_remove - remove and free a mode
+ * @connector: connector whose mode list holds @mode
+ * @mode: mode to remove
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Remove @mode from @connector's mode list, then free it.
+ */
+void drm_mode_remove(struct drm_connector *connector,
+                    struct drm_display_mode *mode)
+{
+       list_del(&mode->head);
+       kfree(mode); /* @mode must not be used by the caller after this */
+}
+EXPORT_SYMBOL(drm_mode_remove);
+
+/**
+ * drm_connector_init - Init a preallocated connector
+ * @dev: DRM device
+ * @connector: the connector to init
+ * @funcs: callbacks for this connector
+ * @connector_type: DRM_MODE_CONNECTOR_* type of this connector
+ *
+ * LOCKING:
+ * Takes @dev's mode_config mutex internally; the caller must not hold it.
+ *
+ * Initialises a preallocated connector. Connectors should be
+ * subclassed as part of driver connector objects.
+ */
+void drm_connector_init(struct drm_device *dev,
+                    struct drm_connector *connector,
+                    const struct drm_connector_funcs *funcs,
+                    int connector_type)
+{
+       mutex_lock(&dev->mode_config.mutex);
+
+       connector->dev = dev;
+       connector->funcs = funcs;
+       drm_mode_object_get(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR);
+       connector->connector_type = connector_type;
+       connector->connector_type_id =
+               ++drm_connector_enum_list[connector_type].count; /* TODO */
+       INIT_LIST_HEAD(&connector->user_modes);
+       INIT_LIST_HEAD(&connector->probed_modes);
+       INIT_LIST_HEAD(&connector->modes);
+       connector->edid_blob_ptr = NULL;
+
+       list_add_tail(&connector->head, &dev->mode_config.connector_list);
+       dev->mode_config.num_connector++;
+
+       drm_connector_attach_property(connector,
+                                     dev->mode_config.edid_property, 0); /* standard props created in drm_mode_config_init() */
+
+       drm_connector_attach_property(connector,
+                                     dev->mode_config.dpms_property, 0);
+
+       mutex_unlock(&dev->mode_config.mutex);
+}
+EXPORT_SYMBOL(drm_connector_init);
+
+/**
+ * drm_connector_cleanup - cleans up an initialised connector
+ * @connector: connector to cleanup
+ *
+ * LOCKING:
+ * Takes the mode_config mutex for the id/list teardown; NOTE(review): the
+ * three mode lists are walked before the lock is taken -- confirm callers
+ * guarantee exclusive access at this point.
+ */
+void drm_connector_cleanup(struct drm_connector *connector)
+{
+       struct drm_device *dev = connector->dev;
+       struct drm_display_mode *mode, *t;
+
+       list_for_each_entry_safe(mode, t, &connector->probed_modes, head)
+               drm_mode_remove(connector, mode);
+
+       list_for_each_entry_safe(mode, t, &connector->modes, head)
+               drm_mode_remove(connector, mode);
+
+       list_for_each_entry_safe(mode, t, &connector->user_modes, head)
+               drm_mode_remove(connector, mode);
+
+       kfree(connector->fb_helper_private);
+       mutex_lock(&dev->mode_config.mutex);
+       drm_mode_object_put(dev, &connector->base);
+       list_del(&connector->head);
+       mutex_unlock(&dev->mode_config.mutex);
+}
+EXPORT_SYMBOL(drm_connector_cleanup);
+
+void drm_encoder_init(struct drm_device *dev,
+                     struct drm_encoder *encoder,
+                     const struct drm_encoder_funcs *funcs,
+                     int encoder_type)
+{
+       mutex_lock(&dev->mode_config.mutex); /* serialise against other mode_config updates */
+
+       encoder->dev = dev;
+
+       drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER); /* assign an object ID */
+       encoder->encoder_type = encoder_type; /* DRM_MODE_ENCODER_* */
+       encoder->funcs = funcs;
+
+       list_add_tail(&encoder->head, &dev->mode_config.encoder_list);
+       dev->mode_config.num_encoder++;
+
+       mutex_unlock(&dev->mode_config.mutex);
+}
+EXPORT_SYMBOL(drm_encoder_init);
+
+void drm_encoder_cleanup(struct drm_encoder *encoder)
+{
+       struct drm_device *dev = encoder->dev;
+       mutex_lock(&dev->mode_config.mutex); /* undo drm_encoder_init: drop ID and unlink */
+       drm_mode_object_put(dev, &encoder->base);
+       list_del(&encoder->head); /* NOTE(review): num_encoder is not decremented, unlike crtc/connector cleanup -- confirm */
+       mutex_unlock(&dev->mode_config.mutex);
+}
+EXPORT_SYMBOL(drm_encoder_cleanup);
+
+/**
+ * drm_mode_create - create a new display mode
+ * @dev: DRM device
+ *
+ * LOCKING:
+ * Caller must hold DRM mode_config lock.
+ *
+ * Create a new drm_display_mode, give it an ID, and return it.
+ *
+ * RETURNS:
+ * Pointer to new (zeroed) mode on success, NULL if allocation fails.
+ */
+struct drm_display_mode *drm_mode_create(struct drm_device *dev)
+{
+       struct drm_display_mode *nmode;
+
+       nmode = kzalloc(sizeof(struct drm_display_mode), GFP_KERNEL);
+       if (!nmode)
+               return NULL;
+
+       drm_mode_object_get(dev, &nmode->base, DRM_MODE_OBJECT_MODE); /* NOTE(review): failure ignored; mode would keep id 0 */
+       return nmode;
+}
+EXPORT_SYMBOL(drm_mode_create);
+
+/**
+ * drm_mode_destroy - remove a mode
+ * @dev: DRM device
+ * @mode: mode to remove
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Free @mode's unique identifier, then free it.
+ */
+void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode)
+{
+       drm_mode_object_put(dev, &mode->base); /* release the mode's object ID first */
+
+       kfree(mode);
+}
+EXPORT_SYMBOL(drm_mode_destroy);
+
+static int drm_mode_create_standard_connector_properties(struct drm_device *dev)
+{
+       struct drm_property *edid;
+       struct drm_property *dpms;
+       int i;
+
+       /*
+        * Standard properties (apply to all connectors)
+        */
+       edid = drm_property_create(dev, DRM_MODE_PROP_BLOB |
+                                  DRM_MODE_PROP_IMMUTABLE,
+                                  "EDID", 0); /* NOTE(review): result never NULL-checked -- confirm create cannot fail here */
+       dev->mode_config.edid_property = edid;
+
+       dpms = drm_property_create(dev, DRM_MODE_PROP_ENUM,
+                                  "DPMS", ARRAY_SIZE(drm_dpms_enum_list));
+       for (i = 0; i < ARRAY_SIZE(drm_dpms_enum_list); i++)
+               drm_property_add_enum(dpms, i, drm_dpms_enum_list[i].type,
+                                     drm_dpms_enum_list[i].name);
+       dev->mode_config.dpms_property = dpms;
+
+       return 0; /* always succeeds as written */
+}
+
+/**
+ * drm_mode_create_dvi_i_properties - create DVI-I specific connector properties
+ * @dev: DRM device
+ *
+ * Called by a driver the first time a DVI-I connector is made.  Idempotent.
+ */
+int drm_mode_create_dvi_i_properties(struct drm_device *dev)
+{
+       struct drm_property *dvi_i_selector;
+       struct drm_property *dvi_i_subconnector;
+       int i;
+
+       if (dev->mode_config.dvi_i_select_subconnector_property)
+               return 0; /* already created */
+
+       dvi_i_selector =
+               drm_property_create(dev, DRM_MODE_PROP_ENUM,
+                                   "select subconnector",
+                                   ARRAY_SIZE(drm_dvi_i_select_enum_list));
+       for (i = 0; i < ARRAY_SIZE(drm_dvi_i_select_enum_list); i++)
+               drm_property_add_enum(dvi_i_selector, i,
+                                     drm_dvi_i_select_enum_list[i].type,
+                                     drm_dvi_i_select_enum_list[i].name);
+       dev->mode_config.dvi_i_select_subconnector_property = dvi_i_selector;
+
+       dvi_i_subconnector =
+               drm_property_create(dev, DRM_MODE_PROP_ENUM |
+                                   DRM_MODE_PROP_IMMUTABLE,
+                                   "subconnector",
+                                   ARRAY_SIZE(drm_dvi_i_subconnector_enum_list));
+       for (i = 0; i < ARRAY_SIZE(drm_dvi_i_subconnector_enum_list); i++)
+               drm_property_add_enum(dvi_i_subconnector, i,
+                                     drm_dvi_i_subconnector_enum_list[i].type,
+                                     drm_dvi_i_subconnector_enum_list[i].name);
+       dev->mode_config.dvi_i_subconnector_property = dvi_i_subconnector;
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_dvi_i_properties);
+
+/**
+ * drm_mode_create_tv_properties - create TV specific connector properties
+ * @dev: DRM device
+ * @num_modes: number of different TV formats (modes) supported
+ * @modes: array of pointers to strings containing name of each format
+ *
+ * Called by a driver's TV initialization routine, this function creates
+ * the TV specific connector properties for a given device.  Caller is
+ * responsible for allocating a list of format names and passing them to
+ * this routine.
+ */
+int drm_mode_create_tv_properties(struct drm_device *dev, int num_modes,
+                                 char *modes[])
+{
+       struct drm_property *tv_selector;
+       struct drm_property *tv_subconnector;
+       int i;
+
+       if (dev->mode_config.tv_select_subconnector_property)
+               return 0; /* already created */
+
+       /*
+        * Basic connector properties
+        */
+       tv_selector = drm_property_create(dev, DRM_MODE_PROP_ENUM,
+                                         "select subconnector",
+                                         ARRAY_SIZE(drm_tv_select_enum_list));
+       for (i = 0; i < ARRAY_SIZE(drm_tv_select_enum_list); i++)
+               drm_property_add_enum(tv_selector, i,
+                                     drm_tv_select_enum_list[i].type,
+                                     drm_tv_select_enum_list[i].name);
+       dev->mode_config.tv_select_subconnector_property = tv_selector;
+
+       tv_subconnector =
+               drm_property_create(dev, DRM_MODE_PROP_ENUM |
+                                   DRM_MODE_PROP_IMMUTABLE, "subconnector",
+                                   ARRAY_SIZE(drm_tv_subconnector_enum_list));
+       for (i = 0; i < ARRAY_SIZE(drm_tv_subconnector_enum_list); i++)
+               drm_property_add_enum(tv_subconnector, i,
+                                     drm_tv_subconnector_enum_list[i].type,
+                                     drm_tv_subconnector_enum_list[i].name);
+       dev->mode_config.tv_subconnector_property = tv_subconnector;
+
+       /*
+        * Other, TV specific properties: margins & TV modes.
+        */
+       dev->mode_config.tv_left_margin_property =
+               drm_property_create(dev, DRM_MODE_PROP_RANGE,
+                                   "left margin", 2);
+       dev->mode_config.tv_left_margin_property->values[0] = 0; /* all ranges below span 0..100 */
+       dev->mode_config.tv_left_margin_property->values[1] = 100;
+
+       dev->mode_config.tv_right_margin_property =
+               drm_property_create(dev, DRM_MODE_PROP_RANGE,
+                                   "right margin", 2);
+       dev->mode_config.tv_right_margin_property->values[0] = 0;
+       dev->mode_config.tv_right_margin_property->values[1] = 100;
+
+       dev->mode_config.tv_top_margin_property =
+               drm_property_create(dev, DRM_MODE_PROP_RANGE,
+                                   "top margin", 2);
+       dev->mode_config.tv_top_margin_property->values[0] = 0;
+       dev->mode_config.tv_top_margin_property->values[1] = 100;
+
+       dev->mode_config.tv_bottom_margin_property =
+               drm_property_create(dev, DRM_MODE_PROP_RANGE,
+                                   "bottom margin", 2);
+       dev->mode_config.tv_bottom_margin_property->values[0] = 0;
+       dev->mode_config.tv_bottom_margin_property->values[1] = 100;
+
+       dev->mode_config.tv_mode_property =
+               drm_property_create(dev, DRM_MODE_PROP_ENUM,
+                                   "mode", num_modes);
+       for (i = 0; i < num_modes; i++)
+               drm_property_add_enum(dev->mode_config.tv_mode_property, i,
+                                     i, modes[i]); /* enum value == index into @modes */
+
+       dev->mode_config.tv_brightness_property =
+               drm_property_create(dev, DRM_MODE_PROP_RANGE,
+                                   "brightness", 2);
+       dev->mode_config.tv_brightness_property->values[0] = 0;
+       dev->mode_config.tv_brightness_property->values[1] = 100;
+
+       dev->mode_config.tv_contrast_property =
+               drm_property_create(dev, DRM_MODE_PROP_RANGE,
+                                   "contrast", 2);
+       dev->mode_config.tv_contrast_property->values[0] = 0;
+       dev->mode_config.tv_contrast_property->values[1] = 100;
+
+       dev->mode_config.tv_flicker_reduction_property =
+               drm_property_create(dev, DRM_MODE_PROP_RANGE,
+                                   "flicker reduction", 2);
+       dev->mode_config.tv_flicker_reduction_property->values[0] = 0;
+       dev->mode_config.tv_flicker_reduction_property->values[1] = 100;
+
+       dev->mode_config.tv_overscan_property =
+               drm_property_create(dev, DRM_MODE_PROP_RANGE,
+                                   "overscan", 2);
+       dev->mode_config.tv_overscan_property->values[0] = 0;
+       dev->mode_config.tv_overscan_property->values[1] = 100;
+
+       dev->mode_config.tv_saturation_property =
+               drm_property_create(dev, DRM_MODE_PROP_RANGE,
+                                   "saturation", 2);
+       dev->mode_config.tv_saturation_property->values[0] = 0;
+       dev->mode_config.tv_saturation_property->values[1] = 100;
+
+       dev->mode_config.tv_hue_property =
+               drm_property_create(dev, DRM_MODE_PROP_RANGE,
+                                   "hue", 2);
+       dev->mode_config.tv_hue_property->values[0] = 0;
+       dev->mode_config.tv_hue_property->values[1] = 100;
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_tv_properties);
+
+/**
+ * drm_mode_create_scaling_mode_property - create scaling mode property
+ * @dev: DRM device
+ *
+ * Called by a driver the first time it's needed, must be attached to desired
+ * connectors.  Idempotent: subsequent calls are no-ops.
+ */
+int drm_mode_create_scaling_mode_property(struct drm_device *dev)
+{
+       struct drm_property *scaling_mode;
+       int i;
+
+       if (dev->mode_config.scaling_mode_property)
+               return 0; /* already created */
+
+       scaling_mode =
+               drm_property_create(dev, DRM_MODE_PROP_ENUM, "scaling mode",
+                                   ARRAY_SIZE(drm_scaling_mode_enum_list));
+       for (i = 0; i < ARRAY_SIZE(drm_scaling_mode_enum_list); i++)
+               drm_property_add_enum(scaling_mode, i,
+                                     drm_scaling_mode_enum_list[i].type,
+                                     drm_scaling_mode_enum_list[i].name);
+
+       dev->mode_config.scaling_mode_property = scaling_mode;
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_scaling_mode_property);
+
+/**
+ * drm_mode_create_dithering_property - create dithering property
+ * @dev: DRM device
+ *
+ * Called by a driver the first time it's needed, must be attached to desired
+ * connectors.  Idempotent: subsequent calls are no-ops.
+ */
+int drm_mode_create_dithering_property(struct drm_device *dev)
+{
+       struct drm_property *dithering_mode;
+       int i;
+
+       if (dev->mode_config.dithering_mode_property)
+               return 0; /* already created */
+
+       dithering_mode =
+               drm_property_create(dev, DRM_MODE_PROP_ENUM, "dithering",
+                                   ARRAY_SIZE(drm_dithering_mode_enum_list));
+       for (i = 0; i < ARRAY_SIZE(drm_dithering_mode_enum_list); i++)
+               drm_property_add_enum(dithering_mode, i,
+                                     drm_dithering_mode_enum_list[i].type,
+                                     drm_dithering_mode_enum_list[i].name);
+       dev->mode_config.dithering_mode_property = dithering_mode;
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_dithering_property);
+
+/**
+ * drm_mode_create_dirty_info_property - create dirty info property
+ * @dev: DRM device
+ *
+ * Called by a driver the first time it's needed, must be attached to desired
+ * connectors.  Idempotent: subsequent calls are no-ops.
+ */
+int drm_mode_create_dirty_info_property(struct drm_device *dev)
+{
+       struct drm_property *dirty_info;
+       int i;
+
+       if (dev->mode_config.dirty_info_property)
+               return 0; /* already created */
+
+       dirty_info =
+               drm_property_create(dev, DRM_MODE_PROP_ENUM |
+                                   DRM_MODE_PROP_IMMUTABLE,
+                                   "dirty",
+                                   ARRAY_SIZE(drm_dirty_info_enum_list));
+       for (i = 0; i < ARRAY_SIZE(drm_dirty_info_enum_list); i++)
+               drm_property_add_enum(dirty_info, i,
+                                     drm_dirty_info_enum_list[i].type,
+                                     drm_dirty_info_enum_list[i].name);
+       dev->mode_config.dirty_info_property = dirty_info;
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_dirty_info_property);
+
+/**
+ * drm_mode_config_init - initialize DRM mode_configuration structure
+ * @dev: DRM device
+ *
+ * LOCKING:
+ * None, should happen single threaded at init time.
+ *
+ * Initialize @dev's mode_config structure, used for tracking the graphics
+ * configuration of @dev.
+ */
+void drm_mode_config_init(struct drm_device *dev)
+{
+       mutex_init(&dev->mode_config.mutex);
+       mutex_init(&dev->mode_config.idr_mutex);
+       INIT_LIST_HEAD(&dev->mode_config.fb_list);
+       INIT_LIST_HEAD(&dev->mode_config.fb_kernel_list);
+       INIT_LIST_HEAD(&dev->mode_config.crtc_list);
+       INIT_LIST_HEAD(&dev->mode_config.connector_list);
+       INIT_LIST_HEAD(&dev->mode_config.encoder_list);
+       INIT_LIST_HEAD(&dev->mode_config.property_list);
+       INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
+       idr_init(&dev->mode_config.crtc_idr); /* id space shared by all mode objects, not just CRTCs */
+
+       mutex_lock(&dev->mode_config.mutex);
+       drm_mode_create_standard_connector_properties(dev); /* EDID + DPMS */
+       mutex_unlock(&dev->mode_config.mutex);
+
+       /* Just to be sure */
+       dev->mode_config.num_fb = 0;
+       dev->mode_config.num_connector = 0;
+       dev->mode_config.num_crtc = 0;
+       dev->mode_config.num_encoder = 0;
+}
+EXPORT_SYMBOL(drm_mode_config_init);
+
+int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group)
+{
+       uint32_t total_objects = 0;
+
+       total_objects += dev->mode_config.num_crtc;
+       total_objects += dev->mode_config.num_connector;
+       total_objects += dev->mode_config.num_encoder;
+
+       if (total_objects == 0)
+               return -EINVAL; /* nothing to group */
+
+       group->id_list = kzalloc(total_objects * sizeof(uint32_t), GFP_KERNEL); /* one slot per crtc/encoder/connector */
+       if (!group->id_list)
+               return -ENOMEM;
+
+       group->num_crtcs = 0;
+       group->num_connectors = 0;
+       group->num_encoders = 0;
+       return 0;
+}
+
+int drm_mode_group_init_legacy_group(struct drm_device *dev,
+                                    struct drm_mode_group *group)
+{
+       struct drm_crtc *crtc;
+       struct drm_encoder *encoder;
+       struct drm_connector *connector;
+       int ret;
+
+       if ((ret = drm_mode_group_init(dev, group)))
+               return ret; /* -EINVAL (no objects) or -ENOMEM */
+
+       /* id_list layout: [crtc ids][encoder ids][connector ids] */
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+               group->id_list[group->num_crtcs++] = crtc->base.id;
+
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+               group->id_list[group->num_crtcs + group->num_encoders++] =
+               encoder->base.id;
+
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+               group->id_list[group->num_crtcs + group->num_encoders +
+                              group->num_connectors++] = connector->base.id;
+
+       return 0;
+}
+
+/**
+ * drm_mode_config_cleanup - free up DRM mode_config info
+ * @dev: DRM device
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Free up all the connectors and CRTCs associated with this DRM device, then
+ * free up the framebuffers and associated buffer objects.
+ *
+ * Teardown order: encoders first, then connectors, properties, framebuffers,
+ * and finally CRTCs; each object is released via its own destroy hook.
+ *
+ * FIXME: cleanup any dangling user buffer objects too
+ */
+void drm_mode_config_cleanup(struct drm_device *dev)
+{
+       struct drm_connector *connector, *ot;
+       struct drm_crtc *crtc, *ct;
+       struct drm_encoder *encoder, *enct;
+       struct drm_framebuffer *fb, *fbt;
+       struct drm_property *property, *pt;
+
+       /* _safe iteration: presumably the destroy hooks unlink the entries
+        * from these lists — verify against the drivers' destroy callbacks */
+       list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list,
+                                head) {
+               encoder->funcs->destroy(encoder);
+       }
+
+       list_for_each_entry_safe(connector, ot,
+                                &dev->mode_config.connector_list, head) {
+               connector->funcs->destroy(connector);
+       }
+
+       list_for_each_entry_safe(property, pt, &dev->mode_config.property_list,
+                                head) {
+               drm_property_destroy(dev, property);
+       }
+
+       list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
+               fb->funcs->destroy(fb);
+       }
+
+       list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
+               crtc->funcs->destroy(crtc);
+       }
+
+}
+EXPORT_SYMBOL(drm_mode_config_cleanup);
+
+/**
+ * drm_crtc_convert_to_umode - convert a drm_display_mode into a modeinfo
+ * @out: drm_mode_modeinfo struct to return to the user
+ * @in: drm_display_mode to use
+ *
+ * LOCKING:
+ * None.
+ *
+ * Convert a drm_display_mode into a drm_mode_modeinfo structure to return to
+ * the user.  All timing fields, flags, type and the mode name are copied.
+ */
+void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out,
+                              struct drm_display_mode *in)
+{
+       out->clock = in->clock;
+       out->hdisplay = in->hdisplay;
+       out->hsync_start = in->hsync_start;
+       out->hsync_end = in->hsync_end;
+       out->htotal = in->htotal;
+       out->hskew = in->hskew;
+       out->vdisplay = in->vdisplay;
+       out->vsync_start = in->vsync_start;
+       out->vsync_end = in->vsync_end;
+       out->vtotal = in->vtotal;
+       out->vscan = in->vscan;
+       out->vrefresh = in->vrefresh;
+       out->flags = in->flags;
+       out->type = in->type;
+       strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
+       /* strncpy() does not guarantee NUL termination; force it */
+       out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
+}
+
+/**
+ * drm_crtc_convert_umode - convert a modeinfo into a drm_display_mode
+ * @out: drm_display_mode to fill in
+ * @in: drm_mode_modeinfo supplied by userspace
+ *
+ * LOCKING:
+ * None.
+ *
+ * Convert a drm_mode_modeinfo into a drm_display_mode structure for use by
+ * the kernel.  Inverse of drm_crtc_convert_to_umode().
+ */
+void drm_crtc_convert_umode(struct drm_display_mode *out,
+                           struct drm_mode_modeinfo *in)
+{
+       out->clock = in->clock;
+       out->hdisplay = in->hdisplay;
+       out->hsync_start = in->hsync_start;
+       out->hsync_end = in->hsync_end;
+       out->htotal = in->htotal;
+       out->hskew = in->hskew;
+       out->vdisplay = in->vdisplay;
+       out->vsync_start = in->vsync_start;
+       out->vsync_end = in->vsync_end;
+       out->vtotal = in->vtotal;
+       out->vscan = in->vscan;
+       out->vrefresh = in->vrefresh;
+       out->flags = in->flags;
+       out->type = in->type;
+       strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
+       /* the userspace name may not be terminated; make sure ours is */
+       out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
+}
+
+/**
+ * drm_mode_getresources - get graphics configuration
+ * @dev: DRM device
+ * @data: ioctl argument, a struct drm_mode_card_res
+ * @file_priv: DRM file info
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Construct a set of configuration description structures and return
+ * them to the user, including CRTC, connector and framebuffer configuration.
+ *
+ * Each ID array is only copied out when userspace supplied enough room
+ * (count_* >= actual count); the count fields are always updated, so
+ * userspace can call once to size its buffers and again to fill them.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_getresources(struct drm_device *dev, void *data,
+                         struct drm_file *file_priv)
+{
+       struct drm_mode_card_res *card_res = data;
+       struct list_head *lh;
+       struct drm_framebuffer *fb;
+       struct drm_connector *connector;
+       struct drm_crtc *crtc;
+       struct drm_encoder *encoder;
+       int ret = 0;
+       int connector_count = 0;
+       int crtc_count = 0;
+       int fb_count = 0;
+       int encoder_count = 0;
+       int copied = 0, i;
+       uint32_t __user *fb_id;
+       uint32_t __user *crtc_id;
+       uint32_t __user *connector_id;
+       uint32_t __user *encoder_id;
+       struct drm_mode_group *mode_group;
+
+       mutex_lock(&dev->mode_config.mutex);
+
+       /*
+        * For the non-control nodes we need to limit the list of resources
+        * by IDs in the group list for this node
+        */
+       /* FBs are tracked per-file, so they are always listed in full */
+       list_for_each(lh, &file_priv->fbs)
+               fb_count++;
+
+       mode_group = &file_priv->master->minor->mode_group;
+       if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
+
+               /* control node: count the device-global object lists */
+               list_for_each(lh, &dev->mode_config.crtc_list)
+                       crtc_count++;
+
+               list_for_each(lh, &dev->mode_config.connector_list)
+                       connector_count++;
+
+               list_for_each(lh, &dev->mode_config.encoder_list)
+                       encoder_count++;
+       } else {
+
+               /* other nodes only see the objects in their mode group */
+               crtc_count = mode_group->num_crtcs;
+               connector_count = mode_group->num_connectors;
+               encoder_count = mode_group->num_encoders;
+       }
+
+       card_res->max_height = dev->mode_config.max_height;
+       card_res->min_height = dev->mode_config.min_height;
+       card_res->max_width = dev->mode_config.max_width;
+       card_res->min_width = dev->mode_config.min_width;
+
+       /* handle this in 4 parts */
+       /* FBs */
+       if (card_res->count_fbs >= fb_count) {
+               copied = 0;
+               fb_id = (uint32_t __user *)(unsigned long)card_res->fb_id_ptr;
+               list_for_each_entry(fb, &file_priv->fbs, head) {
+                       if (put_user(fb->base.id, fb_id + copied)) {
+                               ret = -EFAULT;
+                               goto out;
+                       }
+                       copied++;
+               }
+       }
+       card_res->count_fbs = fb_count;
+
+       /* CRTCs */
+       if (card_res->count_crtcs >= crtc_count) {
+               copied = 0;
+               crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr;
+               if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
+                       list_for_each_entry(crtc, &dev->mode_config.crtc_list,
+                                           head) {
+                               DRM_DEBUG_KMS("CRTC ID is %d\n", crtc->base.id);
+                               if (put_user(crtc->base.id, crtc_id + copied)) {
+                                       ret = -EFAULT;
+                                       goto out;
+                               }
+                               copied++;
+                       }
+               } else {
+                       /* group id_list layout: [crtcs][encoders][connectors] */
+                       for (i = 0; i < mode_group->num_crtcs; i++) {
+                               if (put_user(mode_group->id_list[i],
+                                            crtc_id + copied)) {
+                                       ret = -EFAULT;
+                                       goto out;
+                               }
+                               copied++;
+                       }
+               }
+       }
+       card_res->count_crtcs = crtc_count;
+
+       /* Encoders */
+       if (card_res->count_encoders >= encoder_count) {
+               copied = 0;
+               encoder_id = (uint32_t __user *)(unsigned long)card_res->encoder_id_ptr;
+               if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
+                       list_for_each_entry(encoder,
+                                           &dev->mode_config.encoder_list,
+                                           head) {
+                               DRM_DEBUG_KMS("ENCODER ID is %d\n",
+                                         encoder->base.id);
+                               if (put_user(encoder->base.id, encoder_id +
+                                            copied)) {
+                                       ret = -EFAULT;
+                                       goto out;
+                               }
+                               copied++;
+                       }
+               } else {
+                       /* encoder IDs start after the CRTC IDs in id_list */
+                       for (i = mode_group->num_crtcs; i < mode_group->num_crtcs + mode_group->num_encoders; i++) {
+                               if (put_user(mode_group->id_list[i],
+                                            encoder_id + copied)) {
+                                       ret = -EFAULT;
+                                       goto out;
+                               }
+                               copied++;
+                       }
+
+               }
+       }
+       card_res->count_encoders = encoder_count;
+
+       /* Connectors */
+       if (card_res->count_connectors >= connector_count) {
+               copied = 0;
+               connector_id = (uint32_t __user *)(unsigned long)card_res->connector_id_ptr;
+               if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
+                       list_for_each_entry(connector,
+                                           &dev->mode_config.connector_list,
+                                           head) {
+                               DRM_DEBUG_KMS("CONNECTOR ID is %d\n",
+                                         connector->base.id);
+                               if (put_user(connector->base.id,
+                                            connector_id + copied)) {
+                                       ret = -EFAULT;
+                                       goto out;
+                               }
+                               copied++;
+                       }
+               } else {
+                       /* connector IDs follow the CRTC and encoder IDs */
+                       int start = mode_group->num_crtcs +
+                               mode_group->num_encoders;
+                       for (i = start; i < start + mode_group->num_connectors; i++) {
+                               if (put_user(mode_group->id_list[i],
+                                            connector_id + copied)) {
+                                       ret = -EFAULT;
+                                       goto out;
+                               }
+                               copied++;
+                       }
+               }
+       }
+       card_res->count_connectors = connector_count;
+
+       DRM_DEBUG_KMS("Counted %d %d %d\n", card_res->count_crtcs,
+                 card_res->count_connectors, card_res->count_encoders);
+
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+/**
+ * drm_mode_getcrtc - get CRTC configuration
+ * @dev: DRM device
+ * @data: ioctl argument, a struct drm_mode_crtc
+ * @file_priv: DRM file info
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Construct a CRTC configuration structure to return to the user.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, -EINVAL if the CRTC ID is unknown.
+ */
+int drm_mode_getcrtc(struct drm_device *dev,
+                    void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_crtc *crtc_resp = data;
+       struct drm_crtc *crtc;
+       struct drm_mode_object *obj;
+       int ret = 0;
+
+       mutex_lock(&dev->mode_config.mutex);
+
+       obj = drm_mode_object_find(dev, crtc_resp->crtc_id,
+                                  DRM_MODE_OBJECT_CRTC);
+       if (!obj) {
+               ret = -EINVAL;
+               goto out;
+       }
+       crtc = obj_to_crtc(obj);
+
+       crtc_resp->x = crtc->x;
+       crtc_resp->y = crtc->y;
+       crtc_resp->gamma_size = crtc->gamma_size;
+       /* report fb_id 0 when no framebuffer is bound to this CRTC */
+       if (crtc->fb)
+               crtc_resp->fb_id = crtc->fb->base.id;
+       else
+               crtc_resp->fb_id = 0;
+
+       if (crtc->enabled) {
+
+               drm_crtc_convert_to_umode(&crtc_resp->mode, &crtc->mode);
+               crtc_resp->mode_valid = 1;
+
+       } else {
+               crtc_resp->mode_valid = 0;
+       }
+
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+/**
+ * drm_mode_getconnector - get connector configuration
+ * @dev: DRM device
+ * @data: ioctl argument, a struct drm_mode_get_connector
+ * @file_priv: DRM file info
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Construct a connector configuration structure to return to the user,
+ * including the connector's modes, properties and attached encoders.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_getconnector(struct drm_device *dev, void *data,
+                         struct drm_file *file_priv)
+{
+       struct drm_mode_get_connector *out_resp = data;
+       struct drm_mode_object *obj;
+       struct drm_connector *connector;
+       struct drm_display_mode *mode;
+       int mode_count = 0;
+       int props_count = 0;
+       int encoders_count = 0;
+       int ret = 0;
+       int copied = 0;
+       int i;
+       struct drm_mode_modeinfo u_mode;
+       struct drm_mode_modeinfo __user *mode_ptr;
+       uint32_t __user *prop_ptr;
+       uint64_t __user *prop_values;
+       uint32_t __user *encoder_ptr;
+
+       memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo));
+
+       DRM_DEBUG_KMS("connector id %d:\n", out_resp->connector_id);
+
+       mutex_lock(&dev->mode_config.mutex);
+
+       obj = drm_mode_object_find(dev, out_resp->connector_id,
+                                  DRM_MODE_OBJECT_CONNECTOR);
+       if (!obj) {
+               ret = -EINVAL;
+               goto out;
+       }
+       connector = obj_to_connector(obj);
+
+       /* count the populated slots in the fixed-size property table */
+       for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
+               if (connector->property_ids[i] != 0) {
+                       props_count++;
+               }
+       }
+
+       /* likewise for the attached-encoder table */
+       for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+               if (connector->encoder_ids[i] != 0) {
+                       encoders_count++;
+               }
+       }
+
+       /* only (re)probe modes on the sizing pass (count_modes == 0) */
+       if (out_resp->count_modes == 0) {
+               connector->funcs->fill_modes(connector,
+                                            dev->mode_config.max_width,
+                                            dev->mode_config.max_height);
+       }
+
+       /* delayed so we get modes regardless of pre-fill_modes state */
+       list_for_each_entry(mode, &connector->modes, head)
+               mode_count++;
+
+       out_resp->connector_id = connector->base.id;
+       out_resp->connector_type = connector->connector_type;
+       out_resp->connector_type_id = connector->connector_type_id;
+       out_resp->mm_width = connector->display_info.width_mm;
+       out_resp->mm_height = connector->display_info.height_mm;
+       out_resp->subpixel = connector->display_info.subpixel_order;
+       out_resp->connection = connector->status;
+       /* encoder_id 0 means no encoder is currently attached */
+       if (connector->encoder)
+               out_resp->encoder_id = connector->encoder->base.id;
+       else
+               out_resp->encoder_id = 0;
+
+       /*
+        * This ioctl is called twice, once to determine how much space is
+        * needed, and the 2nd time to fill it.
+        */
+       /* NOTE(review): the casts below drop the __user qualifier that the
+        * pointer declarations carry — sparse will warn; harmless at runtime */
+       if ((out_resp->count_modes >= mode_count) && mode_count) {
+               copied = 0;
+               mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
+               list_for_each_entry(mode, &connector->modes, head) {
+                       drm_crtc_convert_to_umode(&u_mode, mode);
+                       if (copy_to_user(mode_ptr + copied,
+                                        &u_mode, sizeof(u_mode))) {
+                               ret = -EFAULT;
+                               goto out;
+                       }
+                       copied++;
+               }
+       }
+       out_resp->count_modes = mode_count;
+
+       if ((out_resp->count_props >= props_count) && props_count) {
+               copied = 0;
+               prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
+               prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
+               for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
+                       if (connector->property_ids[i] != 0) {
+                               if (put_user(connector->property_ids[i],
+                                            prop_ptr + copied)) {
+                                       ret = -EFAULT;
+                                       goto out;
+                               }
+
+                               if (put_user(connector->property_values[i],
+                                            prop_values + copied)) {
+                                       ret = -EFAULT;
+                                       goto out;
+                               }
+                               copied++;
+                       }
+               }
+       }
+       out_resp->count_props = props_count;
+
+       if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
+               copied = 0;
+               encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
+               for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+                       if (connector->encoder_ids[i] != 0) {
+                               if (put_user(connector->encoder_ids[i],
+                                            encoder_ptr + copied)) {
+                                       ret = -EFAULT;
+                                       goto out;
+                               }
+                               copied++;
+                       }
+               }
+       }
+       out_resp->count_encoders = encoders_count;
+
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+/**
+ * drm_mode_getencoder - get encoder configuration
+ * @dev: DRM device
+ * @data: ioctl argument, a struct drm_mode_get_encoder
+ * @file_priv: DRM file info
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Look up the encoder by ID and report its type, currently bound CRTC (0 if
+ * none) and the possible_crtcs/possible_clones masks.
+ *
+ * RETURNS:
+ * Zero on success, -EINVAL if the encoder ID is unknown.
+ */
+int drm_mode_getencoder(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv)
+{
+       struct drm_mode_get_encoder *enc_resp = data;
+       struct drm_mode_object *obj;
+       struct drm_encoder *encoder;
+       int ret = 0;
+
+       mutex_lock(&dev->mode_config.mutex);
+       obj = drm_mode_object_find(dev, enc_resp->encoder_id,
+                                  DRM_MODE_OBJECT_ENCODER);
+       if (!obj) {
+               ret = -EINVAL;
+               goto out;
+       }
+       encoder = obj_to_encoder(obj);
+
+       /* crtc_id 0 means the encoder is not bound to a CRTC */
+       if (encoder->crtc)
+               enc_resp->crtc_id = encoder->crtc->base.id;
+       else
+               enc_resp->crtc_id = 0;
+       enc_resp->encoder_type = encoder->encoder_type;
+       enc_resp->encoder_id = encoder->base.id;
+       enc_resp->possible_crtcs = encoder->possible_crtcs;
+       enc_resp->possible_clones = encoder->possible_clones;
+
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+/**
+ * drm_mode_setcrtc - set CRTC configuration
+ * @dev: DRM device
+ * @data: ioctl argument, a struct drm_mode_crtc
+ * @file_priv: DRM file info
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Build a new CRTC configuration based on user request: looks up the CRTC,
+ * framebuffer and connectors named by the request and hands the assembled
+ * drm_mode_set to the CRTC's set_config hook.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_setcrtc(struct drm_device *dev, void *data,
+                    struct drm_file *file_priv)
+{
+       struct drm_mode_config *config = &dev->mode_config;
+       struct drm_mode_crtc *crtc_req = data;
+       struct drm_mode_object *obj;
+       struct drm_crtc *crtc, *crtcfb;
+       struct drm_connector **connector_set = NULL, *connector;
+       struct drm_framebuffer *fb = NULL;
+       struct drm_display_mode *mode = NULL;
+       struct drm_mode_set set;
+       uint32_t __user *set_connectors_ptr;
+       int ret = 0;
+       int i;
+
+       mutex_lock(&dev->mode_config.mutex);
+       obj = drm_mode_object_find(dev, crtc_req->crtc_id,
+                                  DRM_MODE_OBJECT_CRTC);
+       if (!obj) {
+               DRM_DEBUG_KMS("Unknown CRTC ID %d\n", crtc_req->crtc_id);
+               ret = -EINVAL;
+               goto out;
+       }
+       crtc = obj_to_crtc(obj);
+
+       if (crtc_req->mode_valid) {
+               /* If we have a mode we need a framebuffer. */
+               /* If we pass -1, set the mode with the currently bound fb */
+               if (crtc_req->fb_id == -1) {
+                       list_for_each_entry(crtcfb,
+                                           &dev->mode_config.crtc_list, head) {
+                               if (crtcfb == crtc) {
+                                       DRM_DEBUG_KMS("Using current fb for "
+                                                       "setmode\n");
+                                       fb = crtc->fb;
+                               }
+                       }
+               } else {
+                       obj = drm_mode_object_find(dev, crtc_req->fb_id,
+                                                  DRM_MODE_OBJECT_FB);
+                       if (!obj) {
+                               DRM_DEBUG_KMS("Unknown FB ID%d\n",
+                                               crtc_req->fb_id);
+                               ret = -EINVAL;
+                               goto out;
+                       }
+                       fb = obj_to_fb(obj);
+               }
+
+               mode = drm_mode_create(dev);
+               if (!mode) {
+                       /* drm_mode_create() allocates; without this check an
+                        * OOM here would oops in drm_crtc_convert_umode() */
+                       ret = -ENOMEM;
+                       goto out;
+               }
+               drm_crtc_convert_umode(mode, &crtc_req->mode);
+               drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+       }
+
+       if (crtc_req->count_connectors == 0 && mode) {
+               DRM_DEBUG_KMS("Count connectors is 0 but mode set\n");
+               ret = -EINVAL;
+               goto out;
+       }
+
+       if (crtc_req->count_connectors > 0 && (!mode || !fb)) {
+               DRM_DEBUG_KMS("Count connectors is %d but no mode or fb set\n",
+                         crtc_req->count_connectors);
+               ret = -EINVAL;
+               goto out;
+       }
+
+       if (crtc_req->count_connectors > 0) {
+               u32 out_id;
+
+               /* Avoid unbounded kernel memory allocation */
+               if (crtc_req->count_connectors > config->num_connector) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+
+               connector_set = kmalloc(crtc_req->count_connectors *
+                                       sizeof(struct drm_connector *),
+                                       GFP_KERNEL);
+               if (!connector_set) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
+
+               for (i = 0; i < crtc_req->count_connectors; i++) {
+                       set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
+                       if (get_user(out_id, &set_connectors_ptr[i])) {
+                               ret = -EFAULT;
+                               goto out;
+                       }
+
+                       obj = drm_mode_object_find(dev, out_id,
+                                                  DRM_MODE_OBJECT_CONNECTOR);
+                       if (!obj) {
+                               DRM_DEBUG_KMS("Connector id %d unknown\n",
+                                               out_id);
+                               ret = -EINVAL;
+                               goto out;
+                       }
+                       connector = obj_to_connector(obj);
+
+                       connector_set[i] = connector;
+               }
+       }
+
+       set.crtc = crtc;
+       set.x = crtc_req->x;
+       set.y = crtc_req->y;
+       set.mode = mode;
+       set.connectors = connector_set;
+       set.num_connectors = crtc_req->count_connectors;
+       set.fb = fb;
+       ret = crtc->funcs->set_config(&set);
+
+out:
+       kfree(connector_set);
+       /* NOTE(review): 'mode' is not destroyed on the error paths after it
+        * is created; presumably set_config duplicates or takes ownership —
+        * verify against the crtc helper, otherwise modes leak here. */
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+/**
+ * drm_mode_cursor_ioctl - set or move a CRTC's hardware cursor
+ * @dev: DRM device
+ * @data: ioctl argument, a struct drm_mode_cursor
+ * @file_priv: DRM file info
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Depending on req->flags, bind a new cursor buffer (DRM_MODE_CURSOR_BO;
+ * handle 0 turns the cursor off) and/or move the cursor
+ * (DRM_MODE_CURSOR_MOVE) via the CRTC's cursor hooks.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_cursor_ioctl(struct drm_device *dev,
+                       void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_cursor *req = data;
+       struct drm_mode_object *obj;
+       struct drm_crtc *crtc;
+       int ret = 0;
+
+       /* at least one of the BO/MOVE flags must be set */
+       if (!req->flags) {
+               DRM_ERROR("no operation set\n");
+               return -EINVAL;
+       }
+
+       mutex_lock(&dev->mode_config.mutex);
+       obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC);
+       if (!obj) {
+               DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id);
+               ret = -EINVAL;
+               goto out;
+       }
+       crtc = obj_to_crtc(obj);
+
+       if (req->flags & DRM_MODE_CURSOR_BO) {
+               if (!crtc->funcs->cursor_set) {
+                       DRM_ERROR("crtc does not support cursor\n");
+                       ret = -ENXIO;
+                       goto out;
+               }
+               /* Turns off the cursor if handle is 0 */
+               ret = crtc->funcs->cursor_set(crtc, file_priv, req->handle,
+                                             req->width, req->height);
+       }
+
+       if (req->flags & DRM_MODE_CURSOR_MOVE) {
+               if (crtc->funcs->cursor_move) {
+                       ret = crtc->funcs->cursor_move(crtc, req->x, req->y);
+               } else {
+                       DRM_ERROR("crtc does not support cursor\n");
+                       ret = -EFAULT;
+                       goto out;
+               }
+       }
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+/**
+ * drm_mode_addfb - add an FB to the graphics configuration
+ * @dev: DRM device
+ * @data: ioctl argument, a struct drm_mode_fb_cmd
+ * @file_priv: DRM file info
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Validate the requested dimensions against the device limits, create a new
+ * framebuffer via the driver's fb_create hook and register it with the
+ * calling file (so it is cleaned up on file close).
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_addfb(struct drm_device *dev,
+                  void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_fb_cmd *r = data;
+       struct drm_mode_config *config = &dev->mode_config;
+       struct drm_framebuffer *fb;
+       int ret = 0;
+
+       /* reject sizes outside the driver-advertised min/max window */
+       if ((config->min_width > r->width) || (r->width > config->max_width)) {
+               DRM_ERROR("mode new framebuffer width not within limits\n");
+               return -EINVAL;
+       }
+       if ((config->min_height > r->height) || (r->height > config->max_height)) {
+               DRM_ERROR("mode new framebuffer height not within limits\n");
+               return -EINVAL;
+       }
+
+       mutex_lock(&dev->mode_config.mutex);
+
+       /* TODO check buffer is sufficently large */
+       /* TODO setup destructor callback */
+
+       /* NOTE(review): this assumes fb_create returns NULL on failure;
+        * some implementations return ERR_PTR — verify the driver hook */
+       fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
+       if (!fb) {
+               DRM_ERROR("could not create framebuffer\n");
+               ret = -EINVAL;
+               goto out;
+       }
+
+       /* hand the new ID back and tie the fb's lifetime to this file */
+       r->fb_id = fb->base.id;
+       list_add(&fb->filp_head, &file_priv->fbs);
+
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+/**
+ * drm_mode_rmfb - remove an FB from the configuration
+ * @dev: DRM device
+ * @data: ioctl argument, a pointer to the framebuffer ID
+ * @file_priv: DRM file info
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Remove the FB specified by the user.  Only framebuffers owned by the
+ * calling file may be removed.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_rmfb(struct drm_device *dev,
+                  void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_object *obj;
+       struct drm_framebuffer *fb = NULL;
+       struct drm_framebuffer *fbl = NULL;
+       uint32_t *id = data;
+       int ret = 0;
+       int found = 0;
+
+       mutex_lock(&dev->mode_config.mutex);
+       obj = drm_mode_object_find(dev, *id, DRM_MODE_OBJECT_FB);
+       /* TODO check that we realy get a framebuffer back. */
+       if (!obj) {
+               DRM_ERROR("mode invalid framebuffer id\n");
+               ret = -EINVAL;
+               goto out;
+       }
+       fb = obj_to_fb(obj);
+
+       /* ownership check: the fb must be on this file's private list */
+       list_for_each_entry(fbl, &file_priv->fbs, filp_head)
+               if (fb == fbl)
+                       found = 1;
+
+       if (!found) {
+               DRM_ERROR("tried to remove a fb that we didn't own\n");
+               ret = -EINVAL;
+               goto out;
+       }
+
+       /* TODO release all crtc connected to the framebuffer */
+       /* TODO unhock the destructor from the buffer object */
+
+       list_del(&fb->filp_head);
+       fb->funcs->destroy(fb);
+
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+/**
+ * drm_mode_getfb - get FB info
+ * @dev: DRM device
+ * @data: ioctl argument, a struct drm_mode_fb_cmd
+ * @file_priv: DRM file info
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Lookup the FB given its ID and return info about it.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, -EINVAL if the framebuffer ID is unknown.
+ */
+int drm_mode_getfb(struct drm_device *dev,
+                  void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_fb_cmd *r = data;
+       struct drm_mode_object *obj;
+       struct drm_framebuffer *fb;
+       int ret = 0;
+
+       mutex_lock(&dev->mode_config.mutex);
+       obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
+       if (!obj) {
+               DRM_ERROR("invalid framebuffer id\n");
+               ret = -EINVAL;
+               goto out;
+       }
+       fb = obj_to_fb(obj);
+
+       r->height = fb->height;
+       r->width = fb->width;
+       r->depth = fb->depth;
+       r->bpp = fb->bits_per_pixel;
+       r->pitch = fb->pitch;
+       /* NOTE(review): create_handle's return value is ignored here, so
+        * r->handle may be stale if the hook fails — confirm it cannot */
+       fb->funcs->create_handle(fb, file_priv, &r->handle);
+
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+/**
+ * drm_mode_dirtyfb_ioctl - flush frontbuffer rendering on an FB
+ * @dev: DRM device
+ * @data: ioctl argument, a struct drm_mode_fb_dirty_cmd
+ * @file_priv: DRM file info
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Look up the framebuffer, copy the optional clip rectangles in from
+ * userspace and hand them to the driver's dirty() hook.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure (-ENOSYS if the driver has no
+ * dirty() hook).
+ */
+int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
+                          void *data, struct drm_file *file_priv)
+{
+       struct drm_clip_rect __user *clips_ptr;
+       struct drm_clip_rect *clips = NULL;
+       struct drm_mode_fb_dirty_cmd *r = data;
+       struct drm_mode_object *obj;
+       struct drm_framebuffer *fb;
+       unsigned flags;
+       int num_clips;
+       int ret = 0;
+
+       mutex_lock(&dev->mode_config.mutex);
+       obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
+       if (!obj) {
+               DRM_ERROR("invalid framebuffer id\n");
+               ret = -EINVAL;
+               goto out_err1;
+       }
+       fb = obj_to_fb(obj);
+
+       num_clips = r->num_clips;
+       clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
+
+       /* a clip count without a pointer (or vice versa) is invalid */
+       if (!num_clips != !clips_ptr) {
+               ret = -EINVAL;
+               goto out_err1;
+       }
+
+       flags = DRM_MODE_FB_DIRTY_FLAGS & r->flags;
+
+       /* If userspace annotates copy, clips must come in pairs */
+       if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY && (num_clips % 2)) {
+               ret = -EINVAL;
+               goto out_err1;
+       }
+
+       if (num_clips && clips_ptr) {
+               /* NOTE(review): num_clips comes straight from userspace;
+                * presumably bounded elsewhere — verify, else the size
+                * multiplication below can overflow */
+               clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
+               if (!clips) {
+                       ret = -ENOMEM;
+                       goto out_err1;
+               }
+
+               /* copy_from_user() returns the number of bytes NOT copied,
+                * not an errno — the old code returned that positive count
+                * to userspace on a faulting pointer instead of -EFAULT */
+               if (copy_from_user(clips, clips_ptr,
+                                  num_clips * sizeof(*clips))) {
+                       ret = -EFAULT;
+                       goto out_err2;
+               }
+       }
+
+       if (fb->funcs->dirty) {
+               ret = fb->funcs->dirty(fb, flags, r->color, clips, num_clips);
+       } else {
+               ret = -ENOSYS;
+               goto out_err2;
+       }
+
+out_err2:
+       kfree(clips);
+out_err1:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+
/**
 * drm_fb_release - remove and free the FBs on this file
 * @priv: drm file whose framebuffers should be destroyed
 *
 * LOCKING:
 * Takes mode config lock.
 *
 * Destroy all the FBs associated with @priv. Called on file release,
 * not via ioctl. Returns nothing.
 */
void drm_fb_release(struct drm_file *priv)
{
	struct drm_device *dev = priv->minor->dev;
	struct drm_framebuffer *fb, *tfb;

	mutex_lock(&dev->mode_config.mutex);
	/* _safe walk: ->destroy() frees the fb node we are standing on */
	list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
		list_del(&fb->filp_head);
		fb->funcs->destroy(fb);
	}
	mutex_unlock(&dev->mode_config.mutex);
}
+
+/**
+ * drm_mode_attachmode - add a mode to the user mode list
+ * @dev: DRM device
+ * @connector: connector to add the mode to
+ * @mode: mode to add
+ *
+ * Add @mode to @connector's user mode list.
+ */
+static int drm_mode_attachmode(struct drm_device *dev,
+                              struct drm_connector *connector,
+                              struct drm_display_mode *mode)
+{
+       int ret = 0;
+
+       list_add_tail(&mode->head, &connector->user_modes);
+       return ret;
+}
+
+int drm_mode_attachmode_crtc(struct drm_device *dev, struct drm_crtc *crtc,
+                            struct drm_display_mode *mode)
+{
+       struct drm_connector *connector;
+       int ret = 0;
+       struct drm_display_mode *dup_mode;
+       int need_dup = 0;
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               if (!connector->encoder)
+                       break;
+               if (connector->encoder->crtc == crtc) {
+                       if (need_dup)
+                               dup_mode = drm_mode_duplicate(dev, mode);
+                       else
+                               dup_mode = mode;
+                       ret = drm_mode_attachmode(dev, connector, dup_mode);
+                       if (ret)
+                               return ret;
+                       need_dup = 1;
+               }
+       }
+       return 0;
+}
+EXPORT_SYMBOL(drm_mode_attachmode_crtc);
+
+static int drm_mode_detachmode(struct drm_device *dev,
+                              struct drm_connector *connector,
+                              struct drm_display_mode *mode)
+{
+       int found = 0;
+       int ret = 0;
+       struct drm_display_mode *match_mode, *t;
+
+       list_for_each_entry_safe(match_mode, t, &connector->user_modes, head) {
+               if (drm_mode_equal(match_mode, mode)) {
+                       list_del(&match_mode->head);
+                       drm_mode_destroy(dev, match_mode);
+                       found = 1;
+                       break;
+               }
+       }
+
+       if (!found)
+               ret = -EINVAL;
+
+       return ret;
+}
+
+int drm_mode_detachmode_crtc(struct drm_device *dev, struct drm_display_mode *mode)
+{
+       struct drm_connector *connector;
+
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               drm_mode_detachmode(dev, connector, mode);
+       }
+       return 0;
+}
+EXPORT_SYMBOL(drm_mode_detachmode_crtc);
+
/**
 * drm_mode_attachmode_ioctl - attach a user mode to a connector
 * @dev: DRM device
 * @data: ioctl argument (struct drm_mode_mode_cmd)
 * @file_priv: DRM file info
 *
 * This attaches a user specified mode to a connector.
 * Called by the user via ioctl.
 *
 * RETURNS:
 * Zero on success, errno on failure.
 */
int drm_mode_attachmode_ioctl(struct drm_device *dev,
			      void *data, struct drm_file *file_priv)
{
	struct drm_mode_mode_cmd *mode_cmd = data;
	struct drm_connector *connector;
	struct drm_display_mode *mode;
	struct drm_mode_object *obj;
	struct drm_mode_modeinfo *umode = &mode_cmd->mode;
	int ret = 0;

	mutex_lock(&dev->mode_config.mutex);

	obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR);
	if (!obj) {
		ret = -EINVAL;
		goto out;
	}
	connector = obj_to_connector(obj);

	/* allocate a kernel-side mode; ownership passes to the user_modes
	 * list on successful attach */
	mode = drm_mode_create(dev);
	if (!mode) {
		ret = -ENOMEM;
		goto out;
	}

	drm_crtc_convert_umode(mode, umode);

	ret = drm_mode_attachmode(dev, connector, mode);
out:
	mutex_unlock(&dev->mode_config.mutex);
	return ret;
}
+
+
/**
 * drm_mode_detachmode_ioctl - detach a user specified mode from a connector
 * @dev: DRM device
 * @data: ioctl argument (struct drm_mode_mode_cmd)
 * @file_priv: DRM file info
 *
 * Called by the user via ioctl.
 *
 * RETURNS:
 * Zero on success, errno on failure.
 */
int drm_mode_detachmode_ioctl(struct drm_device *dev,
			      void *data, struct drm_file *file_priv)
{
	struct drm_mode_object *obj;
	struct drm_mode_mode_cmd *mode_cmd = data;
	struct drm_connector *connector;
	struct drm_display_mode mode;
	struct drm_mode_modeinfo *umode = &mode_cmd->mode;
	int ret = 0;

	mutex_lock(&dev->mode_config.mutex);

	obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR);
	if (!obj) {
		ret = -EINVAL;
		goto out;
	}
	connector = obj_to_connector(obj);

	/* a stack copy suffices: detach only compares, it never stores it */
	drm_crtc_convert_umode(&mode, umode);
	ret = drm_mode_detachmode(dev, connector, &mode);
out:
	mutex_unlock(&dev->mode_config.mutex);
	return ret;
}
+
+struct drm_property *drm_property_create(struct drm_device *dev, int flags,
+                                        const char *name, int num_values)
+{
+       struct drm_property *property = NULL;
+
+       property = kzalloc(sizeof(struct drm_property), GFP_KERNEL);
+       if (!property)
+               return NULL;
+
+       if (num_values) {
+               property->values = kzalloc(sizeof(uint64_t)*num_values, GFP_KERNEL);
+               if (!property->values)
+                       goto fail;
+       }
+
+       drm_mode_object_get(dev, &property->base, DRM_MODE_OBJECT_PROPERTY);
+       property->flags = flags;
+       property->num_values = num_values;
+       INIT_LIST_HEAD(&property->enum_blob_list);
+
+       if (name)
+               strncpy(property->name, name, DRM_PROP_NAME_LEN);
+
+       list_add_tail(&property->head, &dev->mode_config.property_list);
+       return property;
+fail:
+       kfree(property);
+       return NULL;
+}
+EXPORT_SYMBOL(drm_property_create);
+
+int drm_property_add_enum(struct drm_property *property, int index,
+                         uint64_t value, const char *name)
+{
+       struct drm_property_enum *prop_enum;
+
+       if (!(property->flags & DRM_MODE_PROP_ENUM))
+               return -EINVAL;
+
+       if (!list_empty(&property->enum_blob_list)) {
+               list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
+                       if (prop_enum->value == value) {
+                               strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
+                               prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
+                               return 0;
+                       }
+               }
+       }
+
+       prop_enum = kzalloc(sizeof(struct drm_property_enum), GFP_KERNEL);
+       if (!prop_enum)
+               return -ENOMEM;
+
+       strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
+       prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
+       prop_enum->value = value;
+
+       property->values[index] = value;
+       list_add_tail(&prop_enum->head, &property->enum_blob_list);
+       return 0;
+}
+EXPORT_SYMBOL(drm_property_add_enum);
+
+void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
+{
+       struct drm_property_enum *prop_enum, *pt;
+
+       list_for_each_entry_safe(prop_enum, pt, &property->enum_blob_list, head) {
+               list_del(&prop_enum->head);
+               kfree(prop_enum);
+       }
+
+       if (property->num_values)
+               kfree(property->values);
+       drm_mode_object_put(dev, &property->base);
+       list_del(&property->head);
+       kfree(property);
+}
+EXPORT_SYMBOL(drm_property_destroy);
+
+int drm_connector_attach_property(struct drm_connector *connector,
+                              struct drm_property *property, uint64_t init_val)
+{
+       int i;
+
+       for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
+               if (connector->property_ids[i] == 0) {
+                       connector->property_ids[i] = property->base.id;
+                       connector->property_values[i] = init_val;
+                       break;
+               }
+       }
+
+       if (i == DRM_CONNECTOR_MAX_PROPERTY)
+               return -EINVAL;
+       return 0;
+}
+EXPORT_SYMBOL(drm_connector_attach_property);
+
+int drm_connector_property_set_value(struct drm_connector *connector,
+                                 struct drm_property *property, uint64_t value)
+{
+       int i;
+
+       for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
+               if (connector->property_ids[i] == property->base.id) {
+                       connector->property_values[i] = value;
+                       break;
+               }
+       }
+
+       if (i == DRM_CONNECTOR_MAX_PROPERTY)
+               return -EINVAL;
+       return 0;
+}
+EXPORT_SYMBOL(drm_connector_property_set_value);
+
+int drm_connector_property_get_value(struct drm_connector *connector,
+                                 struct drm_property *property, uint64_t *val)
+{
+       int i;
+
+       for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
+               if (connector->property_ids[i] == property->base.id) {
+                       *val = connector->property_values[i];
+                       break;
+               }
+       }
+
+       if (i == DRM_CONNECTOR_MAX_PROPERTY)
+               return -EINVAL;
+       return 0;
+}
+EXPORT_SYMBOL(drm_connector_property_get_value);
+
/* Copy a property's metadata, values, and enum/blob tables to userspace.
 * Follows the usual two-pass ioctl contract: when the caller's buffers
 * are too small, only the real counts are returned so it can retry. */
int drm_mode_getproperty_ioctl(struct drm_device *dev,
			       void *data, struct drm_file *file_priv)
{
	struct drm_mode_object *obj;
	struct drm_mode_get_property *out_resp = data;
	struct drm_property *property;
	int enum_count = 0;
	int blob_count = 0;
	int value_count = 0;
	int ret = 0, i;
	int copied;
	struct drm_property_enum *prop_enum;
	struct drm_mode_property_enum __user *enum_ptr;
	struct drm_property_blob *prop_blob;
	uint32_t *blob_id_ptr;
	uint64_t __user *values_ptr;
	uint32_t __user *blob_length_ptr;

	mutex_lock(&dev->mode_config.mutex);
	obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
	if (!obj) {
		ret = -EINVAL;
		goto done;
	}
	property = obj_to_property(obj);

	/* enum_blob_list holds enums OR blobs depending on property type */
	if (property->flags & DRM_MODE_PROP_ENUM) {
		list_for_each_entry(prop_enum, &property->enum_blob_list, head)
			enum_count++;
	} else if (property->flags & DRM_MODE_PROP_BLOB) {
		list_for_each_entry(prop_blob, &property->enum_blob_list, head)
			blob_count++;
	}

	value_count = property->num_values;

	strncpy(out_resp->name, property->name, DRM_PROP_NAME_LEN);
	out_resp->name[DRM_PROP_NAME_LEN-1] = 0;
	out_resp->flags = property->flags;

	/* copy the value table only when the caller provided enough room */
	if ((out_resp->count_values >= value_count) && value_count) {
		values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
		for (i = 0; i < value_count; i++) {
			if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
				ret = -EFAULT;
				goto done;
			}
		}
	}
	out_resp->count_values = value_count;

	if (property->flags & DRM_MODE_PROP_ENUM) {
		if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
			copied = 0;
			enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
			list_for_each_entry(prop_enum, &property->enum_blob_list, head) {

				if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
					ret = -EFAULT;
					goto done;
				}

				if (copy_to_user(&enum_ptr[copied].name,
						 &prop_enum->name, DRM_PROP_NAME_LEN)) {
					ret = -EFAULT;
					goto done;
				}
				copied++;
			}
		}
		out_resp->count_enum_blobs = enum_count;
	}

	if (property->flags & DRM_MODE_PROP_BLOB) {
		/* for blob properties the ids go out via enum_blob_ptr and
		 * the lengths reuse values_ptr */
		if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
			copied = 0;
			blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
			blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;

			list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
				if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
					ret = -EFAULT;
					goto done;
				}

				if (put_user(prop_blob->length, blob_length_ptr + copied)) {
					ret = -EFAULT;
					goto done;
				}

				copied++;
			}
		}
		out_resp->count_enum_blobs = blob_count;
	}
done:
	mutex_unlock(&dev->mode_config.mutex);
	return ret;
}
+
+static struct drm_property_blob *drm_property_create_blob(struct drm_device *dev, int length,
+                                                         void *data)
+{
+       struct drm_property_blob *blob;
+
+       if (!length || !data)
+               return NULL;
+
+       blob = kzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL);
+       if (!blob)
+               return NULL;
+
+       blob->data = (void *)((char *)blob + sizeof(struct drm_property_blob));
+       blob->length = length;
+
+       memcpy(blob->data, data, length);
+
+       drm_mode_object_get(dev, &blob->base, DRM_MODE_OBJECT_BLOB);
+
+       list_add_tail(&blob->head, &dev->mode_config.property_blob_list);
+       return blob;
+}
+
+static void drm_property_destroy_blob(struct drm_device *dev,
+                              struct drm_property_blob *blob)
+{
+       drm_mode_object_put(dev, &blob->base);
+       list_del(&blob->head);
+       kfree(blob);
+}
+
+int drm_mode_getblob_ioctl(struct drm_device *dev,
+                          void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_object *obj;
+       struct drm_mode_get_blob *out_resp = data;
+       struct drm_property_blob *blob;
+       int ret = 0;
+       void *blob_ptr;
+
+       mutex_lock(&dev->mode_config.mutex);
+       obj = drm_mode_object_find(dev, out_resp->blob_id, DRM_MODE_OBJECT_BLOB);
+       if (!obj) {
+               ret = -EINVAL;
+               goto done;
+       }
+       blob = obj_to_blob(obj);
+
+       if (out_resp->length == blob->length) {
+               blob_ptr = (void *)(unsigned long)out_resp->data;
+               if (copy_to_user(blob_ptr, blob->data, blob->length)){
+                       ret = -EFAULT;
+                       goto done;
+               }
+       }
+       out_resp->length = blob->length;
+
+done:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+int drm_mode_connector_update_edid_property(struct drm_connector *connector,
+                                           struct edid *edid)
+{
+       struct drm_device *dev = connector->dev;
+       int ret = 0;
+
+       if (connector->edid_blob_ptr)
+               drm_property_destroy_blob(dev, connector->edid_blob_ptr);
+
+       /* Delete edid, when there is none. */
+       if (!edid) {
+               connector->edid_blob_ptr = NULL;
+               ret = drm_connector_property_set_value(connector, dev->mode_config.edid_property, 0);
+               return ret;
+       }
+
+       connector->edid_blob_ptr = drm_property_create_blob(connector->dev, 128, edid);
+
+       ret = drm_connector_property_set_value(connector,
+                                              dev->mode_config.edid_property,
+                                              connector->edid_blob_ptr->base.id);
+
+       return ret;
+}
+EXPORT_SYMBOL(drm_mode_connector_update_edid_property);
+
/* Set a property on a connector from userspace. DPMS is handled by the
 * core; everything else is delegated to the connector's set_property. */
int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
				       void *data, struct drm_file *file_priv)
{
	struct drm_mode_connector_set_property *out_resp = data;
	struct drm_mode_object *obj;
	struct drm_property *property;
	struct drm_connector *connector;
	int ret = -EINVAL;
	int i;

	mutex_lock(&dev->mode_config.mutex);

	obj = drm_mode_object_find(dev, out_resp->connector_id, DRM_MODE_OBJECT_CONNECTOR);
	if (!obj) {
		goto out;
	}
	connector = obj_to_connector(obj);

	/* the property must actually be attached to this connector */
	for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
		if (connector->property_ids[i] == out_resp->prop_id)
			break;
	}

	if (i == DRM_CONNECTOR_MAX_PROPERTY) {
		goto out;
	}

	obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
	if (!obj) {
		goto out;
	}
	property = obj_to_property(obj);

	if (property->flags & DRM_MODE_PROP_IMMUTABLE)
		goto out;

	/* validate the new value: values[0]/values[1] are min/max for a
	 * range property, otherwise the value must be one of the entries */
	if (property->flags & DRM_MODE_PROP_RANGE) {
		if (out_resp->value < property->values[0])
			goto out;

		if (out_resp->value > property->values[1])
			goto out;
	} else {
		int found = 0;
		for (i = 0; i < property->num_values; i++) {
			if (property->values[i] == out_resp->value) {
				found = 1;
				break;
			}
		}
		if (!found) {
			goto out;
		}
	}

	/* Do DPMS ourselves */
	if (property == connector->dev->mode_config.dpms_property) {
		if (connector->funcs->dpms)
			(*connector->funcs->dpms)(connector, (int) out_resp->value);
		ret = 0;
	} else if (connector->funcs->set_property)
		ret = connector->funcs->set_property(connector, property, out_resp->value);

	/* store the property value if successful */
	if (!ret)
		drm_connector_property_set_value(connector, property, out_resp->value);
out:
	mutex_unlock(&dev->mode_config.mutex);
	return ret;
}
+
+int drm_mode_connector_attach_encoder(struct drm_connector *connector,
+                                     struct drm_encoder *encoder)
+{
+       int i;
+
+       for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+               if (connector->encoder_ids[i] == 0) {
+                       connector->encoder_ids[i] = encoder->base.id;
+                       return 0;
+               }
+       }
+       return -ENOMEM;
+}
+EXPORT_SYMBOL(drm_mode_connector_attach_encoder);
+
+void drm_mode_connector_detach_encoder(struct drm_connector *connector,
+                                   struct drm_encoder *encoder)
+{
+       int i;
+       for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+               if (connector->encoder_ids[i] == encoder->base.id) {
+                       connector->encoder_ids[i] = 0;
+                       if (connector->encoder == encoder)
+                               connector->encoder = NULL;
+                       break;
+               }
+       }
+}
+EXPORT_SYMBOL(drm_mode_connector_detach_encoder);
+
+bool drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
+                                 int gamma_size)
+{
+       crtc->gamma_size = gamma_size;
+
+       crtc->gamma_store = kzalloc(gamma_size * sizeof(uint16_t) * 3, GFP_KERNEL);
+       if (!crtc->gamma_store) {
+               crtc->gamma_size = 0;
+               return false;
+       }
+
+       return true;
+}
+EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size);
+
+int drm_mode_gamma_set_ioctl(struct drm_device *dev,
+                            void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_crtc_lut *crtc_lut = data;
+       struct drm_mode_object *obj;
+       struct drm_crtc *crtc;
+       void *r_base, *g_base, *b_base;
+       int size;
+       int ret = 0;
+
+       mutex_lock(&dev->mode_config.mutex);
+       obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
+       if (!obj) {
+               ret = -EINVAL;
+               goto out;
+       }
+       crtc = obj_to_crtc(obj);
+
+       /* memcpy into gamma store */
+       if (crtc_lut->gamma_size != crtc->gamma_size) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       size = crtc_lut->gamma_size * (sizeof(uint16_t));
+       r_base = crtc->gamma_store;
+       if (copy_from_user(r_base, (void __user *)(unsigned long)crtc_lut->red, size)) {
+               ret = -EFAULT;
+               goto out;
+       }
+
+       g_base = r_base + size;
+       if (copy_from_user(g_base, (void __user *)(unsigned long)crtc_lut->green, size)) {
+               ret = -EFAULT;
+               goto out;
+       }
+
+       b_base = g_base + size;
+       if (copy_from_user(b_base, (void __user *)(unsigned long)crtc_lut->blue, size)) {
+               ret = -EFAULT;
+               goto out;
+       }
+
+       crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, crtc->gamma_size);
+
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+
+}
+
+int drm_mode_gamma_get_ioctl(struct drm_device *dev,
+                            void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_crtc_lut *crtc_lut = data;
+       struct drm_mode_object *obj;
+       struct drm_crtc *crtc;
+       void *r_base, *g_base, *b_base;
+       int size;
+       int ret = 0;
+
+       mutex_lock(&dev->mode_config.mutex);
+       obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
+       if (!obj) {
+               ret = -EINVAL;
+               goto out;
+       }
+       crtc = obj_to_crtc(obj);
+
+       /* memcpy into gamma store */
+       if (crtc_lut->gamma_size != crtc->gamma_size) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       size = crtc_lut->gamma_size * (sizeof(uint16_t));
+       r_base = crtc->gamma_store;
+       if (copy_to_user((void __user *)(unsigned long)crtc_lut->red, r_base, size)) {
+               ret = -EFAULT;
+               goto out;
+       }
+
+       g_base = r_base + size;
+       if (copy_to_user((void __user *)(unsigned long)crtc_lut->green, g_base, size)) {
+               ret = -EFAULT;
+               goto out;
+       }
+
+       b_base = g_base + size;
+       if (copy_to_user((void __user *)(unsigned long)crtc_lut->blue, b_base, size)) {
+               ret = -EFAULT;
+               goto out;
+       }
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+int drm_mode_page_flip_ioctl(struct drm_device *dev,
+                            void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_crtc_page_flip *page_flip = data;
+       struct drm_mode_object *obj;
+       struct drm_crtc *crtc;
+       struct drm_framebuffer *fb;
+       struct drm_pending_vblank_event *e = NULL;
+       unsigned long flags;
+       int ret = -EINVAL;
+
+       if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
+           page_flip->reserved != 0)
+               return -EINVAL;
+
+       mutex_lock(&dev->mode_config.mutex);
+       obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC);
+       if (!obj)
+               goto out;
+       crtc = obj_to_crtc(obj);
+
+       if (crtc->funcs->page_flip == NULL)
+               goto out;
+
+       obj = drm_mode_object_find(dev, page_flip->fb_id, DRM_MODE_OBJECT_FB);
+       if (!obj)
+               goto out;
+       fb = obj_to_fb(obj);
+
+       if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
+               ret = -ENOMEM;
+               spin_lock_irqsave(&dev->event_lock, flags);
+               if (file_priv->event_space < sizeof e->event) {
+                       spin_unlock_irqrestore(&dev->event_lock, flags);
+                       goto out;
+               }
+               file_priv->event_space -= sizeof e->event;
+               spin_unlock_irqrestore(&dev->event_lock, flags);
+
+               e = kzalloc(sizeof *e, GFP_KERNEL);
+               if (e == NULL) {
+                       spin_lock_irqsave(&dev->event_lock, flags);
+                       file_priv->event_space += sizeof e->event;
+                       spin_unlock_irqrestore(&dev->event_lock, flags);
+                       goto out;
+               }
+
+               e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
+               e->event.base.length = sizeof e->event;
+               e->event.user_data = page_flip->user_data;
+               e->base.event = &e->event.base;
+               e->base.file_priv = file_priv;
+               e->base.destroy =
+                       (void (*) (struct drm_pending_event *)) kfree;
+       }
+
+       ret = crtc->funcs->page_flip(crtc, fb, e);
+       if (ret) {
+               spin_lock_irqsave(&dev->event_lock, flags);
+               file_priv->event_space += sizeof e->event;
+               spin_unlock_irqrestore(&dev->event_lock, flags);
+               kfree(e);
+       }
+
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_debugfs.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_debugfs.c
new file mode 100644 (file)
index 0000000..9903f27
--- /dev/null
@@ -0,0 +1,236 @@
+/**
+ * \file drm_debugfs.c
+ * debugfs support for DRM
+ *
+ * \author Ben Gamari <bgamari@gmail.com>
+ */
+
+/*
+ * Created: Sun Dec 21 13:08:50 2008 by bgamari@gmail.com
+ *
+ * Copyright 2008 Ben Gamari <bgamari@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include "drmP.h"
+
+#if defined(CONFIG_DEBUG_FS)
+
+/***************************************************
+ * Initialization, etc.
+ **************************************************/
+
/* Core debugfs entries exposed for every DRM minor; the GEM entries are
 * created only for drivers advertising DRIVER_GEM. */
static struct drm_info_list drm_debugfs_list[] = {
	{"name", drm_name_info, 0},
	{"vm", drm_vm_info, 0},
	{"clients", drm_clients_info, 0},
	{"queues", drm_queues_info, 0},
	{"bufs", drm_bufs_info, 0},
	{"gem_names", drm_gem_name_info, DRIVER_GEM},
	{"gem_objects", drm_gem_object_info, DRIVER_GEM},
#if DRM_DEBUG_CODE
	{"vma", drm_vma_info, 0},
#endif
};
/* number of entries in drm_debugfs_list */
#define DRM_DEBUGFS_ENTRIES ARRAY_SIZE(drm_debugfs_list)
+
+
+static int drm_debugfs_open(struct inode *inode, struct file *file)
+{
+       struct drm_info_node *node = inode->i_private;
+
+       return single_open(file, node->info_ent->show, node);
+}
+
+
/* All DRM debugfs files are read-only single-shot seq_files. */
static const struct file_operations drm_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = drm_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
+
+
+/**
+ * Initialize a given set of debugfs files for a device
+ *
+ * \param files The array of files to create
+ * \param count The number of files given
+ * \param root DRI debugfs dir entry.
+ * \param minor device minor number
+ * \return Zero on success, non-zero on failure
+ *
+ * Create a given set of debugfs files represented by an array of
+ * gdm_debugfs_lists in the given root directory.
+ */
+int drm_debugfs_create_files(struct drm_info_list *files, int count,
+                            struct dentry *root, struct drm_minor *minor)
+{
+       struct drm_device *dev = minor->dev;
+       struct dentry *ent;
+       struct drm_info_node *tmp;
+       char name[64];
+       int i, ret;
+
+       for (i = 0; i < count; i++) {
+               u32 features = files[i].driver_features;
+
+               if (features != 0 &&
+                   (dev->driver->driver_features & features) != features)
+                       continue;
+
+               tmp = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
+               if (tmp == NULL) {
+                       ret = -1;
+                       goto fail;
+               }
+               ent = debugfs_create_file(files[i].name, S_IFREG | S_IRUGO,
+                                         root, tmp, &drm_debugfs_fops);
+               if (!ent) {
+                       DRM_ERROR("Cannot create /sys/kernel/debug/dri/%s/%s\n",
+                                 name, files[i].name);
+                       kfree(tmp);
+                       ret = -1;
+                       goto fail;
+               }
+
+               tmp->minor = minor;
+               tmp->dent = ent;
+               tmp->info_ent = &files[i];
+               list_add(&(tmp->list), &(minor->debugfs_nodes.list));
+       }
+       return 0;
+
+fail:
+       drm_debugfs_remove_files(files, count, minor);
+       return ret;
+}
+EXPORT_SYMBOL(drm_debugfs_create_files);
+
+/**
+ * Initialize the DRI debugfs filesystem for a device
+ *
+ * \param dev DRM device
+ * \param minor device minor number
+ * \param root DRI debugfs dir entry.
+ *
+ * Create the DRI debugfs root entry "/sys/kernel/debug/dri", the device debugfs root entry
+ * "/sys/kernel/debug/dri/%minor%/", and each entry in debugfs_list as
+ * "/sys/kernel/debug/dri/%minor%/%name%".
+ */
+int drm_debugfs_init(struct drm_minor *minor, int minor_id,
+                    struct dentry *root)
+{
+       struct drm_device *dev = minor->dev;
+       char name[64];
+       int ret;
+
+       INIT_LIST_HEAD(&minor->debugfs_nodes.list);
+       sprintf(name, "%d", minor_id);
+       minor->debugfs_root = debugfs_create_dir(name, root);
+       if (!minor->debugfs_root) {
+               DRM_ERROR("Cannot create /sys/kernel/debug/dri/%s\n", name);
+               return -1;
+       }
+
+       ret = drm_debugfs_create_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES,
+                                      minor->debugfs_root, minor);
+       if (ret) {
+               debugfs_remove(minor->debugfs_root);
+               minor->debugfs_root = NULL;
+               DRM_ERROR("Failed to create core drm debugfs files\n");
+               return ret;
+       }
+
+       if (dev->driver->debugfs_init) {
+               ret = dev->driver->debugfs_init(minor);
+               if (ret) {
+                       DRM_ERROR("DRM: Driver failed to initialize "
+                                 "/sys/kernel/debug/dri.\n");
+                       return ret;
+               }
+       }
+       return 0;
+}
+
+
+/**
+ * Remove a list of debugfs files
+ *
+ * \param files The list of files
+ * \param count The number of files
+ * \param minor The minor of which we should remove the files
+ * \return always zero.
+ *
+ * Remove all debugfs entries created by debugfs_init().
+ */
+int drm_debugfs_remove_files(struct drm_info_list *files, int count,
+                            struct drm_minor *minor)
+{
+       struct list_head *pos, *q;
+       struct drm_info_node *tmp;
+       int i;
+
+       for (i = 0; i < count; i++) {
+               list_for_each_safe(pos, q, &minor->debugfs_nodes.list) {
+                       tmp = list_entry(pos, struct drm_info_node, list);
+                       if (tmp->info_ent == &files[i]) {
+                               debugfs_remove(tmp->dent);
+                               list_del(pos);
+                               kfree(tmp);
+                       }
+               }
+       }
+       return 0;
+}
+EXPORT_SYMBOL(drm_debugfs_remove_files);
+
/**
 * Cleanup the debugfs filesystem resources.
 *
 * \param minor device minor number.
 * \return always zero.
 *
 * Tears down everything drm_debugfs_init() created: driver-private
 * entries first, then the core file list, then the per-minor directory.
 * A no-op when debugfs was never initialized for this minor.
 */
int drm_debugfs_cleanup(struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;

	if (!minor->debugfs_root)
		return 0;

	if (dev->driver->debugfs_cleanup)
		dev->driver->debugfs_cleanup(minor);

	drm_debugfs_remove_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES, minor);

	debugfs_remove(minor->debugfs_root);
	minor->debugfs_root = NULL;

	return 0;
}
+
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_dma.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_dma.c
new file mode 100644 (file)
index 0000000..13f1537
--- /dev/null
@@ -0,0 +1,163 @@
+/**
+ * \file drm_dma.c
+ * DMA IOCTL and function support
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+
+/**
+ * Initialize the DMA data.
+ *
+ * \param dev DRM device.
+ * \return zero on success or a negative value on failure.
+ *
+ * Allocate and initialize a drm_device_dma structure.
+ */
+int drm_dma_setup(struct drm_device *dev)
+{
+       int i;
+
+       dev->dma = kmalloc(sizeof(*dev->dma), GFP_KERNEL);
+       if (!dev->dma)
+               return -ENOMEM;
+
+       memset(dev->dma, 0, sizeof(*dev->dma));
+
+       for (i = 0; i <= DRM_MAX_ORDER; i++)
+               memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0]));
+
+       return 0;
+}
+
+/**
+ * Cleanup the DMA resources.
+ *
+ * \param dev DRM device.
+ *
+ * Free all pages associated with DMA buffers, the buffers and pages lists, and
+ * finally the drm_device::dma structure itself.
+ */
+void drm_dma_takedown(struct drm_device *dev)
+{
+       struct drm_device_dma *dma = dev->dma;
+       int i, j;
+
+       if (!dma)
+               return;
+
+       /* Clear dma buffers */
+       for (i = 0; i <= DRM_MAX_ORDER; i++) {
+               if (dma->bufs[i].seg_count) {
+                       DRM_DEBUG("order %d: buf_count = %d,"
+                                 " seg_count = %d\n",
+                                 i,
+                                 dma->bufs[i].buf_count,
+                                 dma->bufs[i].seg_count);
+                       for (j = 0; j < dma->bufs[i].seg_count; j++) {
+                               if (dma->bufs[i].seglist[j]) {
+                                       drm_pci_free(dev, dma->bufs[i].seglist[j]);
+                               }
+                       }
+                       kfree(dma->bufs[i].seglist);
+               }
+               if (dma->bufs[i].buf_count) {
+                       for (j = 0; j < dma->bufs[i].buf_count; j++) {
+                               kfree(dma->bufs[i].buflist[j].dev_private);
+                       }
+                       kfree(dma->bufs[i].buflist);
+               }
+       }
+
+       kfree(dma->buflist);
+       kfree(dma->pagelist);
+       kfree(dev->dma);
+       dev->dma = NULL;
+}
+
+/**
+ * Free a buffer.
+ *
+ * \param dev DRM device.
+ * \param buf buffer to free.
+ *
+ * Resets the fields of \p buf.
+ */
+void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf)
+{
+       if (!buf)
+               return;
+
+       buf->waiting = 0;
+       buf->pending = 0;
+       buf->file_priv = NULL;
+       buf->used = 0;
+
+       if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE)
+           && waitqueue_active(&buf->dma_wait)) {
+               wake_up_interruptible(&buf->dma_wait);
+       }
+}
+
+/**
+ * Reclaim the buffers.
+ *
+ * \param file_priv DRM file private.
+ *
+ * Frees each buffer associated with \p file_priv not already on the hardware.
+ */
+void drm_core_reclaim_buffers(struct drm_device *dev,
+                             struct drm_file *file_priv)
+{
+       struct drm_device_dma *dma = dev->dma;
+       int i;
+
+       if (!dma)
+               return;
+       for (i = 0; i < dma->buf_count; i++) {
+               if (dma->buflist[i]->file_priv == file_priv) {
+                       switch (dma->buflist[i]->list) {
+                       case DRM_LIST_NONE:
+                               drm_free_buffer(dev, dma->buflist[i]);
+                               break;
+                       case DRM_LIST_WAIT:
+                               dma->buflist[i]->list = DRM_LIST_RECLAIM;
+                               break;
+                       default:
+                               /* Buffer already on hardware. */
+                               break;
+                       }
+               }
+       }
+}
+
+EXPORT_SYMBOL(drm_core_reclaim_buffers);
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_drawable.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_drawable.c
new file mode 100644 (file)
index 0000000..c53c976
--- /dev/null
@@ -0,0 +1,198 @@
+/**
+ * \file drm_drawable.c
+ * IOCTLs for drawables
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ * \author Michel Dänzer <michel@tungstengraphics.com>
+ */
+
+/*
+ * Created: Tue Feb  2 08:37:54 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, North Dakota.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+
+/**
+ * Allocate drawable ID and memory to store information about it.
+ */
+int drm_adddraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       unsigned long irqflags;
+       struct drm_draw *draw = data;
+       int new_id = 0;
+       int ret;
+
+again:
+       if (idr_pre_get(&dev->drw_idr, GFP_KERNEL) == 0) {
+               DRM_ERROR("Out of memory expanding drawable idr\n");
+               return -ENOMEM;
+       }
+
+       spin_lock_irqsave(&dev->drw_lock, irqflags);
+       ret = idr_get_new_above(&dev->drw_idr, NULL, 1, &new_id);
+       if (ret == -EAGAIN) {
+               spin_unlock_irqrestore(&dev->drw_lock, irqflags);
+               goto again;
+       }
+
+       spin_unlock_irqrestore(&dev->drw_lock, irqflags);
+
+       draw->handle = new_id;
+
+       DRM_DEBUG("%d\n", draw->handle);
+
+       return 0;
+}
+
+/**
+ * Free drawable ID and memory to store information about it.
+ */
+int drm_rmdraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       struct drm_draw *draw = data;
+       unsigned long irqflags;
+       struct drm_drawable_info *info;
+
+       spin_lock_irqsave(&dev->drw_lock, irqflags);
+
+       info = drm_get_drawable_info(dev, draw->handle);
+       if (info == NULL) {
+               spin_unlock_irqrestore(&dev->drw_lock, irqflags);
+               return -EINVAL;
+       }
+       kfree(info->rects);
+       kfree(info);
+
+       idr_remove(&dev->drw_idr, draw->handle);
+
+       spin_unlock_irqrestore(&dev->drw_lock, irqflags);
+       DRM_DEBUG("%d\n", draw->handle);
+       return 0;
+}
+
+int drm_update_drawable_info(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       struct drm_update_draw *update = data;
+       unsigned long irqflags;
+       struct drm_clip_rect *rects;
+       struct drm_drawable_info *info;
+       int err;
+
+       info = idr_find(&dev->drw_idr, update->handle);
+       if (!info) {
+               info = kzalloc(sizeof(*info), GFP_KERNEL);
+               if (!info)
+                       return -ENOMEM;
+               if (IS_ERR(idr_replace(&dev->drw_idr, info, update->handle))) {
+                       DRM_ERROR("No such drawable %d\n", update->handle);
+                       kfree(info);
+                       return -EINVAL;
+               }
+       }
+
+       switch (update->type) {
+       case DRM_DRAWABLE_CLIPRECTS:
+               if (update->num == 0)
+                       rects = NULL;
+               else if (update->num != info->num_rects) {
+                       rects = kmalloc(update->num *
+                                       sizeof(struct drm_clip_rect),
+                                       GFP_KERNEL);
+               } else
+                       rects = info->rects;
+
+               if (update->num && !rects) {
+                       DRM_ERROR("Failed to allocate cliprect memory\n");
+                       err = -ENOMEM;
+                       goto error;
+               }
+
+               if (update->num && DRM_COPY_FROM_USER(rects,
+                                                    (struct drm_clip_rect __user *)
+                                                    (unsigned long)update->data,
+                                                    update->num *
+                                                    sizeof(*rects))) {
+                       DRM_ERROR("Failed to copy cliprects from userspace\n");
+                       err = -EFAULT;
+                       goto error;
+               }
+
+               spin_lock_irqsave(&dev->drw_lock, irqflags);
+
+               if (rects != info->rects) {
+                       kfree(info->rects);
+               }
+
+               info->rects = rects;
+               info->num_rects = update->num;
+
+               spin_unlock_irqrestore(&dev->drw_lock, irqflags);
+
+               DRM_DEBUG("Updated %d cliprects for drawable %d\n",
+                         info->num_rects, update->handle);
+               break;
+       default:
+               DRM_ERROR("Invalid update type %d\n", update->type);
+               return -EINVAL;
+       }
+
+       return 0;
+
+error:
+       if (rects != info->rects)
+               kfree(rects);
+
+       return err;
+}
+
+/**
+ * Caller must hold the drawable spinlock!
+ */
+struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev, drm_drawable_t id)
+{
+       return idr_find(&dev->drw_idr, id);
+}
+EXPORT_SYMBOL(drm_get_drawable_info);
+
+static int drm_drawable_free(int idr, void *p, void *data)
+{
+       struct drm_drawable_info *info = p;
+
+       if (info) {
+               kfree(info->rects);
+               kfree(info);
+       }
+
+       return 0;
+}
+
+void drm_drawable_free_all(struct drm_device *dev)
+{
+       idr_for_each(&dev->drw_idr, drm_drawable_free, NULL);
+       idr_remove_all(&dev->drw_idr);
+}
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_drv.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_drv.c
new file mode 100644 (file)
index 0000000..ff2f104
--- /dev/null
@@ -0,0 +1,536 @@
+/**
+ * \file drm_drv.c
+ * Generic driver template
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ *
+ * To use this template, you must at least define the following (samples
+ * given for the MGA driver):
+ *
+ * \code
+ * #define DRIVER_AUTHOR       "VA Linux Systems, Inc."
+ *
+ * #define DRIVER_NAME         "mga"
+ * #define DRIVER_DESC         "Matrox G200/G400"
+ * #define DRIVER_DATE         "20001127"
+ *
+ * #define drm_x               mga_##x
+ * \endcode
+ */
+
+/*
+ * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/debugfs.h>
+#include "drmP.h"
+#include "drm_core.h"
+
+
+static int drm_version(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv);
+
+/** Ioctl table */
+static struct drm_ioctl_desc drm_ioctls[] = {
+       DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0),
+       DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
+       DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
+       DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0),
+       DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
+       DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
+       DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
+       /* The DRM_IOCTL_DMA ioctl should be defined by the driver. */
+       DRM_IOCTL_DEF(DRM_IOCTL_DMA, NULL, DRM_AUTH),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+
+#if __OS_HAS_AGP
+       DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+#endif
+
+       DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0),
+       DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW)
+};
+
+#define DRM_CORE_IOCTL_COUNT   ARRAY_SIZE( drm_ioctls )
+
+/**
+ * Take down the DRM device.
+ *
+ * \param dev DRM device structure.
+ *
+ * Frees every resource in \p dev.
+ *
+ * \sa drm_device
+ */
+int drm_lastclose(struct drm_device * dev)
+{
+       struct drm_vma_entry *vma, *vma_temp;
+       int i;
+
+       DRM_DEBUG("\n");
+
+       if (dev->driver->lastclose)
+               dev->driver->lastclose(dev);
+       DRM_DEBUG("driver lastclose completed\n");
+
+       if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET))
+               drm_irq_uninstall(dev);
+
+       mutex_lock(&dev->struct_mutex);
+
+       /* Free drawable information memory */
+       drm_drawable_free_all(dev);
+       del_timer(&dev->timer);
+
+       /* Clear AGP information */
+       if (drm_core_has_AGP(dev) && dev->agp &&
+                       !drm_core_check_feature(dev, DRIVER_MODESET)) {
+               struct drm_agp_mem *entry, *tempe;
+
+               /* Remove AGP resources, but leave dev->agp
+                  intact until drv_cleanup is called. */
+               list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
+                       if (entry->bound)
+                               drm_unbind_agp(entry->memory);
+                       drm_free_agp(entry->memory, entry->pages);
+                       kfree(entry);
+               }
+               INIT_LIST_HEAD(&dev->agp->memory);
+
+               if (dev->agp->acquired)
+                       drm_agp_release(dev);
+
+               dev->agp->acquired = 0;
+               dev->agp->enabled = 0;
+       }
+       if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg &&
+           !drm_core_check_feature(dev, DRIVER_MODESET)) {
+               drm_sg_cleanup(dev->sg);
+               dev->sg = NULL;
+       }
+
+       /* Clear vma list (only built for debugging) */
+       list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
+               list_del(&vma->head);
+               kfree(vma);
+       }
+
+       if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist) {
+               for (i = 0; i < dev->queue_count; i++) {
+                       kfree(dev->queuelist[i]);
+                       dev->queuelist[i] = NULL;
+               }
+               kfree(dev->queuelist);
+               dev->queuelist = NULL;
+       }
+       dev->queue_count = 0;
+
+       if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
+           !drm_core_check_feature(dev, DRIVER_MODESET))
+               drm_dma_takedown(dev);
+
+       dev->dev_mapping = NULL;
+       mutex_unlock(&dev->struct_mutex);
+
+       DRM_DEBUG("lastclose completed\n");
+       return 0;
+}
+
+/**
+ * Module initialization. Called via init_module at module load time, or via
+ * linux/init/main.c (this is not currently supported).
+ *
+ * \return zero on success or a negative number on failure.
+ *
+ * Initializes an array of drm_device structures, and attempts to
+ * initialize all available devices, using consecutive minors, registering the
+ * stubs and initializing the AGP device.
+ *
+ * Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
+ * after the initialization for driver customization.
+ */
+int drm_init(struct drm_driver *driver)
+{
+       struct pci_dev *pdev = NULL;
+       const struct pci_device_id *pid;
+       int i;
+
+       DRM_DEBUG("\n");
+
+       INIT_LIST_HEAD(&driver->device_list);
+
+       if (driver->driver_features & DRIVER_MODESET)
+               return pci_register_driver(&driver->pci_driver);
+
+       /* If not using KMS, fall back to stealth mode manual scanning. */
+       for (i = 0; driver->pci_driver.id_table[i].vendor != 0; i++) {
+               pid = &driver->pci_driver.id_table[i];
+
+               /* Loop around setting up a DRM device for each PCI device
+                * matching our ID and device class.  If we had the internal
+                * function that pci_get_subsys and pci_get_class used, we'd
+                * be able to just pass pid in instead of doing a two-stage
+                * thing.
+                */
+               pdev = NULL;
+               while ((pdev =
+                       pci_get_subsys(pid->vendor, pid->device, pid->subvendor,
+                                      pid->subdevice, pdev)) != NULL) {
+                       if ((pdev->class & pid->class_mask) != pid->class)
+                               continue;
+
+                       /* stealth mode requires a manual probe */
+                       pci_dev_get(pdev);
+                       drm_get_dev(pdev, pid, driver);
+               }
+       }
+       return 0;
+}
+
+EXPORT_SYMBOL(drm_init);
+
+void drm_exit(struct drm_driver *driver)
+{
+       struct drm_device *dev, *tmp;
+       DRM_DEBUG("\n");
+
+       if (driver->driver_features & DRIVER_MODESET) {
+               pci_unregister_driver(&driver->pci_driver);
+       } else {
+               list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
+                       drm_put_dev(dev);
+       }
+
+       DRM_INFO("Module unloaded\n");
+}
+
+EXPORT_SYMBOL(drm_exit);
+
+/** File operations structure */
+static const struct file_operations drm_stub_fops = {
+       .owner = THIS_MODULE,
+       .open = drm_stub_open
+};
+
+static int __init drm_core_init(void)
+{
+       int ret = -ENOMEM;
+
+       idr_init(&drm_minors_idr);
+
+       if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
+               goto err_p1;
+
+       drm_class = drm_sysfs_create(THIS_MODULE, "drm");
+       if (IS_ERR(drm_class)) {
+               printk(KERN_ERR "DRM: Error creating drm class.\n");
+               ret = PTR_ERR(drm_class);
+               goto err_p2;
+       }
+
+       drm_proc_root = proc_mkdir("dri", NULL);
+       if (!drm_proc_root) {
+               DRM_ERROR("Cannot create /proc/dri\n");
+               ret = -1;
+               goto err_p3;
+       }
+
+       drm_debugfs_root = debugfs_create_dir("dri", NULL);
+       if (!drm_debugfs_root) {
+               DRM_ERROR("Cannot create /sys/kernel/debug/dri\n");
+               ret = -1;
+               goto err_p3;
+       }
+
+       DRM_INFO("Initialized %s %d.%d.%d %s\n",
+                CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
+       return 0;
+err_p3:
+       drm_sysfs_destroy();
+err_p2:
+       unregister_chrdev(DRM_MAJOR, "drm");
+
+       idr_destroy(&drm_minors_idr);
+err_p1:
+       return ret;
+}
+
+static void __exit drm_core_exit(void)
+{
+       remove_proc_entry("dri", NULL);
+       debugfs_remove(drm_debugfs_root);
+       drm_sysfs_destroy();
+
+       unregister_chrdev(DRM_MAJOR, "drm");
+
+       idr_destroy(&drm_minors_idr);
+}
+
+module_init(drm_core_init);
+module_exit(drm_core_exit);
+
+/**
+ * Copy and IOCTL return string to user space
+ */
+static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
+{
+       int len;
+
+       /* don't overflow userbuf */
+       len = strlen(value);
+       if (len > *buf_len)
+               len = *buf_len;
+
+       /* let userspace know exact length of driver value (which could be
+        * larger than the userspace-supplied buffer) */
+       *buf_len = strlen(value);
+
+       /* finally, try filling in the userbuf */
+       if (len && buf)
+               if (copy_to_user(buf, value, len))
+                       return -EFAULT;
+       return 0;
+}
+
+/**
+ * Get version information
+ *
+ * \param inode device inode.
+ * \param filp file pointer.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_version structure.
+ * \return zero on success or negative number on failure.
+ *
+ * Fills in the version information in \p arg.
+ */
+static int drm_version(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv)
+{
+       struct drm_version *version = data;
+       int err;
+
+       version->version_major = dev->driver->major;
+       version->version_minor = dev->driver->minor;
+       version->version_patchlevel = dev->driver->patchlevel;
+       err = drm_copy_field(version->name, &version->name_len,
+                       dev->driver->name);
+       if (!err)
+               err = drm_copy_field(version->date, &version->date_len,
+                               dev->driver->date);
+       if (!err)
+               err = drm_copy_field(version->desc, &version->desc_len,
+                               dev->driver->desc);
+
+       return err;
+}
+
+/**
+ * Called whenever a process performs an ioctl on /dev/drm.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument.
+ * \return zero on success or negative number on failure.
+ *
+ * Looks up the ioctl function in the ::ioctls table, checking for root
+ * previleges if so required, and dispatches to the respective function.
+ */
+int drm_ioctl(struct inode *inode, struct file *filp,
+             unsigned int cmd, unsigned long arg)
+{
+       struct drm_file *file_priv = filp->private_data;
+       struct drm_device *dev = file_priv->minor->dev;
+       struct drm_ioctl_desc *ioctl;
+       drm_ioctl_t *func;
+       unsigned int nr = DRM_IOCTL_NR(cmd);
+       int retcode = -EINVAL;
+       char stack_kdata[128];
+       char *kdata = NULL;
+
+       atomic_inc(&dev->ioctl_count);
+       atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
+       ++file_priv->ioctl_count;
+
+       DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
+                 task_pid_nr(current), cmd, nr,
+                 (long)old_encode_dev(file_priv->minor->device),
+                 file_priv->authenticated);
+
+       if ((nr >= DRM_CORE_IOCTL_COUNT) &&
+           ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
+               goto err_i1;
+       if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) &&
+           (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls))
+               ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
+       else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
+               ioctl = &drm_ioctls[nr];
+               cmd = ioctl->cmd;
+       } else
+               goto err_i1;
+
+       /* Do not trust userspace, use our own definition */
+       func = ioctl->func;
+       /* is there a local override? */
+       if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl)
+               func = dev->driver->dma_ioctl;
+
+       if (!func) {
+               DRM_DEBUG("no function\n");
+               retcode = -EINVAL;
+       } else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) ||
+                  ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) ||
+                  ((ioctl->flags & DRM_MASTER) && !file_priv->is_master) ||
+                  (!(ioctl->flags & DRM_CONTROL_ALLOW) && (file_priv->minor->type == DRM_MINOR_CONTROL))) {
+               retcode = -EACCES;
+       } else {
+               if (cmd & (IOC_IN | IOC_OUT)) {
+                       if (_IOC_SIZE(cmd) <= sizeof(stack_kdata)) {
+                               kdata = stack_kdata;
+                       } else {
+                               kdata = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL);
+                               if (!kdata) {
+                                       retcode = -ENOMEM;
+                                       goto err_i1;
+                               }
+                       }
+               }
+
+               if (cmd & IOC_IN) {
+                       if (copy_from_user(kdata, (void __user *)arg,
+                                          _IOC_SIZE(cmd)) != 0) {
+                               retcode = -EFAULT;
+                               goto err_i1;
+                       }
+               }
+               retcode = func(dev, kdata, file_priv);
+
+               if (cmd & IOC_OUT) {
+                       if (copy_to_user((void __user *)arg, kdata,
+                                        _IOC_SIZE(cmd)) != 0)
+                               retcode = -EFAULT;
+               }
+       }
+
+      err_i1:
+       if (kdata != stack_kdata)
+               kfree(kdata);
+       atomic_dec(&dev->ioctl_count);
+       if (retcode)
+               DRM_DEBUG("ret = %x\n", retcode);
+       return retcode;
+}
+
+EXPORT_SYMBOL(drm_ioctl);
+
+struct drm_local_map *drm_getsarea(struct drm_device *dev)
+{
+       struct drm_map_list *entry;
+
+       list_for_each_entry(entry, &dev->maplist, head) {
+               if (entry->map && entry->map->type == _DRM_SHM &&
+                   (entry->map->flags & _DRM_CONTAINS_LOCK)) {
+                       return entry->map;
+               }
+       }
+       return NULL;
+}
+EXPORT_SYMBOL(drm_getsarea);
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_edid.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_edid.c
new file mode 100644 (file)
index 0000000..c39b26f
--- /dev/null
@@ -0,0 +1,1387 @@
+/*
+ * Copyright (c) 2006 Luc Verhaegen (quirks list)
+ * Copyright (c) 2007-2008 Intel Corporation
+ *   Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * DDC probing routines (drm_ddc_read & drm_do_probe_ddc_edid) originally from
+ * FB layer.
+ *   Copyright (C) 2006 Dennis Munsie <dmunsie@cecropia.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include <linux/kernel.h>
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+#include "drmP.h"
+#include "drm_edid.h"
+
+/*
+ * TODO:
+ *   - support EDID 1.4 (incl. CE blocks)
+ */
+
+/*
+ * EDID blocks out in the wild have a variety of bugs, try to collect
+ * them here (note that userspace may work around broken monitors first,
+ * but fixes should make their way here so that the kernel "just works"
+ * on as many displays as possible).
+ */
+
+/* First detailed mode wrong, use largest 60Hz mode */
+#define EDID_QUIRK_PREFER_LARGE_60             (1 << 0)
+/* Reported 135MHz pixel clock is too high, needs adjustment */
+#define EDID_QUIRK_135_CLOCK_TOO_HIGH          (1 << 1)
+/* Prefer the largest mode at 75 Hz */
+#define EDID_QUIRK_PREFER_LARGE_75             (1 << 2)
+/* Detail timing is in cm not mm */
+#define EDID_QUIRK_DETAILED_IN_CM              (1 << 3)
+/* Detailed timing descriptors have bogus size values, so just take the
+ * maximum size and use that.
+ */
+#define EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE   (1 << 4)
+/* Monitor forgot to set the first detailed is preferred bit. */
+#define EDID_QUIRK_FIRST_DETAILED_PREFERRED    (1 << 5)
+/* use +hsync +vsync for detailed mode */
+#define EDID_QUIRK_DETAILED_SYNC_PP            (1 << 6)
+/* define the number of Extension EDID block */
+#define MAX_EDID_EXT_NUM 4
+
+#define LEVEL_DMT      0
+#define LEVEL_GTF      1
+#define LEVEL_CVT      2
+
/*
 * Known-broken monitors, keyed by (PNP vendor code, EDID product id).
 * Matched by edid_get_quirks(); product ids are written in decimal or
 * hex depending on how they were originally reported.
 */
static struct edid_quirk {
	char *vendor;		/* three-letter PNP vendor code, e.g. "SAM" */
	int product_id;		/* value compared against EDID_PRODUCT_ID() */
	u32 quirks;		/* EDID_QUIRK_* flag mask to apply */
} edid_quirk_list[] = {
	/* Acer AL1706 */
	{ "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 },
	/* Acer F51 */
	{ "API", 0x7602, EDID_QUIRK_PREFER_LARGE_60 },
	/* Unknown Acer */
	{ "ACR", 2423, EDID_QUIRK_FIRST_DETAILED_PREFERRED },

	/* Belinea 10 15 55 */
	{ "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
	{ "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },

	/* Envision Peripherals, Inc. EN-7100e */
	{ "EPI", 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH },

	/* Funai Electronics PM36B */
	{ "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 |
	  EDID_QUIRK_DETAILED_IN_CM },

	/* LG Philips LCD LP154W01-A5 */
	{ "LPL", 0, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
	{ "LPL", 0x2a00, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },

	/* Philips 107p5 CRT */
	{ "PHL", 57364, EDID_QUIRK_FIRST_DETAILED_PREFERRED },

	/* Proview AY765C */
	{ "PTS", 765, EDID_QUIRK_FIRST_DETAILED_PREFERRED },

	/* Samsung SyncMaster 205BW.  Note: irony */
	{ "SAM", 541, EDID_QUIRK_DETAILED_SYNC_PP },
	/* Samsung SyncMaster 22[5-6]BW */
	{ "SAM", 596, EDID_QUIRK_PREFER_LARGE_60 },
	{ "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 },
};
+
+
/* Valid EDID header has these bytes (the fixed 8-byte block signature). */
static const u8 edid_header[] = {
	0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
};
+
+/**
+ * edid_is_valid - sanity check EDID data
+ * @edid: EDID data
+ *
+ * Sanity check the EDID block by looking at the header, the version number
+ * and the checksum.  Return 0 if the EDID doesn't check out, or 1 if it's
+ * valid.
+ */
+static bool edid_is_valid(struct edid *edid)
+{
+       int i, score = 0;
+       u8 csum = 0;
+       u8 *raw_edid = (u8 *)edid;
+
+       for (i = 0; i < sizeof(edid_header); i++)
+               if (raw_edid[i] == edid_header[i])
+                       score++;
+
+       if (score == 8) ;
+       else if (score >= 6) {
+               DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
+               memcpy(raw_edid, edid_header, sizeof(edid_header));
+       } else
+               goto bad;
+
+       for (i = 0; i < EDID_LENGTH; i++)
+               csum += raw_edid[i];
+       if (csum) {
+               DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum);
+               goto bad;
+       }
+
+       if (edid->version != 1) {
+               DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
+               goto bad;
+       }
+
+       if (edid->revision > 4)
+               DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n");
+
+       return 1;
+
+bad:
+       if (raw_edid) {
+               DRM_ERROR("Raw EDID:\n");
+               print_hex_dump_bytes(KERN_ERR, DUMP_PREFIX_NONE, raw_edid, EDID_LENGTH);
+               printk("\n");
+       }
+       return 0;
+}
+
+/**
+ * edid_vendor - match a string against EDID's obfuscated vendor field
+ * @edid: EDID to match
+ * @vendor: vendor string
+ *
+ * Returns true if @vendor is in @edid, false otherwise
+ */
+static bool edid_vendor(struct edid *edid, char *vendor)
+{
+       char edid_vendor[3];
+
+       edid_vendor[0] = ((edid->mfg_id[0] & 0x7c) >> 2) + '@';
+       edid_vendor[1] = (((edid->mfg_id[0] & 0x3) << 3) |
+                         ((edid->mfg_id[1] & 0xe0) >> 5)) + '@';
+       edid_vendor[2] = (edid->mfg_id[1] & 0x1f) + '@';
+
+       return !strncmp(edid_vendor, vendor, 3);
+}
+
+/**
+ * edid_get_quirks - return quirk flags for a given EDID
+ * @edid: EDID to process
+ *
+ * This tells subsequent routines what fixes they need to apply.
+ */
+static u32 edid_get_quirks(struct edid *edid)
+{
+       struct edid_quirk *quirk;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(edid_quirk_list); i++) {
+               quirk = &edid_quirk_list[i];
+
+               if (edid_vendor(edid, quirk->vendor) &&
+                   (EDID_PRODUCT_ID(edid) == quirk->product_id))
+                       return quirk->quirks;
+       }
+
+       return 0;
+}
+
/* Pixel area of a mode; used to pick the "largest" mode. */
#define MODE_SIZE(m) ((m)->hdisplay * (m)->vdisplay)
/*
 * Distance of a mode's refresh rate from a target rate.  The previous
 * version ignored its second argument and read the caller's local
 * `target_refresh` directly; use the parameter instead so the macro is
 * hygienic (both call sites already pass target_refresh, so behavior
 * is unchanged).
 */
#define MODE_REFRESH_DIFF(m, r) (abs((m)->vrefresh - (r)))
+
+
+/**
+ * edid_fixup_preferred - set preferred modes based on quirk list
+ * @connector: has mode list to fix up
+ * @quirks: quirks list
+ *
+ * Walk the mode list for @connector, clearing the preferred status
+ * on existing modes and setting it anew for the right mode ala @quirks.
+ */
+static void edid_fixup_preferred(struct drm_connector *connector,
+                                u32 quirks)
+{
+       struct drm_display_mode *t, *cur_mode, *preferred_mode;
+       int target_refresh = 0;
+
+       if (list_empty(&connector->probed_modes))
+               return;
+
+       if (quirks & EDID_QUIRK_PREFER_LARGE_60)
+               target_refresh = 60;
+       if (quirks & EDID_QUIRK_PREFER_LARGE_75)
+               target_refresh = 75;
+
+       preferred_mode = list_first_entry(&connector->probed_modes,
+                                         struct drm_display_mode, head);
+
+       list_for_each_entry_safe(cur_mode, t, &connector->probed_modes, head) {
+               cur_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
+
+               if (cur_mode == preferred_mode)
+                       continue;
+
+               /* Largest mode is preferred */
+               if (MODE_SIZE(cur_mode) > MODE_SIZE(preferred_mode))
+                       preferred_mode = cur_mode;
+
+               /* At a given size, try to get closest to target refresh */
+               if ((MODE_SIZE(cur_mode) == MODE_SIZE(preferred_mode)) &&
+                   MODE_REFRESH_DIFF(cur_mode, target_refresh) <
+                   MODE_REFRESH_DIFF(preferred_mode, target_refresh)) {
+                       preferred_mode = cur_mode;
+               }
+       }
+
+       preferred_mode->type |= DRM_MODE_TYPE_PREFERRED;
+}
+
+/*
+ * Add the Autogenerated from the DMT spec.
+ * This table is copied from xfree86/modes/xf86EdidModes.c.
+ * But the mode with Reduced blank feature is deleted.
+ */
+static struct drm_display_mode drm_dmt_modes[] = {
+       /* 640x350@85Hz */
+       { DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
+                  736, 832, 0, 350, 382, 385, 445, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 640x400@85Hz */
+       { DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
+                  736, 832, 0, 400, 401, 404, 445, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 720x400@85Hz */
+       { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 756,
+                  828, 936, 0, 400, 401, 404, 446, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 640x480@60Hz */
+       { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
+                  752, 800, 0, 480, 489, 492, 525, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 640x480@72Hz */
+       { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
+                  704, 832, 0, 480, 489, 492, 520, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 640x480@75Hz */
+       { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
+                  720, 840, 0, 480, 481, 484, 500, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 640x480@85Hz */
+       { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 36000, 640, 696,
+                  752, 832, 0, 480, 481, 484, 509, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 800x600@56Hz */
+       { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
+                  896, 1024, 0, 600, 601, 603, 625, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 800x600@60Hz */
+       { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
+                  968, 1056, 0, 600, 601, 605, 628, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 800x600@72Hz */
+       { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
+                  976, 1040, 0, 600, 637, 643, 666, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 800x600@75Hz */
+       { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
+                  896, 1056, 0, 600, 601, 604, 625, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 800x600@85Hz */
+       { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 56250, 800, 832,
+                  896, 1048, 0, 600, 601, 604, 631, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 848x480@60Hz */
+       { DRM_MODE("848x480", DRM_MODE_TYPE_DRIVER, 33750, 848, 864,
+                  976, 1088, 0, 480, 486, 494, 517, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1024x768@43Hz, interlace */
+       { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
+                  1208, 1264, 0, 768, 768, 772, 817, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+                       DRM_MODE_FLAG_INTERLACE) },
+       /* 1024x768@60Hz */
+       { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
+                  1184, 1344, 0, 768, 771, 777, 806, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 1024x768@70Hz */
+       { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
+                  1184, 1328, 0, 768, 771, 777, 806, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 1024x768@75Hz */
+       { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78750, 1024, 1040,
+                  1136, 1312, 0, 768, 769, 772, 800, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1024x768@85Hz */
+       { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072,
+                  1072, 1376, 0, 768, 769, 772, 808, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1152x864@75Hz */
+       { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
+                  1344, 1600, 0, 864, 865, 868, 900, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1280x768@60Hz */
+       { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
+                  1472, 1664, 0, 768, 771, 778, 798, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1280x768@75Hz */
+       { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 102250, 1280, 1360,
+                  1488, 1696, 0, 768, 771, 778, 805, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 1280x768@85Hz */
+       { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 117500, 1280, 1360,
+                  1496, 1712, 0, 768, 771, 778, 809, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1280x800@60Hz */
+       { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
+                  1480, 1680, 0, 800, 803, 809, 831, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 1280x800@75Hz */
+       { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 106500, 1280, 1360,
+                  1488, 1696, 0, 800, 803, 809, 838, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1280x800@85Hz */
+       { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 122500, 1280, 1360,
+                  1496, 1712, 0, 800, 803, 809, 843, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1280x960@60Hz */
+       { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
+                  1488, 1800, 0, 960, 961, 964, 1000, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1280x960@85Hz */
+       { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1344,
+                  1504, 1728, 0, 960, 961, 964, 1011, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1280x1024@60Hz */
+       { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
+                  1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1280x1024@75Hz */
+       { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
+                  1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1280x1024@85Hz */
+       { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 157500, 1280, 1344,
+                  1504, 1728, 0, 1024, 1025, 1028, 1072, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1360x768@60Hz */
+       { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
+                  1536, 1792, 0, 768, 771, 777, 795, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1440x1050@60Hz */
+       { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
+                  1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1440x1050@75Hz */
+       { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 156000, 1400, 1504,
+                  1648, 1896, 0, 1050, 1053, 1057, 1099, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1440x1050@85Hz */
+       { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 179500, 1400, 1504,
+                  1656, 1912, 0, 1050, 1053, 1057, 1105, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1440x900@60Hz */
+       { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
+                  1672, 1904, 0, 900, 903, 909, 934, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1440x900@75Hz */
+       { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 136750, 1440, 1536,
+                  1688, 1936, 0, 900, 903, 909, 942, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1440x900@85Hz */
+       { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 157000, 1440, 1544,
+                  1696, 1952, 0, 900, 903, 909, 948, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1600x1200@60Hz */
+       { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
+                  1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1600x1200@65Hz */
+       { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 175500, 1600, 1664,
+                  1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1600x1200@70Hz */
+       { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 189000, 1600, 1664,
+                  1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1600x1200@75Hz */
+       { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 2025000, 1600, 1664,
+                  1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1600x1200@85Hz */
+       { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 229500, 1600, 1664,
+                  1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1680x1050@60Hz */
+       { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
+                  1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1680x1050@75Hz */
+       { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 187000, 1680, 1800,
+                  1976, 2272, 0, 1050, 1053, 1059, 1099, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1680x1050@85Hz */
+       { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 214750, 1680, 1808,
+                  1984, 2288, 0, 1050, 1053, 1059, 1105, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1792x1344@60Hz */
+       { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
+                  2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1729x1344@75Hz */
+       { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 261000, 1792, 1888,
+                  2104, 2456, 0, 1344, 1345, 1348, 1417, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1853x1392@60Hz */
+       { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
+                  2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1856x1392@75Hz */
+       { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 288000, 1856, 1984,
+                  2208, 2560, 0, 1392, 1395, 1399, 1500, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1920x1200@60Hz */
+       { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
+                  2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1920x1200@75Hz */
+       { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 245250, 1920, 2056,
+                  2264, 2608, 0, 1200, 1203, 1209, 1255, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1920x1200@85Hz */
+       { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 281250, 1920, 2064,
+                  2272, 2624, 0, 1200, 1203, 1209, 1262, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1920x1440@60Hz */
+       { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
+                  2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1920x1440@75Hz */
+       { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2064,
+                  2288, 2640, 0, 1440, 1441, 1444, 1500, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 2560x1600@60Hz */
+       { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
+                  3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 2560x1600@75HZ */
+       { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 443250, 2560, 2768,
+                  3048, 3536, 0, 1600, 1603, 1609, 1672, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 2560x1600@85HZ */
+       { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 505250, 2560, 2768,
+                  3048, 3536, 0, 1600, 1603, 1609, 1682, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+};
+static const int drm_num_dmt_modes =
+       sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
+
+static struct drm_display_mode *drm_find_dmt(struct drm_device *dev,
+                       int hsize, int vsize, int fresh)
+{
+       int i;
+       struct drm_display_mode *ptr, *mode;
+
+       mode = NULL;
+       for (i = 0; i < drm_num_dmt_modes; i++) {
+               ptr = &drm_dmt_modes[i];
+               if (hsize == ptr->hdisplay &&
+                       vsize == ptr->vdisplay &&
+                       fresh == drm_mode_vrefresh(ptr)) {
+                       /* get the expected default mode */
+                       mode = drm_mode_duplicate(dev, ptr);
+                       break;
+               }
+       }
+       return mode;
+}
+
+/*
+ * 0 is reserved.  The spec says 0x01 fill for unused timings.  Some old
+ * monitors fill with ascii space (0x20) instead.
+ */
+static int
+bad_std_timing(u8 a, u8 b)
+{
+       return (a == 0x00 && b == 0x00) ||
+              (a == 0x01 && b == 0x01) ||
+              (a == 0x20 && b == 0x20);
+}
+
+/**
+ * drm_mode_std - convert standard mode info (width, height, refresh) into mode
+ * @t: standard timing params
+ * @timing_level: standard timing level
+ *
+ * Take the standard timing params (in this case width, aspect, and refresh)
+ * and convert them into a real mode using CVT/GTF/DMT.
+ *
+ * Punts for now, but should eventually use the FB layer's CVT based mode
+ * generation code.
+ */
+struct drm_display_mode *drm_mode_std(struct drm_device *dev,
+                                     struct std_timing *t,
+                                     int revision,
+                                     int timing_level)
+{
+       struct drm_display_mode *mode;
+       int hsize, vsize;
+       int vrefresh_rate;
+       unsigned aspect_ratio = (t->vfreq_aspect & EDID_TIMING_ASPECT_MASK)
+               >> EDID_TIMING_ASPECT_SHIFT;
+       unsigned vfreq = (t->vfreq_aspect & EDID_TIMING_VFREQ_MASK)
+               >> EDID_TIMING_VFREQ_SHIFT;
+
+       if (bad_std_timing(t->hsize, t->vfreq_aspect))
+               return NULL;
+
+       /* According to the EDID spec, the hdisplay = hsize * 8 + 248 */
+       hsize = t->hsize * 8 + 248;
+       /* vrefresh_rate = vfreq + 60 */
+       vrefresh_rate = vfreq + 60;
+       /* the vdisplay is calculated based on the aspect ratio */
+       if (aspect_ratio == 0) {
+               if (revision < 3)
+                       vsize = hsize;
+               else
+                       vsize = (hsize * 10) / 16;
+       } else if (aspect_ratio == 1)
+               vsize = (hsize * 3) / 4;
+       else if (aspect_ratio == 2)
+               vsize = (hsize * 4) / 5;
+       else
+               vsize = (hsize * 9) / 16;
+       /* HDTV hack */
+       if (hsize == 1360 && vsize == 765 && vrefresh_rate == 60) {
+               mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0,
+                                   false);
+               mode->hdisplay = 1366;
+               mode->vsync_start = mode->vsync_start - 1;
+               mode->vsync_end = mode->vsync_end - 1;
+               return mode;
+       }
+       mode = NULL;
+       /* check whether it can be found in default mode table */
+       mode = drm_find_dmt(dev, hsize, vsize, vrefresh_rate);
+       if (mode)
+               return mode;
+
+       switch (timing_level) {
+       case LEVEL_DMT:
+               break;
+       case LEVEL_GTF:
+               mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
+               break;
+       case LEVEL_CVT:
+               mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0,
+                                   false);
+               break;
+       }
+       return mode;
+}
+
+/**
+ * drm_mode_detailed - create a new mode from an EDID detailed timing section
+ * @dev: DRM device (needed to create new mode)
+ * @edid: EDID block
+ * @timing: EDID detailed timing info
+ * @quirks: quirks to apply
+ *
+ * An EDID detailed timing block contains enough info for us to create and
+ * return a new struct drm_display_mode.
+ */
+static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
+                                                 struct edid *edid,
+                                                 struct detailed_timing *timing,
+                                                 u32 quirks)
+{
+       struct drm_display_mode *mode;
+       struct detailed_pixel_timing *pt = &timing->data.pixel_data;
+       unsigned hactive = (pt->hactive_hblank_hi & 0xf0) << 4 | pt->hactive_lo;
+       unsigned vactive = (pt->vactive_vblank_hi & 0xf0) << 4 | pt->vactive_lo;
+       unsigned hblank = (pt->hactive_hblank_hi & 0xf) << 8 | pt->hblank_lo;
+       unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo;
+       unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) << 2 | pt->hsync_offset_lo;
+       unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) << 4 | pt->hsync_pulse_width_lo;
+       unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) >> 2 | pt->vsync_offset_pulse_width_lo >> 4;
+       unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 4 | (pt->vsync_offset_pulse_width_lo & 0xf);
+
+       /* ignore tiny modes */
+       if (hactive < 64 || vactive < 64)
+               return NULL;
+
+       if (pt->misc & DRM_EDID_PT_STEREO) {
+               printk(KERN_WARNING "stereo mode not supported\n");
+               return NULL;
+       }
+       if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) {
+               printk(KERN_WARNING "integrated sync not supported\n");
+               return NULL;
+       }
+
+       /* it is incorrect if hsync/vsync width is zero */
+       if (!hsync_pulse_width || !vsync_pulse_width) {
+               DRM_DEBUG_KMS("Incorrect Detailed timing. "
+                               "Wrong Hsync/Vsync pulse width\n");
+               return NULL;
+       }
+       mode = drm_mode_create(dev);
+       if (!mode)
+               return NULL;
+
+       mode->type = DRM_MODE_TYPE_DRIVER;
+
+       if (quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH)
+               timing->pixel_clock = cpu_to_le16(1088);
+
+       mode->clock = le16_to_cpu(timing->pixel_clock) * 10;
+
+       mode->hdisplay = hactive;
+       mode->hsync_start = mode->hdisplay + hsync_offset;
+       mode->hsync_end = mode->hsync_start + hsync_pulse_width;
+       mode->htotal = mode->hdisplay + hblank;
+
+       mode->vdisplay = vactive;
+       mode->vsync_start = mode->vdisplay + vsync_offset;
+       mode->vsync_end = mode->vsync_start + vsync_pulse_width;
+       mode->vtotal = mode->vdisplay + vblank;
+
+       /* perform the basic check for the detailed timing */
+       if (mode->hsync_end > mode->htotal ||
+               mode->vsync_end > mode->vtotal) {
+               drm_mode_destroy(dev, mode);
+               DRM_DEBUG_KMS("Incorrect detailed timing. "
+                               "Sync is beyond the blank.\n");
+               return NULL;
+       }
+
+       /* Some EDIDs have bogus h/vtotal values */
+       if (mode->hsync_end > mode->htotal)
+               mode->htotal = mode->hsync_end + 1;
+       if (mode->vsync_end > mode->vtotal)
+               mode->vtotal = mode->vsync_end + 1;
+
+       drm_mode_set_name(mode);
+
+       if (pt->misc & DRM_EDID_PT_INTERLACED)
+               mode->flags |= DRM_MODE_FLAG_INTERLACE;
+
+       if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
+               pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
+       }
+
+       mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ?
+               DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
+       mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
+               DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
+
+       mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4;
+       mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf) << 8;
+
+       if (quirks & EDID_QUIRK_DETAILED_IN_CM) {
+               mode->width_mm *= 10;
+               mode->height_mm *= 10;
+       }
+
+       if (quirks & EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE) {
+               mode->width_mm = edid->width_cm * 10;
+               mode->height_mm = edid->height_cm * 10;
+       }
+
+       return mode;
+}
+
+/*
+ * Detailed mode info for the EDID "established modes" data to use.
+ */
+static struct drm_display_mode edid_est_modes[] = {
+       { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
+                  968, 1056, 0, 600, 601, 605, 628, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@60Hz */
+       { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
+                  896, 1024, 0, 600, 601, 603,  625, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@56Hz */
+       { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
+                  720, 840, 0, 480, 481, 484, 500, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */
+       { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
+                  704,  832, 0, 480, 489, 491, 520, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */
+       { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704,
+                  768,  864, 0, 480, 483, 486, 525, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@67Hz */
+       { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25200, 640, 656,
+                  752, 800, 0, 480, 490, 492, 525, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@60Hz */
+       { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738,
+                  846, 900, 0, 400, 421, 423,  449, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 720x400@88Hz */
+       { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 28320, 720, 738,
+                  846,  900, 0, 400, 412, 414, 449, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 720x400@70Hz */
+       { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
+                  1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@75Hz */
+       { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78800, 1024, 1040,
+                  1136, 1312, 0,  768, 769, 772, 800, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@75Hz */
+       { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
+                  1184, 1328, 0,  768, 771, 777, 806, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@70Hz */
+       { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
+                  1184, 1344, 0,  768, 771, 777, 806, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@60Hz */
+       { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032,
+                  1208, 1264, 0, 768, 768, 776, 817, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 1024x768@43Hz */
+       { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 57284, 832, 864,
+                  928, 1152, 0, 624, 625, 628, 667, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 832x624@75Hz */
+       { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
+                  896, 1056, 0, 600, 601, 604,  625, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@75Hz */
+       { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
+                  976, 1040, 0, 600, 637, 643, 666, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@72Hz */
+       { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
+                  1344, 1600, 0,  864, 865, 868, 900, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */
+};
+
+#define EDID_EST_TIMINGS 16
+#define EDID_STD_TIMINGS 8
+#define EDID_DETAILED_TIMINGS 4
+
+/**
+ * add_established_modes - get est. modes from EDID and add them
+ * @edid: EDID block to scan
+ *
+ * Each EDID block contains a bitmap of the supported "established modes" list
+ * (defined above).  Tease them out and add them to the global modes list.
+ */
+static int add_established_modes(struct drm_connector *connector, struct edid *edid)
+{
+       struct drm_device *dev = connector->dev;
+       unsigned long est_bits = edid->established_timings.t1 |
+               (edid->established_timings.t2 << 8) |
+               ((edid->established_timings.mfg_rsvd & 0x80) << 9);
+       int i, modes = 0;
+
+       for (i = 0; i <= EDID_EST_TIMINGS; i++)
+               if (est_bits & (1<<i)) {
+                       struct drm_display_mode *newmode;
+                       newmode = drm_mode_duplicate(dev, &edid_est_modes[i]);
+                       if (newmode) {
+                               drm_mode_probed_add(connector, newmode);
+                               modes++;
+                       }
+               }
+
+       return modes;
+}
+/**
+ * standard_timing_level - get std. timing level (CVT/GTF/DMT)
+ * @edid: EDID block to scan
+ */
+static int standard_timing_level(struct edid *edid)
+{
+       if (edid->revision >= 2) {
+               if (edid->revision >= 4 && (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF))
+                       return LEVEL_CVT;
+               return LEVEL_GTF;
+       }
+       return LEVEL_DMT;
+}
+
+/**
+ * add_standard_modes - get std. modes from EDID and add them
+ * @edid: EDID block to scan
+ *
+ * Standard modes can be calculated using the CVT standard.  Grab them from
+ * @edid, calculate them, and add them to the list.
+ */
+static int add_standard_modes(struct drm_connector *connector, struct edid *edid)
+{
+       struct drm_device *dev = connector->dev;
+       int i, modes = 0;
+       int timing_level;
+
+       timing_level = standard_timing_level(edid);
+
+       for (i = 0; i < EDID_STD_TIMINGS; i++) {
+               struct std_timing *t = &edid->standard_timings[i];
+               struct drm_display_mode *newmode;
+
+               /* If std timings bytes are 1, 1 it's empty */
+               if (t->hsize == 1 && t->vfreq_aspect == 1)
+                       continue;
+
+               newmode = drm_mode_std(dev, &edid->standard_timings[i],
+                                      edid->revision, timing_level);
+               if (newmode) {
+                       drm_mode_probed_add(connector, newmode);
+                       modes++;
+               }
+       }
+
+       return modes;
+}
+
+/*
+ * XXX fix this for:
+ * - GTF secondary curve formula
+ * - EDID 1.4 range offsets
+ * - CVT extended bits
+ */
+static bool
+mode_in_range(struct drm_display_mode *mode, struct detailed_timing *timing)
+{
+       struct detailed_data_monitor_range *range;
+       int hsync, vrefresh;
+
+       range = &timing->data.other_data.data.range;
+
+       hsync = drm_mode_hsync(mode);
+       vrefresh = drm_mode_vrefresh(mode);
+
+       if (hsync < range->min_hfreq_khz || hsync > range->max_hfreq_khz)
+               return false;
+
+       if (vrefresh < range->min_vfreq || vrefresh > range->max_vfreq)
+               return false;
+
+       if (range->pixel_clock_mhz && range->pixel_clock_mhz != 0xff) {
+               /* be forgiving since it's in units of 10MHz */
+               int max_clock = range->pixel_clock_mhz * 10 + 9;
+               max_clock *= 1000;
+               if (mode->clock > max_clock)
+                       return false;
+       }
+
+       return true;
+}
+
+/*
+ * XXX If drm_dmt_modes ever regrows the CVT-R modes (and it will) this will
+ * need to account for them.
+ */
+static int drm_gtf_modes_for_range(struct drm_connector *connector,
+                                  struct detailed_timing *timing)
+{
+       int i, modes = 0;
+       struct drm_display_mode *newmode;
+       struct drm_device *dev = connector->dev;
+
+       for (i = 0; i < drm_num_dmt_modes; i++) {
+               if (mode_in_range(drm_dmt_modes + i, timing)) {
+                       newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
+                       if (newmode) {
+                               drm_mode_probed_add(connector, newmode);
+                               modes++;
+                       }
+               }
+       }
+
+       return modes;
+}
+
+static int drm_cvt_modes(struct drm_connector *connector,
+                        struct detailed_timing *timing)
+{
+       int i, j, modes = 0;
+       struct drm_display_mode *newmode;
+       struct drm_device *dev = connector->dev;
+       struct cvt_timing *cvt;
+       const int rates[] = { 60, 85, 75, 60, 50 };
+
+       for (i = 0; i < 4; i++) {
+               int width, height;
+               cvt = &(timing->data.other_data.data.cvt[i]);
+
+               height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 8) + 1) * 2;
+               switch (cvt->code[1] & 0xc0) {
+               case 0x00:
+                       width = height * 4 / 3;
+                       break;
+               case 0x40:
+                       width = height * 16 / 9;
+                       break;
+               case 0x80:
+                       width = height * 16 / 10;
+                       break;
+               case 0xc0:
+                       width = height * 15 / 9;
+                       break;
+               }
+
+               for (j = 1; j < 5; j++) {
+                       if (cvt->code[2] & (1 << j)) {
+                               newmode = drm_cvt_mode(dev, width, height,
+                                                      rates[j], j == 0,
+                                                      false, false);
+                               if (newmode) {
+                                       drm_mode_probed_add(connector, newmode);
+                                       modes++;
+                               }
+                       }
+               }
+       }
+
+       return modes;
+}
+
+static int add_detailed_modes(struct drm_connector *connector,
+                             struct detailed_timing *timing,
+                             struct edid *edid, u32 quirks, int preferred)
+{
+       int i, modes = 0;
+       struct detailed_non_pixel *data = &timing->data.other_data;
+       int timing_level = standard_timing_level(edid);
+       int gtf = (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF);
+       struct drm_display_mode *newmode;
+       struct drm_device *dev = connector->dev;
+
+       if (timing->pixel_clock) {
+               newmode = drm_mode_detailed(dev, edid, timing, quirks);
+               if (!newmode)
+                       return 0;
+
+               if (preferred)
+                       newmode->type |= DRM_MODE_TYPE_PREFERRED;
+
+               drm_mode_probed_add(connector, newmode);
+               return 1;
+       }
+
+       /* other timing types */
+       switch (data->type) {
+       case EDID_DETAIL_MONITOR_RANGE:
+               if (gtf)
+                       modes += drm_gtf_modes_for_range(connector, timing);
+               break;
+       case EDID_DETAIL_STD_MODES:
+               /* Six modes per detailed section */
+               for (i = 0; i < 6; i++) {
+                       struct std_timing *std;
+                       struct drm_display_mode *newmode;
+
+                       std = &data->data.timings[i];
+                       newmode = drm_mode_std(dev, std, edid->revision,
+                                              timing_level);
+                       if (newmode) {
+                               drm_mode_probed_add(connector, newmode);
+                               modes++;
+                       }
+               }
+               break;
+       case EDID_DETAIL_CVT_3BYTE:
+               modes += drm_cvt_modes(connector, timing);
+               break;
+       default:
+               break;
+       }
+
+       return modes;
+}
+
+/**
+ * add_detailed_info - get detailed mode info from EDID data
+ * @connector: attached connector
+ * @edid: EDID block to scan
+ * @quirks: quirks to apply
+ *
+ * Some of the detailed timing sections may contain mode information.  Grab
+ * it and add it to the list.
+ */
+static int add_detailed_info(struct drm_connector *connector,
+                            struct edid *edid, u32 quirks)
+{
+       int i, modes = 0;
+
+       for (i = 0; i < EDID_DETAILED_TIMINGS; i++) {
+               struct detailed_timing *timing = &edid->detailed_timings[i];
+               int preferred = (i == 0) && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);
+
+               /* In 1.0, only timings are allowed */
+               if (!timing->pixel_clock && edid->version == 1 &&
+                       edid->revision == 0)
+                       continue;
+
+               modes += add_detailed_modes(connector, timing, edid, quirks,
+                                           preferred);
+       }
+
+       return modes;
+}
+
+/**
+ * add_detailed_info_eedid - get detailed mode info from additional timing
+ *                     EDID block
+ * @connector: attached connector
+ * @edid: EDID block to scan (only used to locate the additional timing EDID block)
+ * @quirks: quirks to apply
+ *
+ * Some of the detailed timing sections may contain mode information.  Grab
+ * it and add it to the list.
+ */
+static int add_detailed_info_eedid(struct drm_connector *connector,
+                            struct edid *edid, u32 quirks)
+{
+       int i, modes = 0;
+       char *edid_ext = NULL;
+       struct detailed_timing *timing;
+       int edid_ext_num;
+       int start_offset, end_offset;
+       int timing_level;
+
+       if (edid->version == 1 && edid->revision < 3) {
+               /* If the EDID version is less than 1.3, there is no
+                * extension EDID.
+                */
+               return 0;
+       }
+       if (!edid->extensions) {
+               /* if there is no extension EDID, it is unnecessary to
+                * parse the E-EDID to get detailed info
+                */
+               return 0;
+       }
+
+       /* Choose the real EDID extension number */
+       edid_ext_num = edid->extensions > MAX_EDID_EXT_NUM ?
+                      MAX_EDID_EXT_NUM : edid->extensions;
+
+       /* Find CEA extension */
+       for (i = 0; i < edid_ext_num; i++) {
+               edid_ext = (char *)edid + EDID_LENGTH * (i + 1);
+               /* This block is CEA extension */
+               if (edid_ext[0] == 0x02)
+                       break;
+       }
+
+       if (i == edid_ext_num) {
+               /* if there is no additional timing EDID block, return */
+               return 0;
+       }
+
+       /* Get the start offset of detailed timing block */
+       start_offset = edid_ext[2];
+       if (start_offset == 0) {
+               /* If the start_offset is zero, it means that neither detailed
+                * info nor data block exist. In such case it is also
+                * unnecessary to parse the detailed timing info.
+                */
+               return 0;
+       }
+
+       timing_level = standard_timing_level(edid);
+       end_offset = EDID_LENGTH;
+       end_offset -= sizeof(struct detailed_timing);
+       for (i = start_offset; i < end_offset;
+                       i += sizeof(struct detailed_timing)) {
+               timing = (struct detailed_timing *)(edid_ext + i);
+               modes += add_detailed_modes(connector, timing, edid, quirks, 0);
+       }
+
+       return modes;
+}
+
+#define DDC_ADDR 0x50
+/**
+ * Get EDID information via I2C.
+ *
+ * \param adapter : i2c device adaptor
+ * \param buf     : EDID data buffer to be filled
+ * \param len     : EDID data buffer length
+ * \return 0 on success or -1 on failure.
+ *
+ * Try to fetch EDID information by calling i2c driver function.
+ */
+int drm_do_probe_ddc_edid(struct i2c_adapter *adapter,
+                         unsigned char *buf, int len)
+{
+       unsigned char start = 0x0;
+       struct i2c_msg msgs[] = {
+               {
+                       .addr   = DDC_ADDR,
+                       .flags  = 0,
+                       .len    = 1,
+                       .buf    = &start,
+               }, {
+                       .addr   = DDC_ADDR,
+                       .flags  = I2C_M_RD,
+                       .len    = len,
+                       .buf    = buf,
+               }
+       };
+
+       if (i2c_transfer(adapter, msgs, 2) == 2)
+               return 0;
+
+       return -1;
+}
+EXPORT_SYMBOL(drm_do_probe_ddc_edid);
+
+static int drm_ddc_read_edid(struct drm_connector *connector,
+                            struct i2c_adapter *adapter,
+                            char *buf, int len)
+{
+       int i;
+
+       for (i = 0; i < 4; i++) {
+               if (drm_do_probe_ddc_edid(adapter, buf, len))
+                       return -1;
+               if (edid_is_valid((struct edid *)buf))
+                       return 0;
+       }
+
+       /* repeated checksum failures; warn, but carry on */
+       dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
+                drm_get_connector_name(connector));
+       return -1;
+}
+
+/**
+ * drm_get_edid - get EDID data, if available
+ * @connector: connector we're probing
+ * @adapter: i2c adapter to use for DDC
+ *
+ * Poke the given connector's i2c channel to grab EDID data if possible.
+ *
+ * Return edid data or NULL if we couldn't find any.
+ */
+struct edid *drm_get_edid(struct drm_connector *connector,
+                         struct i2c_adapter *adapter)
+{
+       int ret;
+       struct edid *edid;
+
+       edid = kmalloc(EDID_LENGTH * (MAX_EDID_EXT_NUM + 1),
+                      GFP_KERNEL);
+       if (edid == NULL) {
+               dev_warn(&connector->dev->pdev->dev,
+                        "Failed to allocate EDID\n");
+               goto end;
+       }
+
+       /* Read first EDID block */
+       ret = drm_ddc_read_edid(connector, adapter,
+                               (unsigned char *)edid, EDID_LENGTH);
+       if (ret != 0)
+               goto clean_up;
+
+       /* There are EDID extensions to be read */
+       if (edid->extensions != 0) {
+               int edid_ext_num = edid->extensions;
+
+               if (edid_ext_num > MAX_EDID_EXT_NUM) {
+                       dev_warn(&connector->dev->pdev->dev,
+                                "The number of extension(%d) is "
+                                "over max (%d), actually read number (%d)\n",
+                                edid_ext_num, MAX_EDID_EXT_NUM,
+                                MAX_EDID_EXT_NUM);
+                       /* Reset EDID extension number to be read */
+                       edid_ext_num = MAX_EDID_EXT_NUM;
+               }
+               /* Read EDID including extensions too */
+               ret = drm_ddc_read_edid(connector, adapter, (char *)edid,
+                                       EDID_LENGTH * (edid_ext_num + 1));
+               if (ret != 0)
+                       goto clean_up;
+
+       }
+
+       connector->display_info.raw_edid = (char *)edid;
+       goto end;
+
+clean_up:
+       kfree(edid);
+       edid = NULL;
+end:
+       return edid;
+
+}
+EXPORT_SYMBOL(drm_get_edid);
+
+#define HDMI_IDENTIFIER 0x000C03
+#define VENDOR_BLOCK    0x03
+/**
+ * drm_detect_hdmi_monitor - detect whether monitor is hdmi.
+ * @edid: monitor EDID information
+ *
+ * Parse the CEA extension according to CEA-861-B.
+ * Return true if HDMI, false if not or unknown.
+ */
+bool drm_detect_hdmi_monitor(struct edid *edid)
+{
+       char *edid_ext = NULL;
+       int i, hdmi_id, edid_ext_num;
+       int start_offset, end_offset;
+       bool is_hdmi = false;
+
+       /* No EDID or EDID extensions */
+       if (edid == NULL || edid->extensions == 0)
+               goto end;
+
+       /* Choose the real EDID extension number */
+       edid_ext_num = edid->extensions > MAX_EDID_EXT_NUM ?
+                      MAX_EDID_EXT_NUM : edid->extensions;
+
+       /* Find CEA extension */
+       for (i = 0; i < edid_ext_num; i++) {
+               edid_ext = (char *)edid + EDID_LENGTH * (i + 1);
+               /* This block is CEA extension */
+               if (edid_ext[0] == 0x02)
+                       break;
+       }
+
+       if (i == edid_ext_num)
+               goto end;
+
+       /* Data block offset in CEA extension block */
+       start_offset = 4;
+       end_offset = edid_ext[2];
+
+       /*
+        * Because HDMI identifier is in Vendor Specific Block,
+        * search it from all data blocks of CEA extension.
+        */
+       for (i = start_offset; i < end_offset;
+               /* Increased by data block len */
+               i += ((edid_ext[i] & 0x1f) + 1)) {
+               /* Find vendor specific block */
+               if ((edid_ext[i] >> 5) == VENDOR_BLOCK) {
+                       hdmi_id = edid_ext[i + 1] | (edid_ext[i + 2] << 8) |
+                                 edid_ext[i + 3] << 16;
+                       /* Find HDMI identifier */
+                       if (hdmi_id == HDMI_IDENTIFIER)
+                               is_hdmi = true;
+                       break;
+               }
+       }
+
+end:
+       return is_hdmi;
+}
+EXPORT_SYMBOL(drm_detect_hdmi_monitor);
+
+/**
+ * drm_add_edid_modes - add modes from EDID data, if available
+ * @connector: connector we're probing
+ * @edid: edid data
+ *
+ * Add the specified modes to the connector's mode list.
+ *
+ * Return number of modes added or 0 if we couldn't find any.
+ */
+int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
+{
+       int num_modes = 0;
+       u32 quirks;
+
+       if (edid == NULL) {
+               return 0;
+       }
+       if (!edid_is_valid(edid)) {
+               dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
+                        drm_get_connector_name(connector));
+               return 0;
+       }
+
+       quirks = edid_get_quirks(edid);
+
+       num_modes += add_established_modes(connector, edid);
+       num_modes += add_standard_modes(connector, edid);
+       num_modes += add_detailed_info(connector, edid, quirks);
+       num_modes += add_detailed_info_eedid(connector, edid, quirks);
+
+       if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
+               edid_fixup_preferred(connector, quirks);
+
+       connector->display_info.serration_vsync = (edid->input & DRM_EDID_INPUT_SERRATION_VSYNC) ? 1 : 0;
+       connector->display_info.sync_on_green = (edid->input & DRM_EDID_INPUT_SYNC_ON_GREEN) ? 1 : 0;
+       connector->display_info.composite_sync = (edid->input & DRM_EDID_INPUT_COMPOSITE_SYNC) ? 1 : 0;
+       connector->display_info.separate_syncs = (edid->input & DRM_EDID_INPUT_SEPARATE_SYNCS) ? 1 : 0;
+       connector->display_info.blank_to_black = (edid->input & DRM_EDID_INPUT_BLANK_TO_BLACK) ? 1 : 0;
+       connector->display_info.video_level = (edid->input & DRM_EDID_INPUT_VIDEO_LEVEL) >> 5;
+       connector->display_info.digital = (edid->input & DRM_EDID_INPUT_DIGITAL) ? 1 : 0;
+       connector->display_info.width_mm = edid->width_cm * 10;
+       connector->display_info.height_mm = edid->height_cm * 10;
+       connector->display_info.gamma = edid->gamma;
+       connector->display_info.gtf_supported = (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF) ? 1 : 0;
+       connector->display_info.standard_color = (edid->features & DRM_EDID_FEATURE_STANDARD_COLOR) ? 1 : 0;
+       connector->display_info.display_type = (edid->features & DRM_EDID_FEATURE_DISPLAY_TYPE) >> 3;
+       connector->display_info.active_off_supported = (edid->features & DRM_EDID_FEATURE_PM_ACTIVE_OFF) ? 1 : 0;
+       connector->display_info.suspend_supported = (edid->features & DRM_EDID_FEATURE_PM_SUSPEND) ? 1 : 0;
+       connector->display_info.standby_supported = (edid->features & DRM_EDID_FEATURE_PM_STANDBY) ? 1 : 0;
+       connector->display_info.gamma = edid->gamma;
+
+       return num_modes;
+}
+EXPORT_SYMBOL(drm_add_edid_modes);
+
+/**
+ * drm_add_modes_noedid - add modes for the connectors without EDID
+ * @connector: connector we're probing
+ * @hdisplay: the horizontal display limit
+ * @vdisplay: the vertical display limit
+ *
+ * Add the specified modes to the connector's mode list. Only when the
+ * hdisplay/vdisplay is not beyond the given limit, it will be added.
+ *
+ * Return number of modes added or 0 if we couldn't find any.
+ */
+int drm_add_modes_noedid(struct drm_connector *connector,
+                       int hdisplay, int vdisplay)
+{
+       int i, count, num_modes = 0;
+       struct drm_display_mode *mode, *ptr;
+       struct drm_device *dev = connector->dev;
+
+       count = sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
+       if (hdisplay < 0)
+               hdisplay = 0;
+       if (vdisplay < 0)
+               vdisplay = 0;
+
+       for (i = 0; i < count; i++) {
+               ptr = &drm_dmt_modes[i];
+               if (hdisplay && vdisplay) {
+                       /*
+                        * Only when two are valid, they will be used to check
+                        * whether the mode should be added to the mode list of
+                        * the connector.
+                        */
+                       if (ptr->hdisplay > hdisplay ||
+                                       ptr->vdisplay > vdisplay)
+                               continue;
+               }
+               if (drm_mode_vrefresh(ptr) > 61)
+                       continue;
+               mode = drm_mode_duplicate(dev, ptr);
+               if (mode) {
+                       drm_mode_probed_add(connector, mode);
+                       num_modes++;
+               }
+       }
+       return num_modes;
+}
+EXPORT_SYMBOL(drm_add_modes_noedid);
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_encoder_slave.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_encoder_slave.c
new file mode 100644 (file)
index 0000000..f018469
--- /dev/null
@@ -0,0 +1,116 @@
+/*
+ * Copyright (C) 2009 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drm_encoder_slave.h"
+
+/**
+ * drm_i2c_encoder_init - Initialize an I2C slave encoder
+ * @dev:       DRM device.
+ * @encoder:    Encoder to be attached to the I2C device. You aren't
+ *             required to have called drm_encoder_init() before.
+ * @adap:      I2C adapter that will be used to communicate with
+ *             the device.
+ * @info:      Information that will be used to create the I2C device.
+ *             Required fields are @addr and @type.
+ *
+ * Create an I2C device on the specified bus (the module containing its
+ * driver is transparently loaded) and attach it to the specified
+ * &drm_encoder_slave. The @slave_funcs field will be initialized with
+ * the hooks provided by the slave driver.
+ *
+ * Returns 0 on success or a negative errno on failure, in particular,
+ * -ENODEV is returned when no matching driver is found.
+ */
+int drm_i2c_encoder_init(struct drm_device *dev,
+                        struct drm_encoder_slave *encoder,
+                        struct i2c_adapter *adap,
+                        const struct i2c_board_info *info)
+{
+       char modalias[sizeof(I2C_MODULE_PREFIX)
+                     + I2C_NAME_SIZE];
+       struct module *module = NULL;
+       struct i2c_client *client;
+       struct drm_i2c_encoder_driver *encoder_drv;
+       int err = 0;
+
+       snprintf(modalias, sizeof(modalias),
+                "%s%s", I2C_MODULE_PREFIX, info->type);
+       request_module(modalias);
+
+       client = i2c_new_device(adap, info);
+       if (!client) {
+               err = -ENOMEM;
+               goto fail;
+       }
+
+       if (!client->driver) {
+               err = -ENODEV;
+               goto fail_unregister;
+       }
+
+       module = client->driver->driver.owner;
+       if (!try_module_get(module)) {
+               err = -ENODEV;
+               goto fail_unregister;
+       }
+
+       encoder->bus_priv = client;
+
+       encoder_drv = to_drm_i2c_encoder_driver(client->driver);
+
+       err = encoder_drv->encoder_init(client, dev, encoder);
+       if (err)
+               goto fail_unregister;
+
+       return 0;
+
+fail_unregister:
+       i2c_unregister_device(client);
+       module_put(module);
+fail:
+       return err;
+}
+EXPORT_SYMBOL(drm_i2c_encoder_init);
+
+/**
+ * drm_i2c_encoder_destroy - Unregister the I2C device backing an encoder
+ * @drm_encoder:       Encoder to be unregistered.
+ *
+ * This should be called from the @destroy method of an I2C slave
+ * encoder driver once I2C access is no longer needed.
+ */
+void drm_i2c_encoder_destroy(struct drm_encoder *drm_encoder)
+{
+       struct drm_encoder_slave *encoder = to_encoder_slave(drm_encoder);
+       struct i2c_client *client = drm_i2c_encoder_get_client(drm_encoder);
+       struct module *module = client->driver->driver.owner;
+
+       i2c_unregister_device(client);
+       encoder->bus_priv = NULL;
+
+       module_put(module);
+}
+EXPORT_SYMBOL(drm_i2c_encoder_destroy);
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_fops.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_fops.c
new file mode 100644 (file)
index 0000000..08d14df
--- /dev/null
@@ -0,0 +1,660 @@
+/**
+ * \file drm_fops.c
+ * File operations for DRM
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Daryll Strauss <daryll@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include <linux/poll.h>
+#include <linux/smp_lock.h>
+
+static int drm_open_helper(struct inode *inode, struct file *filp,
+                          struct drm_device * dev);
+
+/*
+ * Per-first-open device setup: runs the driver's firstopen() hook, then
+ * resets legacy DMA, counter, context and queue bookkeeping.  Called from
+ * drm_open() only when the device open count transitions 0 -> 1.
+ * Returns zero on success or a negative errno from the driver/DMA setup.
+ */
+static int drm_setup(struct drm_device * dev)
+{
+       int i;
+       int ret;
+
+       if (dev->driver->firstopen) {
+               ret = dev->driver->firstopen(dev);
+               if (ret != 0)
+                       return ret;
+       }
+
+       atomic_set(&dev->ioctl_count, 0);
+       atomic_set(&dev->vma_count, 0);
+
+       /* Legacy DMA buffers are only used by non-modesetting drivers. */
+       if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
+           !drm_core_check_feature(dev, DRIVER_MODESET)) {
+               dev->buf_use = 0;
+               atomic_set(&dev->buf_alloc, 0);
+
+               i = drm_dma_setup(dev);
+               if (i < 0)
+                       return i;
+       }
+
+       /* Zero all statistics counters for this fresh open cycle. */
+       for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
+               atomic_set(&dev->counts[i], 0);
+
+       dev->sigdata.lock = NULL;
+
+       /* Reset legacy context/queue state. */
+       dev->queue_count = 0;
+       dev->queue_reserved = 0;
+       dev->queue_slots = 0;
+       dev->queuelist = NULL;
+       dev->context_flag = 0;
+       dev->interrupt_flag = 0;
+       dev->dma_flag = 0;
+       dev->last_context = 0;
+       dev->last_switch = 0;
+       dev->last_checked = 0;
+       init_waitqueue_head(&dev->context_wait);
+       dev->if_version = 0;
+
+       dev->ctx_start = 0;
+       dev->lck_start = 0;
+
+       dev->buf_async = NULL;
+       init_waitqueue_head(&dev->buf_readers);
+       init_waitqueue_head(&dev->buf_writers);
+
+       DRM_DEBUG("\n");
+
+       /*
+        * The kernel's context could be created here, but is now created
+        * in drm_dma_enqueue.  This is more resource-efficient for
+        * hardware that does not do DMA, but may mean that
+        * drm_select_queue fails between the time the interrupt is
+        * initialized and the time the queues are initialized.
+        */
+
+       return 0;
+}
+
+/**
+ * Open file.
+ *
+ * \param inode device inode
+ * \param filp file pointer.
+ * \return zero on success or a negative number on failure.
+ *
+ * Searches the DRM device with the same minor number, calls open_helper(), and
+ * increments the device open count. If the open count was previously at zero,
+ * i.e., it's the first time the device is opened, then calls setup().
+ */
+int drm_open(struct inode *inode, struct file *filp)
+{
+       struct drm_device *dev = NULL;
+       int minor_id = iminor(inode);
+       struct drm_minor *minor;
+       int retcode = 0;
+
+       minor = idr_find(&drm_minors_idr, minor_id);
+       if (!minor)
+               return -ENODEV;
+
+       if (!(dev = minor->dev))
+               return -ENODEV;
+
+       retcode = drm_open_helper(inode, filp, dev);
+       if (!retcode) {
+               atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
+               spin_lock(&dev->count_lock);
+               /* First opener runs the one-time drm_setup() path. */
+               if (!dev->open_count++) {
+                       spin_unlock(&dev->count_lock);
+                       retcode = drm_setup(dev);
+                       goto out;
+               }
+               spin_unlock(&dev->count_lock);
+       }
+out:
+       mutex_lock(&dev->struct_mutex);
+       /* Legacy nodes share one address_space; it must never change. */
+       if (minor->type == DRM_MINOR_LEGACY) {
+               BUG_ON((dev->dev_mapping != NULL) &&
+                       (dev->dev_mapping != inode->i_mapping));
+               if (dev->dev_mapping == NULL)
+                       dev->dev_mapping = inode->i_mapping;
+       }
+       mutex_unlock(&dev->struct_mutex);
+
+       return retcode;
+}
+EXPORT_SYMBOL(drm_open);
+
+/**
+ * File \c open operation.
+ *
+ * \param inode device inode.
+ * \param filp file pointer.
+ *
+ * Puts the dev->fops corresponding to the device minor number into
+ * \p filp, call the \c open method, and restore the file operations.
+ */
+int drm_stub_open(struct inode *inode, struct file *filp)
+{
+       struct drm_device *dev = NULL;
+       struct drm_minor *minor;
+       int minor_id = iminor(inode);
+       int err = -ENODEV;
+       const struct file_operations *old_fops;
+
+       DRM_DEBUG("\n");
+
+       /* BKL pushdown: note that nothing else serializes idr_find() */
+       lock_kernel();
+       minor = idr_find(&drm_minors_idr, minor_id);
+       if (!minor)
+               goto out;
+
+       if (!(dev = minor->dev))
+               goto out;
+
+       /* Swap in the driver's fops, then delegate the open to it. */
+       old_fops = filp->f_op;
+       filp->f_op = fops_get(&dev->driver->fops);
+       if (filp->f_op == NULL) {
+               filp->f_op = old_fops;
+               goto out;
+       }
+       /* NOTE(review): if the driver fops has no .open, err stays -ENODEV
+        * even though the fops swap succeeded — confirm this is intended. */
+       if (filp->f_op->open && (err = filp->f_op->open(inode, filp))) {
+               fops_put(filp->f_op);
+               filp->f_op = fops_get(old_fops);
+       }
+       fops_put(old_fops);
+
+out:
+       unlock_kernel();
+       return err;
+}
+
+/**
+ * Check whether DRI will run on this CPU.
+ *
+ * \return non-zero if the DRI will run on this CPU, or zero otherwise.
+ */
+static int drm_cpu_valid(void)
+{
+#if defined(__i386__)
+       if (boot_cpu_data.x86 == 3)
+               return 0;       /* No cmpxchg on a 386 */
+#endif
+#if defined(__sparc__) && !defined(__sparc_v9__)
+       return 0;               /* No cmpxchg before v9 sparc. */
+#endif
+       return 1;
+}
+
+/**
+ * Called whenever a process opens /dev/drm.
+ *
+ * \param inode device inode.
+ * \param filp file pointer.
+ * \param dev device.
+ * \return zero on success or a negative number on failure.
+ *
+ * Creates and initializes a drm_file structure for the file private data in \p
+ * filp and add it into the double linked list in \p dev.
+ */
+static int drm_open_helper(struct inode *inode, struct file *filp,
+                          struct drm_device * dev)
+{
+       int minor_id = iminor(inode);
+       struct drm_file *priv;
+       int ret;
+
+       if (filp->f_flags & O_EXCL)
+               return -EBUSY;  /* No exclusive opens */
+       if (!drm_cpu_valid())
+               return -EINVAL;
+
+       DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id);
+
+       priv = kmalloc(sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       /* Zero-fill so all unset drm_file fields start out NULL/0. */
+       memset(priv, 0, sizeof(*priv));
+       filp->private_data = priv;
+       priv->filp = filp;
+       priv->uid = current_euid();
+       priv->pid = task_pid_nr(current);
+       priv->minor = idr_find(&drm_minors_idr, minor_id);
+       priv->ioctl_count = 0;
+       /* for compatibility root is always authenticated */
+       priv->authenticated = capable(CAP_SYS_ADMIN);
+       priv->lock_count = 0;
+
+       INIT_LIST_HEAD(&priv->lhead);
+       INIT_LIST_HEAD(&priv->fbs);
+       INIT_LIST_HEAD(&priv->event_list);
+       init_waitqueue_head(&priv->event_wait);
+       priv->event_space = 4096; /* set aside 4k for event buffer */
+
+       if (dev->driver->driver_features & DRIVER_GEM)
+               drm_gem_open(dev, priv);
+
+       if (dev->driver->open) {
+               ret = dev->driver->open(dev, priv);
+               if (ret < 0)
+                       goto out_free;
+       }
+
+
+       /* if there is no current master make this fd it */
+       mutex_lock(&dev->struct_mutex);
+       if (!priv->minor->master) {
+               /* create a new master */
+               priv->minor->master = drm_master_create(priv->minor);
+               if (!priv->minor->master) {
+                       mutex_unlock(&dev->struct_mutex);
+                       ret = -ENOMEM;
+                       goto out_free;
+               }
+
+               priv->is_master = 1;
+               /* take another reference for the copy in the local file priv */
+               priv->master = drm_master_get(priv->minor->master);
+
+               /* The master fd is implicitly authenticated. */
+               priv->authenticated = 1;
+
+               /* struct_mutex is dropped around the driver callbacks below;
+                * they may sleep or take it themselves. */
+               mutex_unlock(&dev->struct_mutex);
+               if (dev->driver->master_create) {
+                       ret = dev->driver->master_create(dev, priv->master);
+                       if (ret) {
+                               mutex_lock(&dev->struct_mutex);
+                               /* drop both references if this fails */
+                               drm_master_put(&priv->minor->master);
+                               drm_master_put(&priv->master);
+                               mutex_unlock(&dev->struct_mutex);
+                               goto out_free;
+                       }
+               }
+               mutex_lock(&dev->struct_mutex);
+               if (dev->driver->master_set) {
+                       ret = dev->driver->master_set(dev, priv, true);
+                       if (ret) {
+                               /* drop both references if this fails */
+                               drm_master_put(&priv->minor->master);
+                               drm_master_put(&priv->master);
+                               mutex_unlock(&dev->struct_mutex);
+                               goto out_free;
+                       }
+               }
+               mutex_unlock(&dev->struct_mutex);
+       } else {
+               /* get a reference to the master */
+               priv->master = drm_master_get(priv->minor->master);
+               mutex_unlock(&dev->struct_mutex);
+       }
+
+       mutex_lock(&dev->struct_mutex);
+       list_add(&priv->lhead, &dev->filelist);
+       mutex_unlock(&dev->struct_mutex);
+
+#ifdef __alpha__
+       /*
+        * Default the hose
+        */
+       if (!dev->hose) {
+               struct pci_dev *pci_dev;
+               pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
+               if (pci_dev) {
+                       dev->hose = pci_dev->sysdata;
+                       pci_dev_put(pci_dev);
+               }
+               if (!dev->hose) {
+                       struct pci_bus *b = pci_bus_b(pci_root_buses.next);
+                       if (b)
+                               dev->hose = b->sysdata;
+               }
+       }
+#endif
+
+       return 0;
+      out_free:
+       kfree(priv);
+       filp->private_data = NULL;
+       return ret;
+}
+
+/** Wire the fd into the device's fasync (SIGIO) notification list. */
+int drm_fasync(int fd, struct file *filp, int on)
+{
+       struct drm_file *priv = filp->private_data;
+       struct drm_device *dev = priv->minor->dev;
+
+       DRM_DEBUG("fd = %d, device = 0x%lx\n", fd,
+                 (long)old_encode_dev(priv->minor->device));
+       return fasync_helper(fd, filp, on, &dev->buf_async);
+}
+EXPORT_SYMBOL(drm_fasync);
+
+/*
+ * Reclaim locked buffers; note that this may be a bad idea if the current
+ * context doesn't have the hw lock...
+ */
+static void drm_reclaim_locked_buffers(struct drm_device *dev, struct file *f)
+{
+       struct drm_file *file_priv = f->private_data;
+
+       if (drm_i_have_hw_lock(dev, file_priv)) {
+               dev->driver->reclaim_buffers_locked(dev, file_priv);
+       } else {
+               /* Poll for the idlelock for up to 3 seconds before giving up. */
+               unsigned long _end = jiffies + 3 * DRM_HZ;
+               int locked = 0;
+
+               drm_idlelock_take(&file_priv->master->lock);
+
+               /*
+                * Wait for a while.
+                */
+               do {
+                       spin_lock_bh(&file_priv->master->lock.spinlock);
+                       locked = file_priv->master->lock.idle_has_lock;
+                       spin_unlock_bh(&file_priv->master->lock.spinlock);
+                       if (locked)
+                               break;
+                       schedule();
+               } while (!time_after_eq(jiffies, _end));
+
+               if (!locked) {
+                       DRM_ERROR("reclaim_buffers_locked() deadlock. Please rework this\n"
+                                 "\tdriver to use reclaim_buffers_idlelocked() instead.\n"
+                                 "\tI will go on reclaiming the buffers anyway.\n");
+               }
+
+               /* Reclaim even on timeout; see the error message above. */
+               dev->driver->reclaim_buffers_locked(dev, file_priv);
+               drm_idlelock_release(&file_priv->master->lock);
+       }
+}
+
+/*
+ * Release master-owned resources on close: run the driver's buffer
+ * reclaim hooks, free the hardware lock if this fd holds it, and reclaim
+ * legacy DMA buffers.  Called from drm_release() while a master exists.
+ */
+static void drm_master_release(struct drm_device *dev, struct file *filp)
+{
+       struct drm_file *file_priv = filp->private_data;
+
+       if (dev->driver->reclaim_buffers_locked &&
+           file_priv->master->lock.hw_lock)
+               drm_reclaim_locked_buffers(dev, filp);
+
+       if (dev->driver->reclaim_buffers_idlelocked &&
+           file_priv->master->lock.hw_lock) {
+               drm_idlelock_take(&file_priv->master->lock);
+               dev->driver->reclaim_buffers_idlelocked(dev, file_priv);
+               drm_idlelock_release(&file_priv->master->lock);
+       }
+
+
+       /* Free the hardware lock if this file still holds it. */
+       if (drm_i_have_hw_lock(dev, file_priv)) {
+               DRM_DEBUG("File %p released, freeing lock for context %d\n",
+                         filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
+               drm_lock_free(&file_priv->master->lock,
+                             _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
+       }
+
+       if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
+           !dev->driver->reclaim_buffers_locked) {
+               dev->driver->reclaim_buffers(dev, file_priv);
+       }
+}
+
+/*
+ * Destroy all events still pending for a closing file: cancel this
+ * file's queued vblank events (dropping their vblank references) and
+ * free any events already delivered but not yet read.
+ */
+static void drm_events_release(struct drm_file *file_priv)
+{
+       struct drm_device *dev = file_priv->minor->dev;
+       struct drm_pending_event *e, *et;
+       struct drm_pending_vblank_event *v, *vt;
+       unsigned long flags;
+
+       spin_lock_irqsave(&dev->event_lock, flags);
+
+       /* Remove pending flips */
+       list_for_each_entry_safe(v, vt, &dev->vblank_event_list, base.link)
+               if (v->base.file_priv == file_priv) {
+                       list_del(&v->base.link);
+                       drm_vblank_put(dev, v->pipe);
+                       v->base.destroy(&v->base);
+               }
+
+       /* Remove unconsumed events */
+       list_for_each_entry_safe(e, et, &file_priv->event_list, link)
+               e->destroy(e);
+
+       spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+/**
+ * Release file.
+ *
+ * \param inode device inode
+ * \param file_priv DRM file private.
+ * \return zero on success or a negative number on failure.
+ *
+ * If the hardware lock is held then free it, and take it again for the kernel
+ * context since it's necessary to reclaim buffers. Unlink the file private
+ * data from its list and free it. Decreases the open count and if it reaches
+ * zero calls drm_lastclose().
+ */
+int drm_release(struct inode *inode, struct file *filp)
+{
+       struct drm_file *file_priv = filp->private_data;
+       struct drm_device *dev = file_priv->minor->dev;
+       int retcode = 0;
+
+       lock_kernel();
+
+       DRM_DEBUG("open_count = %d\n", dev->open_count);
+
+       if (dev->driver->preclose)
+               dev->driver->preclose(dev, file_priv);
+
+       /* ========================================================
+        * Begin inline drm_release
+        */
+
+       DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
+                 task_pid_nr(current),
+                 (long)old_encode_dev(file_priv->minor->device),
+                 dev->open_count);
+
+       /* if the master has gone away we can't do anything with the lock */
+       if (file_priv->minor->master)
+               drm_master_release(dev, filp);
+
+       drm_events_release(file_priv);
+
+       if (dev->driver->driver_features & DRIVER_GEM)
+               drm_gem_release(dev, file_priv);
+
+       if (dev->driver->driver_features & DRIVER_MODESET)
+               drm_fb_release(file_priv);
+
+       /* Tear down any legacy contexts still owned by this file. */
+       mutex_lock(&dev->ctxlist_mutex);
+       if (!list_empty(&dev->ctxlist)) {
+               struct drm_ctx_list *pos, *n;
+
+               list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
+                       if (pos->tag == file_priv &&
+                           pos->handle != DRM_KERNEL_CONTEXT) {
+                               if (dev->driver->context_dtor)
+                                       dev->driver->context_dtor(dev,
+                                                                 pos->handle);
+
+                               drm_ctxbitmap_free(dev, pos->handle);
+
+                               list_del(&pos->head);
+                               kfree(pos);
+                               --dev->ctx_count;
+                       }
+               }
+       }
+       mutex_unlock(&dev->ctxlist_mutex);
+
+       mutex_lock(&dev->struct_mutex);
+
+       if (file_priv->is_master) {
+               struct drm_master *master = file_priv->master;
+               struct drm_file *temp;
+               /* De-authenticate every other fd that used this master. */
+               list_for_each_entry(temp, &dev->filelist, lhead) {
+                       if ((temp->master == file_priv->master) &&
+                           (temp != file_priv))
+                               temp->authenticated = 0;
+               }
+
+               /**
+                * Since the master is disappearing, so is the
+                * possibility to lock.
+                */
+
+               if (master->lock.hw_lock) {
+                       if (dev->sigdata.lock == master->lock.hw_lock)
+                               dev->sigdata.lock = NULL;
+                       master->lock.hw_lock = NULL;
+                       master->lock.file_priv = NULL;
+                       wake_up_interruptible_all(&master->lock.lock_queue);
+               }
+
+               if (file_priv->minor->master == file_priv->master) {
+                       /* drop the reference held by the minor */
+                       if (dev->driver->master_drop)
+                               dev->driver->master_drop(dev, file_priv, true);
+                       drm_master_put(&file_priv->minor->master);
+               }
+       }
+
+       /* drop the reference held by the file priv */
+       drm_master_put(&file_priv->master);
+       file_priv->is_master = 0;
+       list_del(&file_priv->lhead);
+       mutex_unlock(&dev->struct_mutex);
+
+       if (dev->driver->postclose)
+               dev->driver->postclose(dev, file_priv);
+       kfree(file_priv);
+
+       /* ========================================================
+        * End inline drm_release
+        */
+
+       atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
+       spin_lock(&dev->count_lock);
+       /* Last close: refuse if ioctls are in flight, else run lastclose. */
+       if (!--dev->open_count) {
+               if (atomic_read(&dev->ioctl_count)) {
+                       DRM_ERROR("Device busy: %d\n",
+                                 atomic_read(&dev->ioctl_count));
+                       spin_unlock(&dev->count_lock);
+                       unlock_kernel();
+                       return -EBUSY;
+               }
+               spin_unlock(&dev->count_lock);
+               unlock_kernel();
+               return drm_lastclose(dev);
+       }
+       spin_unlock(&dev->count_lock);
+
+       unlock_kernel();
+
+       return retcode;
+}
+EXPORT_SYMBOL(drm_release);
+
+/*
+ * Pop the oldest pending event for @file_priv into *@out, but only if
+ * its length still fits within @max bytes given @total already consumed.
+ * Returns true when an event was dequeued (its space is returned to the
+ * file's event budget); false when the list is empty or the next event
+ * would not fit.
+ */
+static bool
+drm_dequeue_event(struct drm_file *file_priv,
+                 size_t total, size_t max, struct drm_pending_event **out)
+{
+       struct drm_device *dev = file_priv->minor->dev;
+       struct drm_pending_event *e;
+       unsigned long flags;
+       bool ret = false;
+
+       spin_lock_irqsave(&dev->event_lock, flags);
+
+       *out = NULL;
+       if (list_empty(&file_priv->event_list))
+               goto out;
+       e = list_first_entry(&file_priv->event_list,
+                            struct drm_pending_event, link);
+       if (e->event->length + total > max)
+               goto out;
+
+       file_priv->event_space += e->event->length;
+       list_del(&e->link);
+       *out = e;
+       ret = true;
+
+out:
+       spin_unlock_irqrestore(&dev->event_lock, flags);
+       return ret;
+}
+
+/*
+ * read() for DRM fds: block until at least one event is pending, then
+ * copy as many whole events as fit in @count bytes to userspace.
+ * Returns the number of bytes copied, or a negative errno.
+ */
+ssize_t drm_read(struct file *filp, char __user *buffer,
+                size_t count, loff_t *offset)
+{
+       struct drm_file *file_priv = filp->private_data;
+       struct drm_pending_event *e;
+       size_t total;
+       ssize_t ret;
+
+       ret = wait_event_interruptible(file_priv->event_wait,
+                                      !list_empty(&file_priv->event_list));
+       if (ret < 0)
+               return ret;
+
+       total = 0;
+       while (drm_dequeue_event(file_priv, total, count, &e)) {
+               /* NOTE(review): total is size_t, so -EFAULT wraps to a huge
+                * unsigned value before the ssize_t return converts it back;
+                * the dequeued event is also leaked here — confirm intended. */
+               if (copy_to_user(buffer + total,
+                                e->event, e->event->length)) {
+                       total = -EFAULT;
+                       break;
+               }
+
+               total += e->event->length;
+               e->destroy(e);
+       }
+
+       return total;
+}
+EXPORT_SYMBOL(drm_read);
+
+/*
+ * poll() for DRM fds: readable whenever the per-file event list is
+ * non-empty; never signals writability.
+ */
+unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)
+{
+       struct drm_file *file_priv = filp->private_data;
+       unsigned int mask = 0;
+
+       poll_wait(filp, &file_priv->event_wait, wait);
+
+       if (!list_empty(&file_priv->event_list))
+               mask |= POLLIN | POLLRDNORM;
+
+       return mask;
+}
+EXPORT_SYMBOL(drm_poll);
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_gem.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_gem.c
new file mode 100644 (file)
index 0000000..e9dbb48
--- /dev/null
@@ -0,0 +1,573 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/module.h>
+#include <linux/mman.h>
+#include <linux/pagemap.h>
+#include "drmP.h"
+
+/** @file drm_gem.c
+ *
+ * This file provides some of the base ioctls and library routines for
+ * the graphics memory manager implemented by each device driver.
+ *
+ * Because various devices have different requirements in terms of
+ * synchronization and migration strategies, implementing that is left up to
+ * the driver, and all that the general API provides should be generic --
+ * allocating objects, reading/writing data with the cpu, freeing objects.
+ * Even there, platform-dependent optimizations for reading/writing data with
+ * the CPU mean we'll likely hook those out to driver-specific calls.  However,
+ * the DRI2 implementation wants to have at least allocate/mmap be generic.
+ *
+ * The goal was to have swap-backed object allocation managed through
+ * struct file.  However, file descriptors as handles to a struct file have
+ * two major failings:
+ * - Process limits prevent more than 1024 or so being used at a time by
+ *   default.
+ * - Inability to allocate high fds will aggravate the X Server's select()
+ *   handling, and likely that of many GL client applications as well.
+ *
+ * This led to a plan of using our own integer IDs (called handles, following
+ * DRM terminology) to mimic fds, and implement the fd syscalls we need as
+ * ioctls.  The objects themselves will still include the struct file so
+ * that we can transition to fds if the required kernel infrastructure shows
+ * up at a later date, and as our interface with shmfs for memory allocation.
+ */
+
+/*
+ * We make up offsets for buffer objects so we can recognize them at
+ * mmap time.
+ */
+#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
+#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
+
+/**
+ * Initialize the GEM device fields: name idr/lock, object accounting
+ * counters, and the mmap-offset hash table and range manager.
+ * Returns 0 on success or -ENOMEM, releasing partial state on failure.
+ */
+
+int
+drm_gem_init(struct drm_device *dev)
+{
+       struct drm_gem_mm *mm;
+
+       spin_lock_init(&dev->object_name_lock);
+       idr_init(&dev->object_name_idr);
+       atomic_set(&dev->object_count, 0);
+       atomic_set(&dev->object_memory, 0);
+       atomic_set(&dev->pin_count, 0);
+       atomic_set(&dev->pin_memory, 0);
+       atomic_set(&dev->gtt_count, 0);
+       atomic_set(&dev->gtt_memory, 0);
+
+       mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
+       if (!mm) {
+               DRM_ERROR("out of memory\n");
+               return -ENOMEM;
+       }
+
+       dev->mm_private = mm;
+
+       /* 2^19-bucket hash mapping fake mmap offsets to objects. */
+       if (drm_ht_create(&mm->offset_hash, 19)) {
+               kfree(mm);
+               return -ENOMEM;
+       }
+
+       /* Range manager handing out the fake offsets themselves. */
+       if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
+                       DRM_FILE_PAGE_OFFSET_SIZE)) {
+               drm_ht_remove(&mm->offset_hash);
+               kfree(mm);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+/* Tear down the GEM mmap-offset manager created by drm_gem_init(). */
+void
+drm_gem_destroy(struct drm_device *dev)
+{
+       struct drm_gem_mm *mm = dev->mm_private;
+
+       drm_mm_takedown(&mm->offset_manager);
+       drm_ht_remove(&mm->offset_hash);
+       kfree(mm);
+       dev->mm_private = NULL;
+}
+
+/**
+ * Allocate a GEM object of the specified size with shmfs backing store.
+ * @size must be page-aligned.  Returns the new object with refcount and
+ * handlecount both initialized to 1, or NULL on failure.
+ */
+struct drm_gem_object *
+drm_gem_object_alloc(struct drm_device *dev, size_t size)
+{
+       struct drm_gem_object *obj;
+
+       BUG_ON((size & (PAGE_SIZE - 1)) != 0);
+
+       obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+       if (!obj)
+               goto free;
+
+       obj->dev = dev;
+       obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
+       if (IS_ERR(obj->filp))
+               goto free;
+
+       /* Basically we want to disable the OOM killer and handle ENOMEM
+        * ourselves by sacrificing pages from cached buffers.
+        * XXX shmem_file_[gs]et_gfp_mask()
+        */
+       mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping,
+                            GFP_HIGHUSER |
+                            __GFP_COLD |
+                            __GFP_FS |
+                            __GFP_RECLAIMABLE |
+                            __GFP_NORETRY |
+                            __GFP_NOWARN |
+                            __GFP_NOMEMALLOC);
+
+       kref_init(&obj->refcount);
+       kref_init(&obj->handlecount);
+       obj->size = size;
+       /* Give the driver a chance to attach its private state. */
+       if (dev->driver->gem_init_object != NULL &&
+           dev->driver->gem_init_object(obj) != 0) {
+               goto fput;
+       }
+       atomic_inc(&dev->object_count);
+       atomic_add(obj->size, &dev->object_memory);
+       return obj;
+fput:
+       fput(obj->filp);
+free:
+       kfree(obj);
+       return NULL;
+}
+EXPORT_SYMBOL(drm_gem_object_alloc);
+
+/**
+ * Removes the mapping from handle to filp for this object.
+ * Returns 0 on success or -EINVAL if @handle does not name an object
+ * in this file's table; drops the handle reference on success.
+ */
+static int
+drm_gem_handle_delete(struct drm_file *filp, u32 handle)
+{
+       struct drm_device *dev;
+       struct drm_gem_object *obj;
+
+       /* This is gross. The idr system doesn't let us try a delete and
+        * return an error code.  It just spews if you fail at deleting.
+        * So, we have to grab a lock around finding the object and then
+        * doing the delete on it and dropping the refcount, or the user
+        * could race us to double-decrement the refcount and cause a
+        * use-after-free later.  Given the frequency of our handle lookups,
+        * we may want to use ida for number allocation and a hash table
+        * for the pointers, anyway.
+        */
+       spin_lock(&filp->table_lock);
+
+       /* Check if we currently have a reference on the object */
+       obj = idr_find(&filp->object_idr, handle);
+       if (obj == NULL) {
+               spin_unlock(&filp->table_lock);
+               return -EINVAL;
+       }
+       dev = obj->dev;
+
+       /* Release reference and decrement refcount. */
+       idr_remove(&filp->object_idr, handle);
+       spin_unlock(&filp->table_lock);
+
+       /* Handle unreference must run under struct_mutex. */
+       mutex_lock(&dev->struct_mutex);
+       drm_gem_object_handle_unreference(obj);
+       mutex_unlock(&dev->struct_mutex);
+
+       return 0;
+}
+
+/**
+ * Create a handle for this object. This adds a handle reference
+ * to the object, which includes a regular reference count. Callers
+ * will likely want to dereference the object afterwards.
+ * On success *@handlep receives the new handle (always >= 1).
+ */
+int
+drm_gem_handle_create(struct drm_file *file_priv,
+                      struct drm_gem_object *obj,
+                      u32 *handlep)
+{
+       int     ret;
+
+       /*
+        * Get the user-visible handle using idr.
+        * Pre-get/get loop: idr_pre_get() cannot run under the spinlock,
+        * so retry on -EAGAIN if another thread consumed the preallocation.
+        */
+again:
+       /* ensure there is space available to allocate a handle */
+       if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
+               return -ENOMEM;
+
+       /* do the allocation under our spinlock */
+       spin_lock(&file_priv->table_lock);
+       ret = idr_get_new_above(&file_priv->object_idr, obj, 1, (int *)handlep);
+       spin_unlock(&file_priv->table_lock);
+       if (ret == -EAGAIN)
+               goto again;
+
+       if (ret != 0)
+               return ret;
+
+       drm_gem_object_handle_reference(obj);
+       return 0;
+}
+EXPORT_SYMBOL(drm_gem_handle_create);
+
+/** Returns a reference to the object named by the handle, or NULL if
+ * @handle is not in @filp's table.  The caller owns the reference and
+ * must drop it with drm_gem_object_unreference(). */
+struct drm_gem_object *
+drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
+                     u32 handle)
+{
+       struct drm_gem_object *obj;
+
+       spin_lock(&filp->table_lock);
+
+       /* Check if we currently have a reference on the object */
+       obj = idr_find(&filp->object_idr, handle);
+       if (obj == NULL) {
+               spin_unlock(&filp->table_lock);
+               return NULL;
+       }
+
+       /* Take the caller's reference while still under the table lock. */
+       drm_gem_object_reference(obj);
+
+       spin_unlock(&filp->table_lock);
+
+       return obj;
+}
+EXPORT_SYMBOL(drm_gem_object_lookup);
+
+/**
+ * Releases the handle to an mm object.
+ * DRM_IOCTL_GEM_CLOSE: returns -ENODEV if the driver lacks GEM support,
+ * otherwise the result of deleting args->handle from this file's table.
+ */
+int
+drm_gem_close_ioctl(struct drm_device *dev, void *data,
+                   struct drm_file *file_priv)
+{
+       struct drm_gem_close *args = data;
+       int ret;
+
+       if (!(dev->driver->driver_features & DRIVER_GEM))
+               return -ENODEV;
+
+       ret = drm_gem_handle_delete(file_priv, args->handle);
+
+       return ret;
+}
+
+/**
+ * Create a global name for an object, returning the name.
+ *
+ * Note that the name does not hold a reference; when the object
+ * is freed, the name goes away.
+ */
+int
+drm_gem_flink_ioctl(struct drm_device *dev, void *data,
+                   struct drm_file *file_priv)
+{
+       struct drm_gem_flink *args = data;
+       struct drm_gem_object *obj;
+       int ret;
+
+       if (!(dev->driver->driver_features & DRIVER_GEM))
+               return -ENODEV;
+
+       obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+       if (obj == NULL)
+               return -EBADF;
+
+again:
+       if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) {
+               ret = -ENOMEM;
+               goto err;
+       }
+
+       spin_lock(&dev->object_name_lock);
+       if (!obj->name) {
+               ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
+                                       &obj->name);
+               args->name = (uint64_t) obj->name;
+               spin_unlock(&dev->object_name_lock);
+
+               if (ret == -EAGAIN)
+                       goto again;
+
+               if (ret != 0)
+                       goto err;
+
+               /* Allocate a reference for the name table.  */
+               drm_gem_object_reference(obj);
+       } else {
+               args->name = (uint64_t) obj->name;
+               spin_unlock(&dev->object_name_lock);
+               ret = 0;
+       }
+
+err:
+       mutex_lock(&dev->struct_mutex);
+       drm_gem_object_unreference(obj);
+       mutex_unlock(&dev->struct_mutex);
+       return ret;
+}
+
+/**
+ * Open an object using the global name, returning a handle and the size.
+ *
+ * This handle (of course) holds a reference to the object, so the object
+ * will not go away until the handle is deleted.
+ */
+int
+drm_gem_open_ioctl(struct drm_device *dev, void *data,
+                  struct drm_file *file_priv)
+{
+       struct drm_gem_open *args = data;
+       struct drm_gem_object *obj;
+       int ret;
+       u32 handle;
+
+       if (!(dev->driver->driver_features & DRIVER_GEM))
+               return -ENODEV;
+
+       spin_lock(&dev->object_name_lock);
+       obj = idr_find(&dev->object_name_idr, (int) args->name);
+       if (obj)
+               drm_gem_object_reference(obj);
+       spin_unlock(&dev->object_name_lock);
+       if (!obj)
+               return -ENOENT;
+
+       ret = drm_gem_handle_create(file_priv, obj, &handle);
+       mutex_lock(&dev->struct_mutex);
+       drm_gem_object_unreference(obj);
+       mutex_unlock(&dev->struct_mutex);
+       if (ret)
+               return ret;
+
+       args->handle = handle;
+       args->size = obj->size;
+
+       return 0;
+}
+
+/**
+ * Called at device open time, sets up the structure for handling refcounting
+ * of mm objects.
+ */
+void
+drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
+{
+       idr_init(&file_private->object_idr);
+       spin_lock_init(&file_private->table_lock);
+}
+
+/**
+ * Called at device close to release the file's
+ * handle references on objects.
+ */
+static int
+drm_gem_object_release_handle(int id, void *ptr, void *data)
+{
+       struct drm_gem_object *obj = ptr;
+
+       drm_gem_object_handle_unreference(obj);
+
+       return 0;
+}
+
+/**
+ * Called at close time when the filp is going away.
+ *
+ * Releases any remaining references on objects by this filp.
+ */
+void
+drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
+{
+       mutex_lock(&dev->struct_mutex);
+       idr_for_each(&file_private->object_idr,
+                    &drm_gem_object_release_handle, NULL);
+
+       idr_destroy(&file_private->object_idr);
+       mutex_unlock(&dev->struct_mutex);
+}
+
+/**
+ * Called after the last reference to the object has been lost.
+ *
+ * Frees the object
+ */
+void
+drm_gem_object_free(struct kref *kref)
+{
+       struct drm_gem_object *obj = (struct drm_gem_object *) kref;
+       struct drm_device *dev = obj->dev;
+
+       BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+
+       if (dev->driver->gem_free_object != NULL)
+               dev->driver->gem_free_object(obj);
+
+       fput(obj->filp);
+       atomic_dec(&dev->object_count);
+       atomic_sub(obj->size, &dev->object_memory);
+       kfree(obj);
+}
+EXPORT_SYMBOL(drm_gem_object_free);
+
+/**
+ * Called after the last handle to the object has been closed
+ *
+ * Removes any name for the object. Note that this must be
+ * called before drm_gem_object_free or we'll be touching
+ * freed memory
+ */
+void
+drm_gem_object_handle_free(struct kref *kref)
+{
+       struct drm_gem_object *obj = container_of(kref,
+                                                 struct drm_gem_object,
+                                                 handlecount);
+       struct drm_device *dev = obj->dev;
+
+       /* Remove any name for this object */
+       spin_lock(&dev->object_name_lock);
+       if (obj->name) {
+               idr_remove(&dev->object_name_idr, obj->name);
+               obj->name = 0;
+               spin_unlock(&dev->object_name_lock);
+               /*
+                * The object name held a reference to this object, drop
+                * that now.
+                */
+               drm_gem_object_unreference(obj);
+       } else
+               spin_unlock(&dev->object_name_lock);
+
+}
+EXPORT_SYMBOL(drm_gem_object_handle_free);
+
+void drm_gem_vm_open(struct vm_area_struct *vma)
+{
+       struct drm_gem_object *obj = vma->vm_private_data;
+
+       drm_gem_object_reference(obj);
+}
+EXPORT_SYMBOL(drm_gem_vm_open);
+
+void drm_gem_vm_close(struct vm_area_struct *vma)
+{
+       struct drm_gem_object *obj = vma->vm_private_data;
+       struct drm_device *dev = obj->dev;
+
+       mutex_lock(&dev->struct_mutex);
+       drm_gem_object_unreference(obj);
+       mutex_unlock(&dev->struct_mutex);
+}
+EXPORT_SYMBOL(drm_gem_vm_close);
+
+
+/**
+ * drm_gem_mmap - memory map routine for GEM objects
+ * @filp: DRM file pointer
+ * @vma: VMA for the area to be mapped
+ *
+ * If a driver supports GEM object mapping, mmap calls on the DRM file
+ * descriptor will end up here.
+ *
+ * If we find the object based on the offset passed in (vma->vm_pgoff will
+ * contain the fake offset we created when the GTT map ioctl was called on
+ * the object), we set up the driver fault handler so that any accesses
+ * to the object can be trapped, to perform migration, GTT binding, surface
+ * register allocation, or performance monitoring.
+ */
+int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+       struct drm_file *priv = filp->private_data;
+       struct drm_device *dev = priv->minor->dev;
+       struct drm_gem_mm *mm = dev->mm_private;
+       struct drm_local_map *map = NULL;
+       struct drm_gem_object *obj;
+       struct drm_hash_item *hash;
+       int ret = 0;
+
+       mutex_lock(&dev->struct_mutex);
+
+       if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
+               mutex_unlock(&dev->struct_mutex);
+               return drm_mmap(filp, vma);
+       }
+
+       map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
+       if (!map ||
+           ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
+               ret =  -EPERM;
+               goto out_unlock;
+       }
+
+       /* Check for valid size. */
+       if (map->size < vma->vm_end - vma->vm_start) {
+               ret = -EINVAL;
+               goto out_unlock;
+       }
+
+       obj = map->handle;
+       if (!obj->dev->driver->gem_vm_ops) {
+               ret = -EINVAL;
+               goto out_unlock;
+       }
+
+       vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
+       vma->vm_ops = obj->dev->driver->gem_vm_ops;
+       vma->vm_private_data = map->handle;
+       vma->vm_page_prot =  pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+
+       /* Take a ref for this mapping of the object, so that the fault
+        * handler can dereference the mmap offset's pointer to the object.
+        * This reference is cleaned up by the corresponding vm_close
+        * (which should happen whether the vma was created by this call, or
+        * by a vm_open due to mremap or partial unmap or whatever).
+        */
+       drm_gem_object_reference(obj);
+
+       vma->vm_file = filp;    /* Needed for drm_vm_open() */
+       drm_vm_open_locked(vma);
+
+out_unlock:
+       mutex_unlock(&dev->struct_mutex);
+
+       return ret;
+}
+EXPORT_SYMBOL(drm_gem_mmap);
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_hashtab.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_hashtab.c
new file mode 100644 (file)
index 0000000..f36b21c
--- /dev/null
@@ -0,0 +1,206 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ **************************************************************************/
+/*
+ * Simple open hash tab implementation.
+ *
+ * Authors:
+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include "drmP.h"
+#include "drm_hashtab.h"
+#include <linux/hash.h>
+
+int drm_ht_create(struct drm_open_hash *ht, unsigned int order)
+{
+       unsigned int i;
+
+       ht->size = 1 << order;
+       ht->order = order;
+       ht->fill = 0;
+       ht->table = NULL;
+       ht->use_vmalloc = ((ht->size * sizeof(*ht->table)) > PAGE_SIZE);
+       if (!ht->use_vmalloc) {
+               ht->table = kcalloc(ht->size, sizeof(*ht->table), GFP_KERNEL);
+       }
+       if (!ht->table) {
+               ht->use_vmalloc = 1;
+               ht->table = vmalloc(ht->size*sizeof(*ht->table));
+       }
+       if (!ht->table) {
+               DRM_ERROR("Out of memory for hash table\n");
+               return -ENOMEM;
+       }
+       for (i=0; i< ht->size; ++i) {
+               INIT_HLIST_HEAD(&ht->table[i]);
+       }
+       return 0;
+}
+EXPORT_SYMBOL(drm_ht_create);
+
+void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
+{
+       struct drm_hash_item *entry;
+       struct hlist_head *h_list;
+       struct hlist_node *list;
+       unsigned int hashed_key;
+       int count = 0;
+
+       hashed_key = hash_long(key, ht->order);
+       DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key);
+       h_list = &ht->table[hashed_key];
+       hlist_for_each(list, h_list) {
+               entry = hlist_entry(list, struct drm_hash_item, head);
+               DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key);
+       }
+}
+
+static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
+                                         unsigned long key)
+{
+       struct drm_hash_item *entry;
+       struct hlist_head *h_list;
+       struct hlist_node *list;
+       unsigned int hashed_key;
+
+       hashed_key = hash_long(key, ht->order);
+       h_list = &ht->table[hashed_key];
+       hlist_for_each(list, h_list) {
+               entry = hlist_entry(list, struct drm_hash_item, head);
+               if (entry->key == key)
+                       return list;
+               if (entry->key > key)
+                       break;
+       }
+       return NULL;
+}
+
+
+int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
+{
+       struct drm_hash_item *entry;
+       struct hlist_head *h_list;
+       struct hlist_node *list, *parent;
+       unsigned int hashed_key;
+       unsigned long key = item->key;
+
+       hashed_key = hash_long(key, ht->order);
+       h_list = &ht->table[hashed_key];
+       parent = NULL;
+       hlist_for_each(list, h_list) {
+               entry = hlist_entry(list, struct drm_hash_item, head);
+               if (entry->key == key)
+                       return -EINVAL;
+               if (entry->key > key)
+                       break;
+               parent = list;
+       }
+       if (parent) {
+               hlist_add_after(parent, &item->head);
+       } else {
+               hlist_add_head(&item->head, h_list);
+       }
+       return 0;
+}
+EXPORT_SYMBOL(drm_ht_insert_item);
+
+/*
+ * Just insert an item and return any "bits" bit key that hasn't been
+ * used before.
+ */
+int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
+                             unsigned long seed, int bits, int shift,
+                             unsigned long add)
+{
+       int ret;
+       unsigned long mask = (1 << bits) - 1;
+       unsigned long first, unshifted_key;
+
+       unshifted_key = hash_long(seed, bits);
+       first = unshifted_key;
+       do {
+               item->key = (unshifted_key << shift) + add;
+               ret = drm_ht_insert_item(ht, item);
+               if (ret)
+                       unshifted_key = (unshifted_key + 1) & mask;
+       } while(ret && (unshifted_key != first));
+
+       if (ret) {
+               DRM_ERROR("Available key bit space exhausted\n");
+               return -EINVAL;
+       }
+       return 0;
+}
+EXPORT_SYMBOL(drm_ht_just_insert_please);
+
+int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
+                    struct drm_hash_item **item)
+{
+       struct hlist_node *list;
+
+       list = drm_ht_find_key(ht, key);
+       if (!list)
+               return -EINVAL;
+
+       *item = hlist_entry(list, struct drm_hash_item, head);
+       return 0;
+}
+EXPORT_SYMBOL(drm_ht_find_item);
+
+int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
+{
+       struct hlist_node *list;
+
+       list = drm_ht_find_key(ht, key);
+       if (list) {
+               hlist_del_init(list);
+               ht->fill--;
+               return 0;
+       }
+       return -EINVAL;
+}
+
+int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
+{
+       hlist_del_init(&item->head);
+       ht->fill--;
+       return 0;
+}
+EXPORT_SYMBOL(drm_ht_remove_item);
+
+void drm_ht_remove(struct drm_open_hash *ht)
+{
+       if (ht->table) {
+               if (ht->use_vmalloc)
+                       vfree(ht->table);
+               else
+                       kfree(ht->table);
+               ht->table = NULL;
+       }
+}
+EXPORT_SYMBOL(drm_ht_remove);
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_info.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_info.c
new file mode 100644 (file)
index 0000000..f0f6c6b
--- /dev/null
@@ -0,0 +1,328 @@
+/**
+ * \file drm_info.c
+ * DRM info file implementations
+ *
+ * \author Ben Gamari <bgamari@gmail.com>
+ */
+
+/*
+ * Created: Sun Dec 21 13:09:50 2008 by bgamari@gmail.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright 2008 Ben Gamari <bgamari@gmail.com>
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/seq_file.h>
+#include "drmP.h"
+
+/**
+ * Called when "/proc/dri/.../name" is read.
+ *
+ * Prints the device name together with the bus id if available.
+ */
+int drm_name_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_minor *minor = node->minor;
+       struct drm_device *dev = minor->dev;
+       struct drm_master *master = minor->master;
+
+       if (!master)
+               return 0;
+
+       if (master->unique) {
+               seq_printf(m, "%s %s %s\n",
+                          dev->driver->pci_driver.name,
+                          pci_name(dev->pdev), master->unique);
+       } else {
+               seq_printf(m, "%s %s\n", dev->driver->pci_driver.name,
+                          pci_name(dev->pdev));
+       }
+
+       return 0;
+}
+
+/**
+ * Called when "/proc/dri/.../vm" is read.
+ *
+ * Prints information about all mappings in drm_device::maplist.
+ */
+int drm_vm_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct drm_local_map *map;
+       struct drm_map_list *r_list;
+
+       /* Hardcoded from _DRM_FRAME_BUFFER,
+          _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
+          _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
+       const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
+       const char *type;
+       int i;
+
+       mutex_lock(&dev->struct_mutex);
+       seq_printf(m, "slot      offset       size type flags    address mtrr\n\n");
+       i = 0;
+       list_for_each_entry(r_list, &dev->maplist, head) {
+               map = r_list->map;
+               if (!map)
+                       continue;
+               if (map->type < 0 || map->type > 5)
+                       type = "??";
+               else
+                       type = types[map->type];
+
+               seq_printf(m, "%4d 0x%016llx 0x%08lx %4.4s  0x%02x 0x%08lx ",
+                          i,
+                          (unsigned long long)map->offset,
+                          map->size, type, map->flags,
+                          (unsigned long) r_list->user_token);
+               if (map->mtrr < 0)
+                       seq_printf(m, "none\n");
+               else
+                       seq_printf(m, "%4d\n", map->mtrr);
+               i++;
+       }
+       mutex_unlock(&dev->struct_mutex);
+       return 0;
+}
+
+/**
+ * Called when "/proc/dri/.../queues" is read.
+ */
+int drm_queues_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       int i;
+       struct drm_queue *q;
+
+       mutex_lock(&dev->struct_mutex);
+       seq_printf(m, "  ctx/flags   use   fin"
+                  "   blk/rw/rwf  wait    flushed         queued"
+                  "      locks\n\n");
+       for (i = 0; i < dev->queue_count; i++) {
+               q = dev->queuelist[i];
+               atomic_inc(&q->use_count);
+               seq_printf(m,   "%5d/0x%03x %5d %5d"
+                          " %5d/%c%c/%c%c%c %5Zd\n",
+                          i,
+                          q->flags,
+                          atomic_read(&q->use_count),
+                          atomic_read(&q->finalization),
+                          atomic_read(&q->block_count),
+                          atomic_read(&q->block_read) ? 'r' : '-',
+                          atomic_read(&q->block_write) ? 'w' : '-',
+                          waitqueue_active(&q->read_queue) ? 'r' : '-',
+                          waitqueue_active(&q->write_queue) ? 'w' : '-',
+                          waitqueue_active(&q->flush_queue) ? 'f' : '-',
+                          DRM_BUFCOUNT(&q->waitlist));
+               atomic_dec(&q->use_count);
+       }
+       mutex_unlock(&dev->struct_mutex);
+       return 0;
+}
+
+/**
+ * Called when "/proc/dri/.../bufs" is read.
+ */
+int drm_bufs_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct drm_device_dma *dma;
+       int i, seg_pages;
+
+       mutex_lock(&dev->struct_mutex);
+       dma = dev->dma;
+       if (!dma) {
+               mutex_unlock(&dev->struct_mutex);
+               return 0;
+       }
+
+       seq_printf(m, " o     size count  free   segs pages    kB\n\n");
+       for (i = 0; i <= DRM_MAX_ORDER; i++) {
+               if (dma->bufs[i].buf_count) {
+                       seg_pages = dma->bufs[i].seg_count * (1 << dma->bufs[i].page_order);
+                       seq_printf(m, "%2d %8d %5d %5d %5d %5d %5ld\n",
+                                  i,
+                                  dma->bufs[i].buf_size,
+                                  dma->bufs[i].buf_count,
+                                  atomic_read(&dma->bufs[i].freelist.count),
+                                  dma->bufs[i].seg_count,
+                                  seg_pages,
+                                  seg_pages * PAGE_SIZE / 1024);
+               }
+       }
+       seq_printf(m, "\n");
+       for (i = 0; i < dma->buf_count; i++) {
+               if (i && !(i % 32))
+                       seq_printf(m, "\n");
+               seq_printf(m, " %d", dma->buflist[i]->list);
+       }
+       seq_printf(m, "\n");
+       mutex_unlock(&dev->struct_mutex);
+       return 0;
+}
+
+/**
+ * Called when "/proc/dri/.../vblank" is read.
+ */
+int drm_vblank_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       int crtc;
+
+       mutex_lock(&dev->struct_mutex);
+       for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
+               seq_printf(m, "CRTC %d enable:     %d\n",
+                          crtc, atomic_read(&dev->vblank_refcount[crtc]));
+               seq_printf(m, "CRTC %d counter:    %d\n",
+                          crtc, drm_vblank_count(dev, crtc));
+               seq_printf(m, "CRTC %d last wait:  %d\n",
+                          crtc, dev->last_vblank_wait[crtc]);
+               seq_printf(m, "CRTC %d in modeset: %d\n",
+                          crtc, dev->vblank_inmodeset[crtc]);
+       }
+       mutex_unlock(&dev->struct_mutex);
+       return 0;
+}
+
+/**
+ * Called when "/proc/dri/.../clients" is read.
+ *
+ */
+int drm_clients_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct drm_file *priv;
+
+       mutex_lock(&dev->struct_mutex);
+       seq_printf(m, "a dev    pid    uid      magic     ioctls\n\n");
+       list_for_each_entry(priv, &dev->filelist, lhead) {
+               seq_printf(m, "%c %3d %5d %5d %10u %10lu\n",
+                          priv->authenticated ? 'y' : 'n',
+                          priv->minor->index,
+                          priv->pid,
+                          priv->uid, priv->magic, priv->ioctl_count);
+       }
+       mutex_unlock(&dev->struct_mutex);
+       return 0;
+}
+
+
+int drm_gem_one_name_info(int id, void *ptr, void *data)
+{
+       struct drm_gem_object *obj = ptr;
+       struct seq_file *m = data;
+
+       seq_printf(m, "name %d size %zd\n", obj->name, obj->size);
+
+       seq_printf(m, "%6d %8zd %7d %8d\n",
+                  obj->name, obj->size,
+                  atomic_read(&obj->handlecount.refcount),
+                  atomic_read(&obj->refcount.refcount));
+       return 0;
+}
+
+int drm_gem_name_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+
+       seq_printf(m, "  name     size handles refcount\n");
+       idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, m);
+       return 0;
+}
+
+int drm_gem_object_info(struct seq_file *m, void* data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+
+       seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
+       seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
+       seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
+       seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
+       seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
+       seq_printf(m, "%d gtt total\n", dev->gtt_total);
+       return 0;
+}
+
+#if DRM_DEBUG_CODE
+
+int drm_vma_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct drm_vma_entry *pt;
+       struct vm_area_struct *vma;
+#if defined(__i386__)
+       unsigned int pgprot;
+#endif
+
+       mutex_lock(&dev->struct_mutex);
+       seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
+                  atomic_read(&dev->vma_count),
+                  high_memory, (u64)virt_to_phys(high_memory));
+
+       list_for_each_entry(pt, &dev->vmalist, head) {
+               vma = pt->vma;
+               if (!vma)
+                       continue;
+               seq_printf(m,
+                          "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
+                          pt->pid, vma->vm_start, vma->vm_end,
+                          vma->vm_flags & VM_READ ? 'r' : '-',
+                          vma->vm_flags & VM_WRITE ? 'w' : '-',
+                          vma->vm_flags & VM_EXEC ? 'x' : '-',
+                          vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
+                          vma->vm_flags & VM_LOCKED ? 'l' : '-',
+                          vma->vm_flags & VM_IO ? 'i' : '-',
+                          vma->vm_pgoff);
+
+#if defined(__i386__)
+               pgprot = pgprot_val(vma->vm_page_prot);
+               seq_printf(m, " %c%c%c%c%c%c%c%c%c",
+                          pgprot & _PAGE_PRESENT ? 'p' : '-',
+                          pgprot & _PAGE_RW ? 'w' : 'r',
+                          pgprot & _PAGE_USER ? 'u' : 's',
+                          pgprot & _PAGE_PWT ? 't' : 'b',
+                          pgprot & _PAGE_PCD ? 'u' : 'c',
+                          pgprot & _PAGE_ACCESSED ? 'a' : '-',
+                          pgprot & _PAGE_DIRTY ? 'd' : '-',
+                          pgprot & _PAGE_PSE ? 'm' : 'k',
+                          pgprot & _PAGE_GLOBAL ? 'g' : 'l');
+#endif
+               seq_printf(m, "\n");
+       }
+       mutex_unlock(&dev->struct_mutex);
+       return 0;
+}
+
+#endif
+
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_ioctl.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_ioctl.c
new file mode 100644 (file)
index 0000000..9b9ff46
--- /dev/null
@@ -0,0 +1,357 @@
+/**
+ * \file drm_ioctl.c
+ * IOCTL processing for DRM
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Fri Jan  8 09:01:26 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm_core.h"
+
+#include "linux/pci.h"
+
+/**
+ * Get the bus id.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_unique structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Copies the bus id from drm_device::unique into user space.
+ */
+int drm_getunique(struct drm_device *dev, void *data,
+                 struct drm_file *file_priv)
+{
+       struct drm_unique *u = data;
+       struct drm_master *master = file_priv->master;
+
+       if (u->unique_len >= master->unique_len) {
+               if (copy_to_user(u->unique, master->unique, master->unique_len))
+                       return -EFAULT;
+       }
+       u->unique_len = master->unique_len;
+
+       return 0;
+}
+
+/**
+ * Set the bus id.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_unique structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Copies the bus id from userspace into drm_device::unique, and verifies that
+ * it matches the device this DRM is attached to (EINVAL otherwise).  Deprecated
+ * in interface version 1.1 and will return EBUSY when setversion has requested
+ * version 1.1 or greater.
+ */
+int drm_setunique(struct drm_device *dev, void *data,
+                 struct drm_file *file_priv)
+{
+       struct drm_unique *u = data;
+       struct drm_master *master = file_priv->master;
+       int domain, bus, slot, func, ret;
+
+       if (master->unique_len || master->unique)
+               return -EBUSY;
+
+       if (!u->unique_len || u->unique_len > 1024)
+               return -EINVAL;
+
+       master->unique_len = u->unique_len;
+       master->unique_size = u->unique_len + 1;
+       master->unique = kmalloc(master->unique_size, GFP_KERNEL);
+       if (!master->unique)
+               return -ENOMEM;
+       if (copy_from_user(master->unique, u->unique, master->unique_len))
+               return -EFAULT;
+
+       master->unique[master->unique_len] = '\0';
+
+       dev->devname = kmalloc(strlen(dev->driver->pci_driver.name) +
+                              strlen(master->unique) + 2, GFP_KERNEL);
+       if (!dev->devname)
+               return -ENOMEM;
+
+       sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name,
+               master->unique);
+
+       /* Return error if the busid submitted doesn't match the device's actual
+        * busid.
+        */
+       ret = sscanf(master->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
+       if (ret != 3)
+               return -EINVAL;
+       domain = bus >> 8;
+       bus &= 0xff;
+
+       if ((domain != drm_get_pci_domain(dev)) ||
+           (bus != dev->pdev->bus->number) ||
+           (slot != PCI_SLOT(dev->pdev->devfn)) ||
+           (func != PCI_FUNC(dev->pdev->devfn)))
+               return -EINVAL;
+
+       return 0;
+}
+
+static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
+{
+       struct drm_master *master = file_priv->master;
+       int len;
+
+       if (master->unique != NULL)
+               return -EBUSY;
+
+       master->unique_len = 40;
+       master->unique_size = master->unique_len;
+       master->unique = kmalloc(master->unique_size, GFP_KERNEL);
+       if (master->unique == NULL)
+               return -ENOMEM;
+
+       len = snprintf(master->unique, master->unique_len, "pci:%04x:%02x:%02x.%d",
+                      drm_get_pci_domain(dev),
+                      dev->pdev->bus->number,
+                      PCI_SLOT(dev->pdev->devfn),
+                      PCI_FUNC(dev->pdev->devfn));
+       if (len >= master->unique_len)
+               DRM_ERROR("buffer overflow");
+       else
+               master->unique_len = len;
+
+       dev->devname = kmalloc(strlen(dev->driver->pci_driver.name) +
+                              master->unique_len + 2, GFP_KERNEL);
+       if (dev->devname == NULL)
+               return -ENOMEM;
+
+       sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name,
+               master->unique);
+
+       return 0;
+}
+
+/**
+ * Get a mapping information.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_map structure.
+ *
+ * \return zero on success or a negative number on failure.
+ *
+ * Searches for the mapping with the specified offset and copies its information
+ * into userspace
+ */
+int drm_getmap(struct drm_device *dev, void *data,
+              struct drm_file *file_priv)
+{
+       struct drm_map *map = data;
+       struct drm_map_list *r_list = NULL;
+       struct list_head *list;
+       int idx;
+       int i;
+
+       idx = map->offset;
+
+       mutex_lock(&dev->struct_mutex);
+       if (idx < 0) {
+               mutex_unlock(&dev->struct_mutex);
+               return -EINVAL;
+       }
+
+       i = 0;
+       list_for_each(list, &dev->maplist) {
+               if (i == idx) {
+                       r_list = list_entry(list, struct drm_map_list, head);
+                       break;
+               }
+               i++;
+       }
+       if (!r_list || !r_list->map) {
+               mutex_unlock(&dev->struct_mutex);
+               return -EINVAL;
+       }
+
+       map->offset = r_list->map->offset;
+       map->size = r_list->map->size;
+       map->type = r_list->map->type;
+       map->flags = r_list->map->flags;
+       map->handle = (void *)(unsigned long) r_list->user_token;
+       map->mtrr = r_list->map->mtrr;
+       mutex_unlock(&dev->struct_mutex);
+
+       return 0;
+}
+
+/**
+ * Get client information.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_client structure.
+ *
+ * \return zero on success or a negative number on failure.
+ *
+ * Searches for the client with the specified index and copies its information
+ * into userspace
+ */
+int drm_getclient(struct drm_device *dev, void *data,
+                 struct drm_file *file_priv)
+{
+       struct drm_client *client = data;
+       struct drm_file *pt;
+       int idx;
+       int i;
+
+       idx = client->idx;
+       mutex_lock(&dev->struct_mutex);
+
+       i = 0;
+       list_for_each_entry(pt, &dev->filelist, lhead) {
+               if (i++ >= idx) {
+                       client->auth = pt->authenticated;
+                       client->pid = pt->pid;
+                       client->uid = pt->uid;
+                       client->magic = pt->magic;
+                       client->iocs = pt->ioctl_count;
+                       mutex_unlock(&dev->struct_mutex);
+
+                       return 0;
+               }
+       }
+       mutex_unlock(&dev->struct_mutex);
+
+       return -EINVAL;
+}
+
+/**
+ * Get statistics information.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_stats structure.
+ *
+ * \return zero on success or a negative number on failure.
+ */
+int drm_getstats(struct drm_device *dev, void *data,
+                struct drm_file *file_priv)
+{
+       struct drm_stats *stats = data;
+       int i;
+
+       memset(stats, 0, sizeof(*stats));
+
+       mutex_lock(&dev->struct_mutex);
+
+       for (i = 0; i < dev->counters; i++) {
+               if (dev->types[i] == _DRM_STAT_LOCK)
+                       stats->data[i].value =
+                           (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
+               else
+                       stats->data[i].value = atomic_read(&dev->counts[i]);
+               stats->data[i].type = dev->types[i];
+       }
+
+       stats->count = dev->counters;
+
+       mutex_unlock(&dev->struct_mutex);
+
+       return 0;
+}
+
+/**
+ * Setversion ioctl.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_lock structure.
+ * \return zero on success or negative number on failure.
+ *
+ * Sets the requested interface version
+ */
+int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       struct drm_set_version *sv = data;
+       int if_version, retcode = 0;
+
+       if (sv->drm_di_major != -1) {
+               if (sv->drm_di_major != DRM_IF_MAJOR ||
+                   sv->drm_di_minor < 0 || sv->drm_di_minor > DRM_IF_MINOR) {
+                       retcode = -EINVAL;
+                       goto done;
+               }
+               if_version = DRM_IF_VERSION(sv->drm_di_major,
+                                           sv->drm_di_minor);
+               dev->if_version = max(if_version, dev->if_version);
+               if (sv->drm_di_minor >= 1) {
+                       /*
+                        * Version 1.1 includes tying of DRM to specific device
+                        */
+                       drm_set_busid(dev, file_priv);
+               }
+       }
+
+       if (sv->drm_dd_major != -1) {
+               if (sv->drm_dd_major != dev->driver->major ||
+                   sv->drm_dd_minor < 0 || sv->drm_dd_minor >
+                   dev->driver->minor) {
+                       retcode = -EINVAL;
+                       goto done;
+               }
+
+               if (dev->driver->set_version)
+                       dev->driver->set_version(dev, sv);
+       }
+
+done:
+       sv->drm_di_major = DRM_IF_MAJOR;
+       sv->drm_di_minor = DRM_IF_MINOR;
+       sv->drm_dd_major = dev->driver->major;
+       sv->drm_dd_minor = dev->driver->minor;
+
+       return retcode;
+}
+
/**
 * No-op ioctl.
 *
 * Placeholder handler for deprecated ioctls that must still report
 * success for backwards compatibility; just emits a debug trace.
 */
int drm_noop(struct drm_device *dev, void *data,
	     struct drm_file *file_priv)
{
	DRM_DEBUG("\n");
	return 0;
}
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_irq.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_irq.c
new file mode 100644 (file)
index 0000000..7998ee6
--- /dev/null
@@ -0,0 +1,771 @@
+/**
+ * \file drm_irq.c
+ * IRQ support
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+
+#include <linux/interrupt.h>   /* For task queue support */
+
+#include <linux/vgaarb.h>
+/**
+ * Get interrupt from bus id.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_irq_busid structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Finds the PCI device with the specified bus id and gets its IRQ number.
+ * This IOCTL is deprecated, and will now return EINVAL for any busid not equal
+ * to that of the device that this DRM instance attached to.
+ */
+int drm_irq_by_busid(struct drm_device *dev, void *data,
+                    struct drm_file *file_priv)
+{
+       struct drm_irq_busid *p = data;
+
+       if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+               return -EINVAL;
+
+       if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
+           (p->busnum & 0xff) != dev->pdev->bus->number ||
+           p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
+               return -EINVAL;
+
+       p->irq = dev->pdev->irq;
+
+       DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
+                 p->irq);
+
+       return 0;
+}
+
/*
 * Timer callback that turns off the vblank interrupt on every CRTC whose
 * reference count has dropped to zero.  Armed by drm_vblank_put() so the
 * interrupt stays on for a grace period after the last user lets go.
 */
static void vblank_disable_fn(unsigned long arg)
{
	struct drm_device *dev = (struct drm_device *)arg;
	unsigned long irqflags;
	int i;

	/* Disabling is opted into after the first modeset completes. */
	if (!dev->vblank_disable_allowed)
		return;

	for (i = 0; i < dev->num_crtcs; i++) {
		/* Lock per CRTC so the refcount check and the disable
		 * happen atomically with respect to drm_vblank_get(). */
		spin_lock_irqsave(&dev->vbl_lock, irqflags);
		if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
		    dev->vblank_enabled[i]) {
			DRM_DEBUG("disabling vblank on crtc %d\n", i);
			/* Snapshot the hw counter so frames missed while
			 * the interrupt is off can be accounted later. */
			dev->last_vblank[i] =
				dev->driver->get_vblank_counter(dev, i);
			dev->driver->disable_vblank(dev, i);
			dev->vblank_enabled[i] = 0;
		}
		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
	}
}
+
+void drm_vblank_cleanup(struct drm_device *dev)
+{
+       /* Bail if the driver didn't call drm_vblank_init() */
+       if (dev->num_crtcs == 0)
+               return;
+
+       del_timer(&dev->vblank_disable_timer);
+
+       vblank_disable_fn((unsigned long)dev);
+
+       kfree(dev->vbl_queue);
+       kfree(dev->_vblank_count);
+       kfree(dev->vblank_refcount);
+       kfree(dev->vblank_enabled);
+       kfree(dev->last_vblank);
+       kfree(dev->last_vblank_wait);
+       kfree(dev->vblank_inmodeset);
+
+       dev->num_crtcs = 0;
+}
+
+int drm_vblank_init(struct drm_device *dev, int num_crtcs)
+{
+       int i, ret = -ENOMEM;
+
+       setup_timer(&dev->vblank_disable_timer, vblank_disable_fn,
+                   (unsigned long)dev);
+       spin_lock_init(&dev->vbl_lock);
+       dev->num_crtcs = num_crtcs;
+
+       dev->vbl_queue = kmalloc(sizeof(wait_queue_head_t) * num_crtcs,
+                                GFP_KERNEL);
+       if (!dev->vbl_queue)
+               goto err;
+
+       dev->_vblank_count = kmalloc(sizeof(atomic_t) * num_crtcs, GFP_KERNEL);
+       if (!dev->_vblank_count)
+               goto err;
+
+       dev->vblank_refcount = kmalloc(sizeof(atomic_t) * num_crtcs,
+                                      GFP_KERNEL);
+       if (!dev->vblank_refcount)
+               goto err;
+
+       dev->vblank_enabled = kcalloc(num_crtcs, sizeof(int), GFP_KERNEL);
+       if (!dev->vblank_enabled)
+               goto err;
+
+       dev->last_vblank = kcalloc(num_crtcs, sizeof(u32), GFP_KERNEL);
+       if (!dev->last_vblank)
+               goto err;
+
+       dev->last_vblank_wait = kcalloc(num_crtcs, sizeof(u32), GFP_KERNEL);
+       if (!dev->last_vblank_wait)
+               goto err;
+
+       dev->vblank_inmodeset = kcalloc(num_crtcs, sizeof(int), GFP_KERNEL);
+       if (!dev->vblank_inmodeset)
+               goto err;
+
+       /* Zero per-crtc vblank stuff */
+       for (i = 0; i < num_crtcs; i++) {
+               init_waitqueue_head(&dev->vbl_queue[i]);
+               atomic_set(&dev->_vblank_count[i], 0);
+               atomic_set(&dev->vblank_refcount[i], 0);
+       }
+
+       dev->vblank_disable_allowed = 0;
+
+       return 0;
+
+err:
+       drm_vblank_cleanup(dev);
+       return ret;
+}
+EXPORT_SYMBOL(drm_vblank_init);
+
+static void drm_irq_vgaarb_nokms(void *cookie, bool state)
+{
+       struct drm_device *dev = cookie;
+
+       if (dev->driver->vgaarb_irq) {
+               dev->driver->vgaarb_irq(dev, state);
+               return;
+       }
+
+       if (!dev->irq_enabled)
+               return;
+
+       if (state)
+               dev->driver->irq_uninstall(dev);
+       else {
+               dev->driver->irq_preinstall(dev);
+               dev->driver->irq_postinstall(dev);
+       }
+}
+
/**
 * Install IRQ handler.
 *
 * \param dev DRM device.
 * \return zero on success, or a negative error code on failure.
 *
 * Initializes the IRQ related data. Installs the handler, calling the driver
 * \c drm_driver_irq_preinstall() and \c drm_driver_irq_postinstall() functions
 * before and after the installation.
 */
int drm_irq_install(struct drm_device *dev)
{
	int ret = 0;
	unsigned long sh_flags = 0;
	char *irqname;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
		return -EINVAL;

	if (dev->pdev->irq == 0)
		return -EINVAL;

	mutex_lock(&dev->struct_mutex);

	/* Driver must have been initialized */
	if (!dev->dev_private) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	if (dev->irq_enabled) {
		mutex_unlock(&dev->struct_mutex);
		return -EBUSY;
	}
	/* Claim the flag under the mutex; the installation itself runs
	 * unlocked, with irq_enabled rolled back on any failure below. */
	dev->irq_enabled = 1;
	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG("irq=%d\n", dev->pdev->irq);

	/* Before installing handler */
	dev->driver->irq_preinstall(dev);

	/* Install handler */
	if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
		sh_flags = IRQF_SHARED;

	/* Prefer the device busid as the /proc/interrupts name. */
	if (dev->devname)
		irqname = dev->devname;
	else
		irqname = dev->driver->name;

	ret = request_irq(drm_dev_to_irq(dev), dev->driver->irq_handler,
			  sh_flags, irqname, dev);

	if (ret < 0) {
		mutex_lock(&dev->struct_mutex);
		dev->irq_enabled = 0;
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	/* Non-KMS drivers need VGA arbiter notifications (see
	 * drm_irq_vgaarb_nokms); KMS handles arbitration itself. */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		vga_client_register(dev->pdev, (void *)dev, drm_irq_vgaarb_nokms, NULL);

	/* After installing handler */
	ret = dev->driver->irq_postinstall(dev);
	if (ret < 0) {
		/* NOTE(review): on postinstall failure the IRQ stays
		 * requested even though irq_enabled is cleared — confirm
		 * whether a free_irq() is expected here. */
		mutex_lock(&dev->struct_mutex);
		dev->irq_enabled = 0;
		mutex_unlock(&dev->struct_mutex);
	}

	return ret;
}
EXPORT_SYMBOL(drm_irq_install);
+
/**
 * Uninstall the IRQ handler.
 *
 * \param dev DRM device.
 * \return zero on success, -EINVAL when no handler was installed.
 *
 * Calls the driver's \c drm_driver_irq_uninstall() function, and stops the irq.
 */
int drm_irq_uninstall(struct drm_device * dev)
{
	unsigned long irqflags;
	int irq_enabled, i;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
		return -EINVAL;

	/* Atomically take ownership of the enabled flag. */
	mutex_lock(&dev->struct_mutex);
	irq_enabled = dev->irq_enabled;
	dev->irq_enabled = 0;
	mutex_unlock(&dev->struct_mutex);

	/*
	 * Wake up any waiters so they don't hang.
	 */
	spin_lock_irqsave(&dev->vbl_lock, irqflags);
	for (i = 0; i < dev->num_crtcs; i++) {
		DRM_WAKEUP(&dev->vbl_queue[i]);
		dev->vblank_enabled[i] = 0;
		/* Latch the hw counter so the cooked count can pick up
		 * from here if vblanks are re-enabled later. */
		dev->last_vblank[i] = dev->driver->get_vblank_counter(dev, i);
	}
	spin_unlock_irqrestore(&dev->vbl_lock, irqflags);

	/* Nothing was installed; waiters were still woken above. */
	if (!irq_enabled)
		return -EINVAL;

	DRM_DEBUG("irq=%d\n", dev->pdev->irq);

	/* Undo the non-KMS VGA arbiter hookup from drm_irq_install(). */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		vga_client_register(dev->pdev, NULL, NULL, NULL);

	dev->driver->irq_uninstall(dev);

	free_irq(dev->pdev->irq, dev);

	return 0;
}
EXPORT_SYMBOL(drm_irq_uninstall);
+
+/**
+ * IRQ control ioctl.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_control structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Calls irq_install() or irq_uninstall() according to \p arg.
+ */
+int drm_control(struct drm_device *dev, void *data,
+               struct drm_file *file_priv)
+{
+       struct drm_control *ctl = data;
+
+       /* if we haven't irq we fallback for compatibility reasons - this used to be a separate function in drm_dma.h */
+
+
+       switch (ctl->func) {
+       case DRM_INST_HANDLER:
+               if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+                       return 0;
+               if (drm_core_check_feature(dev, DRIVER_MODESET))
+                       return 0;
+               if (dev->if_version < DRM_IF_VERSION(1, 2) &&
+                   ctl->irq != dev->pdev->irq)
+                       return -EINVAL;
+               return drm_irq_install(dev);
+       case DRM_UNINST_HANDLER:
+               if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+                       return 0;
+               if (drm_core_check_feature(dev, DRIVER_MODESET))
+                       return 0;
+               return drm_irq_uninstall(dev);
+       default:
+               return -EINVAL;
+       }
+}
+
/**
 * drm_vblank_count - retrieve "cooked" vblank counter value
 * @dev: DRM device
 * @crtc: which counter to retrieve
 *
 * Fetches the "cooked" vblank count value that represents the number of
 * vblank events since the system was booted, including lost events due to
 * modesetting activity.  Reads the software counter maintained by
 * drm_update_vblank_count() and the interrupt handler, not the hardware
 * register directly.
 */
u32 drm_vblank_count(struct drm_device *dev, int crtc)
{
	return atomic_read(&dev->_vblank_count[crtc]);
}
EXPORT_SYMBOL(drm_vblank_count);
+
/**
 * drm_update_vblank_count - update the master vblank counter
 * @dev: DRM device
 * @crtc: counter to update
 *
 * Call back into the driver to update the appropriate vblank counter
 * (specified by @crtc).  Deal with wraparound, if it occurred, and
 * update the last read value so we can deal with wraparound on the next
 * call if necessary.
 *
 * Only necessary when going from off->on, to account for frames we
 * didn't get an interrupt for.
 *
 * Note: caller must hold dev->vbl_lock since this reads & writes
 * device vblank fields.
 */
static void drm_update_vblank_count(struct drm_device *dev, int crtc)
{
	u32 cur_vblank, diff;

	/*
	 * Interrupts were disabled prior to this call, so deal with counter
	 * wrap if needed.
	 * NOTE!  It's possible we lost a full dev->max_vblank_count events
	 * here if the register is small or we had vblank interrupts off for
	 * a long time.
	 */
	cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
	/* u32 subtraction: wraps modulo 2^32 by design. */
	diff = cur_vblank - dev->last_vblank[crtc];
	if (cur_vblank < dev->last_vblank[crtc]) {
		/* Hardware counter wrapped within its own period. */
		diff += dev->max_vblank_count;

		DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
			  crtc, dev->last_vblank[crtc], cur_vblank, diff);
	}

	DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
		  crtc, diff);

	atomic_add(diff, &dev->_vblank_count[crtc]);
}
+
/**
 * drm_vblank_get - get a reference count on vblank events
 * @dev: DRM device
 * @crtc: which CRTC to own
 *
 * Acquire a reference count on vblank events to avoid having them disabled
 * while in use.  The 0->1 transition enables the interrupt through the
 * driver and folds in any frames missed while it was off.
 *
 * RETURNS
 * Zero on success, nonzero on failure.
 */
int drm_vblank_get(struct drm_device *dev, int crtc)
{
	unsigned long irqflags;
	int ret = 0;

	spin_lock_irqsave(&dev->vbl_lock, irqflags);
	/* Going from 0->1 means we have to enable interrupts again */
	if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) {
		if (!dev->vblank_enabled[crtc]) {
			ret = dev->driver->enable_vblank(dev, crtc);
			DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
			if (ret)
				/* Driver refused: drop our reference. */
				atomic_dec(&dev->vblank_refcount[crtc]);
			else {
				dev->vblank_enabled[crtc] = 1;
				/* Account for vblanks missed while off. */
				drm_update_vblank_count(dev, crtc);
			}
		}
	} else {
		/* Someone else holds a reference but the interrupt never
		 * got enabled (their enable_vblank failed): back out. */
		if (!dev->vblank_enabled[crtc]) {
			atomic_dec(&dev->vblank_refcount[crtc]);
			ret = -EINVAL;
		}
	}
	spin_unlock_irqrestore(&dev->vbl_lock, irqflags);

	return ret;
}
EXPORT_SYMBOL(drm_vblank_get);
+
+/**
+ * drm_vblank_put - give up ownership of vblank events
+ * @dev: DRM device
+ * @crtc: which counter to give up
+ *
+ * Release ownership of a given vblank counter, turning off interrupts
+ * if possible.
+ */
+void drm_vblank_put(struct drm_device *dev, int crtc)
+{
+       BUG_ON (atomic_read (&dev->vblank_refcount[crtc]) == 0);
+
+       /* Last user schedules interrupt disable */
+       if (atomic_dec_and_test(&dev->vblank_refcount[crtc]))
+               mod_timer(&dev->vblank_disable_timer, jiffies + 5*DRM_HZ);
+}
+EXPORT_SYMBOL(drm_vblank_put);
+
+void drm_vblank_off(struct drm_device *dev, int crtc)
+{
+       unsigned long irqflags;
+
+       spin_lock_irqsave(&dev->vbl_lock, irqflags);
+       DRM_WAKEUP(&dev->vbl_queue[crtc]);
+       dev->vblank_enabled[crtc] = 0;
+       dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
+       spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+}
+EXPORT_SYMBOL(drm_vblank_off);
+
+/**
+ * drm_vblank_pre_modeset - account for vblanks across mode sets
+ * @dev: DRM device
+ * @crtc: CRTC in question
+ * @post: post or pre mode set?
+ *
+ * Account for vblank events across mode setting events, which will likely
+ * reset the hardware frame counter.
+ */
+void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
+{
+       /*
+        * To avoid all the problems that might happen if interrupts
+        * were enabled/disabled around or between these calls, we just
+        * have the kernel take a reference on the CRTC (just once though
+        * to avoid corrupting the count if multiple, mismatch calls occur),
+        * so that interrupts remain enabled in the interim.
+        */
+       if (!dev->vblank_inmodeset[crtc]) {
+               dev->vblank_inmodeset[crtc] = 0x1;
+               if (drm_vblank_get(dev, crtc) == 0)
+                       dev->vblank_inmodeset[crtc] |= 0x2;
+       }
+}
+EXPORT_SYMBOL(drm_vblank_pre_modeset);
+
+void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
+{
+       unsigned long irqflags;
+
+       if (dev->vblank_inmodeset[crtc]) {
+               spin_lock_irqsave(&dev->vbl_lock, irqflags);
+               dev->vblank_disable_allowed = 1;
+               spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+
+               if (dev->vblank_inmodeset[crtc] & 0x2)
+                       drm_vblank_put(dev, crtc);
+
+               dev->vblank_inmodeset[crtc] = 0;
+       }
+}
+EXPORT_SYMBOL(drm_vblank_post_modeset);
+
+/**
+ * drm_modeset_ctl - handle vblank event counter changes across mode switch
+ * @DRM_IOCTL_ARGS: standard ioctl arguments
+ *
+ * Applications should call the %_DRM_PRE_MODESET and %_DRM_POST_MODESET
+ * ioctls around modesetting so that any lost vblank events are accounted for.
+ *
+ * Generally the counter will reset across mode sets.  If interrupts are
+ * enabled around this call, we don't have to do anything since the counter
+ * will have already been incremented.
+ */
+int drm_modeset_ctl(struct drm_device *dev, void *data,
+                   struct drm_file *file_priv)
+{
+       struct drm_modeset_ctl *modeset = data;
+       int crtc, ret = 0;
+
+       /* If drm_vblank_init() hasn't been called yet, just no-op */
+       if (!dev->num_crtcs)
+               goto out;
+
+       crtc = modeset->crtc;
+       if (crtc >= dev->num_crtcs) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       switch (modeset->cmd) {
+       case _DRM_PRE_MODESET:
+               drm_vblank_pre_modeset(dev, crtc);
+               break;
+       case _DRM_POST_MODESET:
+               drm_vblank_post_modeset(dev, crtc);
+               break;
+       default:
+               ret = -EINVAL;
+               break;
+       }
+
+out:
+       return ret;
+}
+
/*
 * Allocate and queue a vblank event for @pipe on behalf of @file_priv.
 *
 * If the requested sequence has already passed (modulo 32-bit counter
 * wraparound) the event is completed immediately; otherwise it is parked
 * on dev->vblank_event_list for the vblank interrupt to deliver.
 * Returns 0 on success or -ENOMEM (allocation failure or the file's
 * event-space quota being exhausted).
 */
static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
				  union drm_wait_vblank *vblwait,
				  struct drm_file *file_priv)
{
	struct drm_pending_vblank_event *e;
	struct timeval now;
	unsigned long flags;
	unsigned int seq;

	e = kzalloc(sizeof *e, GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;

	e->pipe = pipe;
	e->event.base.type = DRM_EVENT_VBLANK;
	e->event.base.length = sizeof e->event;
	e->event.user_data = vblwait->request.signal;
	e->base.event = &e->event.base;
	e->base.file_priv = file_priv;
	/* The event is a single kzalloc'd block, so kfree destroys it. */
	e->base.destroy = (void (*) (struct drm_pending_event *)) kfree;

	do_gettimeofday(&now);
	spin_lock_irqsave(&dev->event_lock, flags);

	/* Charge the event against the file's event-space quota. */
	if (file_priv->event_space < sizeof e->event) {
		spin_unlock_irqrestore(&dev->event_lock, flags);
		kfree(e);
		return -ENOMEM;
	}

	file_priv->event_space -= sizeof e->event;
	seq = drm_vblank_count(dev, pipe);
	/* "(seq - requested) <= 1<<23" means the requested sequence is not
	 * in the future, allowing for 32-bit counter wraparound. */
	if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) &&
	    (seq - vblwait->request.sequence) <= (1 << 23)) {
		vblwait->request.sequence = seq + 1;
		vblwait->reply.sequence = vblwait->request.sequence;
	}

	DRM_DEBUG("event on vblank count %d, current %d, crtc %d\n",
		  vblwait->request.sequence, seq, pipe);

	e->event.sequence = vblwait->request.sequence;
	if ((seq - vblwait->request.sequence) <= (1 << 23)) {
		/* Already happened: complete immediately, dropping the
		 * vblank reference the caller took (see drm_vblank_get()
		 * in drm_wait_vblank()). */
		e->event.tv_sec = now.tv_sec;
		e->event.tv_usec = now.tv_usec;
		drm_vblank_put(dev, e->pipe);
		list_add_tail(&e->base.link, &e->base.file_priv->event_list);
		wake_up_interruptible(&e->base.file_priv->event_wait);
	} else {
		/* Not yet: the vblank interrupt handler will complete it. */
		list_add_tail(&e->base.link, &dev->vblank_event_list);
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);

	return 0;
}
+
+/**
+ * Wait for VBLANK.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param data user argument, pointing to a drm_wait_vblank structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * This function enables the vblank interrupt on the pipe requested, then
+ * sleeps waiting for the requested sequence number to occur, and drops
+ * the vblank interrupt refcount afterwards. (vblank irq disable follows that
+ * after a timeout with no further vblank waits scheduled).
+ */
+int drm_wait_vblank(struct drm_device *dev, void *data,
+                   struct drm_file *file_priv)
+{
+       union drm_wait_vblank *vblwait = data;
+       int ret = 0;
+       unsigned int flags, seq, crtc;
+
+       /* Vblank waits need a working, enabled interrupt. */
+       if ((!dev->pdev->irq) || (!dev->irq_enabled))
+               return -EINVAL;
+
+       /* Signal-based delivery (_DRM_VBLANK_SIGNAL) is not supported. */
+       if (vblwait->request.type & _DRM_VBLANK_SIGNAL)
+               return -EINVAL;
+
+       if (vblwait->request.type &
+           ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) {
+               DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n",
+                         vblwait->request.type,
+                         (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK));
+               return -EINVAL;
+       }
+
+       flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
+       /* Only two CRTCs are addressable via the SECONDARY flag. */
+       crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
+
+       if (crtc >= dev->num_crtcs)
+               return -EINVAL;
+
+       ret = drm_vblank_get(dev, crtc);
+       if (ret) {
+               DRM_DEBUG("failed to acquire vblank counter, %d\n", ret);
+               return ret;
+       }
+       seq = drm_vblank_count(dev, crtc);
+
+       switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
+       case _DRM_VBLANK_RELATIVE:
+               vblwait->request.sequence += seq;
+               vblwait->request.type &= ~_DRM_VBLANK_RELATIVE;
+               /* fall through - request.sequence is now absolute */
+       case _DRM_VBLANK_ABSOLUTE:
+               break;
+       default:
+               ret = -EINVAL;
+               goto done;
+       }
+
+       /* Event-style wait: the vblank reference taken above is handed off
+        * and dropped in drm_queue_vblank_event()/drm_handle_vblank_events(). */
+       if (flags & _DRM_VBLANK_EVENT)
+               return drm_queue_vblank_event(dev, crtc, vblwait, file_priv);
+
+       /* Wrap-aware "already passed" test: wait for the next vblank instead. */
+       if ((flags & _DRM_VBLANK_NEXTONMISS) &&
+           (seq - vblwait->request.sequence) <= (1<<23)) {
+               vblwait->request.sequence = seq + 1;
+       }
+
+       DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
+                 vblwait->request.sequence, crtc);
+       dev->last_vblank_wait[crtc] = vblwait->request.sequence;
+       /* Sleep up to 3 s for the target count, or until the irq is disabled. */
+       DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
+                   (((drm_vblank_count(dev, crtc) -
+                      vblwait->request.sequence) <= (1 << 23)) ||
+                    !dev->irq_enabled));
+
+       if (ret != -EINTR) {
+               struct timeval now;
+
+               do_gettimeofday(&now);
+
+               vblwait->reply.tval_sec = now.tv_sec;
+               vblwait->reply.tval_usec = now.tv_usec;
+               vblwait->reply.sequence = drm_vblank_count(dev, crtc);
+               DRM_DEBUG("returning %d to client\n",
+                         vblwait->reply.sequence);
+       } else {
+               DRM_DEBUG("vblank wait interrupted by signal\n");
+       }
+
+done:
+       drm_vblank_put(dev, crtc);
+       return ret;
+}
+
+/*
+ * Deliver every queued vblank event for @crtc whose target sequence has
+ * been reached; called from drm_handle_vblank().
+ */
+void drm_handle_vblank_events(struct drm_device *dev, int crtc)
+{
+       struct drm_pending_vblank_event *e, *t;
+       struct timeval now;
+       unsigned long flags;
+       unsigned int seq;
+
+       do_gettimeofday(&now);
+       seq = drm_vblank_count(dev, crtc);
+
+       spin_lock_irqsave(&dev->event_lock, flags);
+
+       list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
+               if (e->pipe != crtc)
+                       continue;
+               /* Wrap-aware test: skip events whose target is still ahead. */
+               if ((seq - e->event.sequence) > (1<<23))
+                       continue;
+
+               DRM_DEBUG("vblank event on %d, current %d\n",
+                         e->event.sequence, seq);
+
+               e->event.sequence = seq;
+               e->event.tv_sec = now.tv_sec;
+               e->event.tv_usec = now.tv_usec;
+               /* Drop the reference taken when the event was queued. */
+               drm_vblank_put(dev, e->pipe);
+               list_move_tail(&e->base.link, &e->base.file_priv->event_list);
+               wake_up_interruptible(&e->base.file_priv->event_wait);
+       }
+
+       spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+/**
+ * drm_handle_vblank - handle a vblank event
+ * @dev: DRM device
+ * @crtc: where this event occurred
+ *
+ * Drivers should call this routine in their vblank interrupt handlers to
+ * update the vblank counter and send any signals that may be pending.
+ */
+void drm_handle_vblank(struct drm_device *dev, int crtc)
+{
+       if (!dev->num_crtcs)
+               return;
+
+       /* Bump the software vblank counter, then wake both styles of waiter:
+        * sleepers in drm_wait_vblank() and queued userspace events. */
+       atomic_inc(&dev->_vblank_count[crtc]);
+       DRM_WAKEUP(&dev->vbl_queue[crtc]);
+       drm_handle_vblank_events(dev, crtc);
+}
+EXPORT_SYMBOL(drm_handle_vblank);
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_lock.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_lock.c
new file mode 100644 (file)
index 0000000..e2f70a5
--- /dev/null
@@ -0,0 +1,392 @@
+/**
+ * \file drm_lock.c
+ * IOCTLs for locking
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Tue Feb  2 08:37:54 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+
+static int drm_notifier(void *priv);
+
+/**
+ * Lock ioctl.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_lock structure.
+ * \return zero on success or negative number on failure.
+ *
+ * Add the current task to the lock wait queue, and attempt to take to lock.
+ */
+int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       DECLARE_WAITQUEUE(entry, current);
+       struct drm_lock *lock = data;
+       struct drm_master *master = file_priv->master;
+       int ret = 0;
+
+       ++file_priv->lock_count;
+
+       if (lock->context == DRM_KERNEL_CONTEXT) {
+               DRM_ERROR("Process %d using kernel context %d\n",
+                         task_pid_nr(current), lock->context);
+               return -EINVAL;
+       }
+
+       DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
+                 lock->context, task_pid_nr(current),
+                 master->lock.hw_lock->lock, lock->flags);
+
+       if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE))
+               if (lock->context < 0)
+                       return -EINVAL;
+
+       add_wait_queue(&master->lock.lock_queue, &entry);
+       spin_lock_bh(&master->lock.spinlock);
+       master->lock.user_waiters++;
+       spin_unlock_bh(&master->lock.spinlock);
+
+       /* Sleep until the lock is acquired, the device goes away, or a
+        * signal arrives. */
+       for (;;) {
+               __set_current_state(TASK_INTERRUPTIBLE);
+               if (!master->lock.hw_lock) {
+                       /* Device has been unregistered */
+                       send_sig(SIGTERM, current, 0);
+                       ret = -EINTR;
+                       break;
+               }
+               if (drm_lock_take(&master->lock, lock->context)) {
+                       master->lock.file_priv = file_priv;
+                       master->lock.lock_time = jiffies;
+                       atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
+                       break;  /* Got lock */
+               }
+
+               /* Contention */
+               schedule();
+               if (signal_pending(current)) {
+                       ret = -EINTR;
+                       break;
+               }
+       }
+       spin_lock_bh(&master->lock.spinlock);
+       master->lock.user_waiters--;
+       spin_unlock_bh(&master->lock.spinlock);
+       __set_current_state(TASK_RUNNING);
+       remove_wait_queue(&master->lock.lock_queue, &entry);
+
+       DRM_DEBUG("%d %s\n", lock->context,
+                 ret ? "interrupted" : "has lock");
+       if (ret) return ret;
+
+       /* don't set the block all signals on the master process for now 
+        * really probably not the correct answer but lets us debug xkb
+        * xserver for now */
+       if (!file_priv->is_master) {
+               sigemptyset(&dev->sigmask);
+               sigaddset(&dev->sigmask, SIGSTOP);
+               sigaddset(&dev->sigmask, SIGTSTP);
+               sigaddset(&dev->sigmask, SIGTTIN);
+               sigaddset(&dev->sigmask, SIGTTOU);
+               dev->sigdata.context = lock->context;
+               dev->sigdata.lock = master->lock.hw_lock;
+               /* Block job-control signals while the lock is held; see
+                * drm_notifier(). */
+               block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);
+       }
+
+       if (dev->driver->dma_ready && (lock->flags & _DRM_LOCK_READY))
+               dev->driver->dma_ready(dev);
+
+       if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT))
+       {
+               if (dev->driver->dma_quiescent(dev)) {
+                       DRM_DEBUG("%d waiting for DMA quiescent\n",
+                                 lock->context);
+                       return -EBUSY;
+               }
+       }
+
+       if (dev->driver->kernel_context_switch &&
+           dev->last_context != lock->context) {
+               dev->driver->kernel_context_switch(dev, dev->last_context,
+                                                  lock->context);
+       }
+
+       return 0;
+}
+
+/**
+ * Unlock ioctl.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_lock structure.
+ * \return zero on success or negative number on failure.
+ *
+ * Transfer and free the lock.
+ */
+int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       struct drm_lock *lock = data;
+       struct drm_master *master = file_priv->master;
+
+       if (lock->context == DRM_KERNEL_CONTEXT) {
+               DRM_ERROR("Process %d using kernel context %d\n",
+                         task_pid_nr(current), lock->context);
+               return -EINVAL;
+       }
+
+       /* Statistics: count unlock requests. */
+       atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
+
+       /* kernel_context_switch isn't used by any of the x86 drm
+        * modules but is required by the Sparc driver.
+        */
+       if (dev->driver->kernel_context_switch_unlock)
+               dev->driver->kernel_context_switch_unlock(dev);
+       else {
+               if (drm_lock_free(&master->lock, lock->context)) {
+                       /* FIXME: Should really bail out here. */
+               }
+       }
+
+       /* Undo the block_all_signals() installed by drm_lock(). */
+       unblock_all_signals();
+       return 0;
+}
+
+/**
+ * Take the heavyweight lock.
+ *
+ * \param lock lock pointer.
+ * \param context locking context.
+ * \return one if the lock is held, or zero otherwise.
+ *
+ * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
+ */
+int drm_lock_take(struct drm_lock_data *lock_data,
+                 unsigned int context)
+{
+       unsigned int old, new, prev;
+       volatile unsigned int *lock = &lock_data->hw_lock->lock;
+
+       spin_lock_bh(&lock_data->spinlock);
+       /*
+        * cmpxchg retry loop: if the lock is already held, only mark it
+        * contended; otherwise claim it for @context (setting CONT when
+        * more than one waiter is queued).
+        */
+       do {
+               old = *lock;
+               if (old & _DRM_LOCK_HELD)
+                       new = old | _DRM_LOCK_CONT;
+               else {
+                       new = context | _DRM_LOCK_HELD |
+                               ((lock_data->user_waiters + lock_data->kernel_waiters > 1) ?
+                                _DRM_LOCK_CONT : 0);
+               }
+               prev = cmpxchg(lock, old, new);
+       } while (prev != old);
+       spin_unlock_bh(&lock_data->spinlock);
+
+       if (_DRM_LOCKING_CONTEXT(old) == context) {
+               if (old & _DRM_LOCK_HELD) {
+                       /* Recursive take by the same context: refuse. */
+                       if (context != DRM_KERNEL_CONTEXT) {
+                               DRM_ERROR("%d holds heavyweight lock\n",
+                                         context);
+                       }
+                       return 0;
+               }
+       }
+
+       if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) {
+               /* Have lock */
+               return 1;
+       }
+       return 0;
+}
+EXPORT_SYMBOL(drm_lock_take);
+
+/**
+ * This takes a lock forcibly and hands it to context. Should ONLY be used
+ * inside *_unlock to give lock to kernel before calling *_dma_schedule.
+ *
+ * \param dev DRM device.
+ * \param lock lock pointer.
+ * \param context locking context.
+ * \return always one.
+ *
+ * Resets the lock file pointer.
+ * Marks the lock as held by the given context, via the \p cmpxchg instruction.
+ */
+static int drm_lock_transfer(struct drm_lock_data *lock_data,
+                            unsigned int context)
+{
+       unsigned int old, new, prev;
+       volatile unsigned int *lock = &lock_data->hw_lock->lock;
+
+       /* Clear the owning file: after transfer the holder is identified
+        * only by @context. */
+       lock_data->file_priv = NULL;
+       do {
+               old = *lock;
+               /* Unconditionally mark the lock held by @context. */
+               new = context | _DRM_LOCK_HELD;
+               prev = cmpxchg(lock, old, new);
+       } while (prev != old);
+       return 1;
+}
+
+/**
+ * Free lock.
+ *
+ * \param dev DRM device.
+ * \param lock lock.
+ * \param context context.
+ *
+ * Resets the lock file pointer.
+ * Marks the lock as not held, via the \p cmpxchg instruction. Wakes any task
+ * waiting on the lock queue.
+ */
+int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context)
+{
+       unsigned int old, new, prev;
+       volatile unsigned int *lock = &lock_data->hw_lock->lock;
+
+       spin_lock_bh(&lock_data->spinlock);
+       /* A kernel (idlelock) waiter exists: hand the lock over instead of
+        * freeing it; see drm_idlelock_take(). */
+       if (lock_data->kernel_waiters != 0) {
+               drm_lock_transfer(lock_data, 0);
+               lock_data->idle_has_lock = 1;
+               spin_unlock_bh(&lock_data->spinlock);
+               return 1;
+       }
+       spin_unlock_bh(&lock_data->spinlock);
+
+       /* Drop the HELD/CONT bits, keeping only the context id. */
+       do {
+               old = *lock;
+               new = _DRM_LOCKING_CONTEXT(old);
+               prev = cmpxchg(lock, old, new);
+       } while (prev != old);
+
+       if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
+               DRM_ERROR("%d freed heavyweight lock held by %d\n",
+                         context, _DRM_LOCKING_CONTEXT(old));
+               return 1;
+       }
+       wake_up_interruptible(&lock_data->lock_queue);
+       return 0;
+}
+EXPORT_SYMBOL(drm_lock_free);
+
+/**
+ * If we get here, it means that the process has called DRM_IOCTL_LOCK
+ * without calling DRM_IOCTL_UNLOCK.
+ *
+ * If the lock is not held, then let the signal proceed as usual.  If the lock
+ * is held, then set the contended flag and keep the signal blocked.
+ *
+ * \param priv pointer to a drm_sigdata structure.
+ * \return one if the signal should be delivered normally, or zero if the
+ * signal should be blocked.
+ */
+static int drm_notifier(void *priv)
+{
+       struct drm_sigdata *s = (struct drm_sigdata *) priv;
+       unsigned int old, new, prev;
+
+       /* Allow signal delivery if lock isn't held */
+       if (!s->lock || !_DRM_LOCK_IS_HELD(s->lock->lock)
+           || _DRM_LOCKING_CONTEXT(s->lock->lock) != s->context)
+               return 1;       /* deliver the signal normally */
+
+       /* Otherwise, set flag to force call to
+          drmUnlock */
+       do {
+               old = s->lock->lock;
+               new = old | _DRM_LOCK_CONT;
+               prev = cmpxchg(&s->lock->lock, old, new);
+       } while (prev != old);
+       return 0;               /* keep the signal blocked */
+}
+
+/**
+ * This function returns immediately and takes the hw lock
+ * with the kernel context if it is free, otherwise it gets the highest priority when and if
+ * it is eventually released.
+ *
+ * This guarantees that the kernel will _eventually_ have the lock _unless_ it is held
+ * by a blocked process. (In the latter case an explicit wait for the hardware lock would cause
+ * a deadlock, which is why the "idlelock" was invented).
+ *
+ * This should be sufficient to wait for GPU idle without
+ * having to worry about starvation.
+ */
+
+void drm_idlelock_take(struct drm_lock_data *lock_data)
+{
+       int ret = 0;
+
+       spin_lock_bh(&lock_data->spinlock);
+       lock_data->kernel_waiters++;
+       if (!lock_data->idle_has_lock) {
+
+               /* drm_lock_take() acquires the same spinlock itself, so it
+                * must be dropped across the call. */
+               spin_unlock_bh(&lock_data->spinlock);
+               ret = drm_lock_take(lock_data, DRM_KERNEL_CONTEXT);
+               spin_lock_bh(&lock_data->spinlock);
+
+               /* Otherwise a holder exists; drm_lock_free() will hand the
+                * lock over (idle_has_lock set there) when it is released. */
+               if (ret == 1)
+                       lock_data->idle_has_lock = 1;
+       }
+       spin_unlock_bh(&lock_data->spinlock);
+}
+EXPORT_SYMBOL(drm_idlelock_take);
+
+void drm_idlelock_release(struct drm_lock_data *lock_data)
+{
+       unsigned int old, prev;
+       volatile unsigned int *lock = &lock_data->hw_lock->lock;
+
+       spin_lock_bh(&lock_data->spinlock);
+       /* Last kernel waiter gone: if the idlelock still owns the hardware
+        * lock, return it to the kernel context and wake userspace waiters. */
+       if (--lock_data->kernel_waiters == 0) {
+               if (lock_data->idle_has_lock) {
+                       do {
+                               old = *lock;
+                               prev = cmpxchg(lock, old, DRM_KERNEL_CONTEXT);
+                       } while (prev != old);
+                       wake_up_interruptible(&lock_data->lock_queue);
+                       lock_data->idle_has_lock = 0;
+               }
+       }
+       spin_unlock_bh(&lock_data->spinlock);
+}
+EXPORT_SYMBOL(drm_idlelock_release);
+
+
+/*
+ * Return nonzero when @file_priv currently owns the heavyweight hardware
+ * lock on @dev.
+ */
+int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
+{
+       struct drm_master *master = file_priv->master;
+       return (file_priv->lock_count && master->lock.hw_lock &&
+               _DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) &&
+               master->lock.file_priv == file_priv);
+}
+
+EXPORT_SYMBOL(drm_i_have_hw_lock);
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_memory.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_memory.c
new file mode 100644 (file)
index 0000000..e4865f9
--- /dev/null
@@ -0,0 +1,168 @@
+/**
+ * \file drm_memory.c
+ * Memory management wrappers for DRM
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Thu Feb  4 14:00:34 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/highmem.h>
+#include "drmP.h"
+
+/**
+ * Called when "/proc/dri/%dev%/mem" is read.
+ *
+ * \param buf output buffer.
+ * \param start start of output data.
+ * \param offset requested start offset.
+ * \param len requested number of bytes.
+ * \param eof whether there is no more data to return.
+ * \param data private data.
+ * \return number of written bytes.
+ *
+ * No-op.
+ */
+int drm_mem_info(char *buf, char **start, off_t offset,
+                int len, int *eof, void *data)
+{
+       return 0;       /* intentional no-op: reports nothing (see header) */
+}
+
+#if __OS_HAS_AGP
+/*
+ * Map an AGP-bound range into kernel virtual space with vmap(), for
+ * chipsets whose CPU accesses are not translated by the GART.
+ * Returns the mapping, or NULL on failure.
+ */
+static void *agp_remap(unsigned long offset, unsigned long size,
+                      struct drm_device * dev)
+{
+       unsigned long i, num_pages =
+           PAGE_ALIGN(size) / PAGE_SIZE;
+       struct drm_agp_mem *agpmem;
+       struct page **page_map;
+       struct page **phys_page_map;
+       void *addr;
+
+       size = PAGE_ALIGN(size);
+
+#ifdef __alpha__
+       offset -= dev->hose->mem_space->start;
+#endif
+
+       /* Find the bound AGP region fully covering [offset, offset+size). */
+       list_for_each_entry(agpmem, &dev->agp->memory, head)
+               if (agpmem->bound <= offset
+                   && (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >=
+                   (offset + size))
+                       break;
+       /*
+        * NOTE(review): if the loop finishes without a break, agpmem is left
+        * pointing at the list head rather than NULL, so this check may never
+        * fire -- confirm against the upstream drm_memory.c fix.
+        */
+       if (!agpmem)
+               return NULL;
+
+       /*
+        * OK, we're mapping AGP space on a chipset/platform on which memory accesses by
+        * the CPU do not get remapped by the GART.  We fix this by using the kernel's
+        * page-table instead (that's probably faster anyhow...).
+        */
+       /* note: use vmalloc() because num_pages could be large... */
+       page_map = vmalloc(num_pages * sizeof(struct page *));
+       if (!page_map)
+               return NULL;
+
+       phys_page_map = (agpmem->memory->pages + (offset - agpmem->bound) / PAGE_SIZE);
+       for (i = 0; i < num_pages; ++i)
+               page_map[i] = phys_page_map[i];
+       addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP);
+       vfree(page_map);
+
+       return addr;
+}
+
+/** Wrapper around agp_allocate_memory() */
+DRM_AGP_MEM *drm_alloc_agp(struct drm_device * dev, int pages, u32 type)
+{
+       return drm_agp_allocate_memory(dev->agp->bridge, pages, type);
+}
+
+/** Wrapper around agp_free_memory(); returns 0 on success, -EINVAL otherwise. */
+int drm_free_agp(DRM_AGP_MEM * handle, int pages)
+{
+       return drm_agp_free_memory(handle) ? 0 : -EINVAL;
+}
+EXPORT_SYMBOL(drm_free_agp);
+
+/** Wrapper around agp_bind_memory() */
+int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
+{
+       return drm_agp_bind_memory(handle, start);
+}
+
+/** Wrapper around agp_unbind_memory() */
+int drm_unbind_agp(DRM_AGP_MEM * handle)
+{
+       return drm_agp_unbind_memory(handle);
+}
+EXPORT_SYMBOL(drm_unbind_agp);
+
+#else  /*  __OS_HAS_AGP  */
+/* Non-AGP build: there is no AGP space to remap. */
+static inline void *agp_remap(unsigned long offset, unsigned long size,
+                             struct drm_device * dev)
+{
+       return NULL;
+}
+
+#endif                         /* agp */
+
+/*
+ * Map @map for CPU access: AGP maps on aperture-less chipsets go through
+ * agp_remap()/vmap(); everything else uses ioremap().
+ */
+void drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev)
+{
+       if (drm_core_has_AGP(dev) &&
+           dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
+               map->handle = agp_remap(map->offset, map->size, dev);
+       else
+               map->handle = ioremap(map->offset, map->size);
+}
+EXPORT_SYMBOL(drm_core_ioremap);
+
+/* Write-combined variant of drm_core_ioremap(). */
+void drm_core_ioremap_wc(struct drm_local_map *map, struct drm_device *dev)
+{
+       if (drm_core_has_AGP(dev) &&
+           dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
+               map->handle = agp_remap(map->offset, map->size, dev);
+       else
+               map->handle = ioremap_wc(map->offset, map->size);
+}
+EXPORT_SYMBOL(drm_core_ioremap_wc);
+
+void drm_core_ioremapfree(struct drm_local_map *map, struct drm_device *dev)
+{
+       if (!map->handle || !map->size)
+               return;
+
+       /* vmap()ed AGP mappings need vunmap(); ioremap()ed ones iounmap(). */
+       if (drm_core_has_AGP(dev) &&
+           dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
+               vunmap(map->handle);
+       else
+               iounmap(map->handle);
+}
+EXPORT_SYMBOL(drm_core_ioremapfree);
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_mm.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_mm.c
new file mode 100644 (file)
index 0000000..d7d7eac
--- /dev/null
@@ -0,0 +1,510 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ **************************************************************************/
+
+/*
+ * Generic simple memory manager implementation. Intended to be used as a base
+ * class implementation for more advanced memory managers.
+ *
+ * Note that the algorithm used is quite simple and there might be substantial
+ * performance gains if a smarter free list is implemented. Currently it is just an
+ * unordered stack of free regions. This could easily be improved if an RB-tree
+ * is used instead. At least if we expect heavy fragmentation.
+ *
+ * Aligned allocations can also see improvement.
+ *
+ * Authors:
+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include "drmP.h"
+#include "drm_mm.h"
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+
+#define MM_UNUSED_TARGET 4
+
+/* Size of the free hole at the tail of the managed range, or 0 if the
+ * last node is allocated. */
+unsigned long drm_mm_tail_space(struct drm_mm *mm)
+{
+       struct list_head *tail_node;
+       struct drm_mm_node *entry;
+
+       tail_node = mm->ml_entry.prev;
+       entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
+       if (!entry->free)
+               return 0;
+
+       return entry->size;
+}
+
+/* Shrink the managed range by taking @size bytes off the tail hole. */
+int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
+{
+       struct list_head *tail_node;
+       struct drm_mm_node *entry;
+
+       tail_node = mm->ml_entry.prev;
+       entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
+       if (!entry->free)
+               return -ENOMEM;
+
+       /* NOTE(review): '<=' also rejects an exact fit; shrinking requires
+        * strictly more free tail space than requested. */
+       if (entry->size <= size)
+               return -ENOMEM;
+
+       entry->size -= size;
+       return 0;
+}
+
+/*
+ * Allocate a drm_mm_node, falling back to the pre-allocated unused-node
+ * cache (refilled by drm_mm_pre_get()) when kmalloc returns NULL.
+ */
+static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
+{
+       struct drm_mm_node *child;
+
+       if (atomic)
+               child = kmalloc(sizeof(*child), GFP_ATOMIC);
+       else
+               child = kmalloc(sizeof(*child), GFP_KERNEL);
+
+       if (unlikely(child == NULL)) {
+               spin_lock(&mm->unused_lock);
+               if (list_empty(&mm->unused_nodes))
+                       child = NULL;
+               else {
+                       child =
+                           list_entry(mm->unused_nodes.next,
+                                      struct drm_mm_node, fl_entry);
+                       list_del(&child->fl_entry);
+                       --mm->num_unused;
+               }
+               spin_unlock(&mm->unused_lock);
+       }
+       return child;
+}
+
+/* drm_mm_pre_get() - pre allocate drm_mm_node structure
+ * drm_mm:     memory manager struct we are pre-allocating for
+ *
+ * Returns 0 on success or -ENOMEM if allocation fails.
+ */
+int drm_mm_pre_get(struct drm_mm *mm)
+{
+       struct drm_mm_node *node;
+
+       spin_lock(&mm->unused_lock);
+       while (mm->num_unused < MM_UNUSED_TARGET) {
+               /* Drop the lock around the (possibly sleeping) allocation. */
+               spin_unlock(&mm->unused_lock);
+               node = kmalloc(sizeof(*node), GFP_KERNEL);
+               spin_lock(&mm->unused_lock);
+
+               if (unlikely(node == NULL)) {
+                       /* Tolerate failure if at least two nodes are cached. */
+                       int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
+                       spin_unlock(&mm->unused_lock);
+                       return ret;
+               }
+               ++mm->num_unused;
+               list_add_tail(&node->fl_entry, &mm->unused_nodes);
+       }
+       spin_unlock(&mm->unused_lock);
+       return 0;
+}
+EXPORT_SYMBOL(drm_mm_pre_get);
+
+/* Append a free node of @size starting at @start to both the space list
+ * and the free list. */
+static int drm_mm_create_tail_node(struct drm_mm *mm,
+                                  unsigned long start,
+                                  unsigned long size, int atomic)
+{
+       struct drm_mm_node *child;
+
+       child = drm_mm_kmalloc(mm, atomic);
+       if (unlikely(child == NULL))
+               return -ENOMEM;
+
+       child->free = 1;
+       child->size = size;
+       child->start = start;
+       child->mm = mm;
+
+       list_add_tail(&child->ml_entry, &mm->ml_entry);
+       list_add_tail(&child->fl_entry, &mm->fl_entry);
+
+       return 0;
+}
+
+/* Grow the managed range by @size: extend the tail hole if one exists,
+ * otherwise create a new free tail node. */
+int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size, int atomic)
+{
+       struct list_head *tail_node;
+       struct drm_mm_node *entry;
+
+       tail_node = mm->ml_entry.prev;
+       entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
+       if (!entry->free) {
+               return drm_mm_create_tail_node(mm, entry->start + entry->size,
+                                              size, atomic);
+       }
+       entry->size += size;
+       return 0;
+}
+
+/*
+ * Split an allocation of @size off the start of free node @parent and
+ * return it; @parent keeps the remainder.
+ */
+static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
+                                                unsigned long size,
+                                                int atomic)
+{
+       struct drm_mm_node *child;
+
+       child = drm_mm_kmalloc(parent->mm, atomic);
+       if (unlikely(child == NULL))
+               return NULL;
+
+       INIT_LIST_HEAD(&child->fl_entry);
+
+       child->free = 0;
+       child->size = size;
+       child->start = parent->start;
+       child->mm = parent->mm;
+
+       list_add_tail(&child->ml_entry, &parent->ml_entry);
+       /* NOTE(review): fl_entry was already initialised above; this second
+        * INIT_LIST_HEAD is redundant. */
+       INIT_LIST_HEAD(&child->fl_entry);
+
+       parent->size -= size;
+       parent->start += size;
+       return child;
+}
+
+
+/*
+ * Carve an allocation of @size (optionally @alignment-aligned) out of
+ * free node @node.  An exact fit consumes the node; otherwise the node
+ * is split and any alignment splinter is returned to the free list.
+ */
+struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
+                                            unsigned long size,
+                                            unsigned alignment,
+                                            int atomic)
+{
+
+       struct drm_mm_node *align_splitoff = NULL;
+       unsigned tmp = 0;
+
+       if (alignment)
+               tmp = node->start % alignment;
+
+       if (tmp) {
+               /* Split off the misaligned head so the allocation starts
+                * aligned. */
+               align_splitoff =
+                   drm_mm_split_at_start(node, alignment - tmp, atomic);
+               if (unlikely(align_splitoff == NULL))
+                       return NULL;
+       }
+
+       if (node->size == size) {
+               list_del_init(&node->fl_entry);
+               node->free = 0;
+       } else {
+               node = drm_mm_split_at_start(node, size, atomic);
+       }
+
+       if (align_splitoff)
+               drm_mm_put_block(align_splitoff);
+
+       return node;
+}
+EXPORT_SYMBOL(drm_mm_get_block_generic);
+
+/*
+ * Range-restricted variant of drm_mm_get_block_generic(): additionally
+ * skips the part of 'node' that lies before 'start' (plus any alignment
+ * gap) before carving out 'size' bytes.
+ *
+ * NOTE(review): 'end' is never re-checked here; callers are presumably
+ * expected to pass a node obtained from drm_mm_search_free_in_range()
+ * that already fits - confirm at the call sites.
+ */
+struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node,
+                                               unsigned long size,
+                                               unsigned alignment,
+                                               unsigned long start,
+                                               unsigned long end,
+                                               int atomic)
+{
+       struct drm_mm_node *align_splitoff = NULL;
+       unsigned tmp = 0;
+       unsigned wasted = 0;
+
+       if (node->start < start)
+               wasted += start - node->start;
+       if (alignment)
+               tmp = ((node->start + wasted) % alignment);
+
+       if (tmp)
+               wasted += alignment - tmp;
+       if (wasted) {
+               /* split off everything before the usable aligned offset */
+               align_splitoff = drm_mm_split_at_start(node, wasted, atomic);
+               if (unlikely(align_splitoff == NULL))
+                       return NULL;
+       }
+
+       if (node->size == size) {
+               list_del_init(&node->fl_entry);
+               node->free = 0;
+       } else {
+               node = drm_mm_split_at_start(node, size, atomic);
+       }
+
+       if (align_splitoff)
+               drm_mm_put_block(align_splitoff);
+
+       return node;
+}
+EXPORT_SYMBOL(drm_mm_get_block_range_generic);
+
+/*
+ * Put a block. Merge with the previous and / or next block if they are free.
+ * Otherwise add to the free stack.
+ */
+
+void drm_mm_put_block(struct drm_mm_node *cur)
+{
+
+       struct drm_mm *mm = cur->mm;
+       struct list_head *cur_head = &cur->ml_entry;
+       struct list_head *root_head = &mm->ml_entry;
+       struct drm_mm_node *prev_node = NULL;
+       struct drm_mm_node *next_node;
+
+       int merged = 0;
+
+       /* try to absorb cur into a free previous neighbour */
+       if (cur_head->prev != root_head) {
+               prev_node =
+                   list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
+               if (prev_node->free) {
+                       prev_node->size += cur->size;
+                       merged = 1;
+               }
+       }
+       /* and/or merge with a free next neighbour */
+       if (cur_head->next != root_head) {
+               next_node =
+                   list_entry(cur_head->next, struct drm_mm_node, ml_entry);
+               if (next_node->free) {
+                       if (merged) {
+                               /* prev, cur and next all collapse into prev;
+                                * next_node becomes spare - cache it up to
+                                * MM_UNUSED_TARGET, else free it */
+                               prev_node->size += next_node->size;
+                               list_del(&next_node->ml_entry);
+                               list_del(&next_node->fl_entry);
+                               spin_lock(&mm->unused_lock);
+                               if (mm->num_unused < MM_UNUSED_TARGET) {
+                                       list_add(&next_node->fl_entry,
+                                                &mm->unused_nodes);
+                                       ++mm->num_unused;
+                               } else
+                                       kfree(next_node);
+                               spin_unlock(&mm->unused_lock);
+                       } else {
+                               /* grow the next node backwards over cur */
+                               next_node->size += cur->size;
+                               next_node->start = cur->start;
+                               merged = 1;
+                       }
+               }
+       }
+       if (!merged) {
+               /* no free neighbour: cur itself becomes a free hole */
+               cur->free = 1;
+               list_add(&cur->fl_entry, &mm->fl_entry);
+       } else {
+               /* cur was absorbed - cache or free its node structure */
+               list_del(&cur->ml_entry);
+               spin_lock(&mm->unused_lock);
+               if (mm->num_unused < MM_UNUSED_TARGET) {
+                       list_add(&cur->fl_entry, &mm->unused_nodes);
+                       ++mm->num_unused;
+               } else
+                       kfree(cur);
+               spin_unlock(&mm->unused_lock);
+       }
+}
+
+EXPORT_SYMBOL(drm_mm_put_block);
+
+/*
+ * Walk the free stack looking for a hole of at least 'size' bytes that
+ * can also satisfy 'alignment'.  With best_match == 0 the first fit is
+ * returned; otherwise the smallest suitable hole (best fit).  Returns
+ * NULL when nothing fits.
+ */
+struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
+                                      unsigned long size,
+                                      unsigned alignment, int best_match)
+{
+       struct list_head *list;
+       const struct list_head *free_stack = &mm->fl_entry;
+       struct drm_mm_node *entry;
+       struct drm_mm_node *best;
+       unsigned long best_size;
+       unsigned wasted;
+
+       best = NULL;
+       best_size = ~0UL;
+
+       list_for_each(list, free_stack) {
+               entry = list_entry(list, struct drm_mm_node, fl_entry);
+               wasted = 0;
+
+               if (entry->size < size)
+                       continue;
+
+               if (alignment) {
+                       register unsigned tmp = entry->start % alignment;
+                       if (tmp)
+                               wasted += alignment - tmp;
+               }
+
+               if (entry->size >= size + wasted) {
+                       if (!best_match)
+                               return entry;
+                       /* BUG FIX: best fit must compare the candidate's
+                        * size against best_size.  The original compared
+                        * the *requested* size, which (once any hole was
+                        * recorded) kept overwriting 'best' with whatever
+                        * suitable hole came last instead of the smallest.
+                        */
+                       if (entry->size < best_size) {
+                               best = entry;
+                               best_size = entry->size;
+                       }
+               }
+       }
+
+       return best;
+}
+EXPORT_SYMBOL(drm_mm_search_free);
+
+/*
+ * Like drm_mm_search_free(), but only considers holes overlapping the
+ * interval [start, end], accounting for the bytes wasted when a hole
+ * begins before 'start' or needs alignment padding.
+ */
+struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
+                                               unsigned long size,
+                                               unsigned alignment,
+                                               unsigned long start,
+                                               unsigned long end,
+                                               int best_match)
+{
+       struct list_head *list;
+       const struct list_head *free_stack = &mm->fl_entry;
+       struct drm_mm_node *entry;
+       struct drm_mm_node *best;
+       unsigned long best_size;
+       unsigned wasted;
+
+       best = NULL;
+       best_size = ~0UL;
+
+       list_for_each(list, free_stack) {
+               entry = list_entry(list, struct drm_mm_node, fl_entry);
+               wasted = 0;
+
+               if (entry->size < size)
+                       continue;
+
+               /* hole entirely outside the requested range */
+               if (entry->start > end || (entry->start+entry->size) < start)
+                       continue;
+
+               if (entry->start < start)
+                       wasted += start - entry->start;
+
+               if (alignment) {
+                       register unsigned tmp = (entry->start + wasted) % alignment;
+                       if (tmp)
+                               wasted += alignment - tmp;
+               }
+
+               if (entry->size >= size + wasted) {
+                       if (!best_match)
+                               return entry;
+                       /* BUG FIX: compare the candidate's size, not the
+                        * requested size (same defect as in
+                        * drm_mm_search_free()) */
+                       if (entry->size < best_size) {
+                               best = entry;
+                               best_size = entry->size;
+                       }
+               }
+       }
+
+       return best;
+}
+EXPORT_SYMBOL(drm_mm_search_free_in_range);
+
+/*
+ * Non-zero when the manager is "clean": nothing allocated, i.e. the range
+ * list holds exactly one (free) node spanning the whole managed area.
+ */
+int drm_mm_clean(struct drm_mm * mm)
+{
+       struct list_head *head = &mm->ml_entry;
+
+       /* single-element list <=> head->next->next wraps back to head */
+       return (head->next->next == head);
+}
+EXPORT_SYMBOL(drm_mm_clean);
+
+/*
+ * Initialise a memory manager for the range [start, start + size) and
+ * create its initial, all-free node.  Returns 0 or -ENOMEM.
+ */
+int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
+{
+       INIT_LIST_HEAD(&mm->ml_entry);
+       INIT_LIST_HEAD(&mm->fl_entry);
+       INIT_LIST_HEAD(&mm->unused_nodes);
+       mm->num_unused = 0;
+       spin_lock_init(&mm->unused_lock);
+
+       /* non-atomic context assumed at init time */
+       return drm_mm_create_tail_node(mm, start, size, 0);
+}
+EXPORT_SYMBOL(drm_mm_init);
+
+/*
+ * Tear down a manager.  Refuses (with an error message) while anything is
+ * still allocated; otherwise frees the single remaining free node and
+ * drains the unused-node cache.
+ */
+void drm_mm_takedown(struct drm_mm * mm)
+{
+       struct list_head *bnode = mm->fl_entry.next;
+       struct drm_mm_node *entry;
+       struct drm_mm_node *next;
+
+       entry = list_entry(bnode, struct drm_mm_node, fl_entry);
+
+       /* clean <=> exactly one node on both the range and free lists */
+       if (entry->ml_entry.next != &mm->ml_entry ||
+           entry->fl_entry.next != &mm->fl_entry) {
+               DRM_ERROR("Memory manager not clean. Delaying takedown\n");
+               return;
+       }
+
+       list_del(&entry->fl_entry);
+       list_del(&entry->ml_entry);
+       kfree(entry);
+
+       /* release the pre-allocated spare nodes */
+       spin_lock(&mm->unused_lock);
+       list_for_each_entry_safe(entry, next, &mm->unused_nodes, fl_entry) {
+               list_del(&entry->fl_entry);
+               kfree(entry);
+               --mm->num_unused;
+       }
+       spin_unlock(&mm->unused_lock);
+
+       BUG_ON(mm->num_unused != 0);
+}
+EXPORT_SYMBOL(drm_mm_takedown);
+
+/*
+ * Dump every node (used and free) plus usage totals to the kernel log at
+ * KERN_DEBUG severity, each line tagged with 'prefix'.
+ */
+void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
+{
+       struct drm_mm_node *entry;
+       int total_used = 0, total_free = 0, total = 0;
+
+       list_for_each_entry(entry, &mm->ml_entry, ml_entry) {
+               printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8ld: %s\n",
+                       prefix, entry->start, entry->start + entry->size,
+                       entry->size, entry->free ? "free" : "used");
+               total += entry->size;
+               if (entry->free)
+                       total_free += entry->size;
+               else
+                       total_used += entry->size;
+       }
+       printk(KERN_DEBUG "%s total: %d, used %d free %d\n", prefix, total,
+               total_used, total_free);
+}
+EXPORT_SYMBOL(drm_mm_debug_table);
+
+#if defined(CONFIG_DEBUG_FS)
+/*
+ * debugfs counterpart of drm_mm_debug_table(): one line per node plus a
+ * totals line, written into the given seq_file.  Always returns 0.
+ */
+int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
+{
+       struct drm_mm_node *entry;
+       int total_used = 0, total_free = 0, total = 0;
+
+       list_for_each_entry(entry, &mm->ml_entry, ml_entry) {
+               seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: %s\n", entry->start, entry->start + entry->size, entry->size, entry->free ? "free" : "used");
+               total += entry->size;
+               if (entry->free)
+                       total_free += entry->size;
+               else
+                       total_used += entry->size;
+       }
+       seq_printf(m, "total: %d, used %d free %d\n", total, total_used, total_free);
+       return 0;
+}
+EXPORT_SYMBOL(drm_mm_dump_table);
+#endif
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_modes.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_modes.c
new file mode 100644 (file)
index 0000000..6d81a02
--- /dev/null
@@ -0,0 +1,1022 @@
+/*
+ * The list_sort function is (presumably) licensed under the GPL (see the
+ * top level "COPYING" file for details).
+ *
+ * The remainder of this file is:
+ *
+ * Copyright © 1997-2003 by The XFree86 Project, Inc.
+ * Copyright © 2007 Dave Airlie
+ * Copyright © 2007-2008 Intel Corporation
+ *   Jesse Barnes <jesse.barnes@intel.com>
+ * Copyright 2005-2006 Luc Verhaegen
+ * Copyright (c) 2001, Andy Ritger  aritger@nvidia.com
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Except as contained in this notice, the name of the copyright holder(s)
+ * and author(s) shall not be used in advertising or otherwise to promote
+ * the sale, use or other dealings in this Software without prior written
+ * authorization from the copyright holder(s) and author(s).
+ */
+
+#include <linux/list.h>
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+
+/**
+ * drm_mode_debug_printmodeline - debug print a mode
+ * @mode: mode to print
+ *
+ * LOCKING:
+ * None.
+ *
+ * Describe @mode using DRM_DEBUG.
+ */
+void drm_mode_debug_printmodeline(struct drm_display_mode *mode)
+{
+       /* single-line xfree86-style modeline dump */
+       DRM_DEBUG_KMS("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d "
+                       "0x%x 0x%x\n",
+               mode->base.id, mode->name, mode->vrefresh, mode->clock,
+               mode->hdisplay, mode->hsync_start,
+               mode->hsync_end, mode->htotal,
+               mode->vdisplay, mode->vsync_start,
+               mode->vsync_end, mode->vtotal, mode->type, mode->flags);
+}
+EXPORT_SYMBOL(drm_mode_debug_printmodeline);
+/**
+ * drm_cvt_mode -create a modeline based on CVT algorithm
+ * @dev: DRM device
+ * @hdisplay: hdisplay size
+ * @vdisplay: vdisplay size
+ * @vrefresh  : vrefresh rate
+ * @reduced : Whether the GTF calculation is simplified
+ * @interlaced:Whether the interlace is supported
+ *
+ * LOCKING:
+ * none.
+ *
+ * return the modeline based on CVT algorithm
+ *
+ * This function is called to generate the modeline based on CVT algorithm
+ * according to the hdisplay, vdisplay, vrefresh.
+ * It is based from the VESA(TM) Coordinated Video Timing Generator by
+ * Graham Loveridge April 9, 2003 available at
+ * http://www.vesa.org/public/CVT/CVTd6r1.xls
+ *
+ * And it is copied from xf86CVTmode in xserver/hw/xfree86/modes/xf86cvt.c.
+ * What I have done is to translate it by using integer calculation.
+ */
+#define HV_FACTOR                      1000
+struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
+                                     int vdisplay, int vrefresh,
+                                     bool reduced, bool interlaced, bool margins)
+{
+       /* 1) top/bottom margin size (% of height) - default: 1.8, */
+#define        CVT_MARGIN_PERCENTAGE           18
+       /* 2) character cell horizontal granularity (pixels) - default 8 */
+#define        CVT_H_GRANULARITY               8
+       /* 3) Minimum vertical porch (lines) - default 3 */
+#define        CVT_MIN_V_PORCH                 3
+       /* 4) Minimum number of vertical back porch lines - default 6 */
+#define        CVT_MIN_V_BPORCH                6
+       /* Pixel Clock step (kHz) */
+#define CVT_CLOCK_STEP                 250
+       struct drm_display_mode *drm_mode;
+       unsigned int vfieldrate, hperiod;
+       int hdisplay_rnd, hmargin, vdisplay_rnd, vmargin, vsync;
+       int interlace;
+
+       /* allocate the drm_display_mode structure. If failure, we will
+        * return directly
+        */
+       drm_mode = drm_mode_create(dev);
+       if (!drm_mode)
+               return NULL;
+
+       /* the CVT default refresh rate is 60Hz */
+       if (!vrefresh)
+               vrefresh = 60;
+
+       /* the required field fresh rate */
+       if (interlaced)
+               vfieldrate = vrefresh * 2;
+       else
+               vfieldrate = vrefresh;
+
+       /* horizontal pixels */
+       hdisplay_rnd = hdisplay - (hdisplay % CVT_H_GRANULARITY);
+
+       /* determine the left&right borders */
+       hmargin = 0;
+       if (margins) {
+               hmargin = hdisplay_rnd * CVT_MARGIN_PERCENTAGE / 1000;
+               hmargin -= hmargin % CVT_H_GRANULARITY;
+       }
+       /* find the total active pixels */
+       drm_mode->hdisplay = hdisplay_rnd + 2 * hmargin;
+
+       /* find the number of lines per field */
+       if (interlaced)
+               vdisplay_rnd = vdisplay / 2;
+       else
+               vdisplay_rnd = vdisplay;
+
+       /* find the top & bottom borders */
+       vmargin = 0;
+       if (margins)
+               vmargin = vdisplay_rnd * CVT_MARGIN_PERCENTAGE / 1000;
+
+       drm_mode->vdisplay = vdisplay + 2 * vmargin;
+
+       /* Interlaced */
+       if (interlaced)
+               interlace = 1;
+       else
+               interlace = 0;
+
+       /* Determine VSync Width from aspect ratio */
+       if (!(vdisplay % 3) && ((vdisplay * 4 / 3) == hdisplay))
+               vsync = 4;
+       else if (!(vdisplay % 9) && ((vdisplay * 16 / 9) == hdisplay))
+               vsync = 5;
+       else if (!(vdisplay % 10) && ((vdisplay * 16 / 10) == hdisplay))
+               vsync = 6;
+       else if (!(vdisplay % 4) && ((vdisplay * 5 / 4) == hdisplay))
+               vsync = 7;
+       else if (!(vdisplay % 9) && ((vdisplay * 15 / 9) == hdisplay))
+               vsync = 7;
+       else /* custom */
+               vsync = 10;
+
+       if (!reduced) {
+               /* simplify the GTF calculation */
+               /* 4) Minimum time of vertical sync + back porch interval (µs)
+                * default 550.0
+                */
+               int tmp1, tmp2;
+#define CVT_MIN_VSYNC_BP       550
+               /* 3) Nominal HSync width (% of line period) - default 8 */
+#define CVT_HSYNC_PERCENTAGE   8
+               unsigned int hblank_percentage;
+               int vsyncandback_porch, vback_porch, hblank;
+
+               /* estimated the horizontal period */
+               tmp1 = HV_FACTOR * 1000000  -
+                               CVT_MIN_VSYNC_BP * HV_FACTOR * vfieldrate;
+               tmp2 = (vdisplay_rnd + 2 * vmargin + CVT_MIN_V_PORCH) * 2 +
+                               interlace;
+               hperiod = tmp1 * 2 / (tmp2 * vfieldrate);
+
+               tmp1 = CVT_MIN_VSYNC_BP * HV_FACTOR / hperiod + 1;
+               /* 9. Find number of lines in sync + backporch */
+               if (tmp1 < (vsync + CVT_MIN_V_PORCH))
+                       vsyncandback_porch = vsync + CVT_MIN_V_PORCH;
+               else
+                       vsyncandback_porch = tmp1;
+               /* 10. Find number of lines in back porch */
+               vback_porch = vsyncandback_porch - vsync;
+               drm_mode->vtotal = vdisplay_rnd + 2 * vmargin +
+                               vsyncandback_porch + CVT_MIN_V_PORCH;
+               /* 5) Definition of Horizontal blanking time limitation */
+               /* Gradient (%/kHz) - default 600 */
+#define CVT_M_FACTOR   600
+               /* Offset (%) - default 40 */
+#define CVT_C_FACTOR   40
+               /* Blanking time scaling factor - default 128 */
+#define CVT_K_FACTOR   128
+               /* Scaling factor weighting - default 20 */
+#define CVT_J_FACTOR   20
+#define CVT_M_PRIME    (CVT_M_FACTOR * CVT_K_FACTOR / 256)
+#define CVT_C_PRIME    ((CVT_C_FACTOR - CVT_J_FACTOR) * CVT_K_FACTOR / 256 + \
+                        CVT_J_FACTOR)
+               /* 12. Find ideal blanking duty cycle from formula */
+               hblank_percentage = CVT_C_PRIME * HV_FACTOR - CVT_M_PRIME *
+                                       hperiod / 1000;
+               /* 13. Blanking time */
+               if (hblank_percentage < 20 * HV_FACTOR)
+                       hblank_percentage = 20 * HV_FACTOR;
+               hblank = drm_mode->hdisplay * hblank_percentage /
+                        (100 * HV_FACTOR - hblank_percentage);
+               hblank -= hblank % (2 * CVT_H_GRANULARITY);
+               /* 14. find the total pixes per line */
+               drm_mode->htotal = drm_mode->hdisplay + hblank;
+               drm_mode->hsync_end = drm_mode->hdisplay + hblank / 2;
+               drm_mode->hsync_start = drm_mode->hsync_end -
+                       (drm_mode->htotal * CVT_HSYNC_PERCENTAGE) / 100;
+               drm_mode->hsync_start += CVT_H_GRANULARITY -
+                       drm_mode->hsync_start % CVT_H_GRANULARITY;
+               /* fill the Vsync values */
+               drm_mode->vsync_start = drm_mode->vdisplay + CVT_MIN_V_PORCH;
+               drm_mode->vsync_end = drm_mode->vsync_start + vsync;
+       } else {
+               /* Reduced blanking */
+               /* Minimum vertical blanking interval time (µs)- default 460 */
+#define CVT_RB_MIN_VBLANK      460
+               /* Fixed number of clocks for horizontal sync */
+#define CVT_RB_H_SYNC          32
+               /* Fixed number of clocks for horizontal blanking */
+#define CVT_RB_H_BLANK         160
+               /* Fixed number of lines for vertical front porch - default 3*/
+#define CVT_RB_VFPORCH         3
+               int vbilines;
+               int tmp1, tmp2;
+               /* 8. Estimate Horizontal period. */
+               tmp1 = HV_FACTOR * 1000000 -
+                       CVT_RB_MIN_VBLANK * HV_FACTOR * vfieldrate;
+               tmp2 = vdisplay_rnd + 2 * vmargin;
+               hperiod = tmp1 / (tmp2 * vfieldrate);
+               /* 9. Find number of lines in vertical blanking */
+               vbilines = CVT_RB_MIN_VBLANK * HV_FACTOR / hperiod + 1;
+               /* 10. Check if vertical blanking is sufficient */
+               if (vbilines < (CVT_RB_VFPORCH + vsync + CVT_MIN_V_BPORCH))
+                       vbilines = CVT_RB_VFPORCH + vsync + CVT_MIN_V_BPORCH;
+               /* 11. Find total number of lines in vertical field */
+               drm_mode->vtotal = vdisplay_rnd + 2 * vmargin + vbilines;
+               /* 12. Find total number of pixels in a line */
+               drm_mode->htotal = drm_mode->hdisplay + CVT_RB_H_BLANK;
+               /* Fill in HSync values */
+               drm_mode->hsync_end = drm_mode->hdisplay + CVT_RB_H_BLANK / 2;
+               drm_mode->hsync_start = drm_mode->hsync_end = CVT_RB_H_SYNC;
+       }
+       /* 15/13. Find pixel clock frequency (kHz for xf86) */
+       drm_mode->clock = drm_mode->htotal * HV_FACTOR * 1000 / hperiod;
+       drm_mode->clock -= drm_mode->clock % CVT_CLOCK_STEP;
+       /* 18/16. Find actual vertical frame frequency */
+       /* ignore - just set the mode flag for interlaced */
+       if (interlaced)
+               drm_mode->vtotal *= 2;
+       /* Fill the mode line name */
+       drm_mode_set_name(drm_mode);
+       if (reduced)
+               drm_mode->flags |= (DRM_MODE_FLAG_PHSYNC |
+                                       DRM_MODE_FLAG_NVSYNC);
+       else
+               drm_mode->flags |= (DRM_MODE_FLAG_PVSYNC |
+                                       DRM_MODE_FLAG_NHSYNC);
+       if (interlaced)
+               drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
+
+    return drm_mode;
+}
+EXPORT_SYMBOL(drm_cvt_mode);
+
+/**
+ * drm_gtf_mode - create the modeline based on GTF algorithm
+ *
+ * @dev                :drm device
+ * @hdisplay   :hdisplay size
+ * @vdisplay   :vdisplay size
+ * @vrefresh   :vrefresh rate.
+ * @interlaced :whether the interlace is supported
+ * @margins    :whether the margin is supported
+ *
+ * LOCKING.
+ * none.
+ *
+ * return the modeline based on GTF algorithm
+ *
+ * This function is to create the modeline based on the GTF algorithm.
+ * Generalized Timing Formula is derived from:
+ *     GTF Spreadsheet by Andy Morrish (1/5/97)
+ *     available at http://www.vesa.org
+ *
+ * And it is copied from the file of xserver/hw/xfree86/modes/xf86gtf.c.
+ * What I have done is to translate it by using integer calculation.
+ * I also refer to the function of fb_get_mode in the file of
+ * drivers/video/fbmon.c
+ */
+struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, int hdisplay,
+                                     int vdisplay, int vrefresh,
+                                     bool interlaced, int margins)
+{
+       /* 1) top/bottom margin size (% of height) - default: 1.8, */
+#define        GTF_MARGIN_PERCENTAGE           18
+       /* 2) character cell horizontal granularity (pixels) - default 8 */
+#define        GTF_CELL_GRAN                   8
+       /* 3) Minimum vertical porch (lines) - default 3 */
+#define        GTF_MIN_V_PORCH                 1
+       /* width of vsync in lines */
+#define V_SYNC_RQD                     3
+       /* width of hsync as % of total line */
+#define H_SYNC_PERCENT                 8
+       /* min time of vsync + back porch (microsec) */
+#define MIN_VSYNC_PLUS_BP              550
+       /* blanking formula gradient */
+#define GTF_M                          600
+       /* blanking formula offset */
+#define GTF_C                          40
+       /* blanking formula scaling factor */
+#define GTF_K                          128
+       /* blanking formula scaling factor */
+#define GTF_J                          20
+       /* C' and M' are part of the Blanking Duty Cycle computation */
+#define GTF_C_PRIME            (((GTF_C - GTF_J) * GTF_K / 256) + GTF_J)
+#define GTF_M_PRIME            (GTF_K * GTF_M / 256)
+       struct drm_display_mode *drm_mode;
+       unsigned int hdisplay_rnd, vdisplay_rnd, vfieldrate_rqd;
+       int top_margin, bottom_margin;
+       int interlace;
+       unsigned int hfreq_est;
+       int vsync_plus_bp, vback_porch;
+       /* NOTE(review): vfieldrate_est, hperiod, vframe_rate and
+        * vback_porch are computed below but never consumed - they mirror
+        * intermediate cells of the GTF spreadsheet */
+       unsigned int vtotal_lines, vfieldrate_est, hperiod;
+       unsigned int vfield_rate, vframe_rate;
+       int left_margin, right_margin;
+       unsigned int total_active_pixels, ideal_duty_cycle;
+       unsigned int hblank, total_pixels, pixel_freq;
+       int hsync, hfront_porch, vodd_front_porch_lines;
+       unsigned int tmp1, tmp2;
+
+       drm_mode = drm_mode_create(dev);
+       if (!drm_mode)
+               return NULL;
+
+       /* 1. In order to give correct results, the number of horizontal
+        * pixels requested is first processed to ensure that it is divisible
+        * by the character size, by rounding it to the nearest character
+        * cell boundary:
+        */
+       hdisplay_rnd = (hdisplay + GTF_CELL_GRAN / 2) / GTF_CELL_GRAN;
+       hdisplay_rnd = hdisplay_rnd * GTF_CELL_GRAN;
+
+       /* 2. If interlace is requested, the number of vertical lines assumed
+        * by the calculation must be halved, as the computation calculates
+        * the number of vertical lines per field.
+        */
+       if (interlaced)
+               vdisplay_rnd = vdisplay / 2;
+       else
+               vdisplay_rnd = vdisplay;
+
+       /* 3. Find the frame rate required: */
+       if (interlaced)
+               vfieldrate_rqd = vrefresh * 2;
+       else
+               vfieldrate_rqd = vrefresh;
+
+       /* 4. Find number of lines in Top margin: */
+       top_margin = 0;
+       if (margins)
+               top_margin = (vdisplay_rnd * GTF_MARGIN_PERCENTAGE + 500) /
+                               1000;
+       /* 5. Find number of lines in bottom margin: */
+       bottom_margin = top_margin;
+
+       /* 6. If interlace is required, then set variable interlace: */
+       if (interlaced)
+               interlace = 1;
+       else
+               interlace = 0;
+
+       /* 7. Estimate the Horizontal frequency */
+       {
+               tmp1 = (1000000  - MIN_VSYNC_PLUS_BP * vfieldrate_rqd) / 500;
+               tmp2 = (vdisplay_rnd + 2 * top_margin + GTF_MIN_V_PORCH) *
+                               2 + interlace;
+               hfreq_est = (tmp2 * 1000 * vfieldrate_rqd) / tmp1;
+       }
+
+       /* 8. Find the number of lines in V sync + back porch */
+       /* [V SYNC+BP] = RINT(([MIN VSYNC+BP] * hfreq_est / 1000000)) */
+       vsync_plus_bp = MIN_VSYNC_PLUS_BP * hfreq_est / 1000;
+       vsync_plus_bp = (vsync_plus_bp + 500) / 1000;
+       /*  9. Find the number of lines in V back porch alone: */
+       vback_porch = vsync_plus_bp - V_SYNC_RQD;
+       /*  10. Find the total number of lines in Vertical field period: */
+       vtotal_lines = vdisplay_rnd + top_margin + bottom_margin +
+                       vsync_plus_bp + GTF_MIN_V_PORCH;
+       /*  11. Estimate the Vertical field frequency: */
+       vfieldrate_est = hfreq_est / vtotal_lines;
+       /*  12. Find the actual horizontal period: */
+       hperiod = 1000000 / (vfieldrate_rqd * vtotal_lines);
+
+       /*  13. Find the actual Vertical field frequency: */
+       vfield_rate = hfreq_est / vtotal_lines;
+       /*  14. Find the Vertical frame frequency: */
+       if (interlaced)
+               vframe_rate = vfield_rate / 2;
+       else
+               vframe_rate = vfield_rate;
+       /*  15. Find number of pixels in left margin: */
+       if (margins)
+               left_margin = (hdisplay_rnd * GTF_MARGIN_PERCENTAGE + 500) /
+                               1000;
+       else
+               left_margin = 0;
+
+       /* 16.Find number of pixels in right margin: */
+       right_margin = left_margin;
+       /* 17.Find total number of active pixels in image and left and right */
+       total_active_pixels = hdisplay_rnd + left_margin + right_margin;
+       /* 18.Find the ideal blanking duty cycle from blanking duty cycle */
+       ideal_duty_cycle = GTF_C_PRIME * 1000 -
+                               (GTF_M_PRIME * 1000000 / hfreq_est);
+       /* 19.Find the number of pixels in the blanking time to the nearest
+        * double character cell: */
+       hblank = total_active_pixels * ideal_duty_cycle /
+                       (100000 - ideal_duty_cycle);
+       hblank = (hblank + GTF_CELL_GRAN) / (2 * GTF_CELL_GRAN);
+       hblank = hblank * 2 * GTF_CELL_GRAN;
+       /* 20.Find total number of pixels: */
+       total_pixels = total_active_pixels + hblank;
+       /* 21.Find pixel clock frequency: */
+       pixel_freq = total_pixels * hfreq_est / 1000;
+       /* Stage 1 computations are now complete; I should really pass
+        * the results to another function and do the Stage 2 computations,
+        * but I only need a few more values so I'll just append the
+        * computations here for now */
+       /* 17. Find the number of pixels in the horizontal sync period: */
+       hsync = H_SYNC_PERCENT * total_pixels / 100;
+       hsync = (hsync + GTF_CELL_GRAN / 2) / GTF_CELL_GRAN;
+       hsync = hsync * GTF_CELL_GRAN;
+       /* 18. Find the number of pixels in horizontal front porch period */
+       hfront_porch = hblank / 2 - hsync;
+       /*  36. Find the number of lines in the odd front porch period: */
+       vodd_front_porch_lines = GTF_MIN_V_PORCH ;
+
+       /* finally, pack the results in the mode struct */
+       drm_mode->hdisplay = hdisplay_rnd;
+       drm_mode->hsync_start = hdisplay_rnd + hfront_porch;
+       drm_mode->hsync_end = drm_mode->hsync_start + hsync;
+       drm_mode->htotal = total_pixels;
+       drm_mode->vdisplay = vdisplay_rnd;
+       drm_mode->vsync_start = vdisplay_rnd + vodd_front_porch_lines;
+       drm_mode->vsync_end = drm_mode->vsync_start + V_SYNC_RQD;
+       drm_mode->vtotal = vtotal_lines;
+
+       /* pixel clock in kHz */
+       drm_mode->clock = pixel_freq;
+
+       drm_mode_set_name(drm_mode);
+       drm_mode->flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC;
+
+       if (interlaced) {
+               drm_mode->vtotal *= 2;
+               drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
+       }
+
+       return drm_mode;
+}
+EXPORT_SYMBOL(drm_gtf_mode);
+/**
+ * drm_mode_set_name - set the name on a mode
+ * @mode: name will be set in this mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Set the name of @mode to a standard format.
+ */
+void drm_mode_set_name(struct drm_display_mode *mode)
+{
+       /* e.g. "1920x1080"; snprintf truncates at DRM_DISPLAY_MODE_LEN */
+       snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d", mode->hdisplay,
+                mode->vdisplay);
+}
+EXPORT_SYMBOL(drm_mode_set_name);
+
+/**
+ * drm_mode_list_concat - move modes from one list to another
+ * @head: source list
+ * @new: dst list
+ *
+ * LOCKING:
+ * Caller must ensure both lists are locked.
+ *
+ * Move all the modes from @head to @new.
+ */
+void drm_mode_list_concat(struct list_head *head, struct list_head *new)
+{
+
+       struct list_head *entry, *tmp;
+
+       /* _safe variant required: list_move_tail() unlinks each entry from
+        * @head while we walk it.  @head is left empty afterwards. */
+       list_for_each_safe(entry, tmp, head) {
+               list_move_tail(entry, new);
+       }
+}
+EXPORT_SYMBOL(drm_mode_list_concat);
+
+/**
+ * drm_mode_width - get the width of a mode
+ * @mode: mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Return @mode's width (hdisplay) value.
+ *
+ * FIXME: is this needed?
+ *
+ * RETURNS:
+ * @mode->hdisplay
+ */
+int drm_mode_width(struct drm_display_mode *mode)
+{
+       /* Trivial accessor; kept as an exported symbol for API symmetry. */
+       return mode->hdisplay;
+
+}
+EXPORT_SYMBOL(drm_mode_width);
+
+/**
+ * drm_mode_height - get the height of a mode
+ * @mode: mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Return @mode's height (vdisplay) value.
+ *
+ * FIXME: is this needed?
+ *
+ * RETURNS:
+ * @mode->vdisplay
+ */
+int drm_mode_height(struct drm_display_mode *mode)
+{
+       /* Trivial accessor; kept as an exported symbol for API symmetry. */
+       return mode->vdisplay;
+}
+EXPORT_SYMBOL(drm_mode_height);
+
+/** drm_mode_hsync - get the hsync of a mode
+ * @mode: mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Return @modes's hsync rate in kHz, rounded to the nearest int.
+ */
+int drm_mode_hsync(struct drm_display_mode *mode)
+{
+       unsigned int calc_val;
+
+       /* Prefer an explicitly stored hsync rate when one is present. */
+       if (mode->hsync)
+               return mode->hsync;
+
+       /* htotal == 0 would divide by zero below, so reject any
+        * non-positive total (was "< 0", which let 0 through). */
+       if (mode->htotal <= 0)
+               return 0;
+
+       calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */
+       calc_val += 500;                                /* round to 1000Hz */
+       calc_val /= 1000;                               /* truncate to kHz */
+
+       return calc_val;
+}
+EXPORT_SYMBOL(drm_mode_hsync);
+
+/**
+ * drm_mode_vrefresh - get the vrefresh of a mode
+ * @mode: mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Return @mode's vrefresh rate in Hz or calculate it if necessary.
+ *
+ * FIXME: why is this needed?  shouldn't vrefresh be set already?
+ *
+ * RETURNS:
+ * Vertical refresh rate. It will be the result of actual value plus 0.5.
+ * If it is 70.288, it will return 70Hz.
+ * If it is 59.6, it will return 60Hz.
+ */
+int drm_mode_vrefresh(struct drm_display_mode *mode)
+{
+       int refresh = 0;
+       unsigned int calc_val;
+
+       if (mode->vrefresh > 0)
+               refresh = mode->vrefresh;
+       else if (mode->htotal > 0 && mode->vtotal > 0) {
+               int vtotal;
+               vtotal = mode->vtotal;
+               /* work out vrefresh the value will be x1000 */
+               calc_val = (mode->clock * 1000);
+               calc_val /= mode->htotal;
+               /* + vtotal/2 rounds to nearest rather than truncating */
+               refresh = (calc_val + vtotal / 2) / vtotal;
+
+               /* Interlace halves the per-field line count, doublescan
+                * doubles it; vscan repeats each line vscan times. */
+               if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+                       refresh *= 2;
+               if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+                       refresh /= 2;
+               if (mode->vscan > 1)
+                       refresh /= mode->vscan;
+       }
+       return refresh;
+}
+EXPORT_SYMBOL(drm_mode_vrefresh);
+
+/**
+ * drm_mode_set_crtcinfo - set CRTC modesetting parameters
+ * @p: mode
+ * @adjust_flags: unused? (FIXME)
+ *
+ * LOCKING:
+ * None.
+ *
+ * Setup the CRTC modesetting parameters for @p, adjusting if necessary.
+ */
+void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
+{
+       /* NOTE(review): comparing (type & CRTC_C) against BUILTIN looks
+        * odd but matches the historical upstream test — confirm before
+        * "fixing"; it skips builtin modes that lack the CRTC_C bits. */
+       if ((p == NULL) || ((p->type & DRM_MODE_TYPE_CRTC_C) == DRM_MODE_TYPE_BUILTIN))
+               return;
+
+       /* Start from the user-visible timings ... */
+       p->crtc_hdisplay = p->hdisplay;
+       p->crtc_hsync_start = p->hsync_start;
+       p->crtc_hsync_end = p->hsync_end;
+       p->crtc_htotal = p->htotal;
+       p->crtc_hskew = p->hskew;
+       p->crtc_vdisplay = p->vdisplay;
+       p->crtc_vsync_start = p->vsync_start;
+       p->crtc_vsync_end = p->vsync_end;
+       p->crtc_vtotal = p->vtotal;
+
+       /* ... then scale the vertical values per the mode flags. */
+       if (p->flags & DRM_MODE_FLAG_INTERLACE) {
+               if (adjust_flags & CRTC_INTERLACE_HALVE_V) {
+                       p->crtc_vdisplay /= 2;
+                       p->crtc_vsync_start /= 2;
+                       p->crtc_vsync_end /= 2;
+                       p->crtc_vtotal /= 2;
+               }
+
+               /* interlaced modes need an odd vtotal */
+               p->crtc_vtotal |= 1;
+       }
+
+       if (p->flags & DRM_MODE_FLAG_DBLSCAN) {
+               p->crtc_vdisplay *= 2;
+               p->crtc_vsync_start *= 2;
+               p->crtc_vsync_end *= 2;
+               p->crtc_vtotal *= 2;
+       }
+
+       if (p->vscan > 1) {
+               p->crtc_vdisplay *= p->vscan;
+               p->crtc_vsync_start *= p->vscan;
+               p->crtc_vsync_end *= p->vscan;
+               p->crtc_vtotal *= p->vscan;
+       }
+
+       /* Derive the blanking interval from the adjusted timings. */
+       p->crtc_vblank_start = min(p->crtc_vsync_start, p->crtc_vdisplay);
+       p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal);
+       p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay);
+       p->crtc_hblank_end = max(p->crtc_hsync_end, p->crtc_htotal);
+
+       p->crtc_hadjusted = false;
+       p->crtc_vadjusted = false;
+}
+EXPORT_SYMBOL(drm_mode_set_crtcinfo);
+
+
+/**
+ * drm_mode_duplicate - allocate and duplicate an existing mode
+ * @m: mode to duplicate
+ *
+ * LOCKING:
+ * None.
+ *
+ * Just allocate a new mode, copy the existing mode into it, and return
+ * a pointer to it.  Used to create new instances of established modes.
+ */
+struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
+                                           struct drm_display_mode *mode)
+{
+       struct drm_display_mode *nmode;
+       int new_id;
+
+       nmode = drm_mode_create(dev);
+       if (!nmode)
+               return NULL;
+
+       /* The struct copy clobbers the fresh object id and list head;
+        * restore the id and give the copy its own empty list linkage. */
+       new_id = nmode->base.id;
+       *nmode = *mode;
+       nmode->base.id = new_id;
+       INIT_LIST_HEAD(&nmode->head);
+       return nmode;
+}
+EXPORT_SYMBOL(drm_mode_duplicate);
+
+/**
+ * drm_mode_equal - test modes for equality
+ * @mode1: first mode
+ * @mode2: second mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Check to see if @mode1 and @mode2 are equivalent.
+ *
+ * RETURNS:
+ * True if the modes are equal, false otherwise.
+ */
+bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2)
+{
+       /* do clock check convert to PICOS so fb modes get matched
+        * the same */
+       if (mode1->clock && mode2->clock) {
+               /* Compare in picoseconds so kHz clocks that round to the
+                * same fbdev pixclock still count as equal. */
+               if (KHZ2PICOS(mode1->clock) != KHZ2PICOS(mode2->clock))
+                       return false;
+       } else if (mode1->clock != mode2->clock)
+               return false;
+
+       /* All remaining timing fields must match exactly. */
+       if (mode1->hdisplay == mode2->hdisplay &&
+           mode1->hsync_start == mode2->hsync_start &&
+           mode1->hsync_end == mode2->hsync_end &&
+           mode1->htotal == mode2->htotal &&
+           mode1->hskew == mode2->hskew &&
+           mode1->vdisplay == mode2->vdisplay &&
+           mode1->vsync_start == mode2->vsync_start &&
+           mode1->vsync_end == mode2->vsync_end &&
+           mode1->vtotal == mode2->vtotal &&
+           mode1->vscan == mode2->vscan &&
+           mode1->flags == mode2->flags)
+               return true;
+
+       return false;
+}
+EXPORT_SYMBOL(drm_mode_equal);
+
+/**
+ * drm_mode_validate_size - make sure modes adhere to size constraints
+ * @dev: DRM device
+ * @mode_list: list of modes to check
+ * @maxX: maximum width
+ * @maxY: maximum height
+ * @maxPitch: max pitch
+ *
+ * LOCKING:
+ * Caller must hold a lock protecting @mode_list.
+ *
+ * The DRM device (@dev) has size and pitch limits.  Here we validate the
+ * modes we probed for @dev against those limits and set their status as
+ * necessary.
+ */
+void drm_mode_validate_size(struct drm_device *dev,
+                           struct list_head *mode_list,
+                           int maxX, int maxY, int maxPitch)
+{
+       struct drm_display_mode *mode;
+
+       /* Limits of 0 or below are treated as "no limit".  Only the
+        * status field is written; modes are never removed here (see
+        * drm_mode_prune_invalid for that). */
+       list_for_each_entry(mode, mode_list, head) {
+               if (maxPitch > 0 && mode->hdisplay > maxPitch)
+                       mode->status = MODE_BAD_WIDTH;
+
+               if (maxX > 0 && mode->hdisplay > maxX)
+                       mode->status = MODE_VIRTUAL_X;
+
+               if (maxY > 0 && mode->vdisplay > maxY)
+                       mode->status = MODE_VIRTUAL_Y;
+       }
+}
+EXPORT_SYMBOL(drm_mode_validate_size);
+
+/**
+ * drm_mode_validate_clocks - validate modes against clock limits
+ * @dev: DRM device
+ * @mode_list: list of modes to check
+ * @min: minimum clock rate array
+ * @max: maximum clock rate array
+ * @n_ranges: number of clock ranges (size of arrays)
+ *
+ * LOCKING:
+ * Caller must hold a lock protecting @mode_list.
+ *
+ * Some code may need to check a mode list against the clock limits of the
+ * device in question.  This function walks the mode list, testing to make
+ * sure each mode falls within a given range (defined by @min and @max
+ * arrays) and sets @mode->status as needed.
+ */
+void drm_mode_validate_clocks(struct drm_device *dev,
+                             struct list_head *mode_list,
+                             int *min, int *max, int n_ranges)
+{
+       struct drm_display_mode *mode;
+       int i;
+
+       /* A mode is acceptable if its clock fits ANY of the inclusive
+        * [min[i], max[i]] ranges; otherwise mark it out of range. */
+       list_for_each_entry(mode, mode_list, head) {
+               bool good = false;
+               for (i = 0; i < n_ranges; i++) {
+                       if (mode->clock >= min[i] && mode->clock <= max[i]) {
+                               good = true;
+                               break;
+                       }
+               }
+               if (!good)
+                       mode->status = MODE_CLOCK_RANGE;
+       }
+}
+EXPORT_SYMBOL(drm_mode_validate_clocks);
+
+/**
+ * drm_mode_prune_invalid - remove invalid modes from mode list
+ * @dev: DRM device
+ * @mode_list: list of modes to check
+ * @verbose: be verbose about it
+ *
+ * LOCKING:
+ * Caller must hold a lock protecting @mode_list.
+ *
+ * Once mode list generation is complete, a caller can use this routine to
+ * remove invalid modes from a mode list.  If any of the modes have a
+ * status other than %MODE_OK, they are removed from @mode_list and freed.
+ */
+void drm_mode_prune_invalid(struct drm_device *dev,
+                           struct list_head *mode_list, bool verbose)
+{
+       struct drm_display_mode *mode, *t;
+
+       /* _safe iteration: entries are unlinked and freed as we go. */
+       list_for_each_entry_safe(mode, t, mode_list, head) {
+               if (mode->status != MODE_OK) {
+                       list_del(&mode->head);
+                       if (verbose) {
+                               drm_mode_debug_printmodeline(mode);
+                               DRM_DEBUG_KMS("Not using %s mode %d\n",
+                                       mode->name, mode->status);
+                       }
+                       drm_mode_destroy(dev, mode);
+               }
+       }
+}
+EXPORT_SYMBOL(drm_mode_prune_invalid);
+
+/**
+ * drm_mode_compare - compare modes for favorability
+ * @lh_a: list_head for first mode
+ * @lh_b: list_head for second mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Compare two modes, given by @lh_a and @lh_b, returning a value indicating
+ * which is better.
+ *
+ * RETURNS:
+ * Negative if @lh_a is better than @lh_b, zero if they're equivalent, or
+ * positive if @lh_b is better than @lh_a.
+ */
+static int drm_mode_compare(struct list_head *lh_a, struct list_head *lh_b)
+{
+       struct drm_display_mode *a = list_entry(lh_a, struct drm_display_mode, head);
+       struct drm_display_mode *b = list_entry(lh_b, struct drm_display_mode, head);
+       int diff;
+
+       /* Sort keys, in order: preferred flag first, then larger area,
+        * then higher pixel clock ("better" modes sort first). */
+       diff = ((b->type & DRM_MODE_TYPE_PREFERRED) != 0) -
+               ((a->type & DRM_MODE_TYPE_PREFERRED) != 0);
+       if (diff)
+               return diff;
+       diff = b->hdisplay * b->vdisplay - a->hdisplay * a->vdisplay;
+       if (diff)
+               return diff;
+       diff = b->clock - a->clock;
+       return diff;
+}
+
+/* FIXME: what we don't have a list sort function? */
+/* list sort from Mark J Roberts (mjr@znex.org) */
+/*
+ * Bottom-up, in-place merge sort over a circular doubly-linked list.
+ * The @head sentinel is unlinked up front and spliced back at the end,
+ * so during the sort the data nodes form the whole ring.  Each pass
+ * merges runs of length @insize pairwise; when a pass performs at most
+ * one merge the list is fully sorted.  Stable: cmp(p, q) <= 0 keeps
+ * the earlier element first.  NOTE: ->prev links are rebuilt only for
+ * the spine as nodes are appended; statement order here is critical.
+ */
+void list_sort(struct list_head *head,
+              int (*cmp)(struct list_head *a, struct list_head *b))
+{
+       struct list_head *p, *q, *e, *list, *tail, *oldhead;
+       int insize, nmerges, psize, qsize, i;
+
+       /* detach the sentinel; 'list' is the first real node */
+       list = head->next;
+       list_del(head);
+       insize = 1;
+       for (;;) {
+               p = oldhead = list;
+               list = tail = NULL;
+               nmerges = 0;
+
+               while (p) {
+                       nmerges++;
+                       /* advance q by insize nodes to find the second run;
+                        * oldhead marks the wrap-around point of the ring */
+                       q = p;
+                       psize = 0;
+                       for (i = 0; i < insize; i++) {
+                               psize++;
+                               q = q->next == oldhead ? NULL : q->next;
+                               if (!q)
+                                       break;
+                       }
+
+                       /* merge the runs at p (psize) and q (<= qsize) */
+                       qsize = insize;
+                       while (psize > 0 || (qsize > 0 && q)) {
+                               if (!psize) {
+                                       e = q;
+                                       q = q->next;
+                                       qsize--;
+                                       if (q == oldhead)
+                                               q = NULL;
+                               } else if (!qsize || !q) {
+                                       e = p;
+                                       p = p->next;
+                                       psize--;
+                                       if (p == oldhead)
+                                               p = NULL;
+                               } else if (cmp(p, q) <= 0) {
+                                       e = p;
+                                       p = p->next;
+                                       psize--;
+                                       if (p == oldhead)
+                                               p = NULL;
+                               } else {
+                                       e = q;
+                                       q = q->next;
+                                       qsize--;
+                                       if (q == oldhead)
+                                               q = NULL;
+                               }
+                               /* append e to the merged output */
+                               if (tail)
+                                       tail->next = e;
+                               else
+                                       list = e;
+                               e->prev = tail;
+                               tail = e;
+                       }
+                       p = q;
+               }
+
+               /* close the ring for the next pass */
+               tail->next = list;
+               list->prev = tail;
+
+               if (nmerges <= 1)
+                       break;
+
+               insize *= 2;
+       }
+
+       /* splice the sentinel back in front of the sorted ring */
+       head->next = list;
+       head->prev = list->prev;
+       list->prev->next = head;
+       list->prev = head;
+}
+
+/**
+ * drm_mode_sort - sort mode list
+ * @mode_list: list to sort
+ *
+ * LOCKING:
+ * Caller must hold a lock protecting @mode_list.
+ *
+ * Sort @mode_list by favorability, putting good modes first.
+ */
+void drm_mode_sort(struct list_head *mode_list)
+{
+       /* ordering defined by drm_mode_compare(): preferred, area, clock */
+       list_sort(mode_list, drm_mode_compare);
+}
+EXPORT_SYMBOL(drm_mode_sort);
+
+/**
+ * drm_mode_connector_list_update - update the mode list for the connector
+ * @connector: the connector to update
+ *
+ * LOCKING:
+ * Caller must hold a lock protecting @mode_list.
+ *
+ * This moves the modes from the @connector probed_modes list
+ * to the actual mode list. It compares the probed mode against the current
+ * list and only adds different modes. All modes unverified after this point
+ * will be removed by the prune invalid modes.
+ */
+void drm_mode_connector_list_update(struct drm_connector *connector)
+{
+       struct drm_display_mode *mode;
+       struct drm_display_mode *pmode, *pt;
+       int found_it;
+
+       /* _safe: probed entries are either destroyed or moved below */
+       list_for_each_entry_safe(pmode, pt, &connector->probed_modes,
+                                head) {
+               found_it = 0;
+               /* go through current modes checking for the new probed mode */
+               list_for_each_entry(mode, &connector->modes, head) {
+                       if (drm_mode_equal(pmode, mode)) {
+                               found_it = 1;
+                               /* if equal delete the probed mode */
+                               mode->status = pmode->status;
+                               /* Merge type bits together */
+                               mode->type |= pmode->type;
+                               list_del(&pmode->head);
+                               drm_mode_destroy(connector->dev, pmode);
+                               break;
+                       }
+               }
+
+               /* genuinely new mode: transfer it to the real list */
+               if (!found_it) {
+                       list_move_tail(&pmode->head, &connector->modes);
+               }
+       }
+}
+EXPORT_SYMBOL(drm_mode_connector_list_update);
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_pci.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_pci.c
new file mode 100644 (file)
index 0000000..577094f
--- /dev/null
@@ -0,0 +1,132 @@
+/* drm_pci.h -- PCI DMA memory management wrappers for DRM -*- linux-c -*- */
+/**
+ * \file drm_pci.c
+ * \brief Functions and ioctls to manage PCI memory
+ *
+ * \warning These interfaces aren't stable yet.
+ *
+ * \todo Implement the remaining ioctl's for the PCI pools.
+ * \todo The wrappers here are so thin that they would be better off inlined..
+ *
+ * \author José Fonseca <jrfonseca@tungstengraphics.com>
+ * \author Leif Delgass <ldelgass@retinalburn.net>
+ */
+
+/*
+ * Copyright 2003 José Fonseca.
+ * Copyright 2003 Leif Delgass.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
+ * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include "drmP.h"
+
+/**********************************************************************/
+/** \name PCI memory */
+/*@{*/
+
+/**
+ * \brief Allocate a PCI consistent memory block, for DMA.
+ */
+drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align,
+                               dma_addr_t maxaddr)
+{
+       drm_dma_handle_t *dmah;
+#if 1
+       unsigned long addr;
+       size_t sz;
+#endif
+
+       /* pci_alloc_consistent only guarantees alignment to the smallest
+        * PAGE_SIZE order which is greater than or equal to the requested size.
+        * Return NULL here for now to make sure nobody tries for larger alignment
+        */
+       if (align > size)
+               return NULL;
+
+       /* NOTE(review): this changes the device's DMA mask as a side
+        * effect and never restores it — verify callers expect that. */
+       if (pci_set_dma_mask(dev->pdev, maxaddr) != 0) {
+               DRM_ERROR("Setting pci dma mask failed\n");
+               return NULL;
+       }
+
+       dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
+       if (!dmah)
+               return NULL;
+
+       dmah->size = size;
+       dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, &dmah->busaddr, GFP_KERNEL | __GFP_COMP);
+
+       if (dmah->vaddr == NULL) {
+               kfree(dmah);
+               return NULL;
+       }
+
+       memset(dmah->vaddr, 0, size);
+
+       /* XXX - Is virt_to_page() legal for consistent mem? */
+       /* Reserve */
+       /* Mark every backing page reserved so it can be mmapped;
+        * __drm_pci_free() performs the matching ClearPageReserved(). */
+       for (addr = (unsigned long)dmah->vaddr, sz = size;
+            sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
+               SetPageReserved(virt_to_page(addr));
+       }
+
+       return dmah;
+}
+
+EXPORT_SYMBOL(drm_pci_alloc);
+
+/**
+ * \brief Free a PCI consistent memory block without freeing its descriptor.
+ *
+ * This function is for internal use in the Linux-specific DRM core code.
+ */
+void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
+{
+#if 1
+       unsigned long addr;
+       size_t sz;
+#endif
+
+       /* dmah->vaddr may be NULL if allocation failed; nothing to do then */
+       if (dmah->vaddr) {
+               /* XXX - Is virt_to_page() legal for consistent mem? */
+               /* Unreserve */
+               /* Undo the SetPageReserved() done at allocation time. */
+               for (addr = (unsigned long)dmah->vaddr, sz = dmah->size;
+                    sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
+                       ClearPageReserved(virt_to_page(addr));
+               }
+               dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
+                                 dmah->busaddr);
+       }
+}
+
+/**
+ * \brief Free a PCI consistent memory block
+ */
+void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
+{
+       /* release the DMA memory, then the descriptor itself */
+       __drm_pci_free(dev, dmah);
+       kfree(dmah);
+}
+
+EXPORT_SYMBOL(drm_pci_free);
+
+/*@}*/
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_proc.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_proc.c
new file mode 100644 (file)
index 0000000..d379c4f
--- /dev/null
@@ -0,0 +1,234 @@
+/**
+ * \file drm_proc.c
+ * /proc support for DRM
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ *
+ * \par Acknowledgements:
+ *    Matthew J Sottek <matthew.j.sottek@intel.com> sent in a patch to fix
+ *    the problem with the proc files not outputting all their information.
+ */
+
+/*
+ * Created: Mon Jan 11 09:48:47 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/seq_file.h>
+#include "drmP.h"
+
+/***************************************************
+ * Initialization, etc.
+ **************************************************/
+
+/**
+ * Proc file list.
+ */
+static struct drm_info_list drm_proc_list[] = {
+       {"name", drm_name_info, 0},
+       {"vm", drm_vm_info, 0},
+       {"clients", drm_clients_info, 0},
+       {"queues", drm_queues_info, 0},
+       {"bufs", drm_bufs_info, 0},
+       /* GEM entries only appear for drivers advertising DRIVER_GEM */
+       {"gem_names", drm_gem_name_info, DRIVER_GEM},
+       {"gem_objects", drm_gem_object_info, DRIVER_GEM},
+#if DRM_DEBUG_CODE
+       {"vma", drm_vma_info, 0},
+#endif
+};
+#define DRM_PROC_ENTRIES ARRAY_SIZE(drm_proc_list)
+
+/* seq_file open: the proc entry's data is the drm_info_node whose
+ * info_ent->show() generates the file contents. */
+static int drm_proc_open(struct inode *inode, struct file *file)
+{
+       struct drm_info_node* node = PDE(inode)->data;
+
+       return single_open(file, node->info_ent->show, node);
+}
+
+/* Shared fops for every DRM proc entry; single_open pairs with
+ * single_release. */
+static const struct file_operations drm_proc_fops = {
+       .owner = THIS_MODULE,
+       .open = drm_proc_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
+
+
+/**
+ * Initialize a given set of proc files for a device
+ *
+ * \param files The array of files to create
+ * \param count The number of files given
+ * \param root DRI proc dir entry.
+ * \param minor device minor number
+ * \return Zero on success, non-zero on failure
+ *
+ * Create a given set of proc files represented by an array of
+ * gdm_proc_lists in the given root directory.
+ */
+int drm_proc_create_files(struct drm_info_list *files, int count,
+                         struct proc_dir_entry *root, struct drm_minor *minor)
+{
+       struct drm_device *dev = minor->dev;
+       struct proc_dir_entry *ent;
+       struct drm_info_node *tmp;
+       int i, ret;
+
+       for (i = 0; i < count; i++) {
+               u32 features = files[i].driver_features;
+
+               /* skip entries whose required driver features are absent */
+               if (features != 0 &&
+                   (dev->driver->driver_features & features) != features)
+                       continue;
+
+               tmp = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
+               if (tmp == NULL) {
+                       ret = -1;
+                       goto fail;
+               }
+               tmp->minor = minor;
+               tmp->info_ent = &files[i];
+               list_add(&tmp->list, &minor->proc_nodes.list);
+
+               ent = proc_create_data(files[i].name, S_IRUGO, root,
+                                      &drm_proc_fops, tmp);
+               if (!ent) {
+                       /* was printing an uninitialized local name buffer;
+                        * report the minor index instead */
+                       DRM_ERROR("Cannot create /proc/dri/%d/%s\n",
+                                 minor->index, files[i].name);
+                       list_del(&tmp->list);
+                       kfree(tmp);
+                       ret = -1;
+                       goto fail;
+               }
+
+       }
+       return 0;
+
+fail:
+       /* Unwind against the caller-supplied table (not the global
+        * drm_proc_list) and against the directory we created in, so
+        * driver-private file sets are cleaned up correctly too. */
+       for (i = 0; i < count; i++)
+               remove_proc_entry(files[i].name, root);
+       return ret;
+}
+
+/**
+ * Initialize the DRI proc filesystem for a device
+ *
+ * \param dev DRM device
+ * \param minor device minor number
+ * \param root DRI proc dir entry.
+ * \param dev_root resulting DRI device proc dir entry.
+ * \return root entry pointer on success, or NULL on failure.
+ *
+ * Create the DRI proc root entry "/proc/dri", the device proc root entry
+ * "/proc/dri/%minor%/", and each entry in proc_list as
+ * "/proc/dri/%minor%/%name%".
+ */
+int drm_proc_init(struct drm_minor *minor, int minor_id,
+                 struct proc_dir_entry *root)
+{
+       struct drm_device *dev = minor->dev;
+       char name[64];
+       int ret;
+
+       INIT_LIST_HEAD(&minor->proc_nodes.list);
+       /* per-minor directory is named after the minor number */
+       sprintf(name, "%d", minor_id);
+       minor->proc_root = proc_mkdir(name, root);
+       if (!minor->proc_root) {
+               DRM_ERROR("Cannot create /proc/dri/%s\n", name);
+               return -1;
+       }
+
+       ret = drm_proc_create_files(drm_proc_list, DRM_PROC_ENTRIES,
+                                   minor->proc_root, minor);
+       if (ret) {
+               remove_proc_entry(name, root);
+               minor->proc_root = NULL;
+               DRM_ERROR("Failed to create core drm proc files\n");
+               return ret;
+       }
+
+       /* optional driver hook for driver-specific proc entries */
+       if (dev->driver->proc_init) {
+               ret = dev->driver->proc_init(minor);
+               if (ret) {
+                       /* NOTE(review): the core proc files created above
+                        * are not removed on this path — confirm whether
+                        * the caller invokes drm_proc_cleanup() on error. */
+                       DRM_ERROR("DRM: Driver failed to initialize "
+                                 "/proc/dri.\n");
+                       return ret;
+               }
+       }
+       return 0;
+}
+
+/* Remove the proc entries previously created by drm_proc_create_files()
+ * for the given @files table, freeing their tracking nodes.  Entries in
+ * @files that were never created are simply not found and skipped. */
+int drm_proc_remove_files(struct drm_info_list *files, int count,
+                         struct drm_minor *minor)
+{
+       struct list_head *pos, *q;
+       struct drm_info_node *tmp;
+       int i;
+
+       for (i = 0; i < count; i++) {
+               /* _safe walk: matching nodes are unlinked and freed */
+               list_for_each_safe(pos, q, &minor->proc_nodes.list) {
+                       tmp = list_entry(pos, struct drm_info_node, list);
+                       if (tmp->info_ent == &files[i]) {
+                               remove_proc_entry(files[i].name,
+                                                 minor->proc_root);
+                               list_del(pos);
+                               kfree(tmp);
+                       }
+               }
+       }
+       return 0;
+}
+
+/**
+ * Cleanup the proc filesystem resources.
+ *
+ * \param minor device minor number.
+ * \param root DRI proc dir entry.
+ * \param dev_root DRI device proc dir entry.
+ * \return always zero.
+ *
+ * Remove all proc entries created by proc_init().
+ */
+int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root)
+{
+       struct drm_device *dev = minor->dev;
+       char name[64];
+
+       /* nothing was created, nothing to tear down */
+       if (!root || !minor->proc_root)
+               return 0;
+
+       /* driver-specific entries first, mirroring proc_init() order */
+       if (dev->driver->proc_cleanup)
+               dev->driver->proc_cleanup(minor);
+
+       drm_proc_remove_files(drm_proc_list, DRM_PROC_ENTRIES, minor);
+
+       /* finally remove the per-minor "/proc/dri/<index>" directory */
+       sprintf(name, "%d", minor->index);
+       remove_proc_entry(name, root);
+
+       return 0;
+}
+
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_scatter.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_scatter.c
new file mode 100644 (file)
index 0000000..c7823c8
--- /dev/null
@@ -0,0 +1,218 @@
+/**
+ * \file drm_scatter.c
+ * IOCTLs to manage scatter/gather memory
+ *
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Mon Dec 18 23:20:54 2000 by gareth@valinux.com
+ *
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/vmalloc.h>
+#include "drmP.h"
+
+#define DEBUG_SCATTER 0
+
+static inline void *drm_vmalloc_dma(unsigned long size)
+{
+#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
+       return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL | _PAGE_NO_CACHE);
+#else
+       return vmalloc_32(size);
+#endif
+}
+
+void drm_sg_cleanup(struct drm_sg_mem * entry)
+{
+       struct page *page;
+       int i;
+
+       for (i = 0; i < entry->pages; i++) {
+               page = entry->pagelist[i];
+               if (page)
+                       ClearPageReserved(page);
+       }
+
+       vfree(entry->virtual);
+
+       kfree(entry->busaddr);
+       kfree(entry->pagelist);
+       kfree(entry);
+}
+
+#ifdef _LP64
+# define ScatterHandle(x) (unsigned int)((x >> 32) + (x & ((1L << 32) - 1)))
+#else
+# define ScatterHandle(x) (unsigned int)(x)
+#endif
+
+int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
+{
+       struct drm_sg_mem *entry;
+       unsigned long pages, i, j;
+
+       DRM_DEBUG("\n");
+
+       if (!drm_core_check_feature(dev, DRIVER_SG))
+               return -EINVAL;
+
+       if (dev->sg)
+               return -EINVAL;
+
+       entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+       if (!entry)
+               return -ENOMEM;
+
+       memset(entry, 0, sizeof(*entry));
+       pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
+       DRM_DEBUG("size=%ld pages=%ld\n", request->size, pages);
+
+       entry->pages = pages;
+       entry->pagelist = kmalloc(pages * sizeof(*entry->pagelist), GFP_KERNEL);
+       if (!entry->pagelist) {
+               kfree(entry);
+               return -ENOMEM;
+       }
+
+       memset(entry->pagelist, 0, pages * sizeof(*entry->pagelist));
+
+       entry->busaddr = kmalloc(pages * sizeof(*entry->busaddr), GFP_KERNEL);
+       if (!entry->busaddr) {
+               kfree(entry->pagelist);
+               kfree(entry);
+               return -ENOMEM;
+       }
+       memset((void *)entry->busaddr, 0, pages * sizeof(*entry->busaddr));
+
+       entry->virtual = drm_vmalloc_dma(pages << PAGE_SHIFT);
+       if (!entry->virtual) {
+               kfree(entry->busaddr);
+               kfree(entry->pagelist);
+               kfree(entry);
+               return -ENOMEM;
+       }
+
+       /* This also forces the mapping of COW pages, so our page list
+        * will be valid.  Please don't remove it...
+        */
+       memset(entry->virtual, 0, pages << PAGE_SHIFT);
+
+       entry->handle = ScatterHandle((unsigned long)entry->virtual);
+
+       DRM_DEBUG("handle  = %08lx\n", entry->handle);
+       DRM_DEBUG("virtual = %p\n", entry->virtual);
+
+       for (i = (unsigned long)entry->virtual, j = 0; j < pages;
+            i += PAGE_SIZE, j++) {
+               entry->pagelist[j] = vmalloc_to_page((void *)i);
+               if (!entry->pagelist[j])
+                       goto failed;
+               SetPageReserved(entry->pagelist[j]);
+       }
+
+       request->handle = entry->handle;
+
+       dev->sg = entry;
+
+#if DEBUG_SCATTER
+       /* Verify that each page points to its virtual address, and vice
+        * versa.
+        */
+       {
+               int error = 0;
+
+               for (i = 0; i < pages; i++) {
+                       unsigned long *tmp;
+
+                       tmp = page_address(entry->pagelist[i]);
+                       for (j = 0;
+                            j < PAGE_SIZE / sizeof(unsigned long);
+                            j++, tmp++) {
+                               *tmp = 0xcafebabe;
+                       }
+                       tmp = (unsigned long *)((u8 *) entry->virtual +
+                                               (PAGE_SIZE * i));
+                       for (j = 0;
+                            j < PAGE_SIZE / sizeof(unsigned long);
+                            j++, tmp++) {
+                               if (*tmp != 0xcafebabe && error == 0) {
+                                       error = 1;
+                                       DRM_ERROR("Scatter allocation error, "
+                                                 "pagelist does not match "
+                                                 "virtual mapping\n");
+                               }
+                       }
+                       tmp = page_address(entry->pagelist[i]);
+                       for (j = 0;
+                            j < PAGE_SIZE / sizeof(unsigned long);
+                            j++, tmp++) {
+                               *tmp = 0;
+                       }
+               }
+               if (error == 0)
+                       DRM_ERROR("Scatter allocation matches pagelist\n");
+       }
+#endif
+
+       return 0;
+
+      failed:
+       drm_sg_cleanup(entry);
+       return -ENOMEM;
+}
+EXPORT_SYMBOL(drm_sg_alloc);
+
+
+int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv)
+{
+       struct drm_scatter_gather *request = data;
+
+       return drm_sg_alloc(dev, request);
+
+}
+
+int drm_sg_free(struct drm_device *dev, void *data,
+               struct drm_file *file_priv)
+{
+       struct drm_scatter_gather *request = data;
+       struct drm_sg_mem *entry;
+
+       if (!drm_core_check_feature(dev, DRIVER_SG))
+               return -EINVAL;
+
+       entry = dev->sg;
+       dev->sg = NULL;
+
+       if (!entry || entry->handle != request->handle)
+               return -EINVAL;
+
+       DRM_DEBUG("virtual  = %p\n", entry->virtual);
+
+       drm_sg_cleanup(entry);
+
+       return 0;
+}
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_sman.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_sman.c
new file mode 100644 (file)
index 0000000..463aed9
--- /dev/null
@@ -0,0 +1,352 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck., ND., USA.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Simple memory manager interface that keeps track on allocate regions on a
+ * per "owner" basis. All regions associated with an "owner" can be released
+ * with a simple call. Typically if the "owner" exists. The owner is any
+ * "unsigned long" identifier. Can typically be a pointer to a file private
+ * struct or a context identifier.
+ *
+ * Authors:
+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include "drm_sman.h"
+
+struct drm_owner_item {
+       struct drm_hash_item owner_hash;
+       struct list_head sman_list;
+       struct list_head mem_blocks;
+};
+
+void drm_sman_takedown(struct drm_sman * sman)
+{
+       drm_ht_remove(&sman->user_hash_tab);
+       drm_ht_remove(&sman->owner_hash_tab);
+       kfree(sman->mm);
+}
+
+EXPORT_SYMBOL(drm_sman_takedown);
+
+int
+drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
+             unsigned int user_order, unsigned int owner_order)
+{
+       int ret = 0;
+
+       sman->mm = (struct drm_sman_mm *) kcalloc(num_managers,
+                                                 sizeof(*sman->mm),
+                                                 GFP_KERNEL);
+       if (!sman->mm) {
+               ret = -ENOMEM;
+               goto out;
+       }
+       sman->num_managers = num_managers;
+       INIT_LIST_HEAD(&sman->owner_items);
+       ret = drm_ht_create(&sman->owner_hash_tab, owner_order);
+       if (ret)
+               goto out1;
+       ret = drm_ht_create(&sman->user_hash_tab, user_order);
+       if (!ret)
+               goto out;
+
+       drm_ht_remove(&sman->owner_hash_tab);
+out1:
+       kfree(sman->mm);
+out:
+       return ret;
+}
+
+EXPORT_SYMBOL(drm_sman_init);
+
+static void *drm_sman_mm_allocate(void *private, unsigned long size,
+                                 unsigned alignment)
+{
+       struct drm_mm *mm = (struct drm_mm *) private;
+       struct drm_mm_node *tmp;
+
+       tmp = drm_mm_search_free(mm, size, alignment, 1);
+       if (!tmp) {
+               return NULL;
+       }
+       tmp = drm_mm_get_block(tmp, size, alignment);
+       return tmp;
+}
+
+static void drm_sman_mm_free(void *private, void *ref)
+{
+       struct drm_mm_node *node = (struct drm_mm_node *) ref;
+
+       drm_mm_put_block(node);
+}
+
+static void drm_sman_mm_destroy(void *private)
+{
+       struct drm_mm *mm = (struct drm_mm *) private;
+       drm_mm_takedown(mm);
+       kfree(mm);
+}
+
+static unsigned long drm_sman_mm_offset(void *private, void *ref)
+{
+       struct drm_mm_node *node = (struct drm_mm_node *) ref;
+       return node->start;
+}
+
+int
+drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
+                  unsigned long start, unsigned long size)
+{
+       struct drm_sman_mm *sman_mm;
+       struct drm_mm *mm;
+       int ret;
+
+       BUG_ON(manager >= sman->num_managers);
+
+       sman_mm = &sman->mm[manager];
+       mm = kzalloc(sizeof(*mm), GFP_KERNEL);
+       if (!mm) {
+               return -ENOMEM;
+       }
+       sman_mm->private = mm;
+       ret = drm_mm_init(mm, start, size);
+
+       if (ret) {
+               kfree(mm);
+               return ret;
+       }
+
+       sman_mm->allocate = drm_sman_mm_allocate;
+       sman_mm->free = drm_sman_mm_free;
+       sman_mm->destroy = drm_sman_mm_destroy;
+       sman_mm->offset = drm_sman_mm_offset;
+
+       return 0;
+}
+
+EXPORT_SYMBOL(drm_sman_set_range);
+
+int
+drm_sman_set_manager(struct drm_sman * sman, unsigned int manager,
+                    struct drm_sman_mm * allocator)
+{
+       BUG_ON(manager >= sman->num_managers);
+       sman->mm[manager] = *allocator;
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_sman_set_manager);
+
+static struct drm_owner_item *drm_sman_get_owner_item(struct drm_sman * sman,
+                                                unsigned long owner)
+{
+       int ret;
+       struct drm_hash_item *owner_hash_item;
+       struct drm_owner_item *owner_item;
+
+       ret = drm_ht_find_item(&sman->owner_hash_tab, owner, &owner_hash_item);
+       if (!ret) {
+               return drm_hash_entry(owner_hash_item, struct drm_owner_item,
+                                     owner_hash);
+       }
+
+       owner_item = kzalloc(sizeof(*owner_item), GFP_KERNEL);
+       if (!owner_item)
+               goto out;
+
+       INIT_LIST_HEAD(&owner_item->mem_blocks);
+       owner_item->owner_hash.key = owner;
+       if (drm_ht_insert_item(&sman->owner_hash_tab, &owner_item->owner_hash))
+               goto out1;
+
+       list_add_tail(&owner_item->sman_list, &sman->owner_items);
+       return owner_item;
+
+out1:
+       kfree(owner_item);
+out:
+       return NULL;
+}
+
+struct drm_memblock_item *drm_sman_alloc(struct drm_sman *sman, unsigned int manager,
+                                   unsigned long size, unsigned alignment,
+                                   unsigned long owner)
+{
+       void *tmp;
+       struct drm_sman_mm *sman_mm;
+       struct drm_owner_item *owner_item;
+       struct drm_memblock_item *memblock;
+
+       BUG_ON(manager >= sman->num_managers);
+
+       sman_mm = &sman->mm[manager];
+       tmp = sman_mm->allocate(sman_mm->private, size, alignment);
+
+       if (!tmp) {
+               return NULL;
+       }
+
+       memblock = kzalloc(sizeof(*memblock), GFP_KERNEL);
+
+       if (!memblock)
+               goto out;
+
+       memblock->mm_info = tmp;
+       memblock->mm = sman_mm;
+       memblock->sman = sman;
+
+       if (drm_ht_just_insert_please
+           (&sman->user_hash_tab, &memblock->user_hash,
+            (unsigned long)memblock, 32, 0, 0))
+               goto out1;
+
+       owner_item = drm_sman_get_owner_item(sman, owner);
+       if (!owner_item)
+               goto out2;
+
+       list_add_tail(&memblock->owner_list, &owner_item->mem_blocks);
+
+       return memblock;
+
+out2:
+       drm_ht_remove_item(&sman->user_hash_tab, &memblock->user_hash);
+out1:
+       kfree(memblock);
+out:
+       sman_mm->free(sman_mm->private, tmp);
+
+       return NULL;
+}
+
+EXPORT_SYMBOL(drm_sman_alloc);
+
+static void drm_sman_free(struct drm_memblock_item *item)
+{
+       struct drm_sman *sman = item->sman;
+
+       list_del(&item->owner_list);
+       drm_ht_remove_item(&sman->user_hash_tab, &item->user_hash);
+       item->mm->free(item->mm->private, item->mm_info);
+       kfree(item);
+}
+
+int drm_sman_free_key(struct drm_sman *sman, unsigned int key)
+{
+       struct drm_hash_item *hash_item;
+       struct drm_memblock_item *memblock_item;
+
+       if (drm_ht_find_item(&sman->user_hash_tab, key, &hash_item))
+               return -EINVAL;
+
+       memblock_item = drm_hash_entry(hash_item, struct drm_memblock_item,
+                                      user_hash);
+       drm_sman_free(memblock_item);
+       return 0;
+}
+
+EXPORT_SYMBOL(drm_sman_free_key);
+
+static void drm_sman_remove_owner(struct drm_sman *sman,
+                                 struct drm_owner_item *owner_item)
+{
+       list_del(&owner_item->sman_list);
+       drm_ht_remove_item(&sman->owner_hash_tab, &owner_item->owner_hash);
+       kfree(owner_item);
+}
+
+int drm_sman_owner_clean(struct drm_sman *sman, unsigned long owner)
+{
+
+       struct drm_hash_item *hash_item;
+       struct drm_owner_item *owner_item;
+
+       if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) {
+               return -1;
+       }
+
+       owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash);
+       if (owner_item->mem_blocks.next == &owner_item->mem_blocks) {
+               drm_sman_remove_owner(sman, owner_item);
+               return -1;
+       }
+
+       return 0;
+}
+
+EXPORT_SYMBOL(drm_sman_owner_clean);
+
+static void drm_sman_do_owner_cleanup(struct drm_sman *sman,
+                                     struct drm_owner_item *owner_item)
+{
+       struct drm_memblock_item *entry, *next;
+
+       list_for_each_entry_safe(entry, next, &owner_item->mem_blocks,
+                                owner_list) {
+               drm_sman_free(entry);
+       }
+       drm_sman_remove_owner(sman, owner_item);
+}
+
+void drm_sman_owner_cleanup(struct drm_sman *sman, unsigned long owner)
+{
+
+       struct drm_hash_item *hash_item;
+       struct drm_owner_item *owner_item;
+
+       if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) {
+
+               return;
+       }
+
+       owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash);
+       drm_sman_do_owner_cleanup(sman, owner_item);
+}
+
+EXPORT_SYMBOL(drm_sman_owner_cleanup);
+
+void drm_sman_cleanup(struct drm_sman *sman)
+{
+       struct drm_owner_item *entry, *next;
+       unsigned int i;
+       struct drm_sman_mm *sman_mm;
+
+       list_for_each_entry_safe(entry, next, &sman->owner_items, sman_list) {
+               drm_sman_do_owner_cleanup(sman, entry);
+       }
+       if (sman->mm) {
+               for (i = 0; i < sman->num_managers; ++i) {
+                       sman_mm = &sman->mm[i];
+                       if (sman_mm->private) {
+                               sman_mm->destroy(sman_mm->private);
+                               sman_mm->private = NULL;
+                       }
+               }
+       }
+}
+
+EXPORT_SYMBOL(drm_sman_cleanup);
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_stub.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_stub.c
new file mode 100644 (file)
index 0000000..ad73e14
--- /dev/null
@@ -0,0 +1,559 @@
+/**
+ * \file drm_stub.h
+ * Stub support
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ */
+
+/*
+ * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
+ *
+ * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include "drmP.h"
+#include "drm_core.h"
+
+unsigned int drm_debug = 0;    /* 1 to enable debug output */
+EXPORT_SYMBOL(drm_debug);
+
+MODULE_AUTHOR(CORE_AUTHOR);
+MODULE_DESCRIPTION(CORE_DESC);
+MODULE_LICENSE("GPL and additional rights");
+MODULE_PARM_DESC(debug, "Enable debug output");
+
+module_param_named(debug, drm_debug, int, 0600);
+
+struct idr drm_minors_idr;
+
+struct class *drm_class;
+struct proc_dir_entry *drm_proc_root;
+struct dentry *drm_debugfs_root;
+void drm_ut_debug_printk(unsigned int request_level,
+                        const char *prefix,
+                        const char *function_name,
+                        const char *format, ...)
+{
+       va_list args;
+
+       if (drm_debug & request_level) {
+               if (function_name)
+                       printk(KERN_DEBUG "[%s:%s], ", prefix, function_name);
+               va_start(args, format);
+               vprintk(format, args);
+               va_end(args);
+       }
+}
+EXPORT_SYMBOL(drm_ut_debug_printk);
+static int drm_minor_get_id(struct drm_device *dev, int type)
+{
+       int new_id;
+       int ret;
+       int base = 0, limit = 63;
+
+       if (type == DRM_MINOR_CONTROL) {
+                base += 64;
+                limit = base + 127;
+        } else if (type == DRM_MINOR_RENDER) {
+                base += 128;
+                limit = base + 255;
+        }
+
+again:
+       if (idr_pre_get(&drm_minors_idr, GFP_KERNEL) == 0) {
+               DRM_ERROR("Out of memory expanding drawable idr\n");
+               return -ENOMEM;
+       }
+       mutex_lock(&dev->struct_mutex);
+       ret = idr_get_new_above(&drm_minors_idr, NULL,
+                               base, &new_id);
+       mutex_unlock(&dev->struct_mutex);
+       if (ret == -EAGAIN) {
+               goto again;
+       } else if (ret) {
+               return ret;
+       }
+
+       if (new_id >= limit) {
+               idr_remove(&drm_minors_idr, new_id);
+               return -EINVAL;
+       }
+       return new_id;
+}
+
+struct drm_master *drm_master_create(struct drm_minor *minor)
+{
+       struct drm_master *master;
+
+       master = kzalloc(sizeof(*master), GFP_KERNEL);
+       if (!master)
+               return NULL;
+
+       kref_init(&master->refcount);
+       spin_lock_init(&master->lock.spinlock);
+       init_waitqueue_head(&master->lock.lock_queue);
+       drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER);
+       INIT_LIST_HEAD(&master->magicfree);
+       master->minor = minor;
+
+       list_add_tail(&master->head, &minor->master_list);
+
+       return master;
+}
+
+struct drm_master *drm_master_get(struct drm_master *master)
+{
+       kref_get(&master->refcount);
+       return master;
+}
+EXPORT_SYMBOL(drm_master_get);
+
+static void drm_master_destroy(struct kref *kref)
+{
+       struct drm_master *master = container_of(kref, struct drm_master, refcount);
+       struct drm_magic_entry *pt, *next;
+       struct drm_device *dev = master->minor->dev;
+       struct drm_map_list *r_list, *list_temp;
+
+       list_del(&master->head);
+
+       if (dev->driver->master_destroy)
+               dev->driver->master_destroy(dev, master);
+
+       list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
+               if (r_list->master == master) {
+                       drm_rmmap_locked(dev, r_list->map);
+                       r_list = NULL;
+               }
+       }
+
+       if (master->unique) {
+               kfree(master->unique);
+               master->unique = NULL;
+               master->unique_len = 0;
+       }
+
+       list_for_each_entry_safe(pt, next, &master->magicfree, head) {
+               list_del(&pt->head);
+               drm_ht_remove_item(&master->magiclist, &pt->hash_item);
+               kfree(pt);
+       }
+
+       drm_ht_remove(&master->magiclist);
+
+       kfree(master);
+}
+
+void drm_master_put(struct drm_master **master)
+{
+       kref_put(&(*master)->refcount, drm_master_destroy);
+       *master = NULL;
+}
+EXPORT_SYMBOL(drm_master_put);
+
+int drm_setmaster_ioctl(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv)
+{
+       int ret = 0;
+
+       if (file_priv->is_master)
+               return 0;
+
+       if (file_priv->minor->master && file_priv->minor->master != file_priv->master)
+               return -EINVAL;
+
+       if (!file_priv->master)
+               return -EINVAL;
+
+       if (!file_priv->minor->master &&
+           file_priv->minor->master != file_priv->master) {
+               mutex_lock(&dev->struct_mutex);
+               file_priv->minor->master = drm_master_get(file_priv->master);
+               file_priv->is_master = 1;
+               if (dev->driver->master_set) {
+                       ret = dev->driver->master_set(dev, file_priv, false);
+                       if (unlikely(ret != 0)) {
+                               file_priv->is_master = 0;
+                               drm_master_put(&file_priv->minor->master);
+                       }
+               }
+               mutex_unlock(&dev->struct_mutex);
+       }
+
+       return 0;
+}
+
+int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv)
+{
+       if (!file_priv->is_master)
+               return -EINVAL;
+
+       if (!file_priv->minor->master)
+               return -EINVAL;
+
+       mutex_lock(&dev->struct_mutex);
+       if (dev->driver->master_drop)
+               dev->driver->master_drop(dev, file_priv, false);
+       drm_master_put(&file_priv->minor->master);
+       file_priv->is_master = 0;
+       mutex_unlock(&dev->struct_mutex);
+       return 0;
+}
+
+static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
+                          const struct pci_device_id *ent,
+                          struct drm_driver *driver)
+{
+       int retcode;
+
+       INIT_LIST_HEAD(&dev->filelist);
+       INIT_LIST_HEAD(&dev->ctxlist);
+       INIT_LIST_HEAD(&dev->vmalist);
+       INIT_LIST_HEAD(&dev->maplist);
+       INIT_LIST_HEAD(&dev->vblank_event_list);
+
+       spin_lock_init(&dev->count_lock);
+       spin_lock_init(&dev->drw_lock);
+       spin_lock_init(&dev->event_lock);
+       init_timer(&dev->timer);
+       mutex_init(&dev->struct_mutex);
+       mutex_init(&dev->ctxlist_mutex);
+
+       idr_init(&dev->drw_idr);
+
+       dev->pdev = pdev;
+       dev->pci_device = pdev->device;
+       dev->pci_vendor = pdev->vendor;
+
+#ifdef __alpha__
+       dev->hose = pdev->sysdata;
+#endif
+
+       if (drm_ht_create(&dev->map_hash, 12)) {
+               return -ENOMEM;
+       }
+
+       /* the DRM has 6 basic counters */
+       dev->counters = 6;
+       dev->types[0] = _DRM_STAT_LOCK;
+       dev->types[1] = _DRM_STAT_OPENS;
+       dev->types[2] = _DRM_STAT_CLOSES;
+       dev->types[3] = _DRM_STAT_IOCTLS;
+       dev->types[4] = _DRM_STAT_LOCKS;
+       dev->types[5] = _DRM_STAT_UNLOCKS;
+
+       dev->driver = driver;
+
+       if (drm_core_has_AGP(dev)) {
+               if (drm_device_is_agp(dev))
+                       dev->agp = drm_agp_init(dev);
+               if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP)
+                   && (dev->agp == NULL)) {
+                       DRM_ERROR("Cannot initialize the agpgart module.\n");
+                       retcode = -EINVAL;
+                       goto error_out_unreg;
+               }
+               if (drm_core_has_MTRR(dev)) {
+                       if (dev->agp)
+                               dev->agp->agp_mtrr =
+                                   mtrr_add(dev->agp->agp_info.aper_base,
+                                            dev->agp->agp_info.aper_size *
+                                            1024 * 1024, MTRR_TYPE_WRCOMB, 1);
+               }
+       }
+
+
+       retcode = drm_ctxbitmap_init(dev);
+       if (retcode) {
+               DRM_ERROR("Cannot allocate memory for context bitmap.\n");
+               goto error_out_unreg;
+       }
+
+       if (driver->driver_features & DRIVER_GEM) {
+               retcode = drm_gem_init(dev);
+               if (retcode) {
+                       DRM_ERROR("Cannot initialize graphics execution "
+                                 "manager (GEM)\n");
+                       goto error_out_unreg;
+               }
+       }
+
+       return 0;
+
+      error_out_unreg:
+       drm_lastclose(dev);
+       return retcode;
+}
+
+
+/**
+ * Get a secondary minor number.
+ *
+ * \param dev device data structure
+ * \param sec-minor structure to hold the assigned minor
+ * \return negative number on failure.
+ *
+ * Search an empty entry and initialize it to the given parameters, and
+ * create the proc init entry via proc_init(). This routines assigns
+ * minor numbers to secondary heads of multi-headed cards
+ */
+static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
+{
+       struct drm_minor *new_minor;
+       int ret;
+       int minor_id;
+
+       DRM_DEBUG("\n");
+
+       minor_id = drm_minor_get_id(dev, type);
+       if (minor_id < 0)
+               return minor_id;
+
+       new_minor = kzalloc(sizeof(struct drm_minor), GFP_KERNEL);
+       if (!new_minor) {
+               ret = -ENOMEM;
+               goto err_idr;
+       }
+
+       new_minor->type = type;
+       new_minor->device = MKDEV(DRM_MAJOR, minor_id);
+       new_minor->dev = dev;
+       new_minor->index = minor_id;
+       INIT_LIST_HEAD(&new_minor->master_list);
+
+       idr_replace(&drm_minors_idr, new_minor, minor_id);
+
+       if (type == DRM_MINOR_LEGACY) {
+               ret = drm_proc_init(new_minor, minor_id, drm_proc_root);
+               if (ret) {
+                       DRM_ERROR("DRM: Failed to initialize /proc/dri.\n");
+                       goto err_mem;
+               }
+       } else
+               new_minor->proc_root = NULL;
+
+#if defined(CONFIG_DEBUG_FS)
+       ret = drm_debugfs_init(new_minor, minor_id, drm_debugfs_root);
+       if (ret) {
+               DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
+               goto err_g2;
+       }
+#endif
+
+       ret = drm_sysfs_device_add(new_minor);
+       if (ret) {
+               printk(KERN_ERR
+                      "DRM: Error sysfs_device_add.\n");
+               goto err_g2;
+       }
+       *minor = new_minor;
+
+       DRM_DEBUG("new minor assigned %d\n", minor_id);
+       return 0;
+
+
+err_g2:
+       if (new_minor->type == DRM_MINOR_LEGACY)
+               drm_proc_cleanup(new_minor, drm_proc_root);
+err_mem:
+       kfree(new_minor);
+err_idr:
+       idr_remove(&drm_minors_idr, minor_id);
+       *minor = NULL;
+       return ret;
+}
+
+/**
+ * Register.
+ *
+ * \param pdev - PCI device structure
+ * \param ent entry from the PCI ID table with device type flags
+ * \return zero on success or a negative number on failure.
+ *
+ * Attempt to gets inter module "drm" information. If we are first
+ * then register the character device and inter module information.
+ * Try and register, if we fail to register, backout previous work.
+ */
+int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
+               struct drm_driver *driver)
+{
+       struct drm_device *dev;
+       int ret;
+
+       DRM_DEBUG("\n");
+
+       dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+       if (!dev)
+               return -ENOMEM;
+
+       ret = pci_enable_device(pdev);
+       if (ret)
+               goto err_g1;
+
+       pci_set_master(pdev);
+       if ((ret = drm_fill_in_dev(dev, pdev, ent, driver))) {
+               printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
+               goto err_g2;
+       }
+
+       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+               pci_set_drvdata(pdev, dev);
+               ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
+               if (ret)
+                       goto err_g2;
+       }
+
+       if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY)))
+               goto err_g3;
+
+       if (dev->driver->load) {
+               ret = dev->driver->load(dev, ent->driver_data);
+               if (ret)
+                       goto err_g4;
+       }
+
+        /* setup the grouping for the legacy output */
+       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+               ret = drm_mode_group_init_legacy_group(dev, &dev->primary->mode_group);
+               if (ret)
+                       goto err_g4;
+       }
+
+       list_add_tail(&dev->driver_item, &driver->device_list);
+
+       DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
+                driver->name, driver->major, driver->minor, driver->patchlevel,
+                driver->date, pci_name(pdev), dev->primary->index);
+
+       return 0;
+
+err_g4:
+       drm_put_minor(&dev->primary);
+err_g3:
+       if (drm_core_check_feature(dev, DRIVER_MODESET))
+               drm_put_minor(&dev->control);
+err_g2:
+       pci_disable_device(pdev);
+err_g1:
+       kfree(dev);
+       return ret;
+}
+EXPORT_SYMBOL(drm_get_dev);
+
+/**
+ * Put a secondary minor number.
+ *
+ * \param sec_minor - structure to be released
+ * \return always zero
+ *
+ * Cleans up the proc resources. Not legal for this to be the
+ * last minor released.
+ *
+ */
+int drm_put_minor(struct drm_minor **minor_p)
+{
+       struct drm_minor *minor = *minor_p;
+
+       DRM_DEBUG("release secondary minor %d\n", minor->index);
+
+       if (minor->type == DRM_MINOR_LEGACY)
+               drm_proc_cleanup(minor, drm_proc_root);
+#if defined(CONFIG_DEBUG_FS)
+       drm_debugfs_cleanup(minor);
+#endif
+
+       drm_sysfs_device_remove(minor);
+
+       idr_remove(&drm_minors_idr, minor->index);
+
+       kfree(minor);
+       *minor_p = NULL;
+       return 0;
+}
+
+/**
+ * Called via drm_exit() at module unload time or when pci device is
+ * unplugged.
+ *
+ * Cleans up all DRM device, calling drm_lastclose().
+ *
+ * \sa drm_init
+ */
+void drm_put_dev(struct drm_device *dev)
+{
+       struct drm_driver *driver;
+       struct drm_map_list *r_list, *list_temp;
+
+       DRM_DEBUG("\n");
+
+       if (!dev) {
+               DRM_ERROR("cleanup called no dev\n");
+               return;
+       }
+       driver = dev->driver;
+
+       drm_vblank_cleanup(dev);
+
+       drm_lastclose(dev);
+
+       if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) &&
+           dev->agp && dev->agp->agp_mtrr >= 0) {
+               int retval;
+               retval = mtrr_del(dev->agp->agp_mtrr,
+                                 dev->agp->agp_info.aper_base,
+                                 dev->agp->agp_info.aper_size * 1024 * 1024);
+               DRM_DEBUG("mtrr_del=%d\n", retval);
+       }
+
+       if (dev->driver->unload)
+               dev->driver->unload(dev);
+
+       if (drm_core_has_AGP(dev) && dev->agp) {
+               kfree(dev->agp);
+               dev->agp = NULL;
+       }
+
+       list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
+               drm_rmmap(dev, r_list->map);
+       drm_ht_remove(&dev->map_hash);
+
+       drm_ctxbitmap_cleanup(dev);
+
+       if (drm_core_check_feature(dev, DRIVER_MODESET))
+               drm_put_minor(&dev->control);
+
+       if (driver->driver_features & DRIVER_GEM)
+               drm_gem_destroy(dev);
+
+       drm_put_minor(&dev->primary);
+
+       if (dev->devname) {
+               kfree(dev->devname);
+               dev->devname = NULL;
+       }
+       kfree(dev);
+}
+EXPORT_SYMBOL(drm_put_dev);
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_sysfs.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_sysfs.c
new file mode 100644 (file)
index 0000000..7e42b7e
--- /dev/null
@@ -0,0 +1,549 @@
+
+/*
+ * drm_sysfs.c - Modifications to drm_sysfs_class.c to support
+ *               extra sysfs attribute from DRM. Normal drm_sysfs_class
+ *               does not allow adding attributes.
+ *
+ * Copyright (c) 2004 Jon Smirl <jonsmirl@gmail.com>
+ * Copyright (c) 2003-2004 Greg Kroah-Hartman <greg@kroah.com>
+ * Copyright (c) 2003-2004 IBM Corp.
+ *
+ * This file is released under the GPLv2
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/kdev_t.h>
+#include <linux/err.h>
+
+#include "drm_sysfs.h"
+#include "drm_core.h"
+#include "drmP.h"
+
+#define to_drm_minor(d) container_of(d, struct drm_minor, kdev)
+#define to_drm_connector(d) container_of(d, struct drm_connector, kdev)
+
+static struct device_type drm_sysfs_device_minor = {
+       .name = "drm_minor"
+};
+
+/**
+ * drm_class_suspend - DRM class suspend hook
+ * @dev: Linux device to suspend
+ * @state: power state to enter
+ *
+ * Just figures out what the actual struct drm_device associated with
+ * @dev is and calls its suspend hook, if present.
+ */
+static int drm_class_suspend(struct device *dev, pm_message_t state)
+{
+       if (dev->type == &drm_sysfs_device_minor) {
+               struct drm_minor *drm_minor = to_drm_minor(dev);
+               struct drm_device *drm_dev = drm_minor->dev;
+
+               if (drm_minor->type == DRM_MINOR_LEGACY &&
+                   !drm_core_check_feature(drm_dev, DRIVER_MODESET) &&
+                   drm_dev->driver->suspend)
+                       return drm_dev->driver->suspend(drm_dev, state);
+       }
+       return 0;
+}
+
+/**
+ * drm_class_resume - DRM class resume hook
+ * @dev: Linux device to resume
+ *
+ * Just figures out what the actual struct drm_device associated with
+ * @dev is and calls its resume hook, if present.
+ */
+static int drm_class_resume(struct device *dev)
+{
+       if (dev->type == &drm_sysfs_device_minor) {
+               struct drm_minor *drm_minor = to_drm_minor(dev);
+               struct drm_device *drm_dev = drm_minor->dev;
+
+               if (drm_minor->type == DRM_MINOR_LEGACY &&
+                   !drm_core_check_feature(drm_dev, DRIVER_MODESET) &&
+                   drm_dev->driver->resume)
+                       return drm_dev->driver->resume(drm_dev);
+       }
+       return 0;
+}
+
+/* Display the version of drm_core. This doesn't work right in current design */
+static ssize_t version_show(struct class *dev, char *buf)
+{
+       return sprintf(buf, "%s %d.%d.%d %s\n", CORE_NAME, CORE_MAJOR,
+                      CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
+}
+
+static char *drm_devnode(struct device *dev, mode_t *mode)
+{
+       return kasprintf(GFP_KERNEL, "dri/%s", dev_name(dev));
+}
+
+static CLASS_ATTR(version, S_IRUGO, version_show, NULL);
+
+/**
+ * drm_sysfs_create - create a struct drm_sysfs_class structure
+ * @owner: pointer to the module that is to "own" this struct drm_sysfs_class
+ * @name: pointer to a string for the name of this class.
+ *
+ * This is used to create DRM class pointer that can then be used
+ * in calls to drm_sysfs_device_add().
+ *
+ * Note, the pointer created here is to be destroyed when finished by making a
+ * call to drm_sysfs_destroy().
+ */
+struct class *drm_sysfs_create(struct module *owner, char *name)
+{
+       struct class *class;
+       int err;
+
+       class = class_create(owner, name);
+       if (IS_ERR(class)) {
+               err = PTR_ERR(class);
+               goto err_out;
+       }
+
+       class->suspend = drm_class_suspend;
+       class->resume = drm_class_resume;
+
+       err = class_create_file(class, &class_attr_version);
+       if (err)
+               goto err_out_class;
+
+       class->devnode = drm_devnode;
+
+       return class;
+
+err_out_class:
+       class_destroy(class);
+err_out:
+       return ERR_PTR(err);
+}
+
+/**
+ * drm_sysfs_destroy - destroys DRM class
+ *
+ * Destroy the DRM device class.
+ */
+void drm_sysfs_destroy(void)
+{
+       if ((drm_class == NULL) || (IS_ERR(drm_class)))
+               return;
+       class_remove_file(drm_class, &class_attr_version);
+       class_destroy(drm_class);
+}
+
+/**
+ * drm_sysfs_device_release - do nothing
+ * @dev: Linux device
+ *
+ * Normally, this would free the DRM device associated with @dev, along
+ * with cleaning up any other stuff.  But we do that in the DRM core, so
+ * this function can just return and hope that the core does its job.
+ */
+static void drm_sysfs_device_release(struct device *dev)
+{
+       memset(dev, 0, sizeof(struct device));
+       return;
+}
+
+/*
+ * Connector properties
+ */
+static ssize_t status_show(struct device *device,
+                          struct device_attribute *attr,
+                          char *buf)
+{
+       struct drm_connector *connector = to_drm_connector(device);
+       enum drm_connector_status status;
+
+       status = connector->funcs->detect(connector);
+       return snprintf(buf, PAGE_SIZE, "%s\n",
+                       drm_get_connector_status_name(status));
+}
+
+static ssize_t dpms_show(struct device *device,
+                          struct device_attribute *attr,
+                          char *buf)
+{
+       struct drm_connector *connector = to_drm_connector(device);
+       struct drm_device *dev = connector->dev;
+       uint64_t dpms_status;
+       int ret;
+
+       ret = drm_connector_property_get_value(connector,
+                                           dev->mode_config.dpms_property,
+                                           &dpms_status);
+       if (ret)
+               return 0;
+
+       return snprintf(buf, PAGE_SIZE, "%s\n",
+                       drm_get_dpms_name((int)dpms_status));
+}
+
+static ssize_t enabled_show(struct device *device,
+                           struct device_attribute *attr,
+                          char *buf)
+{
+       struct drm_connector *connector = to_drm_connector(device);
+
+       return snprintf(buf, PAGE_SIZE, "%s\n", connector->encoder ? "enabled" :
+                       "disabled");
+}
+
+static ssize_t edid_show(struct kobject *kobj, struct bin_attribute *attr,
+                        char *buf, loff_t off, size_t count)
+{
+       struct device *connector_dev = container_of(kobj, struct device, kobj);
+       struct drm_connector *connector = to_drm_connector(connector_dev);
+       unsigned char *edid;
+       size_t size;
+
+       if (!connector->edid_blob_ptr)
+               return 0;
+
+       edid = connector->edid_blob_ptr->data;
+       size = connector->edid_blob_ptr->length;
+       if (!edid)
+               return 0;
+
+       if (off >= size)
+               return 0;
+
+       if (off + count > size)
+               count = size - off;
+       memcpy(buf, edid + off, count);
+
+       return count;
+}
+
+static ssize_t modes_show(struct device *device,
+                          struct device_attribute *attr,
+                          char *buf)
+{
+       struct drm_connector *connector = to_drm_connector(device);
+       struct drm_display_mode *mode;
+       int written = 0;
+
+       list_for_each_entry(mode, &connector->modes, head) {
+               written += snprintf(buf + written, PAGE_SIZE - written, "%s\n",
+                                   mode->name);
+       }
+
+       return written;
+}
+
+static ssize_t subconnector_show(struct device *device,
+                          struct device_attribute *attr,
+                          char *buf)
+{
+       struct drm_connector *connector = to_drm_connector(device);
+       struct drm_device *dev = connector->dev;
+       struct drm_property *prop = NULL;
+       uint64_t subconnector;
+       int is_tv = 0;
+       int ret;
+
+       switch (connector->connector_type) {
+               case DRM_MODE_CONNECTOR_DVII:
+                       prop = dev->mode_config.dvi_i_subconnector_property;
+                       break;
+               case DRM_MODE_CONNECTOR_Composite:
+               case DRM_MODE_CONNECTOR_SVIDEO:
+               case DRM_MODE_CONNECTOR_Component:
+               case DRM_MODE_CONNECTOR_TV:
+                       prop = dev->mode_config.tv_subconnector_property;
+                       is_tv = 1;
+                       break;
+               default:
+                       DRM_ERROR("Wrong connector type for this property\n");
+                       return 0;
+       }
+
+       if (!prop) {
+               DRM_ERROR("Unable to find subconnector property\n");
+               return 0;
+       }
+
+       ret = drm_connector_property_get_value(connector, prop, &subconnector);
+       if (ret)
+               return 0;
+
+       return snprintf(buf, PAGE_SIZE, "%s", is_tv ?
+                       drm_get_tv_subconnector_name((int)subconnector) :
+                       drm_get_dvi_i_subconnector_name((int)subconnector));
+}
+
+static ssize_t select_subconnector_show(struct device *device,
+                          struct device_attribute *attr,
+                          char *buf)
+{
+       struct drm_connector *connector = to_drm_connector(device);
+       struct drm_device *dev = connector->dev;
+       struct drm_property *prop = NULL;
+       uint64_t subconnector;
+       int is_tv = 0;
+       int ret;
+
+       switch (connector->connector_type) {
+               case DRM_MODE_CONNECTOR_DVII:
+                       prop = dev->mode_config.dvi_i_select_subconnector_property;
+                       break;
+               case DRM_MODE_CONNECTOR_Composite:
+               case DRM_MODE_CONNECTOR_SVIDEO:
+               case DRM_MODE_CONNECTOR_Component:
+               case DRM_MODE_CONNECTOR_TV:
+                       prop = dev->mode_config.tv_select_subconnector_property;
+                       is_tv = 1;
+                       break;
+               default:
+                       DRM_ERROR("Wrong connector type for this property\n");
+                       return 0;
+       }
+
+       if (!prop) {
+               DRM_ERROR("Unable to find select subconnector property\n");
+               return 0;
+       }
+
+       ret = drm_connector_property_get_value(connector, prop, &subconnector);
+       if (ret)
+               return 0;
+
+       return snprintf(buf, PAGE_SIZE, "%s", is_tv ?
+                       drm_get_tv_select_name((int)subconnector) :
+                       drm_get_dvi_i_select_name((int)subconnector));
+}
+
+static struct device_attribute connector_attrs[] = {
+       __ATTR_RO(status),
+       __ATTR_RO(enabled),
+       __ATTR_RO(dpms),
+       __ATTR_RO(modes),
+};
+
+/* These attributes are for both DVI-I connectors and all types of tv-out. */
+static struct device_attribute connector_attrs_opt1[] = {
+       __ATTR_RO(subconnector),
+       __ATTR_RO(select_subconnector),
+};
+
+static struct bin_attribute edid_attr = {
+       .attr.name = "edid",
+       .attr.mode = 0444,
+       .size = 128,
+       .read = edid_show,
+};
+
+/**
+ * drm_sysfs_connector_add - add a connector to sysfs
+ * @connector: connector to add
+ *
+ * Create a connector device in sysfs, along with its associated connector
+ * properties (so far, connection status, dpms, mode list & edid) and
+ * generate a hotplug event so userspace knows there's a new connector
+ * available.
+ *
+ * Note:
+ * This routine should only be called *once* for each connector registered.
+ * A second call for an already registered connector will trigger the BUG_ON
+ * below.
+ */
+int drm_sysfs_connector_add(struct drm_connector *connector)
+{
+       struct drm_device *dev = connector->dev;
+       int ret = 0, i, j;
+
+       /* We shouldn't get called more than once for the same connector */
+       BUG_ON(device_is_registered(&connector->kdev));
+
+       connector->kdev.parent = &dev->primary->kdev;
+       connector->kdev.class = drm_class;
+       connector->kdev.release = drm_sysfs_device_release;
+
+       DRM_DEBUG("adding \"%s\" to sysfs\n",
+                 drm_get_connector_name(connector));
+
+       dev_set_name(&connector->kdev, "card%d-%s",
+                    dev->primary->index, drm_get_connector_name(connector));
+       ret = device_register(&connector->kdev);
+
+       if (ret) {
+               DRM_ERROR("failed to register connector device: %d\n", ret);
+               goto out;
+       }
+
+       /* Standard attributes */
+
+       for (i = 0; i < ARRAY_SIZE(connector_attrs); i++) {
+               ret = device_create_file(&connector->kdev, &connector_attrs[i]);
+               if (ret)
+                       goto err_out_files;
+       }
+
+       /* Optional attributes */
+       /*
+        * In the long run it maybe a good idea to make one set of
+        * optionals per connector type.
+        */
+       switch (connector->connector_type) {
+               case DRM_MODE_CONNECTOR_DVII:
+               case DRM_MODE_CONNECTOR_Composite:
+               case DRM_MODE_CONNECTOR_SVIDEO:
+               case DRM_MODE_CONNECTOR_Component:
+               case DRM_MODE_CONNECTOR_TV:
+                       for (i = 0; i < ARRAY_SIZE(connector_attrs_opt1); i++) {
+                               ret = device_create_file(&connector->kdev, &connector_attrs_opt1[i]);
+                               if (ret)
+                                       goto err_out_files;
+                       }
+                       break;
+               default:
+                       break;
+       }
+
+       ret = sysfs_create_bin_file(&connector->kdev.kobj, &edid_attr);
+       if (ret)
+               goto err_out_files;
+
+       /* Let userspace know we have a new connector */
+       drm_sysfs_hotplug_event(dev);
+
+       return 0;
+
+err_out_files:
+       if (i > 0) /* roll back files created so far; was [i] — removed one file i times */
+               for (j = 0; j < i; j++)
+                       device_remove_file(&connector->kdev,
+                                          &connector_attrs[j]);
+       device_unregister(&connector->kdev);
+
+out:
+       return ret;
+}
+EXPORT_SYMBOL(drm_sysfs_connector_add);
+
+/**
+ * drm_sysfs_connector_remove - remove a connector device from sysfs
+ * @connector: connector to remove
+ *
+ * Remove @connector and its associated attributes from sysfs.  Note that
+ * the device model core will take care of sending the "remove" uevent
+ * at this time, so we don't need to do it.
+ *
+ * Note:
+ * This routine should only be called if the connector was previously
+ * successfully registered.  If @connector hasn't been registered yet,
+ * you'll likely see a panic somewhere deep in sysfs code when called.
+ */
+void drm_sysfs_connector_remove(struct drm_connector *connector)
+{
+       int i;
+
+       DRM_DEBUG("removing \"%s\" from sysfs\n",
+                 drm_get_connector_name(connector));
+
+       for (i = 0; i < ARRAY_SIZE(connector_attrs); i++)
+               device_remove_file(&connector->kdev, &connector_attrs[i]);
+       sysfs_remove_bin_file(&connector->kdev.kobj, &edid_attr);
+       device_unregister(&connector->kdev);
+}
+EXPORT_SYMBOL(drm_sysfs_connector_remove);
+
+/**
+ * drm_sysfs_hotplug_event - generate a DRM uevent
+ * @dev: DRM device
+ *
+ * Send a uevent for the DRM device specified by @dev.  Currently we only
+ * set HOTPLUG=1 in the uevent environment, but this could be expanded to
+ * deal with other types of events.
+ */
+void drm_sysfs_hotplug_event(struct drm_device *dev)
+{
+       char *event_string = "HOTPLUG=1";
+       char *envp[] = { event_string, NULL };
+
+       DRM_DEBUG("generating hotplug event\n");
+
+       kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, envp);
+}
+EXPORT_SYMBOL(drm_sysfs_hotplug_event);
+
+/**
+ * drm_sysfs_device_add - adds a class device to sysfs for a character driver
+ * @minor: DRM minor to be added
+ *
+ * Add a DRM minor to the DRM's device model class.  We use the minor's
+ * parent PCI device (minor->dev->pdev) as the parent for the Linux device,
+ * and name the node controlD%d, renderD%d or card%d according to the
+ * minor type.
+ */
+int drm_sysfs_device_add(struct drm_minor *minor)
+{
+       int err;
+       char *minor_str;
+
+       minor->kdev.parent = &minor->dev->pdev->dev;
+       minor->kdev.class = drm_class;
+       minor->kdev.release = drm_sysfs_device_release;
+       minor->kdev.devt = minor->device;
+       minor->kdev.type = &drm_sysfs_device_minor;
+       if (minor->type == DRM_MINOR_CONTROL)
+               minor_str = "controlD%d";
+        else if (minor->type == DRM_MINOR_RENDER)
+                minor_str = "renderD%d";
+        else
+                minor_str = "card%d";
+
+       dev_set_name(&minor->kdev, minor_str, minor->index);
+
+       err = device_register(&minor->kdev);
+       if (err) {
+               DRM_ERROR("device add failed: %d\n", err);
+               goto err_out;
+       }
+
+       return 0;
+
+err_out:
+       return err;
+}
+
+/**
+ * drm_sysfs_device_remove - remove DRM device
+ * @dev: DRM device to remove
+ *
+ * This call unregisters and cleans up a class device that was created with a
+ * call to drm_sysfs_device_add()
+ */
+void drm_sysfs_device_remove(struct drm_minor *minor)
+{
+       device_unregister(&minor->kdev);
+}
+
+
+/**
+ * drm_class_device_register - Register a struct device in the drm class.
+ *
+ * @dev: pointer to struct device to register.
+ *
+ * @dev should have all relevant members pre-filled with the exception
+ * of the class member. In particular, the device_type member must
+ * be set.
+ */
+
+int drm_class_device_register(struct device *dev)
+{
+       dev->class = drm_class;
+       return device_register(dev);
+}
+EXPORT_SYMBOL_GPL(drm_class_device_register);
+
+void drm_class_device_unregister(struct device *dev)
+{
+       device_unregister(dev); /* was "return device_unregister(...)": void return with expression is invalid C */
+}
+EXPORT_SYMBOL_GPL(drm_class_device_unregister);
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_vm.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_debug_drm/drm_vm.c
new file mode 100644 (file)
index 0000000..4ac900f
--- /dev/null
@@ -0,0 +1,678 @@
+/**
+ * \file drm_vm.c
+ * Memory mapping for DRM
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#if defined(__ia64__)
+#include <linux/efi.h>
+#endif
+
+static void drm_vm_open(struct vm_area_struct *vma);
+static void drm_vm_close(struct vm_area_struct *vma);
+
+static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
+{
+       pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
+
+#if defined(__i386__) || defined(__x86_64__)
+       if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
+               pgprot_val(tmp) |= _PAGE_PCD;
+               pgprot_val(tmp) &= ~_PAGE_PWT;
+       }
+#elif defined(__powerpc__)
+       pgprot_val(tmp) |= _PAGE_NO_CACHE;
+       if (map_type == _DRM_REGISTERS)
+               pgprot_val(tmp) |= _PAGE_GUARDED;
+#elif defined(__ia64__)
+       if (efi_range_is_wc(vma->vm_start, vma->vm_end -
+                                   vma->vm_start))
+               tmp = pgprot_writecombine(tmp);
+       else
+               tmp = pgprot_noncached(tmp);
+#elif defined(__sparc__)
+       tmp = pgprot_noncached(tmp);
+#endif
+       return tmp;
+}
+
+static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
+{
+       pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
+
+#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
+       tmp |= _PAGE_NO_CACHE;
+#endif
+       return tmp;
+}
+
+/**
+ * \c fault method for AGP virtual memory.
+ *
+ * \param vma virtual memory area.
+ * \param vmf fault descriptor; vmf->page is set on success.
+ * \return 0 on success, or VM_FAULT_SIGBUS on error.
+ *
+ * Find the right map and if it's AGP memory find the real physical page to
+ * map, get the page, increment the use count and store it in vmf->page.
+ */
+#if __OS_HAS_AGP
+static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       struct drm_file *priv = vma->vm_file->private_data;
+       struct drm_device *dev = priv->minor->dev;
+       struct drm_local_map *map = NULL;
+       struct drm_map_list *r_list;
+       struct drm_hash_item *hash;
+
+       /*
+        * Find the right map
+        */
+       if (!drm_core_has_AGP(dev))
+               goto vm_fault_error;
+
+       if (!dev->agp || !dev->agp->cant_use_aperture)
+               goto vm_fault_error;
+
+       if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
+               goto vm_fault_error;
+
+       r_list = drm_hash_entry(hash, struct drm_map_list, hash);
+       map = r_list->map;
+
+       if (map && map->type == _DRM_AGP) {
+               /*
+                * Using vm_pgoff as a selector forces us to use this unusual
+                * addressing scheme.
+                */
+               resource_size_t offset = (unsigned long)vmf->virtual_address -
+                       vma->vm_start;
+               resource_size_t baddr = map->offset + offset;
+               struct drm_agp_mem *agpmem;
+               struct page *page;
+
+#ifdef __alpha__
+               /*
+                * Adjust to a bus-relative address
+                */
+               baddr -= dev->hose->mem_space->start;
+#endif
+
+               /*
+                * It's AGP memory - find the real physical page to map
+                */
+               list_for_each_entry(agpmem, &dev->agp->memory, head) {
+                       if (agpmem->bound <= baddr &&
+                           agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
+                               break;
+               }
+
+               if (!agpmem)
+                       goto vm_fault_error;
+
+               /*
+                * Get the page, inc the use count, and return it
+                */
+               offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
+               page = agpmem->memory->pages[offset];
+               get_page(page);
+               vmf->page = page;
+
+               DRM_DEBUG
+                   ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
+                    (unsigned long long)baddr,
+                    agpmem->memory->pages[offset],
+                    (unsigned long long)offset,
+                    page_count(page));
+               return 0;
+       }
+vm_fault_error:
+       return VM_FAULT_SIGBUS; /* Disallow mremap */
+}
+#else                          /* __OS_HAS_AGP */
+static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       return VM_FAULT_SIGBUS;
+}
+#endif                         /* __OS_HAS_AGP */
+
+/**
+ * \c fault method for shared virtual memory.
+ *
+ * \param vma virtual memory area.
+ * \param vmf fault descriptor; vmf->page is set on success.
+ * \return 0 on success, or VM_FAULT_SIGBUS on error.
+ *
+ * Get the mapping, find the real physical page to map, get the page, and
+ * return it via vmf->page.
+ */
+static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       struct drm_local_map *map = vma->vm_private_data;
+       unsigned long offset;
+       unsigned long i;
+       struct page *page;
+
+       if (!map)
+               return VM_FAULT_SIGBUS; /* Nothing allocated */
+
+       offset = (unsigned long)vmf->virtual_address - vma->vm_start;
+       i = (unsigned long)map->handle + offset;
+       page = vmalloc_to_page((void *)i);
+       if (!page)
+               return VM_FAULT_SIGBUS;
+       get_page(page);
+       vmf->page = page;
+
+       DRM_DEBUG("shm_fault 0x%lx\n", offset);
+       return 0;
+}
+
+/**
+ * \c close method for shared virtual memory.
+ *
+ * \param vma virtual memory area.
+ *
+ * Deletes map information if we are the last
+ * person to close a mapping and it's not in the global maplist.
+ */
+static void drm_vm_shm_close(struct vm_area_struct *vma)
+{
+       struct drm_file *priv = vma->vm_file->private_data;
+       struct drm_device *dev = priv->minor->dev;
+       struct drm_vma_entry *pt, *temp;
+       struct drm_local_map *map;
+       struct drm_map_list *r_list;
+       int found_maps = 0;
+
+       DRM_DEBUG("0x%08lx,0x%08lx\n",
+                 vma->vm_start, vma->vm_end - vma->vm_start);
+       atomic_dec(&dev->vma_count);
+
+       map = vma->vm_private_data;
+
+       mutex_lock(&dev->struct_mutex);
+       list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
+               if (pt->vma->vm_private_data == map)
+                       found_maps++;
+               if (pt->vma == vma) {
+                       list_del(&pt->head);
+                       kfree(pt);
+               }
+       }
+
+       /* We were the only map that was found */
+       if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
+               /* Check to see if we are in the maplist, if we are not, then
+                * we delete this mappings information.
+                */
+               found_maps = 0;
+               list_for_each_entry(r_list, &dev->maplist, head) {
+                       if (r_list->map == map)
+                               found_maps++;
+               }
+
+               if (!found_maps) {
+                       drm_dma_handle_t dmah;
+
+                       switch (map->type) {
+                       case _DRM_REGISTERS:
+                       case _DRM_FRAME_BUFFER:
+                               if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
+                                       int retcode;
+                                       retcode = mtrr_del(map->mtrr,
+                                                          map->offset,
+                                                          map->size);
+                                       DRM_DEBUG("mtrr_del = %d\n", retcode);
+                               }
+                               iounmap(map->handle);
+                               break;
+                       case _DRM_SHM:
+                               vfree(map->handle);
+                               break;
+                       case _DRM_AGP:
+                       case _DRM_SCATTER_GATHER:
+                               break;
+                       case _DRM_CONSISTENT:
+                               dmah.vaddr = map->handle;
+                               dmah.busaddr = map->offset;
+                               dmah.size = map->size;
+                               __drm_pci_free(dev, &dmah);
+                               break;
+                       case _DRM_GEM:
+                               DRM_ERROR("tried to rmmap GEM object\n");
+                               break;
+                       }
+                       kfree(map);
+               }
+       }
+       mutex_unlock(&dev->struct_mutex);
+}
+
+/**
+ * \c fault method for DMA virtual memory.
+ *
+ * \param vma virtual memory area.
+ * \param vmf fault descriptor; vmf->page is set on success.
+ * \return 0 on success, or VM_FAULT_SIGBUS on error.
+ *
+ * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
+ */
+static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       struct drm_file *priv = vma->vm_file->private_data;
+       struct drm_device *dev = priv->minor->dev;
+       struct drm_device_dma *dma = dev->dma;
+       unsigned long offset;
+       unsigned long page_nr;
+       struct page *page;
+
+       if (!dma)
+               return VM_FAULT_SIGBUS; /* Error */
+       if (!dma->pagelist)
+               return VM_FAULT_SIGBUS; /* Nothing allocated */
+
+       offset = (unsigned long)vmf->virtual_address - vma->vm_start;   /* vm_[pg]off[set] should be 0 */
+       page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
+       page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));
+
+       get_page(page);
+       vmf->page = page;
+
+       DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
+       return 0;
+}
+
+/**
+ * \c fault method for scatter-gather virtual memory.
+ *
+ * \param vma virtual memory area.
+ * \param vmf fault descriptor; vmf->page is set on success.
+ * \return 0 on success, or VM_FAULT_SIGBUS on error.
+ *
+ * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
+ */
+static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       struct drm_local_map *map = vma->vm_private_data;
+       struct drm_file *priv = vma->vm_file->private_data;
+       struct drm_device *dev = priv->minor->dev;
+       struct drm_sg_mem *entry = dev->sg;
+       unsigned long offset;
+       unsigned long map_offset;
+       unsigned long page_offset;
+       struct page *page;
+
+       if (!entry)
+               return VM_FAULT_SIGBUS; /* Error */
+       if (!entry->pagelist)
+               return VM_FAULT_SIGBUS; /* Nothing allocated */
+
+       offset = (unsigned long)vmf->virtual_address - vma->vm_start;
+       map_offset = map->offset - (unsigned long)dev->sg->virtual;
+       page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
+       page = entry->pagelist[page_offset];
+       get_page(page);
+       vmf->page = page;
+
+       return 0;
+}
+
+static int drm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       return drm_do_vm_fault(vma, vmf);
+}
+
+static int drm_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       return drm_do_vm_shm_fault(vma, vmf);
+}
+
+static int drm_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       return drm_do_vm_dma_fault(vma, vmf);
+}
+
+static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       return drm_do_vm_sg_fault(vma, vmf);
+}
+
+/** AGP virtual memory operations */
+static const struct vm_operations_struct drm_vm_ops = {
+       .fault = drm_vm_fault,
+       .open = drm_vm_open,
+       .close = drm_vm_close,
+};
+
+/** Shared virtual memory operations */
+static const struct vm_operations_struct drm_vm_shm_ops = {
+       .fault = drm_vm_shm_fault,
+       .open = drm_vm_open,
+       .close = drm_vm_shm_close,
+};
+
+/** DMA virtual memory operations */
+static const struct vm_operations_struct drm_vm_dma_ops = {
+       .fault = drm_vm_dma_fault,
+       .open = drm_vm_open,
+       .close = drm_vm_close,
+};
+
+/** Scatter-gather virtual memory operations */
+static const struct vm_operations_struct drm_vm_sg_ops = {
+       .fault = drm_vm_sg_fault,
+       .open = drm_vm_open,
+       .close = drm_vm_close,
+};
+
+/**
+ * \c open method shared by all DRM virtual memory types.
+ *
+ * \param vma virtual memory area; caller must hold dev->struct_mutex.
+ *
+ * Create a new drm_vma_entry structure as the \p vma private data entry and
+ * add it to drm_device::vmalist.
+ */
+void drm_vm_open_locked(struct vm_area_struct *vma)
+{
+       struct drm_file *priv = vma->vm_file->private_data;
+       struct drm_device *dev = priv->minor->dev;
+       struct drm_vma_entry *vma_entry;
+
+       DRM_DEBUG("0x%08lx,0x%08lx\n",
+                 vma->vm_start, vma->vm_end - vma->vm_start);
+       atomic_inc(&dev->vma_count);
+
+       vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
+       if (vma_entry) {
+               vma_entry->vma = vma;
+               vma_entry->pid = current->pid;
+               list_add(&vma_entry->head, &dev->vmalist);
+       }
+}
+
+/* Locking wrapper: runs drm_vm_open_locked() under dev->struct_mutex. */
+static void drm_vm_open(struct vm_area_struct *vma)
+{
+       struct drm_file *priv = vma->vm_file->private_data;
+       struct drm_device *dev = priv->minor->dev;
+
+       mutex_lock(&dev->struct_mutex);
+       drm_vm_open_locked(vma);
+       mutex_unlock(&dev->struct_mutex);
+}
+
+/**
+ * \c close method for all virtual memory types.
+ *
+ * \param vma virtual memory area.
+ *
+ * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
+ * free it.
+ */
+static void drm_vm_close(struct vm_area_struct *vma)
+{
+       struct drm_file *priv = vma->vm_file->private_data;
+       struct drm_device *dev = priv->minor->dev;
+       struct drm_vma_entry *pt, *temp;
+
+       DRM_DEBUG("0x%08lx,0x%08lx\n",
+                 vma->vm_start, vma->vm_end - vma->vm_start);
+       /* Decremented unconditionally: the open path bumps the count even
+        * when its kmalloc() failed and no vmalist entry exists. */
+       atomic_dec(&dev->vma_count);
+
+       mutex_lock(&dev->struct_mutex);
+       list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
+               if (pt->vma == vma) {
+                       list_del(&pt->head);
+                       kfree(pt);
+                       break;
+               }
+       }
+       mutex_unlock(&dev->struct_mutex);
+}
+
+/**
+ * mmap DMA memory.
+ *
+ * \param file_priv DRM file private.
+ * \param vma virtual memory area.
+ * \return zero on success or a negative number on failure.
+ *
+ * Sets the virtual memory area operations structure to vm_dma_ops, the file
+ * pointer, and calls vm_open().  Called with dev->struct_mutex held (from
+ * drm_mmap_locked()), hence the use of drm_vm_open_locked().
+ */
+static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
+{
+       struct drm_file *priv = filp->private_data;
+       struct drm_device *dev;
+       struct drm_device_dma *dma;
+       unsigned long length = vma->vm_end - vma->vm_start;
+
+       dev = priv->minor->dev;
+       dma = dev->dma;
+       DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
+                 vma->vm_start, vma->vm_end, vma->vm_pgoff);
+
+       /* Length must match exact page count */
+       if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
+               return -EINVAL;
+       }
+
+       /* Unprivileged mappings of read-only PCI DMA buffers are forced
+        * read-only at both the VMA and page-table level. */
+       if (!capable(CAP_SYS_ADMIN) &&
+           (dma->flags & _DRM_DMA_USE_PCI_RO)) {
+               vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
+#if defined(__i386__) || defined(__x86_64__)
+               pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
+#else
+               /* Ye gads this is ugly.  With more thought
+                  we could move this up higher and use
+                  `protection_map' instead.  */
+               vma->vm_page_prot =
+                   __pgprot(pte_val
+                            (pte_wrprotect
+                             (__pte(pgprot_val(vma->vm_page_prot)))));
+#endif
+       }
+
+       vma->vm_ops = &drm_vm_dma_ops;
+
+       vma->vm_flags |= VM_RESERVED;   /* Don't swap */
+       vma->vm_flags |= VM_DONTEXPAND;
+
+       vma->vm_file = filp;    /* Needed for drm_vm_open() */
+       drm_vm_open_locked(vma);
+       return 0;
+}
+
+/* get_map_ofs callback helper: a map's base offset is used unmodified. */
+resource_size_t drm_core_get_map_ofs(struct drm_local_map * map)
+{
+       return map->offset;
+}
+
+EXPORT_SYMBOL(drm_core_get_map_ofs);
+
+/*
+ * get_reg_ofs callback helper: bus-address correction that
+ * drm_mmap_locked() adds to map->offset before remapping registers or
+ * framebuffer memory.  Only Alpha needs a non-zero adjustment.
+ */
+resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
+{
+#ifdef __alpha__
+       return dev->hose->dense_mem_base - dev->hose->mem_space->start;
+#else
+       return 0;
+#endif
+}
+
+EXPORT_SYMBOL(drm_core_get_reg_ofs);
+
+/**
+ * mmap DRM memory.
+ *
+ * \param file_priv DRM file private.
+ * \param vma virtual memory area.
+ * \return zero on success or a negative number on failure.
+ *
+ * If the virtual memory area has no offset associated with it then it's a DMA
+ * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist,
+ * checks that the restricted flag is not set, sets the virtual memory operations
+ * according to the mapping type and remaps the pages. Finally sets the file
+ * pointer and calls vm_open().  Caller must hold dev->struct_mutex (see
+ * drm_mmap() below).
+ */
+int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
+{
+       struct drm_file *priv = filp->private_data;
+       struct drm_device *dev = priv->minor->dev;
+       struct drm_local_map *map = NULL;
+       resource_size_t offset = 0;
+       struct drm_hash_item *hash;
+
+       DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
+                 vma->vm_start, vma->vm_end, vma->vm_pgoff);
+
+       if (!priv->authenticated)
+               return -EACCES;
+
+       /* We check for "dma". On Apple's UniNorth, it's valid to have
+        * the AGP mapped at physical address 0
+        * --BenH.
+        */
+       if (!vma->vm_pgoff
+#if __OS_HAS_AGP
+           && (!dev->agp
+               || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
+#endif
+           )
+               return drm_mmap_dma(filp, vma);
+
+       /* The mmap offset is the hash key identifying the map. */
+       if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
+               DRM_ERROR("Could not find map\n");
+               return -EINVAL;
+       }
+
+       map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
+       if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
+               return -EPERM;
+
+       /* Check for valid size. */
+       if (map->size < vma->vm_end - vma->vm_start)
+               return -EINVAL;
+
+       /* Enforce read-only maps for unprivileged callers, both at the VMA
+        * and at the page-table level. */
+       if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
+               vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
+#if defined(__i386__) || defined(__x86_64__)
+               pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
+#else
+               /* Ye gads this is ugly.  With more thought
+                  we could move this up higher and use
+                  `protection_map' instead.  */
+               vma->vm_page_prot =
+                   __pgprot(pte_val
+                            (pte_wrprotect
+                             (__pte(pgprot_val(vma->vm_page_prot)))));
+#endif
+       }
+
+       switch (map->type) {
+       case _DRM_AGP:
+               if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
+                       /*
+                        * On some platforms we can't talk to bus dma address from the CPU, so for
+                        * memory of type DRM_AGP, we'll deal with sorting out the real physical
+                        * pages and mappings in fault()
+                        */
+#if defined(__powerpc__)
+                       pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
+#endif
+                       vma->vm_ops = &drm_vm_ops;
+                       break;
+               }
+               /* fall through to _DRM_FRAME_BUFFER... */
+       case _DRM_FRAME_BUFFER:
+       case _DRM_REGISTERS:
+               /* Physical memory: remap the whole range up front, no fault
+                * handling needed afterwards. */
+               offset = dev->driver->get_reg_ofs(dev);
+               vma->vm_flags |= VM_IO; /* not in core dump */
+               vma->vm_page_prot = drm_io_prot(map->type, vma);
+               if (io_remap_pfn_range(vma, vma->vm_start,
+                                      (map->offset + offset) >> PAGE_SHIFT,
+                                      vma->vm_end - vma->vm_start,
+                                      vma->vm_page_prot))
+                       return -EAGAIN;
+               DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
+                         " offset = 0x%llx\n",
+                         map->type,
+                         vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset));
+               vma->vm_ops = &drm_vm_ops;
+               break;
+       case _DRM_CONSISTENT:
+               /* Consistent memory is really like shared memory. But
+                * it's allocated in a different way, so avoid fault */
+               if (remap_pfn_range(vma, vma->vm_start,
+                   page_to_pfn(virt_to_page(map->handle)),
+                   vma->vm_end - vma->vm_start, vma->vm_page_prot))
+                       return -EAGAIN;
+               /* NOTE(review): vm_page_prot is assigned only after the
+                * remap above, so it does not affect the PTEs just
+                * installed -- confirm this ordering is intentional. */
+               vma->vm_page_prot = drm_dma_prot(map->type, vma);
+       /* fall through to _DRM_SHM */
+       case _DRM_SHM:
+               vma->vm_ops = &drm_vm_shm_ops;
+               vma->vm_private_data = (void *)map;
+               /* Don't let this area swap.  Change when
+                  DRM_KERNEL advisory is supported. */
+               vma->vm_flags |= VM_RESERVED;
+               break;
+       case _DRM_SCATTER_GATHER:
+               /* Pages are resolved lazily in drm_do_vm_sg_fault(). */
+               vma->vm_ops = &drm_vm_sg_ops;
+               vma->vm_private_data = (void *)map;
+               vma->vm_flags |= VM_RESERVED;
+               vma->vm_page_prot = drm_dma_prot(map->type, vma);
+               break;
+       default:
+               return -EINVAL; /* This should never happen. */
+       }
+       vma->vm_flags |= VM_RESERVED;   /* Don't swap */
+       vma->vm_flags |= VM_DONTEXPAND;
+
+       vma->vm_file = filp;    /* Needed for drm_vm_open() */
+       drm_vm_open_locked(vma);
+       return 0;
+}
+
+/*
+ * file_operations.mmap entry point: serializes drm_mmap_locked() with
+ * dev->struct_mutex.
+ */
+int drm_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+       struct drm_file *priv = filp->private_data;
+       struct drm_device *dev = priv->minor->dev;
+       int ret;
+
+       mutex_lock(&dev->struct_mutex);
+       ret = drm_mmap_locked(filp, vma);
+       mutex_unlock(&dev->struct_mutex);
+
+       return ret;
+}
+EXPORT_SYMBOL(drm_mmap);
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/ati_pcigart.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/ati_pcigart.c
new file mode 100644 (file)
index 0000000..628eae3
--- /dev/null
@@ -0,0 +1,195 @@
+/**
+ * \file ati_pcigart.c
+ * ATI PCI GART support
+ *
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Wed Dec 13 21:52:19 2000 by gareth@valinux.com
+ *
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+
+# define ATI_PCIGART_PAGE_SIZE         4096    /**< PCI GART page size */
+
+/*
+ * Allocate the GART translation table itself: gart_info->table_size bytes
+ * of DMA-able memory obtained through drm_pci_alloc().  Returns 0 on
+ * success, -ENOMEM on failure.
+ */
+static int drm_ati_alloc_pcigart_table(struct drm_device *dev,
+                                      struct drm_ati_pcigart_info *gart_info)
+{
+       gart_info->table_handle = drm_pci_alloc(dev, gart_info->table_size,
+                                               PAGE_SIZE,
+                                               gart_info->table_mask);
+       if (gart_info->table_handle == NULL)
+               return -ENOMEM;
+
+       return 0;
+}
+
+/* Release the table allocated by drm_ati_alloc_pcigart_table(). */
+static void drm_ati_free_pcigart_table(struct drm_device *dev,
+                                      struct drm_ati_pcigart_info *gart_info)
+{
+       drm_pci_free(dev, gart_info->table_handle);
+       gart_info->table_handle = NULL;
+}
+
+/*
+ * Tear down the PCI GART: pci_unmap_page() every scatter/gather page that
+ * drm_ati_pcigart_init() mapped and, for a main-memory table, free the
+ * table itself.
+ *
+ * NOTE: returns 1 on success and 0 when there is no scatter/gather memory
+ * -- the inverse of the usual 0/-errno kernel convention.
+ */
+int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
+{
+       struct drm_sg_mem *entry = dev->sg;
+       unsigned long pages;
+       int i;
+       int max_pages;
+
+       /* we need to support large memory configurations */
+       if (!entry) {
+               DRM_ERROR("no scatter/gather memory!\n");
+               return 0;
+       }
+
+       if (gart_info->bus_addr) {
+
+               max_pages = (gart_info->table_size / sizeof(u32));
+               pages = (entry->pages <= max_pages)
+                 ? entry->pages : max_pages;
+
+               /* A zero busaddr marks the first never-mapped page; stop there. */
+               for (i = 0; i < pages; i++) {
+                       if (!entry->busaddr[i])
+                               break;
+                       pci_unmap_page(dev->pdev, entry->busaddr[i],
+                                        PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+               }
+
+               if (gart_info->gart_table_location == DRM_ATI_GART_MAIN)
+                       gart_info->bus_addr = 0;
+       }
+
+       if (gart_info->gart_table_location == DRM_ATI_GART_MAIN &&
+           gart_info->table_handle) {
+               drm_ati_free_pcigart_table(dev, gart_info);
+       }
+
+       return 1;
+}
+EXPORT_SYMBOL(drm_ati_pcigart_cleanup);
+
+/*
+ * Build the PCI GART: DMA-map every scatter/gather page and write one GART
+ * entry per ATI_PCIGART_PAGE_SIZE (4K) chunk -- the CPU PAGE_SIZE may be a
+ * multiple of the GART page size.  The entry encoding depends on
+ * gart_info->gart_reg_if (IGP / PCIe / plain PCI).
+ *
+ * NOTE: returns 1 on success; on failure returns 0 or the negative value
+ * left in ret by drm_ati_alloc_pcigart_table() -- not the usual 0/-errno
+ * convention.
+ */
+int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
+{
+       struct drm_local_map *map = &gart_info->mapping;
+       struct drm_sg_mem *entry = dev->sg;
+       void *address = NULL;
+       unsigned long pages;
+       u32 *pci_gart = NULL, page_base, gart_idx;
+       dma_addr_t bus_address = 0;
+       int i, j, ret = 0;
+       int max_ati_pages, max_real_pages;
+
+       if (!entry) {
+               DRM_ERROR("no scatter/gather memory!\n");
+               goto done;
+       }
+
+       if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
+               DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n");
+
+               ret = drm_ati_alloc_pcigart_table(dev, gart_info);
+               if (ret) {
+                       DRM_ERROR("cannot allocate PCI GART page!\n");
+                       goto done;
+               }
+
+               pci_gart = gart_info->table_handle->vaddr;
+               address = gart_info->table_handle->vaddr;
+               bus_address = gart_info->table_handle->busaddr;
+       } else {
+               /* Table lives in VRAM and was mapped by the caller. */
+               address = gart_info->addr;
+               bus_address = gart_info->bus_addr;
+               DRM_DEBUG("PCI: Gart Table: VRAM %08LX mapped at %08lX\n",
+                         (unsigned long long)bus_address,
+                         (unsigned long)address);
+       }
+
+
+       max_ati_pages = (gart_info->table_size / sizeof(u32));
+       max_real_pages = max_ati_pages / (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE);
+       pages = (entry->pages <= max_real_pages)
+           ? entry->pages : max_real_pages;
+
+       /* Clear the whole table; VRAM tables need the io accessor. */
+       if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
+               memset(pci_gart, 0, max_ati_pages * sizeof(u32));
+       } else {
+               memset_io((void __iomem *)map->handle, 0, max_ati_pages * sizeof(u32));
+       }
+
+       gart_idx = 0;
+       for (i = 0; i < pages; i++) {
+               /* we need to support large memory configurations */
+               entry->busaddr[i] = pci_map_page(dev->pdev, entry->pagelist[i],
+                                                0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+               if (entry->busaddr[i] == 0) {
+                       DRM_ERROR("unable to map PCIGART pages!\n");
+                       drm_ati_pcigart_cleanup(dev, gart_info);
+                       address = NULL;
+                       bus_address = 0;
+                       goto done;
+               }
+               page_base = (u32) entry->busaddr[i];
+
+               for (j = 0; j < (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); j++) {
+                       u32 val;
+
+                       switch(gart_info->gart_reg_if) {
+                       case DRM_ATI_GART_IGP:
+                               val = page_base | 0xc;
+                               break;
+                       case DRM_ATI_GART_PCIE:
+                               val = (page_base >> 8) | 0xc;
+                               break;
+                       default:
+                       case DRM_ATI_GART_PCI:
+                               val = page_base;
+                               break;
+                       }
+                       if (gart_info->gart_table_location ==
+                           DRM_ATI_GART_MAIN)
+                               pci_gart[gart_idx] = cpu_to_le32(val);
+                       else
+                               DRM_WRITE32(map, gart_idx * sizeof(u32), val);
+                       gart_idx++;
+                       page_base += ATI_PCIGART_PAGE_SIZE;
+               }
+       }
+       ret = 1;
+
+       /* Make the table visible to the device before it is used. */
+#if defined(__i386__) || defined(__x86_64__)
+       wbinvd();
+#else
+       mb();
+#endif
+
+      done:
+       gart_info->addr = address;
+       gart_info->bus_addr = bus_address;
+       return ret;
+}
+EXPORT_SYMBOL(drm_ati_pcigart_init);
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_agpsupport.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_agpsupport.c
new file mode 100644 (file)
index 0000000..d68888f
--- /dev/null
@@ -0,0 +1,506 @@
+/**
+ * \file drm_agpsupport.c
+ * DRM support for AGP/GART backend
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include <linux/module.h>
+
+#if __OS_HAS_AGP
+
+#include <asm/agp.h>
+
+/**
+ * Get AGP information.
+ *
+ * \param dev DRM device.
+ * \param info output drm_agp_info structure, filled in on success.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device has been initialized and acquired and fills in the
+ * drm_agp_info structure with the information in drm_agp_head::agp_info.
+ */
+int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info)
+{
+       DRM_AGP_KERN *kern;
+
+       if (!dev->agp || !dev->agp->acquired)
+               return -EINVAL;
+
+       kern = &dev->agp->agp_info;
+       info->agp_version_major = kern->version.major;
+       info->agp_version_minor = kern->version.minor;
+       info->mode = kern->mode;
+       info->aperture_base = kern->aper_base;
+       info->aperture_size = kern->aper_size * 1024 * 1024;
+       info->memory_allowed = kern->max_memory << PAGE_SHIFT;
+       info->memory_used = kern->current_memory << PAGE_SHIFT;
+       info->id_vendor = kern->device->vendor;
+       info->id_device = kern->device->device;
+
+       return 0;
+}
+
+EXPORT_SYMBOL(drm_agp_info);
+
+/* Ioctl wrapper: data is the userspace drm_agp_info to fill in. */
+int drm_agp_info_ioctl(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv)
+{
+       struct drm_agp_info *info = data;
+       int err;
+
+       err = drm_agp_info(dev, info);
+       if (err)
+               return err;
+
+       return 0;
+}
+
+/**
+ * Acquire the AGP device.
+ *
+ * \param dev DRM device that is to acquire AGP.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device hasn't been acquired before and calls
+ * \c agp_backend_acquire.
+ */
+int drm_agp_acquire(struct drm_device * dev)
+{
+       if (!dev->agp)
+               return -ENODEV;
+       if (dev->agp->acquired)
+               return -EBUSY;
+       if (!(dev->agp->bridge = agp_backend_acquire(dev->pdev)))
+               return -ENODEV;
+       dev->agp->acquired = 1;
+       return 0;
+}
+
+EXPORT_SYMBOL(drm_agp_acquire);
+
+/**
+ * Acquire the AGP device (ioctl).
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device hasn't been acquired before and calls
+ * \c agp_backend_acquire.
+ */
+int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
+                         struct drm_file *file_priv)
+{
+       /* file_priv->minor->dev is the same device as 'dev'. */
+       return drm_agp_acquire((struct drm_device *) file_priv->minor->dev);
+}
+
+/**
+ * Release the AGP device.
+ *
+ * \param dev DRM device that is to release AGP.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device has been acquired and calls \c agp_backend_release.
+ */
+int drm_agp_release(struct drm_device * dev)
+{
+       if (!dev->agp || !dev->agp->acquired)
+               return -EINVAL;
+       agp_backend_release(dev->agp->bridge);
+       dev->agp->acquired = 0;
+       return 0;
+}
+EXPORT_SYMBOL(drm_agp_release);
+
+/* Ioctl wrapper for drm_agp_release(); ioctl payload is unused. */
+int drm_agp_release_ioctl(struct drm_device *dev, void *data,
+                         struct drm_file *file_priv)
+{
+       return drm_agp_release(dev);
+}
+
+/**
+ * Enable the AGP bus.
+ *
+ * \param dev DRM device that has previously acquired AGP.
+ * \param mode Requested AGP mode (passed by value).
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device has been acquired but not enabled, and calls
+ * \c agp_enable.
+ */
+int drm_agp_enable(struct drm_device * dev, struct drm_agp_mode mode)
+{
+       if (!dev->agp || !dev->agp->acquired)
+               return -EINVAL;
+
+       dev->agp->mode = mode.mode;
+       agp_enable(dev->agp->bridge, mode.mode);
+       dev->agp->enabled = 1;
+       return 0;
+}
+
+EXPORT_SYMBOL(drm_agp_enable);
+
+/* Ioctl wrapper: data is the userspace drm_agp_mode request. */
+int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv)
+{
+       struct drm_agp_mode *mode = data;
+
+       return drm_agp_enable(dev, *mode);
+}
+
+/**
+ * Allocate AGP memory.
+ *
+ * \param dev DRM device.
+ * \param request allocation request; \c handle and \c physical are filled
+ * in on success.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device is present and has been acquired, allocates the
+ * memory via drm_alloc_agp() and creates a drm_agp_mem entry for it on
+ * drm_agp_head::memory.
+ */
+int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
+{
+       struct drm_agp_mem *entry;
+       DRM_AGP_MEM *memory;
+       unsigned long pages;
+       u32 type;
+
+       if (!dev->agp || !dev->agp->acquired)
+               return -EINVAL;
+       if (!(entry = kmalloc(sizeof(*entry), GFP_KERNEL)))
+               return -ENOMEM;
+
+       memset(entry, 0, sizeof(*entry));
+
+       /* Round the byte count up to whole pages. */
+       pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
+       type = (u32) request->type;
+       if (!(memory = drm_alloc_agp(dev, pages, type))) {
+               kfree(entry);
+               return -ENOMEM;
+       }
+
+       /* key + 1, presumably so a valid handle is never zero. */
+       entry->handle = (unsigned long)memory->key + 1;
+       entry->memory = memory;
+       entry->bound = 0;
+       entry->pages = pages;
+       list_add(&entry->head, &dev->agp->memory);
+
+       request->handle = entry->handle;
+       request->physical = memory->physical;
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_agp_alloc);
+
+
+/* Ioctl wrapper: data is the userspace drm_agp_buffer request. */
+int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv)
+{
+       struct drm_agp_buffer *request = data;
+
+       return drm_agp_alloc(dev, request);
+}
+
+/**
+ * Search for the AGP memory entry associated with a handle.
+ *
+ * \param dev DRM device structure.
+ * \param handle AGP memory handle.
+ * \return pointer to the drm_agp_mem structure associated with \p handle,
+ * or NULL if no entry matches.
+ *
+ * Walks through drm_agp_head::memory until finding a matching handle.
+ */
+static struct drm_agp_mem *drm_agp_lookup_entry(struct drm_device * dev,
+                                          unsigned long handle)
+{
+       struct drm_agp_mem *entry;
+
+       list_for_each_entry(entry, &dev->agp->memory, head) {
+               if (entry->handle == handle)
+                       return entry;
+       }
+       return NULL;
+}
+
+/**
+ * Unbind AGP memory from the GATT.
+ *
+ * \param dev DRM device.
+ * \param request pointer to a drm_agp_binding structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device is present and acquired, looks-up the AGP memory
+ * entry and passes it to the unbind_agp() function.  entry->bound is
+ * cleared only when the unbind actually succeeded.
+ */
+int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request)
+{
+       struct drm_agp_mem *entry;
+       int ret;
+
+       if (!dev->agp || !dev->agp->acquired)
+               return -EINVAL;
+       if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
+               return -EINVAL;
+       if (!entry->bound)
+               return -EINVAL;
+       ret = drm_unbind_agp(entry->memory);
+       if (ret == 0)
+               entry->bound = 0;
+       return ret;
+}
+EXPORT_SYMBOL(drm_agp_unbind);
+
+
+/* Ioctl wrapper: data is the userspace drm_agp_binding request. */
+int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv)
+{
+       struct drm_agp_binding *request = data;
+
+       return drm_agp_unbind(dev, request);
+}
+
+/**
+ * Bind AGP memory into the GATT.
+ *
+ * \param dev DRM device.
+ * \param request pointer to a drm_agp_binding structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device is present and has been acquired and that no memory
+ * is currently bound into the GATT. Looks-up the AGP memory entry and passes
+ * it to bind_agp() function.
+ */
+int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request)
+{
+       struct drm_agp_mem *entry;
+       int retcode;
+       int page;
+
+       if (!dev->agp || !dev->agp->acquired)
+               return -EINVAL;
+       if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
+               return -EINVAL;
+       if (entry->bound)
+               return -EINVAL;
+       page = (request->offset + PAGE_SIZE - 1) / PAGE_SIZE;
+       if ((retcode = drm_bind_agp(entry->memory, page)))
+               return retcode;
+       /* bound doubles as the aperture address of the binding; non-zero
+        * therefore means "currently bound". */
+       entry->bound = dev->agp->base + (page << PAGE_SHIFT);
+       DRM_DEBUG("base = 0x%lx entry->bound = 0x%lx\n",
+                 dev->agp->base, entry->bound);
+       return 0;
+}
+EXPORT_SYMBOL(drm_agp_bind);
+
+
+/* Ioctl wrapper: data is the userspace drm_agp_binding request. */
+int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv)
+{
+       struct drm_agp_binding *request = data;
+
+       return drm_agp_bind(dev, request);
+}
+
+/**
+ * Free AGP memory.
+ *
+ * \param dev DRM device.
+ * \param request pointer to a drm_agp_buffer structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device is present and has been acquired and looks up the
+ * AGP memory entry. If the memory is currently bound, unbind it via
+ * unbind_agp(). Frees it via free_agp() as well as the entry itself
+ * and unlinks from the doubly linked list it's inserted in.
+ */
+int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request)
+{
+       struct drm_agp_mem *entry;
+
+       if (!dev->agp || !dev->agp->acquired)
+               return -EINVAL;
+       if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
+               return -EINVAL;
+       /* NOTE(review): the drm_unbind_agp() return value is ignored here;
+        * the memory is freed regardless of whether the unbind succeeded. */
+       if (entry->bound)
+               drm_unbind_agp(entry->memory);
+
+       list_del(&entry->head);
+
+       drm_free_agp(entry->memory, entry->pages);
+       kfree(entry);
+       return 0;
+}
+EXPORT_SYMBOL(drm_agp_free);
+
+
+
+/* Ioctl wrapper: data is the userspace drm_agp_buffer request. */
+int drm_agp_free_ioctl(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv)
+{
+       struct drm_agp_buffer *request = data;
+
+       return drm_agp_free(dev, request);
+}
+
+/**
+ * Initialize the AGP resources.
+ *
+ * \return pointer to a drm_agp_head structure, or NULL when no usable AGP
+ * bridge exists (allocation failure, no bridge, or unsupported chipset).
+ *
+ * Locates the AGP bridge via the agpgart module and creates and initializes
+ * a drm_agp_head structure.  If no bridge is registered yet, the backend is
+ * briefly acquired just to copy its info, then released again.
+ */
+struct drm_agp_head *drm_agp_init(struct drm_device *dev)
+{
+       struct drm_agp_head *head = NULL;
+
+       if (!(head = kmalloc(sizeof(*head), GFP_KERNEL)))
+               return NULL;
+       memset((void *)head, 0, sizeof(*head));
+       head->bridge = agp_find_bridge(dev->pdev);
+       if (!head->bridge) {
+               if (!(head->bridge = agp_backend_acquire(dev->pdev))) {
+                       kfree(head);
+                       return NULL;
+               }
+               agp_copy_info(head->bridge, &head->agp_info);
+               agp_backend_release(head->bridge);
+       } else {
+               agp_copy_info(head->bridge, &head->agp_info);
+       }
+       if (head->agp_info.chipset == NOT_SUPPORTED) {
+               kfree(head);
+               return NULL;
+       }
+       INIT_LIST_HEAD(&head->memory);
+       head->cant_use_aperture = head->agp_info.cant_use_aperture;
+       head->page_mask = head->agp_info.page_mask;
+       head->base = head->agp_info.aper_base;
+       return head;
+}
+
+/*
+ * Thin wrappers around the agpgart API.  Note the mixed return
+ * conventions: drm_agp_free_memory() returns 1/0 (freed / NULL handle),
+ * while the bind/unbind wrappers return -EINVAL or the agpgart result.
+ */
+
+/** Calls agp_allocate_memory() */
+DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data * bridge,
+                                    size_t pages, u32 type)
+{
+       return agp_allocate_memory(bridge, pages, type);
+}
+
+/** Calls agp_free_memory() */
+int drm_agp_free_memory(DRM_AGP_MEM * handle)
+{
+       if (!handle)
+               return 0;
+       agp_free_memory(handle);
+       return 1;
+}
+
+/** Calls agp_bind_memory() */
+int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start)
+{
+       if (!handle)
+               return -EINVAL;
+       return agp_bind_memory(handle, start);
+}
+
+/** Calls agp_unbind_memory() */
+int drm_agp_unbind_memory(DRM_AGP_MEM * handle)
+{
+       if (!handle)
+               return -EINVAL;
+       return agp_unbind_memory(handle);
+}
+
+/**
+ * Binds a collection of pages into AGP memory at the given offset, returning
+ * the AGP memory structure containing them.
+ *
+ * No reference is held on the pages during this time -- it is up to the
+ * caller to handle that.
+ *
+ * \param gtt_offset byte offset into the GTT; converted to pages below.
+ * \return the bound DRM_AGP_MEM, or NULL on allocation or bind failure.
+ */
+DRM_AGP_MEM *
+drm_agp_bind_pages(struct drm_device *dev,
+                  struct page **pages,
+                  unsigned long num_pages,
+                  uint32_t gtt_offset,
+                  u32 type)
+{
+       DRM_AGP_MEM *mem;
+       int ret, i;
+
+       DRM_DEBUG("\n");
+
+       mem = drm_agp_allocate_memory(dev->agp->bridge, num_pages,
+                                     type);
+       if (mem == NULL) {
+               DRM_ERROR("Failed to allocate memory for %ld pages\n",
+                         num_pages);
+               return NULL;
+       }
+
+       for (i = 0; i < num_pages; i++)
+               mem->pages[i] = pages[i];
+       mem->page_count = num_pages;
+
+       /* Pages are already CPU-visible; skip agpgart's flush on bind. */
+       mem->is_flushed = true;
+       ret = drm_agp_bind_memory(mem, gtt_offset / PAGE_SIZE);
+       if (ret != 0) {
+               DRM_ERROR("Failed to bind AGP memory: %d\n", ret);
+               agp_free_memory(mem);
+               return NULL;
+       }
+
+       return mem;
+}
+EXPORT_SYMBOL(drm_agp_bind_pages);
+
+/* Flush the AGP chipset write buffers via the agpgart backend. */
+void drm_agp_chipset_flush(struct drm_device *dev)
+{
+       agp_flush_chipset(dev->agp->bridge);
+}
+EXPORT_SYMBOL(drm_agp_chipset_flush);
+
+#endif /* __OS_HAS_AGP */
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_auth.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_auth.c
new file mode 100644 (file)
index 0000000..932b5aa
--- /dev/null
@@ -0,0 +1,191 @@
+/**
+ * \file drm_auth.c
+ * IOCTLs for authentication
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Tue Feb  2 08:37:54 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+
+/**
+ * Find the file with the given magic number.
+ *
+ * \param dev DRM device.
+ * \param magic magic number.
+ *
+ * Searches in drm_device::magiclist within all files with the same hash key
+ * the one with matching magic number, while holding the drm_device::struct_mutex
+ * lock.
+ */
+static struct drm_file *drm_find_file(struct drm_master *master, drm_magic_t magic)
+{
+       struct drm_file *retval = NULL;
+       struct drm_magic_entry *pt;
+       struct drm_hash_item *hash;
+       struct drm_device *dev = master->minor->dev;
+
+       mutex_lock(&dev->struct_mutex);
+       if (!drm_ht_find_item(&master->magiclist, (unsigned long)magic, &hash)) {
+               pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item);
+               retval = pt->priv;
+       }
+       mutex_unlock(&dev->struct_mutex);
+       return retval;
+}
+
+/**
+ * Adds a magic number.
+ *
+ * \param dev DRM device.
+ * \param priv file private data.
+ * \param magic magic number.
+ *
+ * Creates a drm_magic_entry structure and appends to the linked list
+ * associated the magic number hash key in drm_device::magiclist, while holding
+ * the drm_device::struct_mutex lock.
+ */
+static int drm_add_magic(struct drm_master *master, struct drm_file *priv,
+                        drm_magic_t magic)
+{
+       struct drm_magic_entry *entry;
+       struct drm_device *dev = master->minor->dev;
+       DRM_DEBUG("%d\n", magic);
+
+       entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+       if (!entry)
+               return -ENOMEM;
+       memset(entry, 0, sizeof(*entry));
+       entry->priv = priv;
+       entry->hash_item.key = (unsigned long)magic;
+       mutex_lock(&dev->struct_mutex);
+       drm_ht_insert_item(&master->magiclist, &entry->hash_item);
+       list_add_tail(&entry->head, &master->magicfree);
+       mutex_unlock(&dev->struct_mutex);
+
+       return 0;
+}
+
+/**
+ * Remove a magic number.
+ *
+ * \param dev DRM device.
+ * \param magic magic number.
+ *
+ * Searches and unlinks the entry in drm_device::magiclist with the magic
+ * number hash key, while holding the drm_device::struct_mutex lock.
+ */
+static int drm_remove_magic(struct drm_master *master, drm_magic_t magic)
+{
+       struct drm_magic_entry *pt;
+       struct drm_hash_item *hash;
+       struct drm_device *dev = master->minor->dev;
+
+       DRM_DEBUG("%d\n", magic);
+
+       mutex_lock(&dev->struct_mutex);
+       if (drm_ht_find_item(&master->magiclist, (unsigned long)magic, &hash)) {
+               mutex_unlock(&dev->struct_mutex);
+               return -EINVAL;
+       }
+       pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item);
+       drm_ht_remove_item(&master->magiclist, hash);
+       list_del(&pt->head);
+       mutex_unlock(&dev->struct_mutex);
+
+       kfree(pt);
+
+       return 0;
+}
+
+/**
+ * Get a unique magic number (ioctl).
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a resulting drm_auth structure.
+ * \return zero on success, or a negative number on failure.
+ *
+ * If there is a magic number in drm_file::magic then use it, otherwise
+ * searches for a unique non-zero magic number and adds it, associating it with
+ * \p file_priv.
+ */
+int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       static drm_magic_t sequence = 0;
+       static DEFINE_SPINLOCK(lock);
+       struct drm_auth *auth = data;
+
+       /* Find unique magic */
+       if (file_priv->magic) {
+               auth->magic = file_priv->magic;
+       } else {
+               do {
+                       spin_lock(&lock);
+                       if (!sequence)
+                               ++sequence;     /* reserve 0 */
+                       auth->magic = sequence++;
+                       spin_unlock(&lock);
+               } while (drm_find_file(file_priv->master, auth->magic));
+               file_priv->magic = auth->magic;
+               drm_add_magic(file_priv->master, file_priv, auth->magic);
+       }
+
+       DRM_DEBUG("%u\n", auth->magic);
+
+       return 0;
+}
+
+/**
+ * Authenticate with a magic.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_auth structure.
+ * \return zero if authentication succeeded, or a negative number otherwise.
+ *
+ * Checks if \p file_priv is associated with the magic number passed in \p arg.
+ */
+int drm_authmagic(struct drm_device *dev, void *data,
+                 struct drm_file *file_priv)
+{
+       struct drm_auth *auth = data;
+       struct drm_file *file;
+
+       DRM_DEBUG("%u\n", auth->magic);
+       if ((file = drm_find_file(file_priv->master, auth->magic))) {
+               file->authenticated = 1;
+               drm_remove_magic(file_priv->master, auth->magic);
+               return 0;
+       }
+       return -EINVAL;
+}
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_bufs.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_bufs.c
new file mode 100644 (file)
index 0000000..3d09e30
--- /dev/null
@@ -0,0 +1,1651 @@
+/**
+ * \file drm_bufs.c
+ * Generic buffer template
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/log2.h>
+#include <asm/shmparam.h>
+#include "drmP.h"
+
+resource_size_t drm_get_resource_start(struct drm_device *dev, unsigned int resource)
+{
+       return pci_resource_start(dev->pdev, resource);
+}
+EXPORT_SYMBOL(drm_get_resource_start);
+
+resource_size_t drm_get_resource_len(struct drm_device *dev, unsigned int resource)
+{
+       return pci_resource_len(dev->pdev, resource);
+}
+
+EXPORT_SYMBOL(drm_get_resource_len);
+
+static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
+                                                 struct drm_local_map *map)
+{
+       struct drm_map_list *entry;
+       list_for_each_entry(entry, &dev->maplist, head) {
+               /*
+                * Because the kernel-userspace ABI is fixed at a 32-bit offset
+                * while PCI resources may live above that, we ignore the map
+                * offset for maps of type _DRM_FRAMEBUFFER or _DRM_REGISTERS.
+                * It is assumed that each driver will have only one resource of
+                * each type.
+                */
+               if (!entry->map ||
+                   map->type != entry->map->type ||
+                   entry->master != dev->primary->master)
+                       continue;
+               switch (map->type) {
+               case _DRM_SHM:
+                       if (map->flags != _DRM_CONTAINS_LOCK)
+                               break;
+               case _DRM_REGISTERS:    /* fall through when SHM map contains the lock */
+               case _DRM_FRAME_BUFFER:
+                       return entry;
+               default: /* Make gcc happy */
+                       ;
+               }
+               if (entry->map->offset == map->offset)
+                       return entry;
+       }
+
+       return NULL;
+}
+
+static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
+                         unsigned long user_token, int hashed_handle, int shm)
+{
+       int use_hashed_handle, shift;
+       unsigned long add;
+
+#if (BITS_PER_LONG == 64)
+       use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
+#elif (BITS_PER_LONG == 32)
+       use_hashed_handle = hashed_handle;
+#else
+#error Unsupported long size. Neither 64 nor 32 bits.
+#endif
+
+       if (!use_hashed_handle) {
+               int ret;
+               hash->key = user_token >> PAGE_SHIFT;
+               ret = drm_ht_insert_item(&dev->map_hash, hash);
+               if (ret != -EINVAL)
+                       return ret;
+       }
+
+       shift = 0;
+       add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
+       if (shm && (SHMLBA > PAGE_SIZE)) {
+               int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;
+
+               /* For shared memory, we have to preserve the SHMLBA
+                * bits of the eventual vma->vm_pgoff value during
+                * mmap().  Otherwise we run into cache aliasing problems
+                * on some platforms.  On these platforms, the pgoff of
+                * a mmap() request is used to pick a suitable virtual
+                * address for the mmap() region such that it will not
+                * cause cache aliasing problems.
+                *
+                * Therefore, make sure the SHMLBA relevant bits of the
+                * hash value we use are equal to those in the original
+                * kernel virtual address.
+                */
+               shift = bits;
+               add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
+       }
+
+       return drm_ht_just_insert_please(&dev->map_hash, hash,
+                                        user_token, 32 - PAGE_SHIFT - 3,
+                                        shift, add);
+}
+
+/**
+ * Core function to create a range of memory available for mapping by a
+ * non-root process.
+ *
+ * Adjusts the memory offset to its absolute value according to the mapping
+ * type.  Adds the map to the map list drm_device::maplist. Adds MTRR's where
+ * applicable and if supported by the kernel.
+ */
+static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
+                          unsigned int size, enum drm_map_type type,
+                          enum drm_map_flags flags,
+                          struct drm_map_list ** maplist)
+{
+       struct drm_local_map *map;
+       struct drm_map_list *list;
+       drm_dma_handle_t *dmah;
+       unsigned long user_token;
+       int ret;
+
+       map = kmalloc(sizeof(*map), GFP_KERNEL);
+       if (!map)
+               return -ENOMEM;
+
+       map->offset = offset;
+       map->size = size;
+       map->flags = flags;
+       map->type = type;
+
+       /* Only allow shared memory to be removable since we only keep enough
+        * book keeping information about shared memory to allow for removal
+        * when processes fork.
+        */
+       if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
+               kfree(map);
+               return -EINVAL;
+       }
+       DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
+                 (unsigned long long)map->offset, map->size, map->type);
+
+       /* page-align _DRM_SHM maps. They are allocated here so there is no security
+        * hole created by that and it works around various broken drivers that use
+        * a non-aligned quantity to map the SAREA. --BenH
+        */
+       if (map->type == _DRM_SHM)
+               map->size = PAGE_ALIGN(map->size);
+
+       if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
+               kfree(map);
+               return -EINVAL;
+       }
+       map->mtrr = -1;
+       map->handle = NULL;
+
+       switch (map->type) {
+       case _DRM_REGISTERS:
+       case _DRM_FRAME_BUFFER:
+#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__)
+               if (map->offset + (map->size-1) < map->offset ||
+                   map->offset < virt_to_phys(high_memory)) {
+                       kfree(map);
+                       return -EINVAL;
+               }
+#endif
+#ifdef __alpha__
+               map->offset += dev->hose->mem_space->start;
+#endif
+               /* Some drivers preinitialize some maps, without the X Server
+                * needing to be aware of it.  Therefore, we just return success
+                * when the server tries to create a duplicate map.
+                */
+               list = drm_find_matching_map(dev, map);
+               if (list != NULL) {
+                       if (list->map->size != map->size) {
+                               DRM_DEBUG("Matching maps of type %d with "
+                                         "mismatched sizes, (%ld vs %ld)\n",
+                                         map->type, map->size,
+                                         list->map->size);
+                               list->map->size = map->size;
+                       }
+
+                       kfree(map);
+                       *maplist = list;
+                       return 0;
+               }
+
+               if (drm_core_has_MTRR(dev)) {
+                       if (map->type == _DRM_FRAME_BUFFER ||
+                           (map->flags & _DRM_WRITE_COMBINING)) {
+                               map->mtrr = mtrr_add(map->offset, map->size,
+                                                    MTRR_TYPE_WRCOMB, 1);
+                       }
+               }
+               if (map->type == _DRM_REGISTERS) {
+                       map->handle = ioremap(map->offset, map->size);
+                       if (!map->handle) {
+                               kfree(map);
+                               return -ENOMEM;
+                       }
+               }
+
+               break;
+       case _DRM_SHM:
+               list = drm_find_matching_map(dev, map);
+               if (list != NULL) {
+                       if(list->map->size != map->size) {
+                               DRM_DEBUG("Matching maps of type %d with "
+                                         "mismatched sizes, (%ld vs %ld)\n",
+                                         map->type, map->size, list->map->size);
+                               list->map->size = map->size;
+                       }
+
+                       kfree(map);
+                       *maplist = list;
+                       return 0;
+               }
+               map->handle = vmalloc_user(map->size);
+               DRM_DEBUG("%lu %d %p\n",
+                         map->size, drm_order(map->size), map->handle);
+               if (!map->handle) {
+                       kfree(map);
+                       return -ENOMEM;
+               }
+               map->offset = (unsigned long)map->handle;
+               if (map->flags & _DRM_CONTAINS_LOCK) {
+                       /* Prevent a 2nd X Server from creating a 2nd lock */
+                       if (dev->primary->master->lock.hw_lock != NULL) {
+                               vfree(map->handle);
+                               kfree(map);
+                               return -EBUSY;
+                       }
+                       dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle;   /* Pointer to lock */
+               }
+               break;
+       case _DRM_AGP: {
+               struct drm_agp_mem *entry;
+               int valid = 0;
+
+               if (!drm_core_has_AGP(dev)) {
+                       kfree(map);
+                       return -EINVAL;
+               }
+#ifdef __alpha__
+               map->offset += dev->hose->mem_space->start;
+#endif
+               /* In some cases (i810 driver), user space may have already
+                * added the AGP base itself, because dev->agp->base previously
+                * only got set during AGP enable.  So, only add the base
+                * address if the map's offset isn't already within the
+                * aperture.
+                */
+               if (map->offset < dev->agp->base ||
+                   map->offset > dev->agp->base +
+                   dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
+                       map->offset += dev->agp->base;
+               }
+               map->mtrr = dev->agp->agp_mtrr; /* for getmap */
+
+               /* This assumes the DRM is in total control of AGP space.
+                * It's not always the case as AGP can be in the control
+                * of user space (i.e. i810 driver). So this loop will get
+                * skipped and we double check that dev->agp->memory is
+                * actually set as well as being invalid before EPERM'ing
+                */
+               list_for_each_entry(entry, &dev->agp->memory, head) {
+                       if ((map->offset >= entry->bound) &&
+                           (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
+                               valid = 1;
+                               break;
+                       }
+               }
+               if (!list_empty(&dev->agp->memory) && !valid) {
+                       kfree(map);
+                       return -EPERM;
+               }
+               DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
+                         (unsigned long long)map->offset, map->size);
+
+               break;
+       }
+       case _DRM_GEM:
+               DRM_ERROR("tried to addmap GEM object\n");
+               break;
+       case _DRM_SCATTER_GATHER:
+               if (!dev->sg) {
+                       kfree(map);
+                       return -EINVAL;
+               }
+               map->offset += (unsigned long)dev->sg->virtual;
+               break;
+       case _DRM_CONSISTENT:
+               /* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G,
+                * As we're limiting the address to 2^32-1 (or less),
+                * casting it down to 32 bits is no problem, but we
+                * need to point to a 64bit variable first. */
+               dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
+               if (!dmah) {
+                       kfree(map);
+                       return -ENOMEM;
+               }
+               map->handle = dmah->vaddr;
+               map->offset = (unsigned long)dmah->busaddr;
+               kfree(dmah);
+               break;
+       default:
+               kfree(map);
+               return -EINVAL;
+       }
+
+       list = kmalloc(sizeof(*list), GFP_KERNEL);
+       if (!list) {
+               if (map->type == _DRM_REGISTERS)
+                       iounmap(map->handle);
+               kfree(map);
+               return -EINVAL;
+       }
+       memset(list, 0, sizeof(*list));
+       list->map = map;
+
+       mutex_lock(&dev->struct_mutex);
+       list_add(&list->head, &dev->maplist);
+
+       /* Assign a 32-bit handle */
+       /* We do it here so that dev->struct_mutex protects the increment */
+       user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
+               map->offset;
+       ret = drm_map_handle(dev, &list->hash, user_token, 0,
+                            (map->type == _DRM_SHM));
+       if (ret) {
+               if (map->type == _DRM_REGISTERS)
+                       iounmap(map->handle);
+               kfree(map);
+               kfree(list);
+               mutex_unlock(&dev->struct_mutex);
+               return ret;
+       }
+
+       list->user_token = list->hash.key << PAGE_SHIFT;
+       mutex_unlock(&dev->struct_mutex);
+
+       if (!(map->flags & _DRM_DRIVER))
+               list->master = dev->primary->master;
+       *maplist = list;
+       return 0;
+}
+
+int drm_addmap(struct drm_device * dev, resource_size_t offset,
+              unsigned int size, enum drm_map_type type,
+              enum drm_map_flags flags, struct drm_local_map ** map_ptr)
+{
+       struct drm_map_list *list;
+       int rc;
+
+       rc = drm_addmap_core(dev, offset, size, type, flags, &list);
+       if (!rc)
+               *map_ptr = list->map;
+       return rc;
+}
+
+EXPORT_SYMBOL(drm_addmap);
+
+/**
+ * Ioctl to specify a range of memory that is available for mapping by a
+ * non-root process.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_map structure.
+ * \return zero on success or a negative value on error.
+ *
+ */
+int drm_addmap_ioctl(struct drm_device *dev, void *data,
+                    struct drm_file *file_priv)
+{
+       struct drm_map *map = data;
+       struct drm_map_list *maplist;
+       int err;
+
+       if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
+               return -EPERM;
+
+       err = drm_addmap_core(dev, map->offset, map->size, map->type,
+                             map->flags, &maplist);
+
+       if (err)
+               return err;
+
+       /* avoid a warning on 64-bit, this casting isn't very nice, but the API is set so too late */
+       map->handle = (void *)(unsigned long)maplist->user_token;
+       return 0;
+}
+
+/**
+ * Remove a map private from list and deallocate resources if the mapping
+ * isn't in use.
+ *
+ * Searches for the map on drm_device::maplist, removes it from the list, sees
+ * if it is being used, and frees any associated resources (such as MTRRs) if
+ * it is not in use.
+ *
+ * \sa drm_addmap
+ */
+int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
+{
+       struct drm_map_list *r_list = NULL, *list_t;
+       drm_dma_handle_t dmah;
+       int found = 0;
+       struct drm_master *master;
+
+       /* Find the list entry for the map and remove it */
+       list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
+               if (r_list->map == map) {
+                       master = r_list->master;
+                       list_del(&r_list->head);
+                       drm_ht_remove_key(&dev->map_hash,
+                                         r_list->user_token >> PAGE_SHIFT);
+                       kfree(r_list);
+                       found = 1;
+                       break;
+               }
+       }
+
+       if (!found)
+               return -EINVAL;
+
+       switch (map->type) {
+       case _DRM_REGISTERS:
+               iounmap(map->handle);
+               /* FALLTHROUGH */
+       case _DRM_FRAME_BUFFER:
+               if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
+                       int retcode;
+                       retcode = mtrr_del(map->mtrr, map->offset, map->size);
+                       DRM_DEBUG("mtrr_del=%d\n", retcode);
+               }
+               break;
+       case _DRM_SHM:
+               vfree(map->handle);
+               if (master) {
+                       if (dev->sigdata.lock == master->lock.hw_lock)
+                               dev->sigdata.lock = NULL;
+                       master->lock.hw_lock = NULL;   /* SHM removed */
+                       master->lock.file_priv = NULL;
+                       wake_up_interruptible_all(&master->lock.lock_queue);
+               }
+               break;
+       case _DRM_AGP:
+       case _DRM_SCATTER_GATHER:
+               break;
+       case _DRM_CONSISTENT:
+               dmah.vaddr = map->handle;
+               dmah.busaddr = map->offset;
+               dmah.size = map->size;
+               __drm_pci_free(dev, &dmah);
+               break;
+       case _DRM_GEM:
+               DRM_ERROR("tried to rmmap GEM object\n");
+               break;
+       }
+       kfree(map);
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_rmmap_locked);
+
+int drm_rmmap(struct drm_device *dev, struct drm_local_map *map)
+{
+       int ret;
+
+       mutex_lock(&dev->struct_mutex);
+       ret = drm_rmmap_locked(dev, map);
+       mutex_unlock(&dev->struct_mutex);
+
+       return ret;
+}
+EXPORT_SYMBOL(drm_rmmap);
+
+/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
+ * the last close of the device, and this is necessary for cleanup when things
+ * exit uncleanly.  Therefore, having userland manually remove mappings seems
+ * like a pointless exercise since they're going away anyway.
+ *
+ * One use case might be after addmap is allowed for normal users for SHM and
+ * gets used by drivers that the server doesn't need to care about.  This seems
+ * unlikely.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a struct drm_map structure.
+ * \return zero on success or a negative value on error.
+ */
+int drm_rmmap_ioctl(struct drm_device *dev, void *data,
+                   struct drm_file *file_priv)
+{
+       struct drm_map *request = data;
+       struct drm_local_map *map = NULL;
+       struct drm_map_list *r_list;
+       int ret;
+
+       mutex_lock(&dev->struct_mutex);
+       list_for_each_entry(r_list, &dev->maplist, head) {
+               if (r_list->map &&
+                   r_list->user_token == (unsigned long)request->handle &&
+                   r_list->map->flags & _DRM_REMOVABLE) {
+                       map = r_list->map;
+                       break;
+               }
+       }
+
+       /* List has wrapped around to the head pointer, or it's empty and we
+        * didn't find anything.
+        */
+       if (list_empty(&dev->maplist) || !map) {
+               mutex_unlock(&dev->struct_mutex);
+               return -EINVAL;
+       }
+
+       /* Register and framebuffer maps are permanent */
+       if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
+               mutex_unlock(&dev->struct_mutex);
+               return 0;
+       }
+
+       ret = drm_rmmap_locked(dev, map);
+
+       mutex_unlock(&dev->struct_mutex);
+
+       return ret;
+}
+
+/**
+ * Cleanup after an error on one of the addbufs() functions.
+ *
+ * \param dev DRM device.
+ * \param entry buffer entry where the error occurred.
+ *
+ * Frees any pages and buffers associated with the given entry.
+ */
+static void drm_cleanup_buf_error(struct drm_device * dev,
+                                 struct drm_buf_entry * entry)
+{
+       int i;
+
+       if (entry->seg_count) {
+               for (i = 0; i < entry->seg_count; i++) {
+                       if (entry->seglist[i]) {
+                               drm_pci_free(dev, entry->seglist[i]);
+                       }
+               }
+               kfree(entry->seglist);
+
+               entry->seg_count = 0;
+       }
+
+       if (entry->buf_count) {
+               for (i = 0; i < entry->buf_count; i++) {
+                       kfree(entry->buflist[i].dev_private);
+               }
+               kfree(entry->buflist);
+
+               entry->buf_count = 0;
+       }
+}
+
+#if __OS_HAS_AGP
+/**
+ * Add AGP buffers for DMA transfers.
+ *
+ * \param dev struct drm_device to which the buffers are to be added.
+ * \param request pointer to a struct drm_buf_desc describing the request.
+ * \return zero on success or a negative number on failure.
+ *
+ * After some sanity checks creates a drm_buf structure for each buffer and
+ * reallocates the buffer list of the same size order to accommodate the new
+ * buffers.
+ */
+int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
+{
+       struct drm_device_dma *dma = dev->dma;
+       struct drm_buf_entry *entry;
+       struct drm_agp_mem *agp_entry;
+       struct drm_buf *buf;
+       unsigned long offset;
+       unsigned long agp_offset;
+       int count;
+       int order;
+       int size;
+       int alignment;
+       int page_order;
+       int total;
+       int byte_count;
+       int i, valid;
+       struct drm_buf **temp_buflist;
+
+       if (!dma)
+               return -EINVAL;
+
+       count = request->count;
+       order = drm_order(request->size);
+       size = 1 << order;
+
+       alignment = (request->flags & _DRM_PAGE_ALIGN)
+           ? PAGE_ALIGN(size) : size;
+       page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+       total = PAGE_SIZE << page_order;
+
+       byte_count = 0;
+       agp_offset = dev->agp->base + request->agp_start;
+
+       DRM_DEBUG("count:      %d\n", count);
+       DRM_DEBUG("order:      %d\n", order);
+       DRM_DEBUG("size:       %d\n", size);
+       DRM_DEBUG("agp_offset: %lx\n", agp_offset);
+       DRM_DEBUG("alignment:  %d\n", alignment);
+       DRM_DEBUG("page_order: %d\n", page_order);
+       DRM_DEBUG("total:      %d\n", total);
+
+       if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
+               return -EINVAL;
+       if (dev->queue_count)
+               return -EBUSY;  /* Not while in use */
+
+       /* Make sure buffers are located in AGP memory that we own */
+       valid = 0;
+       list_for_each_entry(agp_entry, &dev->agp->memory, head) {
+               if ((agp_offset >= agp_entry->bound) &&
+                   (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
+                       valid = 1;
+                       break;
+               }
+       }
+       if (!list_empty(&dev->agp->memory) && !valid) {
+               DRM_DEBUG("zone invalid\n");
+               return -EINVAL;
+       }
+       spin_lock(&dev->count_lock);
+       if (dev->buf_use) {
+               spin_unlock(&dev->count_lock);
+               return -EBUSY;
+       }
+       atomic_inc(&dev->buf_alloc);
+       spin_unlock(&dev->count_lock);
+
+       mutex_lock(&dev->struct_mutex);
+       entry = &dma->bufs[order];
+       if (entry->buf_count) {
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -ENOMEM; /* May only call once for each order */
+       }
+
+       if (count < 0 || count > 4096) {
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -EINVAL;
+       }
+
+       entry->buflist = kmalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
+       if (!entry->buflist) {
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -ENOMEM;
+       }
+       memset(entry->buflist, 0, count * sizeof(*entry->buflist));
+
+       entry->buf_size = size;
+       entry->page_order = page_order;
+
+       offset = 0;
+
+       while (entry->buf_count < count) {
+               buf = &entry->buflist[entry->buf_count];
+               buf->idx = dma->buf_count + entry->buf_count;
+               buf->total = alignment;
+               buf->order = order;
+               buf->used = 0;
+
+               buf->offset = (dma->byte_count + offset);
+               buf->bus_address = agp_offset + offset;
+               buf->address = (void *)(agp_offset + offset);
+               buf->next = NULL;
+               buf->waiting = 0;
+               buf->pending = 0;
+               init_waitqueue_head(&buf->dma_wait);
+               buf->file_priv = NULL;
+
+               buf->dev_priv_size = dev->driver->dev_priv_size;
+               buf->dev_private = kmalloc(buf->dev_priv_size, GFP_KERNEL);
+               if (!buf->dev_private) {
+                       /* Set count correctly so we free the proper amount. */
+                       entry->buf_count = count;
+                       drm_cleanup_buf_error(dev, entry);
+                       mutex_unlock(&dev->struct_mutex);
+                       atomic_dec(&dev->buf_alloc);
+                       return -ENOMEM;
+               }
+               memset(buf->dev_private, 0, buf->dev_priv_size);
+
+               DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
+
+               offset += alignment;
+               entry->buf_count++;
+               byte_count += PAGE_SIZE << page_order;
+       }
+
+       DRM_DEBUG("byte_count: %d\n", byte_count);
+
+       temp_buflist = krealloc(dma->buflist,
+                               (dma->buf_count + entry->buf_count) *
+                               sizeof(*dma->buflist), GFP_KERNEL);
+       if (!temp_buflist) {
+               /* Free the entry because it isn't valid */
+               drm_cleanup_buf_error(dev, entry);
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -ENOMEM;
+       }
+       dma->buflist = temp_buflist;
+
+       for (i = 0; i < entry->buf_count; i++) {
+               dma->buflist[i + dma->buf_count] = &entry->buflist[i];
+       }
+
+       dma->buf_count += entry->buf_count;
+       dma->seg_count += entry->seg_count;
+       dma->page_count += byte_count >> PAGE_SHIFT;
+       dma->byte_count += byte_count;
+
+       DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
+       DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
+
+       mutex_unlock(&dev->struct_mutex);
+
+       request->count = entry->buf_count;
+       request->size = size;
+
+       dma->flags = _DRM_DMA_USE_AGP;
+
+       atomic_dec(&dev->buf_alloc);
+       return 0;
+}
+EXPORT_SYMBOL(drm_addbufs_agp);
+#endif                         /* __OS_HAS_AGP */
+
+/**
+ * Allocate consistent PCI DMA buffers for one size order.
+ *
+ * \param dev DRM device.
+ * \param request requested count/size/flags; updated on success with the
+ *        actual buffer count and the power-of-two rounded size.
+ * \return zero on success or a negative errno on failure.
+ *
+ * Rounds request->size up to a power of two, allocates page-order DMA
+ * segments with drm_pci_alloc(), carves each segment into buffers and
+ * publishes them on dev->dma.  May only be called once per size order.
+ */
+int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
+{
+       struct drm_device_dma *dma = dev->dma;
+       int count;
+       int order;
+       int size;
+       int total;
+       int page_order;
+       struct drm_buf_entry *entry;
+       drm_dma_handle_t *dmah;
+       struct drm_buf *buf;
+       int alignment;
+       unsigned long offset;
+       int i;
+       int byte_count;
+       int page_count;
+       unsigned long *temp_pagelist;
+       struct drm_buf **temp_buflist;
+
+       if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
+               return -EINVAL;
+
+       if (!dma)
+               return -EINVAL;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       /* Buffer size is rounded up to the next power of two */
+       count = request->count;
+       order = drm_order(request->size);
+       size = 1 << order;
+
+       DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n",
+                 request->count, request->size, size, order, dev->queue_count);
+
+       if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
+               return -EINVAL;
+       if (dev->queue_count)
+               return -EBUSY;  /* Not while in use */
+
+       alignment = (request->flags & _DRM_PAGE_ALIGN)
+           ? PAGE_ALIGN(size) : size;
+       page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+       total = PAGE_SIZE << page_order;
+
+       /* No new allocations once drm_infobufs()/drm_mapbufs() have set
+        * buf_use; buf_alloc marks an allocation in progress to them.
+        */
+       spin_lock(&dev->count_lock);
+       if (dev->buf_use) {
+               spin_unlock(&dev->count_lock);
+               return -EBUSY;
+       }
+       atomic_inc(&dev->buf_alloc);
+       spin_unlock(&dev->count_lock);
+
+       mutex_lock(&dev->struct_mutex);
+       entry = &dma->bufs[order];
+       if (entry->buf_count) {
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -ENOMEM; /* May only call once for each order */
+       }
+
+       if (count < 0 || count > 4096) {
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -EINVAL;
+       }
+
+       entry->buflist = kmalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
+       if (!entry->buflist) {
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -ENOMEM;
+       }
+       memset(entry->buflist, 0, count * sizeof(*entry->buflist));
+
+       entry->seglist = kmalloc(count * sizeof(*entry->seglist), GFP_KERNEL);
+       if (!entry->seglist) {
+               kfree(entry->buflist);
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -ENOMEM;
+       }
+       memset(entry->seglist, 0, count * sizeof(*entry->seglist));
+
+       /* Keep the original pagelist until we know all the allocations
+        * have succeeded
+        */
+       temp_pagelist = kmalloc((dma->page_count + (count << page_order)) *
+                              sizeof(*dma->pagelist), GFP_KERNEL);
+       if (!temp_pagelist) {
+               kfree(entry->buflist);
+               kfree(entry->seglist);
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -ENOMEM;
+       }
+       memcpy(temp_pagelist,
+              dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
+       DRM_DEBUG("pagelist: %d entries\n",
+                 dma->page_count + (count << page_order));
+
+       entry->buf_size = size;
+       entry->page_order = page_order;
+       byte_count = 0;
+       page_count = 0;
+
+       /* Allocate one DMA segment per iteration and carve it into
+        * alignment-sized buffers.
+        */
+       while (entry->buf_count < count) {
+
+               dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful);
+
+               if (!dmah) {
+                       /* Set count correctly so we free the proper amount. */
+                       entry->buf_count = count;
+                       entry->seg_count = count;
+                       drm_cleanup_buf_error(dev, entry);
+                       kfree(temp_pagelist);
+                       mutex_unlock(&dev->struct_mutex);
+                       atomic_dec(&dev->buf_alloc);
+                       return -ENOMEM;
+               }
+               entry->seglist[entry->seg_count++] = dmah;
+               for (i = 0; i < (1 << page_order); i++) {
+                       DRM_DEBUG("page %d @ 0x%08lx\n",
+                                 dma->page_count + page_count,
+                                 (unsigned long)dmah->vaddr + PAGE_SIZE * i);
+                       temp_pagelist[dma->page_count + page_count++]
+                               = (unsigned long)dmah->vaddr + PAGE_SIZE * i;
+               }
+               for (offset = 0;
+                    offset + size <= total && entry->buf_count < count;
+                    offset += alignment, ++entry->buf_count) {
+                       buf = &entry->buflist[entry->buf_count];
+                       buf->idx = dma->buf_count + entry->buf_count;
+                       buf->total = alignment;
+                       buf->order = order;
+                       buf->used = 0;
+                       buf->offset = (dma->byte_count + byte_count + offset);
+                       buf->address = (void *)(dmah->vaddr + offset);
+                       buf->bus_address = dmah->busaddr + offset;
+                       buf->next = NULL;
+                       buf->waiting = 0;
+                       buf->pending = 0;
+                       init_waitqueue_head(&buf->dma_wait);
+                       buf->file_priv = NULL;
+
+                       buf->dev_priv_size = dev->driver->dev_priv_size;
+                       buf->dev_private = kmalloc(buf->dev_priv_size,
+                                                 GFP_KERNEL);
+                       if (!buf->dev_private) {
+                               /* Set count correctly so we free the proper amount. */
+                               entry->buf_count = count;
+                               entry->seg_count = count;
+                               drm_cleanup_buf_error(dev, entry);
+                               kfree(temp_pagelist);
+                               mutex_unlock(&dev->struct_mutex);
+                               atomic_dec(&dev->buf_alloc);
+                               return -ENOMEM;
+                       }
+                       memset(buf->dev_private, 0, buf->dev_priv_size);
+
+                       DRM_DEBUG("buffer %d @ %p\n",
+                                 entry->buf_count, buf->address);
+               }
+               byte_count += PAGE_SIZE << page_order;
+       }
+
+       temp_buflist = krealloc(dma->buflist,
+                               (dma->buf_count + entry->buf_count) *
+                               sizeof(*dma->buflist), GFP_KERNEL);
+       if (!temp_buflist) {
+               /* Free the entry because it isn't valid */
+               drm_cleanup_buf_error(dev, entry);
+               kfree(temp_pagelist);
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -ENOMEM;
+       }
+       dma->buflist = temp_buflist;
+
+       for (i = 0; i < entry->buf_count; i++) {
+               dma->buflist[i + dma->buf_count] = &entry->buflist[i];
+       }
+
+       /* No allocations failed, so now we can replace the original pagelist
+        * with the new one.
+        */
+       if (dma->page_count) {
+               kfree(dma->pagelist);
+       }
+       dma->pagelist = temp_pagelist;
+
+       dma->buf_count += entry->buf_count;
+       dma->seg_count += entry->seg_count;
+       dma->page_count += entry->seg_count << page_order;
+       dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
+
+       mutex_unlock(&dev->struct_mutex);
+
+       request->count = entry->buf_count;
+       request->size = size;
+
+       if (request->flags & _DRM_PCI_BUFFER_RO)
+               dma->flags = _DRM_DMA_USE_PCI_RO;
+
+       atomic_dec(&dev->buf_alloc);
+       return 0;
+
+}
+EXPORT_SYMBOL(drm_addbufs_pci);
+
+/**
+ * Allocate DMA buffers inside the scatter/gather aperture.
+ *
+ * \param dev DRM device.
+ * \param request requested count/size/flags; request->agp_start is the
+ *        byte offset into the SG area.  Updated on success with the
+ *        actual count and the power-of-two rounded size.
+ * \return zero on success or a negative errno on failure.
+ *
+ * Allocates no backing memory itself: buffer addresses are computed as
+ * offsets from dev->sg->virtual.  May only be called once per size order.
+ */
+static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request)
+{
+       struct drm_device_dma *dma = dev->dma;
+       struct drm_buf_entry *entry;
+       struct drm_buf *buf;
+       unsigned long offset;
+       unsigned long agp_offset;
+       int count;
+       int order;
+       int size;
+       int alignment;
+       int page_order;
+       int total;
+       int byte_count;
+       int i;
+       struct drm_buf **temp_buflist;
+
+       if (!drm_core_check_feature(dev, DRIVER_SG))
+               return -EINVAL;
+
+       if (!dma)
+               return -EINVAL;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       /* Buffer size is rounded up to the next power of two */
+       count = request->count;
+       order = drm_order(request->size);
+       size = 1 << order;
+
+       alignment = (request->flags & _DRM_PAGE_ALIGN)
+           ? PAGE_ALIGN(size) : size;
+       page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+       total = PAGE_SIZE << page_order;
+
+       byte_count = 0;
+       agp_offset = request->agp_start;
+
+       DRM_DEBUG("count:      %d\n", count);
+       DRM_DEBUG("order:      %d\n", order);
+       DRM_DEBUG("size:       %d\n", size);
+       DRM_DEBUG("agp_offset: %lu\n", agp_offset);
+       DRM_DEBUG("alignment:  %d\n", alignment);
+       DRM_DEBUG("page_order: %d\n", page_order);
+       DRM_DEBUG("total:      %d\n", total);
+
+       if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
+               return -EINVAL;
+       if (dev->queue_count)
+               return -EBUSY;  /* Not while in use */
+
+       /* No new allocations once drm_infobufs()/drm_mapbufs() have set
+        * buf_use; buf_alloc marks an allocation in progress to them.
+        */
+       spin_lock(&dev->count_lock);
+       if (dev->buf_use) {
+               spin_unlock(&dev->count_lock);
+               return -EBUSY;
+       }
+       atomic_inc(&dev->buf_alloc);
+       spin_unlock(&dev->count_lock);
+
+       mutex_lock(&dev->struct_mutex);
+       entry = &dma->bufs[order];
+       if (entry->buf_count) {
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -ENOMEM; /* May only call once for each order */
+       }
+
+       if (count < 0 || count > 4096) {
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -EINVAL;
+       }
+
+       entry->buflist = kmalloc(count * sizeof(*entry->buflist),
+                               GFP_KERNEL);
+       if (!entry->buflist) {
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -ENOMEM;
+       }
+       memset(entry->buflist, 0, count * sizeof(*entry->buflist));
+
+       entry->buf_size = size;
+       entry->page_order = page_order;
+
+       offset = 0;
+
+       /* Carve the SG region into alignment-sized buffers */
+       while (entry->buf_count < count) {
+               buf = &entry->buflist[entry->buf_count];
+               buf->idx = dma->buf_count + entry->buf_count;
+               buf->total = alignment;
+               buf->order = order;
+               buf->used = 0;
+
+               buf->offset = (dma->byte_count + offset);
+               buf->bus_address = agp_offset + offset;
+               buf->address = (void *)(agp_offset + offset
+                                       + (unsigned long)dev->sg->virtual);
+               buf->next = NULL;
+               buf->waiting = 0;
+               buf->pending = 0;
+               init_waitqueue_head(&buf->dma_wait);
+               buf->file_priv = NULL;
+
+               buf->dev_priv_size = dev->driver->dev_priv_size;
+               buf->dev_private = kmalloc(buf->dev_priv_size, GFP_KERNEL);
+               if (!buf->dev_private) {
+                       /* Set count correctly so we free the proper amount. */
+                       entry->buf_count = count;
+                       drm_cleanup_buf_error(dev, entry);
+                       mutex_unlock(&dev->struct_mutex);
+                       atomic_dec(&dev->buf_alloc);
+                       return -ENOMEM;
+               }
+
+               memset(buf->dev_private, 0, buf->dev_priv_size);
+
+               DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
+
+               offset += alignment;
+               entry->buf_count++;
+               byte_count += PAGE_SIZE << page_order;
+       }
+
+       DRM_DEBUG("byte_count: %d\n", byte_count);
+
+       temp_buflist = krealloc(dma->buflist,
+                               (dma->buf_count + entry->buf_count) *
+                               sizeof(*dma->buflist), GFP_KERNEL);
+       if (!temp_buflist) {
+               /* Free the entry because it isn't valid */
+               drm_cleanup_buf_error(dev, entry);
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -ENOMEM;
+       }
+       dma->buflist = temp_buflist;
+
+       for (i = 0; i < entry->buf_count; i++) {
+               dma->buflist[i + dma->buf_count] = &entry->buflist[i];
+       }
+
+       dma->buf_count += entry->buf_count;
+       dma->seg_count += entry->seg_count;
+       dma->page_count += byte_count >> PAGE_SHIFT;
+       dma->byte_count += byte_count;
+
+       DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
+       DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
+
+       mutex_unlock(&dev->struct_mutex);
+
+       request->count = entry->buf_count;
+       request->size = size;
+
+       dma->flags = _DRM_DMA_USE_SG;
+
+       atomic_dec(&dev->buf_alloc);
+       return 0;
+}
+
+/**
+ * Allocate DMA buffers backed by pre-existing framebuffer memory.
+ *
+ * \param dev DRM device.
+ * \param request requested count/size/flags; request->agp_start is the
+ *        base offset buffer bus addresses are computed from.  Updated on
+ *        success with the actual count and the power-of-two rounded size.
+ * \return zero on success or a negative errno on failure.
+ *
+ * Allocates no backing memory itself; buffers are carved out of the
+ * region starting at request->agp_start.  May only be called once per
+ * size order.
+ */
+static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request)
+{
+       struct drm_device_dma *dma = dev->dma;
+       struct drm_buf_entry *entry;
+       struct drm_buf *buf;
+       unsigned long offset;
+       unsigned long agp_offset;
+       int count;
+       int order;
+       int size;
+       int alignment;
+       int page_order;
+       int total;
+       int byte_count;
+       int i;
+       struct drm_buf **temp_buflist;
+
+       if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
+               return -EINVAL;
+
+       if (!dma)
+               return -EINVAL;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       /* Buffer size is rounded up to the next power of two */
+       count = request->count;
+       order = drm_order(request->size);
+       size = 1 << order;
+
+       alignment = (request->flags & _DRM_PAGE_ALIGN)
+           ? PAGE_ALIGN(size) : size;
+       page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+       total = PAGE_SIZE << page_order;
+
+       byte_count = 0;
+       agp_offset = request->agp_start;
+
+       DRM_DEBUG("count:      %d\n", count);
+       DRM_DEBUG("order:      %d\n", order);
+       DRM_DEBUG("size:       %d\n", size);
+       DRM_DEBUG("agp_offset: %lu\n", agp_offset);
+       DRM_DEBUG("alignment:  %d\n", alignment);
+       DRM_DEBUG("page_order: %d\n", page_order);
+       DRM_DEBUG("total:      %d\n", total);
+
+       if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
+               return -EINVAL;
+       if (dev->queue_count)
+               return -EBUSY;  /* Not while in use */
+
+       /* No new allocations once drm_infobufs()/drm_mapbufs() have set
+        * buf_use; buf_alloc marks an allocation in progress to them.
+        */
+       spin_lock(&dev->count_lock);
+       if (dev->buf_use) {
+               spin_unlock(&dev->count_lock);
+               return -EBUSY;
+       }
+       atomic_inc(&dev->buf_alloc);
+       spin_unlock(&dev->count_lock);
+
+       mutex_lock(&dev->struct_mutex);
+       entry = &dma->bufs[order];
+       if (entry->buf_count) {
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -ENOMEM; /* May only call once for each order */
+       }
+
+       if (count < 0 || count > 4096) {
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -EINVAL;
+       }
+
+       entry->buflist = kmalloc(count * sizeof(*entry->buflist),
+                               GFP_KERNEL);
+       if (!entry->buflist) {
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -ENOMEM;
+       }
+       memset(entry->buflist, 0, count * sizeof(*entry->buflist));
+
+       entry->buf_size = size;
+       entry->page_order = page_order;
+
+       offset = 0;
+
+       /* Carve the region into alignment-sized buffers */
+       while (entry->buf_count < count) {
+               buf = &entry->buflist[entry->buf_count];
+               buf->idx = dma->buf_count + entry->buf_count;
+               buf->total = alignment;
+               buf->order = order;
+               buf->used = 0;
+
+               buf->offset = (dma->byte_count + offset);
+               buf->bus_address = agp_offset + offset;
+               buf->address = (void *)(agp_offset + offset);
+               buf->next = NULL;
+               buf->waiting = 0;
+               buf->pending = 0;
+               init_waitqueue_head(&buf->dma_wait);
+               buf->file_priv = NULL;
+
+               buf->dev_priv_size = dev->driver->dev_priv_size;
+               buf->dev_private = kmalloc(buf->dev_priv_size, GFP_KERNEL);
+               if (!buf->dev_private) {
+                       /* Set count correctly so we free the proper amount. */
+                       entry->buf_count = count;
+                       drm_cleanup_buf_error(dev, entry);
+                       mutex_unlock(&dev->struct_mutex);
+                       atomic_dec(&dev->buf_alloc);
+                       return -ENOMEM;
+               }
+               memset(buf->dev_private, 0, buf->dev_priv_size);
+
+               DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
+
+               offset += alignment;
+               entry->buf_count++;
+               byte_count += PAGE_SIZE << page_order;
+       }
+
+       DRM_DEBUG("byte_count: %d\n", byte_count);
+
+       temp_buflist = krealloc(dma->buflist,
+                               (dma->buf_count + entry->buf_count) *
+                               sizeof(*dma->buflist), GFP_KERNEL);
+       if (!temp_buflist) {
+               /* Free the entry because it isn't valid */
+               drm_cleanup_buf_error(dev, entry);
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -ENOMEM;
+       }
+       dma->buflist = temp_buflist;
+
+       for (i = 0; i < entry->buf_count; i++) {
+               dma->buflist[i + dma->buf_count] = &entry->buflist[i];
+       }
+
+       dma->buf_count += entry->buf_count;
+       dma->seg_count += entry->seg_count;
+       dma->page_count += byte_count >> PAGE_SHIFT;
+       dma->byte_count += byte_count;
+
+       DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
+       DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
+
+       mutex_unlock(&dev->struct_mutex);
+
+       request->count = entry->buf_count;
+       request->size = size;
+
+       dma->flags = _DRM_DMA_USE_FB;
+
+       atomic_dec(&dev->buf_alloc);
+       return 0;
+}
+
+
+/**
+ * Add buffers for DMA transfers (ioctl).
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a struct drm_buf_desc request.
+ * \return zero on success or a negative number on failure.
+ *
+ * According with the memory type specified in drm_buf_desc::flags and the
+ * build options, it dispatches the call either to addbufs_agp(),
+ * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
+ * PCI memory respectively.
+ */
+int drm_addbufs(struct drm_device *dev, void *data,
+               struct drm_file *file_priv)
+{
+       struct drm_buf_desc *request = data;
+
+       if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+               return -EINVAL;
+
+       /* Dispatch on the memory type requested in request->flags;
+        * consistent PCI memory is the fallback.
+        */
+#if __OS_HAS_AGP
+       if (request->flags & _DRM_AGP_BUFFER)
+               return drm_addbufs_agp(dev, request);
+#endif
+       if (request->flags & _DRM_SG_BUFFER)
+               return drm_addbufs_sg(dev, request);
+       if (request->flags & _DRM_FB_BUFFER)
+               return drm_addbufs_fb(dev, request);
+
+       return drm_addbufs_pci(dev, request);
+}
+
+/**
+ * Get information about the buffer mappings.
+ *
+ * This was originally meant for debugging purposes, or for use by a sophisticated
+ * client library to determine how best to use the available buffers (e.g.,
+ * large buffers can be used for image transfer).
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_buf_info structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Increments drm_device::buf_use while holding the drm_device::count_lock
+ * lock, preventing of allocating more buffers after this call. Information
+ * about each requested buffer is then copied into user space.
+ */
+int drm_infobufs(struct drm_device *dev, void *data,
+                struct drm_file *file_priv)
+{
+       struct drm_device_dma *dma = dev->dma;
+       struct drm_buf_info *request = data;
+       int i;
+       int count;
+
+       if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+               return -EINVAL;
+
+       if (!dma)
+               return -EINVAL;
+
+       /* Refuse while an addbufs call is still in flight (buf_alloc) */
+       spin_lock(&dev->count_lock);
+       if (atomic_read(&dev->buf_alloc)) {
+               spin_unlock(&dev->count_lock);
+               return -EBUSY;
+       }
+       ++dev->buf_use;         /* Can't allocate more after this call */
+       spin_unlock(&dev->count_lock);
+
+       /* Count the size orders that actually hold buffers */
+       for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
+               if (dma->bufs[i].buf_count)
+                       ++count;
+       }
+
+       DRM_DEBUG("count = %d\n", count);
+
+       /* Copy out only if the caller's list is big enough; the required
+        * count is reported back through request->count either way.
+        */
+       if (request->count >= count) {
+               for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
+                       if (dma->bufs[i].buf_count) {
+                               struct drm_buf_desc __user *to =
+                                   &request->list[count];
+                               struct drm_buf_entry *from = &dma->bufs[i];
+                               struct drm_freelist *list = &dma->bufs[i].freelist;
+                               if (copy_to_user(&to->count,
+                                                &from->buf_count,
+                                                sizeof(from->buf_count)) ||
+                                   copy_to_user(&to->size,
+                                                &from->buf_size,
+                                                sizeof(from->buf_size)) ||
+                                   copy_to_user(&to->low_mark,
+                                                &list->low_mark,
+                                                sizeof(list->low_mark)) ||
+                                   copy_to_user(&to->high_mark,
+                                                &list->high_mark,
+                                                sizeof(list->high_mark)))
+                                       return -EFAULT;
+
+                               DRM_DEBUG("%d %d %d %d %d\n",
+                                         i,
+                                         dma->bufs[i].buf_count,
+                                         dma->bufs[i].buf_size,
+                                         dma->bufs[i].freelist.low_mark,
+                                         dma->bufs[i].freelist.high_mark);
+                               ++count;
+                       }
+               }
+       }
+       request->count = count;
+
+       return 0;
+}
+
+/**
+ * Specifies a low and high water mark for buffer allocation
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg a pointer to a drm_buf_desc structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies that the size order is bounded between the admissible orders and
+ * updates the respective drm_device_dma::bufs entry low and high water mark.
+ *
+ * \note This ioctl is deprecated and mostly never used.
+ */
+/* Set the freelist low/high water marks for one buffer size order.
+ * Both marks must lie within [0, buf_count] for that order.
+ */
+int drm_markbufs(struct drm_device *dev, void *data,
+                struct drm_file *file_priv)
+{
+       struct drm_device_dma *dma = dev->dma;
+       struct drm_buf_desc *request = data;
+       struct drm_buf_entry *entry;
+       int order;
+
+       if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+               return -EINVAL;
+
+       if (!dma)
+               return -EINVAL;
+
+       DRM_DEBUG("%d, %d, %d\n",
+                 request->size, request->low_mark, request->high_mark);
+
+       order = drm_order(request->size);
+       if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
+               return -EINVAL;
+
+       entry = &dma->bufs[order];
+       if (request->low_mark < 0 || request->low_mark > entry->buf_count ||
+           request->high_mark < 0 || request->high_mark > entry->buf_count)
+               return -EINVAL;
+
+       entry->freelist.low_mark = request->low_mark;
+       entry->freelist.high_mark = request->high_mark;
+
+       return 0;
+}
+
+/**
+ * Unreserve the buffers in list, previously reserved using drmDMA.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_buf_free structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Calls free_buffer() for each used buffer.
+ * This function is primarily used for debugging.
+ */
+int drm_freebufs(struct drm_device *dev, void *data,
+                struct drm_file *file_priv)
+{
+       struct drm_device_dma *dma = dev->dma;
+       struct drm_buf_free *request = data;
+       int i;
+       int idx;
+       struct drm_buf *buf;
+
+       if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+               return -EINVAL;
+
+       if (!dma)
+               return -EINVAL;
+
+       DRM_DEBUG("%d\n", request->count);
+       /* request->list is a user-space array of buffer indices; only the
+        * file that currently owns a buffer may free it.
+        */
+       for (i = 0; i < request->count; i++) {
+               if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
+                       return -EFAULT;
+               if (idx < 0 || idx >= dma->buf_count) {
+                       DRM_ERROR("Index %d (of %d max)\n",
+                                 idx, dma->buf_count - 1);
+                       return -EINVAL;
+               }
+               buf = dma->buflist[idx];
+               if (buf->file_priv != file_priv) {
+                       DRM_ERROR("Process %d freeing buffer not owned\n",
+                                 task_pid_nr(current));
+                       return -EINVAL;
+               }
+               drm_free_buffer(dev, buf);
+       }
+
+       return 0;
+}
+
+/**
+ * Maps all of the DMA buffers into client-virtual space (ioctl).
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_buf_map structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Maps the AGP, SG or PCI buffer region with do_mmap(), and copies information
+ * about each buffer into user space. For PCI buffers, it calls do_mmap() with
+ * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
+ * drm_mmap_dma().
+ */
+int drm_mapbufs(struct drm_device *dev, void *data,
+               struct drm_file *file_priv)
+{
+       struct drm_device_dma *dma = dev->dma;
+       int retcode = 0;
+       const int zero = 0;
+       unsigned long virtual;
+       unsigned long address;
+       struct drm_buf_map *request = data;
+       int i;
+
+       if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+               return -EINVAL;
+
+       if (!dma)
+               return -EINVAL;
+
+       /* Refuse while an addbufs call is still in flight (buf_alloc) */
+       spin_lock(&dev->count_lock);
+       if (atomic_read(&dev->buf_alloc)) {
+               spin_unlock(&dev->count_lock);
+               return -EBUSY;
+       }
+       dev->buf_use++;         /* Can't allocate more after this call */
+       spin_unlock(&dev->count_lock);
+
+       /* Only map when the caller's list can describe every buffer */
+       if (request->count >= dma->buf_count) {
+               /* AGP/SG/FB buffers are mapped through agp_buffer_map via
+                * its token; plain PCI buffers map at offset 0.
+                */
+               if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
+                   || (drm_core_check_feature(dev, DRIVER_SG)
+                       && (dma->flags & _DRM_DMA_USE_SG))
+                   || (drm_core_check_feature(dev, DRIVER_FB_DMA)
+                       && (dma->flags & _DRM_DMA_USE_FB))) {
+                       struct drm_local_map *map = dev->agp_buffer_map;
+                       unsigned long token = dev->agp_buffer_token;
+
+                       if (!map) {
+                               retcode = -EINVAL;
+                               goto done;
+                       }
+                       down_write(&current->mm->mmap_sem);
+                       virtual = do_mmap(file_priv->filp, 0, map->size,
+                                         PROT_READ | PROT_WRITE,
+                                         MAP_SHARED,
+                                         token);
+                       up_write(&current->mm->mmap_sem);
+               } else {
+                       down_write(&current->mm->mmap_sem);
+                       virtual = do_mmap(file_priv->filp, 0, dma->byte_count,
+                                         PROT_READ | PROT_WRITE,
+                                         MAP_SHARED, 0);
+                       up_write(&current->mm->mmap_sem);
+               }
+               /* do_mmap() encodes failure as a -errno value in the top
+                * 1024 addresses.
+                */
+               if (virtual > -1024UL) {
+                       /* Real error */
+                       retcode = (signed long)virtual;
+                       goto done;
+               }
+               request->virtual = (void __user *)virtual;
+
+               /* Describe each buffer to user space; 'used' starts at 0 */
+               for (i = 0; i < dma->buf_count; i++) {
+                       if (copy_to_user(&request->list[i].idx,
+                                        &dma->buflist[i]->idx,
+                                        sizeof(request->list[0].idx))) {
+                               retcode = -EFAULT;
+                               goto done;
+                       }
+                       if (copy_to_user(&request->list[i].total,
+                                        &dma->buflist[i]->total,
+                                        sizeof(request->list[0].total))) {
+                               retcode = -EFAULT;
+                               goto done;
+                       }
+                       if (copy_to_user(&request->list[i].used,
+                                        &zero, sizeof(zero))) {
+                               retcode = -EFAULT;
+                               goto done;
+                       }
+                       address = virtual + dma->buflist[i]->offset;    /* *** */
+                       if (copy_to_user(&request->list[i].address,
+                                        &address, sizeof(address))) {
+                               retcode = -EFAULT;
+                               goto done;
+                       }
+               }
+       }
+      done:
+       request->count = dma->buf_count;
+       DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);
+
+       return retcode;
+}
+
+/**
+ * Compute size order.  Returns the exponent of the smaller power of two which
+ * is greater or equal to given number.
+ *
+ * \param size size.
+ * \return order.
+ *
+ * \todo Can be made faster.
+ */
+/* Compute the exponent of the smallest power of two that is greater
+ * than or equal to size (ceil(log2(size)); returns 0 for size <= 1).
+ */
+int drm_order(unsigned long size)
+{
+       unsigned long bits = size >> 1;
+       int order = 0;
+
+       /* floor(log2(size)) ... */
+       while (bits) {
+               bits >>= 1;
+               order++;
+       }
+
+       /* ... rounded up unless size was already a power of two */
+       if (size & (size - 1))
+               ++order;
+
+       return order;
+}
+EXPORT_SYMBOL(drm_order);
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_cache.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_cache.c
new file mode 100644 (file)
index 0000000..0e3bd5b
--- /dev/null
@@ -0,0 +1,99 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include "drmP.h"
+
+#if defined(CONFIG_X86)
+static void
+drm_clflush_page(struct page *page)
+{
+       uint8_t *page_virtual;
+       unsigned int i;
+
+       if (unlikely(page == NULL))
+               return;
+
+       page_virtual = kmap_atomic(page, KM_USER0);
+       for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
+               clflush(page_virtual + i);
+       kunmap_atomic(page_virtual, KM_USER0);
+}
+
+static void drm_cache_flush_clflush(struct page *pages[],
+                                   unsigned long num_pages)
+{
+       unsigned long i;
+
+       mb();
+       for (i = 0; i < num_pages; i++)
+               drm_clflush_page(*pages++);
+       mb();
+}
+
+static void
+drm_clflush_ipi_handler(void *null)
+{
+       wbinvd();
+}
+#endif
+
+void
+drm_clflush_pages(struct page *pages[], unsigned long num_pages)
+{
+
+#if defined(CONFIG_X86)
+       if (cpu_has_clflush) {
+               drm_cache_flush_clflush(pages, num_pages);
+               return;
+       }
+
+       if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
+               printk(KERN_ERR "Timed out waiting for cache flush.\n");
+
+#elif defined(__powerpc__)
+       unsigned long i;
+       for (i = 0; i < num_pages; i++) {
+               struct page *page = pages[i];
+               void *page_virtual;
+
+               if (unlikely(page == NULL))
+                       continue;
+
+               page_virtual = kmap_atomic(page, KM_USER0);
+               flush_dcache_range((unsigned long)page_virtual,
+                                  (unsigned long)page_virtual + PAGE_SIZE);
+               kunmap_atomic(page_virtual, KM_USER0);
+       }
+#else
+       printk(KERN_ERR "Architecture has no drm_cache.c support\n");
+       WARN_ON_ONCE(1);
+#endif
+}
+EXPORT_SYMBOL(drm_clflush_pages);
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_context.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_context.c
new file mode 100644 (file)
index 0000000..2607753
--- /dev/null
@@ -0,0 +1,469 @@
+/**
+ * \file drm_context.c
+ * IOCTLs for generic contexts
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Fri Nov 24 18:31:37 2000 by gareth@valinux.com
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * ChangeLog:
+ *  2001-11-16 Torsten Duwe <duwe@caldera.de>
+ *             added context constructor/destructor hooks,
+ *             needed by SiS driver's memory management.
+ */
+
+#include "drmP.h"
+
+/******************************************************************/
+/** \name Context bitmap support */
+/*@{*/
+
+/**
+ * Free a handle from the context bitmap.
+ *
+ * \param dev DRM device.
+ * \param ctx_handle context handle.
+ *
+ * Clears the bit specified by \p ctx_handle in drm_device::ctx_bitmap and the entry
+ * in drm_device::ctx_idr, while holding the drm_device::struct_mutex
+ * lock.
+ */
+void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
+{
+       mutex_lock(&dev->struct_mutex);
+       idr_remove(&dev->ctx_idr, ctx_handle);
+       mutex_unlock(&dev->struct_mutex);
+}
+
+/**
+ * Context bitmap allocation.
+ *
+ * \param dev DRM device.
+ * \return (non-negative) context handle on success or a negative number on failure.
+ *
+ * Allocate a new idr from drm_device::ctx_idr while holding the
+ * drm_device::struct_mutex lock.
+ */
+static int drm_ctxbitmap_next(struct drm_device * dev)
+{
+       int new_id;
+       int ret;
+
+again:
+       if (idr_pre_get(&dev->ctx_idr, GFP_KERNEL) == 0) {
+               DRM_ERROR("Out of memory expanding drawable idr\n");
+               return -ENOMEM;
+       }
+       mutex_lock(&dev->struct_mutex);
+       ret = idr_get_new_above(&dev->ctx_idr, NULL,
+                               DRM_RESERVED_CONTEXTS, &new_id);
+       if (ret == -EAGAIN) {
+               mutex_unlock(&dev->struct_mutex);
+               goto again;
+       }
+       mutex_unlock(&dev->struct_mutex);
+       return new_id;
+}
+
+/**
+ * Context bitmap initialization.
+ *
+ * \param dev DRM device.
+ *
+ * Initialise the drm_device::ctx_idr
+ */
+int drm_ctxbitmap_init(struct drm_device * dev)
+{
+       idr_init(&dev->ctx_idr);
+       return 0;
+}
+
+/**
+ * Context bitmap cleanup.
+ *
+ * \param dev DRM device.
+ *
+ * Free all idr members using drm_ctx_sarea_free helper function
+ * while holding the drm_device::struct_mutex lock.
+ */
+void drm_ctxbitmap_cleanup(struct drm_device * dev)
+{
+       mutex_lock(&dev->struct_mutex);
+       idr_remove_all(&dev->ctx_idr);
+       mutex_unlock(&dev->struct_mutex);
+}
+
+/*@}*/
+
+/******************************************************************/
+/** \name Per Context SAREA Support */
+/*@{*/
+
+/**
+ * Get per-context SAREA.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx_priv_map structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Gets the map from drm_device::ctx_idr with the handle specified and
+ * returns its handle.
+ */
+int drm_getsareactx(struct drm_device *dev, void *data,
+                   struct drm_file *file_priv)
+{
+       struct drm_ctx_priv_map *request = data;
+       struct drm_local_map *map;
+       struct drm_map_list *_entry;
+
+       mutex_lock(&dev->struct_mutex);
+
+       map = idr_find(&dev->ctx_idr, request->ctx_id);
+       if (!map) {
+               mutex_unlock(&dev->struct_mutex);
+               return -EINVAL;
+       }
+
+       mutex_unlock(&dev->struct_mutex);
+
+       request->handle = NULL;
+       list_for_each_entry(_entry, &dev->maplist, head) {
+               if (_entry->map == map) {
+                       request->handle =
+                           (void *)(unsigned long)_entry->user_token;
+                       break;
+               }
+       }
+       if (request->handle == NULL)
+               return -EINVAL;
+
+       return 0;
+}
+
+/**
+ * Set per-context SAREA.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx_priv_map structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Searches the mapping specified in \p arg and update the entry in
+ * drm_device::ctx_idr with it.
+ */
+int drm_setsareactx(struct drm_device *dev, void *data,
+                   struct drm_file *file_priv)
+{
+       struct drm_ctx_priv_map *request = data;
+       struct drm_local_map *map = NULL;
+       struct drm_map_list *r_list = NULL;
+
+       mutex_lock(&dev->struct_mutex);
+       list_for_each_entry(r_list, &dev->maplist, head) {
+               if (r_list->map
+                   && r_list->user_token == (unsigned long) request->handle)
+                       goto found;
+       }
+      bad:
+       mutex_unlock(&dev->struct_mutex);
+       return -EINVAL;
+
+      found:
+       map = r_list->map;
+       if (!map)
+               goto bad;
+
+       if (IS_ERR(idr_replace(&dev->ctx_idr, map, request->ctx_id)))
+               goto bad;
+
+       mutex_unlock(&dev->struct_mutex);
+
+       return 0;
+}
+
+/*@}*/
+
+/******************************************************************/
+/** \name The actual DRM context handling routines */
+/*@{*/
+
+/**
+ * Switch context.
+ *
+ * \param dev DRM device.
+ * \param old old context handle.
+ * \param new new context handle.
+ * \return zero on success or a negative number on failure.
+ *
+ * Attempt to set drm_device::context_flag.
+ */
+static int drm_context_switch(struct drm_device * dev, int old, int new)
+{
+       if (test_and_set_bit(0, &dev->context_flag)) {
+               DRM_ERROR("Reentering -- FIXME\n");
+               return -EBUSY;
+       }
+
+       DRM_DEBUG("Context switch from %d to %d\n", old, new);
+
+       if (new == dev->last_context) {
+               clear_bit(0, &dev->context_flag);
+               return 0;
+       }
+
+       return 0;
+}
+
+/**
+ * Complete context switch.
+ *
+ * \param dev DRM device.
+ * \param new new context handle.
+ * \return zero on success or a negative number on failure.
+ *
+ * Updates drm_device::last_context and drm_device::last_switch. Verifies the
+ * hardware lock is held, clears the drm_device::context_flag and wakes up
+ * drm_device::context_wait.
+ */
+static int drm_context_switch_complete(struct drm_device *dev,
+                                      struct drm_file *file_priv, int new)
+{
+       dev->last_context = new;        /* PRE/POST: This is the _only_ writer. */
+       dev->last_switch = jiffies;
+
+       if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) {
+               DRM_ERROR("Lock isn't held after context switch\n");
+       }
+
+       /* If a context switch is ever initiated
+          when the kernel holds the lock, release
+          that lock here. */
+       clear_bit(0, &dev->context_flag);
+       wake_up(&dev->context_wait);
+
+       return 0;
+}
+
+/**
+ * Reserve contexts.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx_res structure.
+ * \return zero on success or a negative number on failure.
+ */
+int drm_resctx(struct drm_device *dev, void *data,
+              struct drm_file *file_priv)
+{
+       struct drm_ctx_res *res = data;
+       struct drm_ctx ctx;
+       int i;
+
+       if (res->count >= DRM_RESERVED_CONTEXTS) {
+               memset(&ctx, 0, sizeof(ctx));
+               for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
+                       ctx.handle = i;
+                       if (copy_to_user(&res->contexts[i], &ctx, sizeof(ctx)))
+                               return -EFAULT;
+               }
+       }
+       res->count = DRM_RESERVED_CONTEXTS;
+
+       return 0;
+}
+
+/**
+ * Add context.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Get a new handle for the context and copy to userspace.
+ */
+int drm_addctx(struct drm_device *dev, void *data,
+              struct drm_file *file_priv)
+{
+       struct drm_ctx_list *ctx_entry;
+       struct drm_ctx *ctx = data;
+
+       ctx->handle = drm_ctxbitmap_next(dev);
+       if (ctx->handle == DRM_KERNEL_CONTEXT) {
+               /* Skip kernel's context and get a new one. */
+               ctx->handle = drm_ctxbitmap_next(dev);
+       }
+       DRM_DEBUG("%d\n", ctx->handle);
+       if (ctx->handle == -1) {
+               DRM_DEBUG("Not enough free contexts.\n");
+               /* Should this return -EBUSY instead? */
+               return -ENOMEM;
+       }
+
+       if (ctx->handle != DRM_KERNEL_CONTEXT) {
+               if (dev->driver->context_ctor)
+                       if (!dev->driver->context_ctor(dev, ctx->handle)) {
+                               DRM_DEBUG("Running out of ctxs or memory.\n");
+                               return -ENOMEM;
+                       }
+       }
+
+       ctx_entry = kmalloc(sizeof(*ctx_entry), GFP_KERNEL);
+       if (!ctx_entry) {
+               DRM_DEBUG("out of memory\n");
+               return -ENOMEM;
+       }
+
+       INIT_LIST_HEAD(&ctx_entry->head);
+       ctx_entry->handle = ctx->handle;
+       ctx_entry->tag = file_priv;
+
+       mutex_lock(&dev->ctxlist_mutex);
+       list_add(&ctx_entry->head, &dev->ctxlist);
+       ++dev->ctx_count;
+       mutex_unlock(&dev->ctxlist_mutex);
+
+       return 0;
+}
+
+int drm_modctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       /* This does nothing */
+       return 0;
+}
+
+/**
+ * Get context.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx structure.
+ * \return zero on success or a negative number on failure.
+ */
+int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       struct drm_ctx *ctx = data;
+
+       /* This is 0, because we don't handle any context flags */
+       ctx->flags = 0;
+
+       return 0;
+}
+
+/**
+ * Switch context.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Calls context_switch().
+ */
+int drm_switchctx(struct drm_device *dev, void *data,
+                 struct drm_file *file_priv)
+{
+       struct drm_ctx *ctx = data;
+
+       DRM_DEBUG("%d\n", ctx->handle);
+       return drm_context_switch(dev, dev->last_context, ctx->handle);
+}
+
+/**
+ * New context.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Calls context_switch_complete().
+ */
+int drm_newctx(struct drm_device *dev, void *data,
+              struct drm_file *file_priv)
+{
+       struct drm_ctx *ctx = data;
+
+       DRM_DEBUG("%d\n", ctx->handle);
+       drm_context_switch_complete(dev, file_priv, ctx->handle);
+
+       return 0;
+}
+
+/**
+ * Remove context.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * If not the special kernel context, calls ctxbitmap_free() to free the specified context.
+ */
+int drm_rmctx(struct drm_device *dev, void *data,
+             struct drm_file *file_priv)
+{
+       struct drm_ctx *ctx = data;
+
+       DRM_DEBUG("%d\n", ctx->handle);
+       if (ctx->handle != DRM_KERNEL_CONTEXT) {
+               if (dev->driver->context_dtor)
+                       dev->driver->context_dtor(dev, ctx->handle);
+               drm_ctxbitmap_free(dev, ctx->handle);
+       }
+
+       mutex_lock(&dev->ctxlist_mutex);
+       if (!list_empty(&dev->ctxlist)) {
+               struct drm_ctx_list *pos, *n;
+
+               list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
+                       if (pos->handle == ctx->handle) {
+                               list_del(&pos->head);
+                               kfree(pos);
+                               --dev->ctx_count;
+                       }
+               }
+       }
+       mutex_unlock(&dev->ctxlist_mutex);
+
+       return 0;
+}
+
+/*@}*/
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_crtc.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_crtc.c
new file mode 100644 (file)
index 0000000..5124401
--- /dev/null
@@ -0,0 +1,2654 @@
+/*
+ * Copyright (c) 2006-2008 Intel Corporation
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2008 Red Hat Inc.
+ *
+ * DRM core CRTC related functions
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ *
+ * Authors:
+ *      Keith Packard
+ *     Eric Anholt <eric@anholt.net>
+ *      Dave Airlie <airlied@linux.ie>
+ *      Jesse Barnes <jesse.barnes@intel.com>
+ */
+#include <linux/list.h>
+#include "drm.h"
+#include "drmP.h"
+#include "drm_crtc.h"
+
+struct drm_prop_enum_list {
+       int type;
+       char *name;
+};
+
+/* Avoid boilerplate.  I'm tired of typing. */
+#define DRM_ENUM_NAME_FN(fnname, list)                         \
+       char *fnname(int val)                                   \
+       {                                                       \
+               int i;                                          \
+               for (i = 0; i < ARRAY_SIZE(list); i++) {        \
+                       if (list[i].type == val)                \
+                               return list[i].name;            \
+               }                                               \
+               return "(unknown)";                             \
+       }
+
+/*
+ * Global properties
+ */
+static struct drm_prop_enum_list drm_dpms_enum_list[] =
+{      { DRM_MODE_DPMS_ON, "On" },
+       { DRM_MODE_DPMS_STANDBY, "Standby" },
+       { DRM_MODE_DPMS_SUSPEND, "Suspend" },
+       { DRM_MODE_DPMS_OFF, "Off" }
+};
+
+DRM_ENUM_NAME_FN(drm_get_dpms_name, drm_dpms_enum_list)
+
+/*
+ * Optional properties
+ */
+static struct drm_prop_enum_list drm_scaling_mode_enum_list[] =
+{
+       { DRM_MODE_SCALE_NONE, "None" },
+       { DRM_MODE_SCALE_FULLSCREEN, "Full" },
+       { DRM_MODE_SCALE_CENTER, "Center" },
+       { DRM_MODE_SCALE_ASPECT, "Full aspect" },
+};
+
+static struct drm_prop_enum_list drm_dithering_mode_enum_list[] =
+{
+       { DRM_MODE_DITHERING_OFF, "Off" },
+       { DRM_MODE_DITHERING_ON, "On" },
+};
+
+/*
+ * Non-global properties, but "required" for certain connectors.
+ */
+static struct drm_prop_enum_list drm_dvi_i_select_enum_list[] =
+{
+       { DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
+       { DRM_MODE_SUBCONNECTOR_DVID,      "DVI-D"     }, /* DVI-I  */
+       { DRM_MODE_SUBCONNECTOR_DVIA,      "DVI-A"     }, /* DVI-I  */
+};
+
+DRM_ENUM_NAME_FN(drm_get_dvi_i_select_name, drm_dvi_i_select_enum_list)
+
+static struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] =
+{
+       { DRM_MODE_SUBCONNECTOR_Unknown,   "Unknown"   }, /* DVI-I and TV-out */
+       { DRM_MODE_SUBCONNECTOR_DVID,      "DVI-D"     }, /* DVI-I  */
+       { DRM_MODE_SUBCONNECTOR_DVIA,      "DVI-A"     }, /* DVI-I  */
+};
+
+DRM_ENUM_NAME_FN(drm_get_dvi_i_subconnector_name,
+                drm_dvi_i_subconnector_enum_list)
+
+static struct drm_prop_enum_list drm_tv_select_enum_list[] =
+{
+       { DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
+       { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
+       { DRM_MODE_SUBCONNECTOR_SVIDEO,    "SVIDEO"    }, /* TV-out */
+       { DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
+       { DRM_MODE_SUBCONNECTOR_SCART,     "SCART"     }, /* TV-out */
+};
+
+DRM_ENUM_NAME_FN(drm_get_tv_select_name, drm_tv_select_enum_list)
+
+static struct drm_prop_enum_list drm_tv_subconnector_enum_list[] =
+{
+       { DRM_MODE_SUBCONNECTOR_Unknown,   "Unknown"   }, /* DVI-I and TV-out */
+       { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
+       { DRM_MODE_SUBCONNECTOR_SVIDEO,    "SVIDEO"    }, /* TV-out */
+       { DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
+       { DRM_MODE_SUBCONNECTOR_SCART,     "SCART"     }, /* TV-out */
+};
+
+DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
+                drm_tv_subconnector_enum_list)
+
+static struct drm_prop_enum_list drm_dirty_info_enum_list[] = {
+       { DRM_MODE_DIRTY_OFF,      "Off"      },
+       { DRM_MODE_DIRTY_ON,       "On"       },
+       { DRM_MODE_DIRTY_ANNOTATE, "Annotate" },
+};
+
+DRM_ENUM_NAME_FN(drm_get_dirty_info_name,
+                drm_dirty_info_enum_list)
+
+struct drm_conn_prop_enum_list {
+       int type;
+       char *name;
+       int count;
+};
+
+/*
+ * Connector and encoder types.
+ */
+static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
+{      { DRM_MODE_CONNECTOR_Unknown, "Unknown", 0 },
+       { DRM_MODE_CONNECTOR_VGA, "VGA", 0 },
+       { DRM_MODE_CONNECTOR_DVII, "DVI-I", 0 },
+       { DRM_MODE_CONNECTOR_DVID, "DVI-D", 0 },
+       { DRM_MODE_CONNECTOR_DVIA, "DVI-A", 0 },
+       { DRM_MODE_CONNECTOR_Composite, "Composite", 0 },
+       { DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO", 0 },
+       { DRM_MODE_CONNECTOR_LVDS, "LVDS", 0 },
+       { DRM_MODE_CONNECTOR_Component, "Component", 0 },
+       { DRM_MODE_CONNECTOR_9PinDIN, "9-pin DIN", 0 },
+       { DRM_MODE_CONNECTOR_DisplayPort, "DisplayPort", 0 },
+       { DRM_MODE_CONNECTOR_HDMIA, "HDMI Type A", 0 },
+       { DRM_MODE_CONNECTOR_HDMIB, "HDMI Type B", 0 },
+       { DRM_MODE_CONNECTOR_TV, "TV", 0 },
+};
+
+static struct drm_prop_enum_list drm_encoder_enum_list[] =
+{      { DRM_MODE_ENCODER_NONE, "None" },
+       { DRM_MODE_ENCODER_DAC, "DAC" },
+       { DRM_MODE_ENCODER_TMDS, "TMDS" },
+       { DRM_MODE_ENCODER_LVDS, "LVDS" },
+       { DRM_MODE_ENCODER_TVDAC, "TV" },
+};
+
+char *drm_get_encoder_name(struct drm_encoder *encoder)
+{
+       static char buf[32];
+
+       snprintf(buf, 32, "%s-%d",
+                drm_encoder_enum_list[encoder->encoder_type].name,
+                encoder->base.id);
+       return buf;
+}
+EXPORT_SYMBOL(drm_get_encoder_name);
+
+char *drm_get_connector_name(struct drm_connector *connector)
+{
+       static char buf[32];
+
+       snprintf(buf, 32, "%s-%d",
+                drm_connector_enum_list[connector->connector_type].name,
+                connector->connector_type_id);
+       return buf;
+}
+EXPORT_SYMBOL(drm_get_connector_name);
+
+char *drm_get_connector_status_name(enum drm_connector_status status)
+{
+       if (status == connector_status_connected)
+               return "connected";
+       else if (status == connector_status_disconnected)
+               return "disconnected";
+       else
+               return "unknown";
+}
+
+/**
+ * drm_mode_object_get - allocate a new identifier
+ * @dev: DRM device
+ * @ptr: object pointer, used to generate unique ID
+ * @type: object type
+ *
+ * LOCKING:
+ *
+ * Create a unique identifier based on @ptr in @dev's identifier space.  Used
+ * for tracking modes, CRTCs and connectors.
+ *
+ * RETURNS:
+ * New unique (relative to other objects in @dev) integer identifier for the
+ * object.
+ */
+static int drm_mode_object_get(struct drm_device *dev,
+                              struct drm_mode_object *obj, uint32_t obj_type)
+{
+       int new_id = 0;
+       int ret;
+
+again:
+       if (idr_pre_get(&dev->mode_config.crtc_idr, GFP_KERNEL) == 0) {
+               DRM_ERROR("Ran out memory getting a mode number\n");
+               return -EINVAL;
+       }
+
+       mutex_lock(&dev->mode_config.idr_mutex);
+       ret = idr_get_new_above(&dev->mode_config.crtc_idr, obj, 1, &new_id);
+       mutex_unlock(&dev->mode_config.idr_mutex);
+       if (ret == -EAGAIN)
+               goto again;
+
+       obj->id = new_id;
+       obj->type = obj_type;
+       return 0;
+}
+
+/**
+ * drm_mode_object_put - free an identifer
+ * @dev: DRM device
+ * @id: ID to free
+ *
+ * LOCKING:
+ * Caller must hold DRM mode_config lock.
+ *
+ * Free @id from @dev's unique identifier pool.
+ */
+static void drm_mode_object_put(struct drm_device *dev,
+                               struct drm_mode_object *object)
+{
+       mutex_lock(&dev->mode_config.idr_mutex);
+       idr_remove(&dev->mode_config.crtc_idr, object->id);
+       mutex_unlock(&dev->mode_config.idr_mutex);
+}
+
+struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
+               uint32_t id, uint32_t type)
+{
+       struct drm_mode_object *obj = NULL;
+
+       mutex_lock(&dev->mode_config.idr_mutex);
+       obj = idr_find(&dev->mode_config.crtc_idr, id);
+       if (!obj || (obj->type != type) || (obj->id != id))
+               obj = NULL;
+       mutex_unlock(&dev->mode_config.idr_mutex);
+
+       return obj;
+}
+EXPORT_SYMBOL(drm_mode_object_find);
+
+/**
+ * drm_framebuffer_init - initialize a framebuffer
+ * @dev: DRM device
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Allocates an ID for the framebuffer's parent mode object, sets its mode
+ * functions & device file and adds it to the master fd list.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure.
+ */
+int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
+                        const struct drm_framebuffer_funcs *funcs)
+{
+       int ret;
+
+       ret = drm_mode_object_get(dev, &fb->base, DRM_MODE_OBJECT_FB);
+       if (ret) {
+               return ret;
+       }
+
+       fb->dev = dev;
+       fb->funcs = funcs;
+       dev->mode_config.num_fb++;
+       list_add(&fb->head, &dev->mode_config.fb_list);
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_framebuffer_init);
+
+/**
+ * drm_framebuffer_cleanup - remove a framebuffer object
+ * @fb: framebuffer to remove
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Scans all the CRTCs in @dev's mode_config.  If they're using @fb, removes
+ * it, setting it to NULL.
+ */
+void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
+{
+       struct drm_device *dev = fb->dev;
+       struct drm_crtc *crtc;
+       struct drm_mode_set set;
+       int ret;
+
+       /* remove from any CRTC */
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               if (crtc->fb == fb) {
+                       /* should turn off the crtc */
+                       memset(&set, 0, sizeof(struct drm_mode_set));
+                       set.crtc = crtc;
+                       set.fb = NULL;
+                       ret = crtc->funcs->set_config(&set);
+                       if (ret)
+                               DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc);
+               }
+       }
+
+       drm_mode_object_put(dev, &fb->base);
+       list_del(&fb->head);
+       dev->mode_config.num_fb--;
+}
+EXPORT_SYMBOL(drm_framebuffer_cleanup);
+
+/**
+ * drm_crtc_init - Initialise a new CRTC object
+ * @dev: DRM device
+ * @crtc: CRTC object to init
+ * @funcs: callbacks for the new CRTC
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Inits a new object created as base part of an driver crtc object.
+ */
+void drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
+                  const struct drm_crtc_funcs *funcs)
+{
+       crtc->dev = dev;
+       crtc->funcs = funcs;
+
+       mutex_lock(&dev->mode_config.mutex);
+       drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC);
+
+       list_add_tail(&crtc->head, &dev->mode_config.crtc_list);
+       dev->mode_config.num_crtc++;
+       mutex_unlock(&dev->mode_config.mutex);
+}
+EXPORT_SYMBOL(drm_crtc_init);
+
+/**
+ * drm_crtc_cleanup - Cleans up the core crtc usage.
+ * @crtc: CRTC to cleanup
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Cleanup @crtc. Removes from drm modesetting space
+ * does NOT free object, caller does that.
+ */
+void drm_crtc_cleanup(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+
+       if (crtc->gamma_store) {
+               kfree(crtc->gamma_store);
+               crtc->gamma_store = NULL;
+       }
+
+       drm_mode_object_put(dev, &crtc->base);
+       list_del(&crtc->head);
+       dev->mode_config.num_crtc--;
+}
+EXPORT_SYMBOL(drm_crtc_cleanup);
+
+/**
+ * drm_mode_probed_add - add a mode to a connector's probed mode list
+ * @connector: connector the new mode
+ * @mode: mode data
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Add @mode to @connector's mode list for later use.
+ */
+void drm_mode_probed_add(struct drm_connector *connector,
+                        struct drm_display_mode *mode)
+{
+       list_add(&mode->head, &connector->probed_modes);
+}
+EXPORT_SYMBOL(drm_mode_probed_add);
+
+/**
+ * drm_mode_remove - remove and free a mode
+ * @connector: connector list to modify
+ * @mode: mode to remove
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Remove @mode from @connector's mode list, then free it.
+ */
+void drm_mode_remove(struct drm_connector *connector,
+                    struct drm_display_mode *mode)
+{
+       list_del(&mode->head);
+       kfree(mode);
+}
+EXPORT_SYMBOL(drm_mode_remove);
+
+/**
+ * drm_connector_init - Init a preallocated connector
+ * @dev: DRM device
+ * @connector: the connector to init
+ * @funcs: callbacks for this connector
+ * @connector_type: DRM_MODE_CONNECTOR_* type of this connector
+ *
+ * LOCKING:
+ * Takes @dev's mode_config lock internally.
+ *
+ * Initialises a preallocated connector. Connectors should be
+ * subclassed as part of driver connector objects.
+ */
+void drm_connector_init(struct drm_device *dev,
+                    struct drm_connector *connector,
+                    const struct drm_connector_funcs *funcs,
+                    int connector_type)
+{
+       mutex_lock(&dev->mode_config.mutex);
+
+       connector->dev = dev;
+       connector->funcs = funcs;
+       drm_mode_object_get(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR);
+       connector->connector_type = connector_type;
+       /* per-type instance number, e.g. the "1" in VGA-1; the global
+        * counter is never decremented on cleanup, hence the TODO */
+       connector->connector_type_id =
+               ++drm_connector_enum_list[connector_type].count; /* TODO */
+       INIT_LIST_HEAD(&connector->user_modes);
+       INIT_LIST_HEAD(&connector->probed_modes);
+       INIT_LIST_HEAD(&connector->modes);
+       connector->edid_blob_ptr = NULL;
+
+       list_add_tail(&connector->head, &dev->mode_config.connector_list);
+       dev->mode_config.num_connector++;
+
+       /* every connector gets the standard EDID and DPMS properties,
+        * initially with value 0 */
+       drm_connector_attach_property(connector,
+                                     dev->mode_config.edid_property, 0);
+
+       drm_connector_attach_property(connector,
+                                     dev->mode_config.dpms_property, 0);
+
+       mutex_unlock(&dev->mode_config.mutex);
+}
+EXPORT_SYMBOL(drm_connector_init);
+
+/**
+ * drm_connector_cleanup - cleans up an initialised connector
+ * @connector: connector to cleanup
+ *
+ * LOCKING:
+ * Takes the connector's device mode_config lock internally for the
+ * object/list teardown; the mode lists are walked without it.
+ *
+ * Cleans up the connector but doesn't free the object.
+ */
+void drm_connector_cleanup(struct drm_connector *connector)
+{
+       struct drm_device *dev = connector->dev;
+       struct drm_display_mode *mode, *t;
+
+       /* _safe iteration: drm_mode_remove() unlinks and frees each entry */
+       list_for_each_entry_safe(mode, t, &connector->probed_modes, head)
+               drm_mode_remove(connector, mode);
+
+       list_for_each_entry_safe(mode, t, &connector->modes, head)
+               drm_mode_remove(connector, mode);
+
+       list_for_each_entry_safe(mode, t, &connector->user_modes, head)
+               drm_mode_remove(connector, mode);
+
+       kfree(connector->fb_helper_private);
+       mutex_lock(&dev->mode_config.mutex);
+       drm_mode_object_put(dev, &connector->base);
+       list_del(&connector->head);
+       mutex_unlock(&dev->mode_config.mutex);
+}
+EXPORT_SYMBOL(drm_connector_cleanup);
+
+/**
+ * drm_encoder_init - Init a preallocated encoder
+ * @dev: DRM device
+ * @encoder: the encoder to init
+ * @funcs: callbacks for this encoder
+ * @encoder_type: DRM_MODE_ENCODER_* type of this encoder
+ *
+ * LOCKING:
+ * Takes @dev's mode_config lock internally.
+ *
+ * Gives the encoder an object ID and adds it to @dev's encoder list.
+ */
+void drm_encoder_init(struct drm_device *dev,
+                     struct drm_encoder *encoder,
+                     const struct drm_encoder_funcs *funcs,
+                     int encoder_type)
+{
+       mutex_lock(&dev->mode_config.mutex);
+
+       encoder->dev = dev;
+
+       drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER);
+       encoder->encoder_type = encoder_type;
+       encoder->funcs = funcs;
+
+       list_add_tail(&encoder->head, &dev->mode_config.encoder_list);
+       dev->mode_config.num_encoder++;
+
+       mutex_unlock(&dev->mode_config.mutex);
+}
+EXPORT_SYMBOL(drm_encoder_init);
+
+/**
+ * drm_encoder_cleanup - cleans up an initialised encoder
+ * @encoder: encoder to cleanup
+ *
+ * LOCKING:
+ * Takes the encoder's device mode_config lock internally.
+ *
+ * Releases the encoder's object ID and unlinks it from the device's
+ * encoder list; does NOT free the encoder itself.
+ */
+void drm_encoder_cleanup(struct drm_encoder *encoder)
+{
+       struct drm_device *dev = encoder->dev;
+       mutex_lock(&dev->mode_config.mutex);
+       drm_mode_object_put(dev, &encoder->base);
+       list_del(&encoder->head);
+       mutex_unlock(&dev->mode_config.mutex);
+}
+EXPORT_SYMBOL(drm_encoder_cleanup);
+
+/**
+ * drm_mode_create - create a new display mode
+ * @dev: DRM device
+ *
+ * LOCKING:
+ * Caller must hold DRM mode_config lock.
+ *
+ * Create a new drm_display_mode, give it an ID, and return it.
+ *
+ * RETURNS:
+ * Pointer to new mode on success, NULL on error.
+ */
+struct drm_display_mode *drm_mode_create(struct drm_device *dev)
+{
+       struct drm_display_mode *nmode;
+
+       nmode = kzalloc(sizeof(struct drm_display_mode), GFP_KERNEL);
+       if (!nmode)
+               return NULL;
+
+       /* modes created here carry an object ID and must be freed with
+        * drm_mode_destroy(), not plain kfree() */
+       drm_mode_object_get(dev, &nmode->base, DRM_MODE_OBJECT_MODE);
+       return nmode;
+}
+EXPORT_SYMBOL(drm_mode_create);
+
+/**
+ * drm_mode_destroy - remove a mode
+ * @dev: DRM device
+ * @mode: mode to remove
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Free @mode's unique identifier, then free it.
+ * Counterpart to drm_mode_create().
+ */
+void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode)
+{
+       drm_mode_object_put(dev, &mode->base);
+
+       kfree(mode);
+}
+EXPORT_SYMBOL(drm_mode_destroy);
+
+/*
+ * drm_mode_create_standard_connector_properties - create EDID/DPMS props
+ * @dev: DRM device
+ *
+ * Creates the standard properties attached to every connector by
+ * drm_connector_init(). Called once from drm_mode_config_init().
+ *
+ * NOTE(review): the drm_property_create() results are stored without a
+ * NULL check — presumably acceptable at init time, but verify.
+ */
+static int drm_mode_create_standard_connector_properties(struct drm_device *dev)
+{
+       struct drm_property *edid;
+       struct drm_property *dpms;
+       int i;
+
+       /*
+        * Standard properties (apply to all connectors)
+        */
+       edid = drm_property_create(dev, DRM_MODE_PROP_BLOB |
+                                  DRM_MODE_PROP_IMMUTABLE,
+                                  "EDID", 0);
+       dev->mode_config.edid_property = edid;
+
+       dpms = drm_property_create(dev, DRM_MODE_PROP_ENUM,
+                                  "DPMS", ARRAY_SIZE(drm_dpms_enum_list));
+       for (i = 0; i < ARRAY_SIZE(drm_dpms_enum_list); i++)
+               drm_property_add_enum(dpms, i, drm_dpms_enum_list[i].type,
+                                     drm_dpms_enum_list[i].name);
+       dev->mode_config.dpms_property = dpms;
+
+       return 0;
+}
+
+/**
+ * drm_mode_create_dvi_i_properties - create DVI-I specific connector properties
+ * @dev: DRM device
+ *
+ * Called by a driver the first time a DVI-I connector is made.
+ * Idempotent: returns early if the properties already exist.
+ */
+int drm_mode_create_dvi_i_properties(struct drm_device *dev)
+{
+       struct drm_property *dvi_i_selector;
+       struct drm_property *dvi_i_subconnector;
+       int i;
+
+       if (dev->mode_config.dvi_i_select_subconnector_property)
+               return 0;
+
+       /* writable property: which subconnector the user wants driven */
+       dvi_i_selector =
+               drm_property_create(dev, DRM_MODE_PROP_ENUM,
+                                   "select subconnector",
+                                   ARRAY_SIZE(drm_dvi_i_select_enum_list));
+       for (i = 0; i < ARRAY_SIZE(drm_dvi_i_select_enum_list); i++)
+               drm_property_add_enum(dvi_i_selector, i,
+                                     drm_dvi_i_select_enum_list[i].type,
+                                     drm_dvi_i_select_enum_list[i].name);
+       dev->mode_config.dvi_i_select_subconnector_property = dvi_i_selector;
+
+       /* read-only property: which subconnector is actually active */
+       dvi_i_subconnector =
+               drm_property_create(dev, DRM_MODE_PROP_ENUM |
+                                   DRM_MODE_PROP_IMMUTABLE,
+                                   "subconnector",
+                                   ARRAY_SIZE(drm_dvi_i_subconnector_enum_list));
+       for (i = 0; i < ARRAY_SIZE(drm_dvi_i_subconnector_enum_list); i++)
+               drm_property_add_enum(dvi_i_subconnector, i,
+                                     drm_dvi_i_subconnector_enum_list[i].type,
+                                     drm_dvi_i_subconnector_enum_list[i].name);
+       dev->mode_config.dvi_i_subconnector_property = dvi_i_subconnector;
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_dvi_i_properties);
+
+/**
+ * drm_create_tv_properties - create TV specific connector properties
+ * @dev: DRM device
+ * @num_modes: number of different TV formats (modes) supported
+ * @modes: array of pointers to strings containing name of each format
+ *
+ * Called by a driver's TV initialization routine, this function creates
+ * the TV specific connector properties for a given device.  Caller is
+ * responsible for allocating a list of format names and passing them to
+ * this routine.  Idempotent: returns early if already created.
+ *
+ * NOTE(review): drm_property_create() results are dereferenced without
+ * NULL checks throughout — confirm this is acceptable at init time.
+ */
+int drm_mode_create_tv_properties(struct drm_device *dev, int num_modes,
+                                 char *modes[])
+{
+       struct drm_property *tv_selector;
+       struct drm_property *tv_subconnector;
+       int i;
+
+       if (dev->mode_config.tv_select_subconnector_property)
+               return 0;
+
+       /*
+        * Basic connector properties
+        */
+       tv_selector = drm_property_create(dev, DRM_MODE_PROP_ENUM,
+                                         "select subconnector",
+                                         ARRAY_SIZE(drm_tv_select_enum_list));
+       for (i = 0; i < ARRAY_SIZE(drm_tv_select_enum_list); i++)
+               drm_property_add_enum(tv_selector, i,
+                                     drm_tv_select_enum_list[i].type,
+                                     drm_tv_select_enum_list[i].name);
+       dev->mode_config.tv_select_subconnector_property = tv_selector;
+
+       tv_subconnector =
+               drm_property_create(dev, DRM_MODE_PROP_ENUM |
+                                   DRM_MODE_PROP_IMMUTABLE, "subconnector",
+                                   ARRAY_SIZE(drm_tv_subconnector_enum_list));
+       for (i = 0; i < ARRAY_SIZE(drm_tv_subconnector_enum_list); i++)
+               drm_property_add_enum(tv_subconnector, i,
+                                     drm_tv_subconnector_enum_list[i].type,
+                                     drm_tv_subconnector_enum_list[i].name);
+       dev->mode_config.tv_subconnector_property = tv_subconnector;
+
+       /*
+        * Other, TV specific properties: margins & TV modes.
+        * All ranges below are 0..100 inclusive.
+        */
+       dev->mode_config.tv_left_margin_property =
+               drm_property_create(dev, DRM_MODE_PROP_RANGE,
+                                   "left margin", 2);
+       dev->mode_config.tv_left_margin_property->values[0] = 0;
+       dev->mode_config.tv_left_margin_property->values[1] = 100;
+
+       dev->mode_config.tv_right_margin_property =
+               drm_property_create(dev, DRM_MODE_PROP_RANGE,
+                                   "right margin", 2);
+       dev->mode_config.tv_right_margin_property->values[0] = 0;
+       dev->mode_config.tv_right_margin_property->values[1] = 100;
+
+       dev->mode_config.tv_top_margin_property =
+               drm_property_create(dev, DRM_MODE_PROP_RANGE,
+                                   "top margin", 2);
+       dev->mode_config.tv_top_margin_property->values[0] = 0;
+       dev->mode_config.tv_top_margin_property->values[1] = 100;
+
+       dev->mode_config.tv_bottom_margin_property =
+               drm_property_create(dev, DRM_MODE_PROP_RANGE,
+                                   "bottom margin", 2);
+       dev->mode_config.tv_bottom_margin_property->values[0] = 0;
+       dev->mode_config.tv_bottom_margin_property->values[1] = 100;
+
+       /* enum of caller-supplied format names; enum value == index */
+       dev->mode_config.tv_mode_property =
+               drm_property_create(dev, DRM_MODE_PROP_ENUM,
+                                   "mode", num_modes);
+       for (i = 0; i < num_modes; i++)
+               drm_property_add_enum(dev->mode_config.tv_mode_property, i,
+                                     i, modes[i]);
+
+       dev->mode_config.tv_brightness_property =
+               drm_property_create(dev, DRM_MODE_PROP_RANGE,
+                                   "brightness", 2);
+       dev->mode_config.tv_brightness_property->values[0] = 0;
+       dev->mode_config.tv_brightness_property->values[1] = 100;
+
+       dev->mode_config.tv_contrast_property =
+               drm_property_create(dev, DRM_MODE_PROP_RANGE,
+                                   "contrast", 2);
+       dev->mode_config.tv_contrast_property->values[0] = 0;
+       dev->mode_config.tv_contrast_property->values[1] = 100;
+
+       dev->mode_config.tv_flicker_reduction_property =
+               drm_property_create(dev, DRM_MODE_PROP_RANGE,
+                                   "flicker reduction", 2);
+       dev->mode_config.tv_flicker_reduction_property->values[0] = 0;
+       dev->mode_config.tv_flicker_reduction_property->values[1] = 100;
+
+       dev->mode_config.tv_overscan_property =
+               drm_property_create(dev, DRM_MODE_PROP_RANGE,
+                                   "overscan", 2);
+       dev->mode_config.tv_overscan_property->values[0] = 0;
+       dev->mode_config.tv_overscan_property->values[1] = 100;
+
+       dev->mode_config.tv_saturation_property =
+               drm_property_create(dev, DRM_MODE_PROP_RANGE,
+                                   "saturation", 2);
+       dev->mode_config.tv_saturation_property->values[0] = 0;
+       dev->mode_config.tv_saturation_property->values[1] = 100;
+
+       dev->mode_config.tv_hue_property =
+               drm_property_create(dev, DRM_MODE_PROP_RANGE,
+                                   "hue", 2);
+       dev->mode_config.tv_hue_property->values[0] = 0;
+       dev->mode_config.tv_hue_property->values[1] = 100;
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_tv_properties);
+
+/**
+ * drm_mode_create_scaling_mode_property - create scaling mode property
+ * @dev: DRM device
+ *
+ * Called by a driver the first time it's needed, must be attached to desired
+ * connectors.  Idempotent: returns early if already created.
+ */
+int drm_mode_create_scaling_mode_property(struct drm_device *dev)
+{
+       struct drm_property *scaling_mode;
+       int i;
+
+       if (dev->mode_config.scaling_mode_property)
+               return 0;
+
+       scaling_mode =
+               drm_property_create(dev, DRM_MODE_PROP_ENUM, "scaling mode",
+                                   ARRAY_SIZE(drm_scaling_mode_enum_list));
+       for (i = 0; i < ARRAY_SIZE(drm_scaling_mode_enum_list); i++)
+               drm_property_add_enum(scaling_mode, i,
+                                     drm_scaling_mode_enum_list[i].type,
+                                     drm_scaling_mode_enum_list[i].name);
+
+       dev->mode_config.scaling_mode_property = scaling_mode;
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_scaling_mode_property);
+
+/**
+ * drm_mode_create_dithering_property - create dithering property
+ * @dev: DRM device
+ *
+ * Called by a driver the first time it's needed, must be attached to desired
+ * connectors.  Idempotent: returns early if already created.
+ */
+int drm_mode_create_dithering_property(struct drm_device *dev)
+{
+       struct drm_property *dithering_mode;
+       int i;
+
+       if (dev->mode_config.dithering_mode_property)
+               return 0;
+
+       dithering_mode =
+               drm_property_create(dev, DRM_MODE_PROP_ENUM, "dithering",
+                                   ARRAY_SIZE(drm_dithering_mode_enum_list));
+       for (i = 0; i < ARRAY_SIZE(drm_dithering_mode_enum_list); i++)
+               drm_property_add_enum(dithering_mode, i,
+                                     drm_dithering_mode_enum_list[i].type,
+                                     drm_dithering_mode_enum_list[i].name);
+       dev->mode_config.dithering_mode_property = dithering_mode;
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_dithering_property);
+
+/**
+ * drm_mode_create_dirty_info_property - create dirty info property
+ * @dev: DRM device
+ *
+ * Called by a driver the first time it's needed, must be attached to desired
+ * connectors.  Idempotent: returns early if already created.
+ */
+int drm_mode_create_dirty_info_property(struct drm_device *dev)
+{
+       struct drm_property *dirty_info;
+       int i;
+
+       if (dev->mode_config.dirty_info_property)
+               return 0;
+
+       dirty_info =
+               drm_property_create(dev, DRM_MODE_PROP_ENUM |
+                                   DRM_MODE_PROP_IMMUTABLE,
+                                   "dirty",
+                                   ARRAY_SIZE(drm_dirty_info_enum_list));
+       for (i = 0; i < ARRAY_SIZE(drm_dirty_info_enum_list); i++)
+               drm_property_add_enum(dirty_info, i,
+                                     drm_dirty_info_enum_list[i].type,
+                                     drm_dirty_info_enum_list[i].name);
+       dev->mode_config.dirty_info_property = dirty_info;
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_dirty_info_property);
+
+/**
+ * drm_mode_config_init - initialize DRM mode_configuration structure
+ * @dev: DRM device
+ *
+ * LOCKING:
+ * None, should happen single threaded at init time.
+ *
+ * Initialize @dev's mode_config structure, used for tracking the graphics
+ * configuration of @dev.
+ */
+void drm_mode_config_init(struct drm_device *dev)
+{
+       mutex_init(&dev->mode_config.mutex);
+       mutex_init(&dev->mode_config.idr_mutex);
+       INIT_LIST_HEAD(&dev->mode_config.fb_list);
+       INIT_LIST_HEAD(&dev->mode_config.fb_kernel_list);
+       INIT_LIST_HEAD(&dev->mode_config.crtc_list);
+       INIT_LIST_HEAD(&dev->mode_config.connector_list);
+       INIT_LIST_HEAD(&dev->mode_config.encoder_list);
+       INIT_LIST_HEAD(&dev->mode_config.property_list);
+       INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
+       /* crtc_idr hands out IDs for all mode objects, not just CRTCs */
+       idr_init(&dev->mode_config.crtc_idr);
+
+       mutex_lock(&dev->mode_config.mutex);
+       drm_mode_create_standard_connector_properties(dev);
+       mutex_unlock(&dev->mode_config.mutex);
+
+       /* Just to be sure */
+       dev->mode_config.num_fb = 0;
+       dev->mode_config.num_connector = 0;
+       dev->mode_config.num_crtc = 0;
+       dev->mode_config.num_encoder = 0;
+}
+EXPORT_SYMBOL(drm_mode_config_init);
+
+/*
+ * drm_mode_group_init - allocate a mode group's ID list
+ * @dev: DRM device
+ * @group: group to initialise
+ *
+ * Sizes @group's id_list to hold every CRTC, connector and encoder
+ * currently registered on @dev, zeroes the per-type counts.
+ *
+ * RETURNS: 0 on success, -EINVAL if @dev has no mode objects,
+ * -ENOMEM on allocation failure.
+ */
+int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group)
+{
+       uint32_t total_objects = 0;
+
+       total_objects += dev->mode_config.num_crtc;
+       total_objects += dev->mode_config.num_connector;
+       total_objects += dev->mode_config.num_encoder;
+
+       if (total_objects == 0)
+               return -EINVAL;
+
+       group->id_list = kzalloc(total_objects * sizeof(uint32_t), GFP_KERNEL);
+       if (!group->id_list)
+               return -ENOMEM;
+
+       group->num_crtcs = 0;
+       group->num_connectors = 0;
+       group->num_encoders = 0;
+       return 0;
+}
+
+/*
+ * drm_mode_group_init_legacy_group - populate a group with all mode objects
+ * @dev: DRM device
+ * @group: group to initialise and fill
+ *
+ * Fills @group's id_list with every CRTC, then every encoder, then every
+ * connector on @dev, in that fixed order (consumers index into the list
+ * by num_crtcs/num_encoders offsets).
+ *
+ * RETURNS: 0 on success, or the error from drm_mode_group_init().
+ */
+int drm_mode_group_init_legacy_group(struct drm_device *dev,
+                                    struct drm_mode_group *group)
+{
+       struct drm_crtc *crtc;
+       struct drm_encoder *encoder;
+       struct drm_connector *connector;
+       int ret;
+
+       if ((ret = drm_mode_group_init(dev, group)))
+               return ret;
+
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+               group->id_list[group->num_crtcs++] = crtc->base.id;
+
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+               group->id_list[group->num_crtcs + group->num_encoders++] =
+               encoder->base.id;
+
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+               group->id_list[group->num_crtcs + group->num_encoders +
+                              group->num_connectors++] = connector->base.id;
+
+       return 0;
+}
+
+/**
+ * drm_mode_config_cleanup - free up DRM mode_config info
+ * @dev: DRM device
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Free up all the connectors and CRTCs associated with this DRM device, then
+ * free up the framebuffers and associated buffer objects.
+ *
+ * FIXME: cleanup any dangling user buffer objects too
+ */
+void drm_mode_config_cleanup(struct drm_device *dev)
+{
+       struct drm_connector *connector, *ot;
+       struct drm_crtc *crtc, *ct;
+       struct drm_encoder *encoder, *enct;
+       struct drm_framebuffer *fb, *fbt;
+       struct drm_property *property, *pt;
+
+       /* _safe iteration everywhere: the destroy callbacks unlink (and
+        * free) the entries as we walk */
+       list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list,
+                                head) {
+               encoder->funcs->destroy(encoder);
+       }
+
+       list_for_each_entry_safe(connector, ot,
+                                &dev->mode_config.connector_list, head) {
+               connector->funcs->destroy(connector);
+       }
+
+       list_for_each_entry_safe(property, pt, &dev->mode_config.property_list,
+                                head) {
+               drm_property_destroy(dev, property);
+       }
+
+       list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
+               fb->funcs->destroy(fb);
+       }
+
+       /* CRTCs last: framebuffers above may still reference them */
+       list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
+               crtc->funcs->destroy(crtc);
+       }
+
+}
+EXPORT_SYMBOL(drm_mode_config_cleanup);
+
+/**
+ * drm_crtc_convert_to_umode - convert a drm_display_mode into a modeinfo
+ * @out: drm_mode_modeinfo struct to return to the user
+ * @in: drm_display_mode to use
+ *
+ * LOCKING:
+ * None.
+ *
+ * Convert a drm_display_mode into a drm_mode_modeinfo structure to return to
+ * the user.
+ */
+void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out,
+                              struct drm_display_mode *in)
+{
+       out->clock = in->clock;
+       out->hdisplay = in->hdisplay;
+       out->hsync_start = in->hsync_start;
+       out->hsync_end = in->hsync_end;
+       out->htotal = in->htotal;
+       out->hskew = in->hskew;
+       out->vdisplay = in->vdisplay;
+       out->vsync_start = in->vsync_start;
+       out->vsync_end = in->vsync_end;
+       out->vtotal = in->vtotal;
+       out->vscan = in->vscan;
+       out->vrefresh = in->vrefresh;
+       out->flags = in->flags;
+       out->type = in->type;
+       /* strncpy alone may not terminate; force NUL in the last byte */
+       strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
+       out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
+}
+
+/**
+ * drm_crtc_convert_umode - convert a modeinfo into a drm_display_mode
+ * @out: drm_display_mode to return to the user
+ * @in: drm_mode_modeinfo to use
+ *
+ * LOCKING:
+ * None.
+ *
+ * Convert a drm_mode_modeinfo into a drm_display_mode structure to return to
+ * the caller.
+ */
+void drm_crtc_convert_umode(struct drm_display_mode *out,
+                           struct drm_mode_modeinfo *in)
+{
+       out->clock = in->clock;
+       out->hdisplay = in->hdisplay;
+       out->hsync_start = in->hsync_start;
+       out->hsync_end = in->hsync_end;
+       out->htotal = in->htotal;
+       out->hskew = in->hskew;
+       out->vdisplay = in->vdisplay;
+       out->vsync_start = in->vsync_start;
+       out->vsync_end = in->vsync_end;
+       out->vtotal = in->vtotal;
+       out->vscan = in->vscan;
+       out->vrefresh = in->vrefresh;
+       out->flags = in->flags;
+       out->type = in->type;
+       /* strncpy alone may not terminate; force NUL in the last byte */
+       strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
+       out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
+}
+
+/**
+ * drm_mode_getresources - get graphics configuration
+ * @dev: DRM device
+ * @data: ioctl argument, a struct drm_mode_card_res
+ * @file_priv: DRM file private of the calling client
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Construct a set of configuration description structures and return
+ * them to the user, including CRTC, connector and framebuffer configuration.
+ *
+ * Called by the user via ioctl.  Userspace calls this twice: once with
+ * zero counts to learn the sizes, then again with buffers allocated.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_getresources(struct drm_device *dev, void *data,
+                         struct drm_file *file_priv)
+{
+       struct drm_mode_card_res *card_res = data;
+       struct list_head *lh;
+       struct drm_framebuffer *fb;
+       struct drm_connector *connector;
+       struct drm_crtc *crtc;
+       struct drm_encoder *encoder;
+       int ret = 0;
+       int connector_count = 0;
+       int crtc_count = 0;
+       int fb_count = 0;
+       int encoder_count = 0;
+       int copied = 0, i;
+       uint32_t __user *fb_id;
+       uint32_t __user *crtc_id;
+       uint32_t __user *connector_id;
+       uint32_t __user *encoder_id;
+       struct drm_mode_group *mode_group;
+
+       mutex_lock(&dev->mode_config.mutex);
+
+       /*
+        * For the non-control nodes we need to limit the list of resources
+        * by IDs in the group list for this node
+        */
+       list_for_each(lh, &file_priv->fbs)
+               fb_count++;
+
+       mode_group = &file_priv->master->minor->mode_group;
+       if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
+
+               list_for_each(lh, &dev->mode_config.crtc_list)
+                       crtc_count++;
+
+               list_for_each(lh, &dev->mode_config.connector_list)
+                       connector_count++;
+
+               list_for_each(lh, &dev->mode_config.encoder_list)
+                       encoder_count++;
+       } else {
+
+               crtc_count = mode_group->num_crtcs;
+               connector_count = mode_group->num_connectors;
+               encoder_count = mode_group->num_encoders;
+       }
+
+       card_res->max_height = dev->mode_config.max_height;
+       card_res->min_height = dev->mode_config.min_height;
+       card_res->max_width = dev->mode_config.max_width;
+       card_res->min_width = dev->mode_config.min_width;
+
+       /* handle this in 4 parts */
+       /* FBs: always per-file, never filtered by the mode group */
+       if (card_res->count_fbs >= fb_count) {
+               copied = 0;
+               fb_id = (uint32_t __user *)(unsigned long)card_res->fb_id_ptr;
+               list_for_each_entry(fb, &file_priv->fbs, head) {
+                       if (put_user(fb->base.id, fb_id + copied)) {
+                               ret = -EFAULT;
+                               goto out;
+                       }
+                       copied++;
+               }
+       }
+       card_res->count_fbs = fb_count;
+
+       /* CRTCs */
+       if (card_res->count_crtcs >= crtc_count) {
+               copied = 0;
+               crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr;
+               if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
+                       list_for_each_entry(crtc, &dev->mode_config.crtc_list,
+                                           head) {
+                               DRM_DEBUG_KMS("CRTC ID is %d\n", crtc->base.id);
+                               if (put_user(crtc->base.id, crtc_id + copied)) {
+                                       ret = -EFAULT;
+                                       goto out;
+                               }
+                               copied++;
+                       }
+               } else {
+                       /* id_list layout: [crtcs][encoders][connectors] */
+                       for (i = 0; i < mode_group->num_crtcs; i++) {
+                               if (put_user(mode_group->id_list[i],
+                                            crtc_id + copied)) {
+                                       ret = -EFAULT;
+                                       goto out;
+                               }
+                               copied++;
+                       }
+               }
+       }
+       card_res->count_crtcs = crtc_count;
+
+       /* Encoders */
+       if (card_res->count_encoders >= encoder_count) {
+               copied = 0;
+               encoder_id = (uint32_t __user *)(unsigned long)card_res->encoder_id_ptr;
+               if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
+                       list_for_each_entry(encoder,
+                                           &dev->mode_config.encoder_list,
+                                           head) {
+                               DRM_DEBUG_KMS("ENCODER ID is %d\n",
+                                         encoder->base.id);
+                               if (put_user(encoder->base.id, encoder_id +
+                                            copied)) {
+                                       ret = -EFAULT;
+                                       goto out;
+                               }
+                               copied++;
+                       }
+               } else {
+                       for (i = mode_group->num_crtcs; i < mode_group->num_crtcs + mode_group->num_encoders; i++) {
+                               if (put_user(mode_group->id_list[i],
+                                            encoder_id + copied)) {
+                                       ret = -EFAULT;
+                                       goto out;
+                               }
+                               copied++;
+                       }
+
+               }
+       }
+       card_res->count_encoders = encoder_count;
+
+       /* Connectors */
+       if (card_res->count_connectors >= connector_count) {
+               copied = 0;
+               connector_id = (uint32_t __user *)(unsigned long)card_res->connector_id_ptr;
+               if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
+                       list_for_each_entry(connector,
+                                           &dev->mode_config.connector_list,
+                                           head) {
+                               DRM_DEBUG_KMS("CONNECTOR ID is %d\n",
+                                         connector->base.id);
+                               if (put_user(connector->base.id,
+                                            connector_id + copied)) {
+                                       ret = -EFAULT;
+                                       goto out;
+                               }
+                               copied++;
+                       }
+               } else {
+                       int start = mode_group->num_crtcs +
+                               mode_group->num_encoders;
+                       for (i = start; i < start + mode_group->num_connectors; i++) {
+                               if (put_user(mode_group->id_list[i],
+                                            connector_id + copied)) {
+                                       ret = -EFAULT;
+                                       goto out;
+                               }
+                               copied++;
+                       }
+               }
+       }
+       card_res->count_connectors = connector_count;
+
+       DRM_DEBUG_KMS("Counted %d %d %d\n", card_res->count_crtcs,
+                 card_res->count_connectors, card_res->count_encoders);
+
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+/**
+ * drm_mode_getcrtc - get CRTC configuration
+ * @dev: DRM device
+ * @data: ioctl argument, a struct drm_mode_crtc
+ * @file_priv: DRM file private of the calling client
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Construct a CRTC configuration structure to return to the user.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_getcrtc(struct drm_device *dev,
+                    void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_crtc *crtc_resp = data;
+       struct drm_crtc *crtc;
+       struct drm_mode_object *obj;
+       int ret = 0;
+
+       mutex_lock(&dev->mode_config.mutex);
+
+       obj = drm_mode_object_find(dev, crtc_resp->crtc_id,
+                                  DRM_MODE_OBJECT_CRTC);
+       if (!obj) {
+               ret = -EINVAL;
+               goto out;
+       }
+       crtc = obj_to_crtc(obj);
+
+       crtc_resp->x = crtc->x;
+       crtc_resp->y = crtc->y;
+       crtc_resp->gamma_size = crtc->gamma_size;
+       /* fb_id 0 signals "no framebuffer bound" to userspace */
+       if (crtc->fb)
+               crtc_resp->fb_id = crtc->fb->base.id;
+       else
+               crtc_resp->fb_id = 0;
+
+       if (crtc->enabled) {
+
+               drm_crtc_convert_to_umode(&crtc_resp->mode, &crtc->mode);
+               crtc_resp->mode_valid = 1;
+
+       } else {
+               crtc_resp->mode_valid = 0;
+       }
+
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+/**
+ * drm_mode_getconnector - get connector configuration
+ * @inode: inode from the ioctl
+ * @filp: file * from the ioctl
+ * @cmd: cmd from ioctl
+ * @arg: arg from ioctl
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Construct a connector configuration structure to return to the user.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_getconnector(struct drm_device *dev, void *data,
+                         struct drm_file *file_priv)
+{
+       struct drm_mode_get_connector *out_resp = data;
+       struct drm_mode_object *obj;
+       struct drm_connector *connector;
+       struct drm_display_mode *mode;
+       int mode_count = 0;
+       int props_count = 0;
+       int encoders_count = 0;
+       int ret = 0;
+       int copied = 0;
+       int i;
+       struct drm_mode_modeinfo u_mode;
+       struct drm_mode_modeinfo __user *mode_ptr;
+       uint32_t __user *prop_ptr;
+       uint64_t __user *prop_values;
+       uint32_t __user *encoder_ptr;
+
+       memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo));
+
+       DRM_DEBUG_KMS("connector id %d:\n", out_resp->connector_id);
+
+       mutex_lock(&dev->mode_config.mutex);
+
+       obj = drm_mode_object_find(dev, out_resp->connector_id,
+                                  DRM_MODE_OBJECT_CONNECTOR);
+       if (!obj) {
+               ret = -EINVAL;
+               goto out;
+       }
+       connector = obj_to_connector(obj);
+
+       /* Count used property slots; an id of 0 marks an unused slot. */
+       for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
+               if (connector->property_ids[i] != 0) {
+                       props_count++;
+               }
+       }
+
+       /* Count used encoder slots the same way. */
+       for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+               if (connector->encoder_ids[i] != 0) {
+                       encoders_count++;
+               }
+       }
+
+       /* count_modes == 0 is userspace's size-query pass: probe modes now. */
+       if (out_resp->count_modes == 0) {
+               connector->funcs->fill_modes(connector,
+                                            dev->mode_config.max_width,
+                                            dev->mode_config.max_height);
+       }
+
+       /* delayed so we get modes regardless of pre-fill_modes state */
+       list_for_each_entry(mode, &connector->modes, head)
+               mode_count++;
+
+       out_resp->connector_id = connector->base.id;
+       out_resp->connector_type = connector->connector_type;
+       out_resp->connector_type_id = connector->connector_type_id;
+       out_resp->mm_width = connector->display_info.width_mm;
+       out_resp->mm_height = connector->display_info.height_mm;
+       out_resp->subpixel = connector->display_info.subpixel_order;
+       out_resp->connection = connector->status;
+       if (connector->encoder)
+               out_resp->encoder_id = connector->encoder->base.id;
+       else
+               out_resp->encoder_id = 0;
+
+       /*
+        * This ioctl is called twice, once to determine how much space is
+        * needed, and the 2nd time to fill it.
+        */
+       if ((out_resp->count_modes >= mode_count) && mode_count) {
+               copied = 0;
+               mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
+               list_for_each_entry(mode, &connector->modes, head) {
+                       drm_crtc_convert_to_umode(&u_mode, mode);
+                       if (copy_to_user(mode_ptr + copied,
+                                        &u_mode, sizeof(u_mode))) {
+                               ret = -EFAULT;
+                               goto out;
+                       }
+                       copied++;
+               }
+       }
+       out_resp->count_modes = mode_count;
+
+       /* Copy property ids and their current values as parallel arrays. */
+       if ((out_resp->count_props >= props_count) && props_count) {
+               copied = 0;
+               prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
+               prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
+               for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
+                       if (connector->property_ids[i] != 0) {
+                               if (put_user(connector->property_ids[i],
+                                            prop_ptr + copied)) {
+                                       ret = -EFAULT;
+                                       goto out;
+                               }
+
+                               if (put_user(connector->property_values[i],
+                                            prop_values + copied)) {
+                                       ret = -EFAULT;
+                                       goto out;
+                               }
+                               copied++;
+                       }
+               }
+       }
+       out_resp->count_props = props_count;
+
+       if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
+               copied = 0;
+               encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
+               for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+                       if (connector->encoder_ids[i] != 0) {
+                               if (put_user(connector->encoder_ids[i],
+                                            encoder_ptr + copied)) {
+                                       ret = -EFAULT;
+                                       goto out;
+                               }
+                               copied++;
+                       }
+               }
+       }
+       out_resp->count_encoders = encoders_count;
+
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+/**
+ * drm_mode_getencoder - get encoder configuration
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Look up the encoder by the user-supplied id and report its type, its
+ * currently bound CRTC (0 if none) and its possible crtc/clone masks.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_getencoder(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv)
+{
+       struct drm_mode_get_encoder *enc_resp = data;
+       struct drm_mode_object *obj;
+       struct drm_encoder *encoder;
+       int ret = 0;
+
+       mutex_lock(&dev->mode_config.mutex);
+       obj = drm_mode_object_find(dev, enc_resp->encoder_id,
+                                  DRM_MODE_OBJECT_ENCODER);
+       if (!obj) {
+               ret = -EINVAL;
+               goto out;
+       }
+       encoder = obj_to_encoder(obj);
+
+       /* crtc_id of 0 tells userspace the encoder is currently unbound. */
+       if (encoder->crtc)
+               enc_resp->crtc_id = encoder->crtc->base.id;
+       else
+               enc_resp->crtc_id = 0;
+       enc_resp->encoder_type = encoder->encoder_type;
+       enc_resp->encoder_id = encoder->base.id;
+       enc_resp->possible_crtcs = encoder->possible_crtcs;
+       enc_resp->possible_clones = encoder->possible_clones;
+
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+/**
+ * drm_mode_setcrtc - set CRTC configuration
+ * @dev: DRM device
+ * @data: ioctl payload (struct drm_mode_crtc)
+ * @file_priv: DRM file private
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Build a new CRTC configuration based on user request and hand it to the
+ * driver's set_config hook.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_setcrtc(struct drm_device *dev, void *data,
+                    struct drm_file *file_priv)
+{
+       struct drm_mode_config *config = &dev->mode_config;
+       struct drm_mode_crtc *crtc_req = data;
+       struct drm_mode_object *obj;
+       struct drm_crtc *crtc, *crtcfb;
+       struct drm_connector **connector_set = NULL, *connector;
+       struct drm_framebuffer *fb = NULL;
+       struct drm_display_mode *mode = NULL;
+       struct drm_mode_set set;
+       uint32_t __user *set_connectors_ptr;
+       int ret = 0;
+       int i;
+
+       mutex_lock(&dev->mode_config.mutex);
+       obj = drm_mode_object_find(dev, crtc_req->crtc_id,
+                                  DRM_MODE_OBJECT_CRTC);
+       if (!obj) {
+               DRM_DEBUG_KMS("Unknown CRTC ID %d\n", crtc_req->crtc_id);
+               ret = -EINVAL;
+               goto out;
+       }
+       crtc = obj_to_crtc(obj);
+
+       if (crtc_req->mode_valid) {
+               /* If we have a mode we need a framebuffer. */
+               /* If we pass -1, set the mode with the currently bound fb */
+               if (crtc_req->fb_id == -1) {
+                       list_for_each_entry(crtcfb,
+                                           &dev->mode_config.crtc_list, head) {
+                               if (crtcfb == crtc) {
+                                       DRM_DEBUG_KMS("Using current fb for "
+                                                       "setmode\n");
+                                       fb = crtc->fb;
+                               }
+                       }
+               } else {
+                       obj = drm_mode_object_find(dev, crtc_req->fb_id,
+                                                  DRM_MODE_OBJECT_FB);
+                       if (!obj) {
+                               DRM_DEBUG_KMS("Unknown FB ID%d\n",
+                                               crtc_req->fb_id);
+                               ret = -EINVAL;
+                               goto out;
+                       }
+                       fb = obj_to_fb(obj);
+               }
+
+               /* Convert the userspace mode description to a kernel mode. */
+               mode = drm_mode_create(dev);
+               if (!mode) {
+                       /* drm_mode_create() failed to allocate; bail before
+                        * drm_crtc_convert_umode() dereferences NULL. */
+                       ret = -ENOMEM;
+                       goto out;
+               }
+               drm_crtc_convert_umode(mode, &crtc_req->mode);
+               drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+       }
+
+       if (crtc_req->count_connectors == 0 && mode) {
+               DRM_DEBUG_KMS("Count connectors is 0 but mode set\n");
+               ret = -EINVAL;
+               goto out;
+       }
+
+       if (crtc_req->count_connectors > 0 && (!mode || !fb)) {
+               DRM_DEBUG_KMS("Count connectors is %d but no mode or fb set\n",
+                         crtc_req->count_connectors);
+               ret = -EINVAL;
+               goto out;
+       }
+
+       if (crtc_req->count_connectors > 0) {
+               u32 out_id;
+
+               /* Avoid unbounded kernel memory allocation */
+               if (crtc_req->count_connectors > config->num_connector) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+
+               connector_set = kmalloc(crtc_req->count_connectors *
+                                       sizeof(struct drm_connector *),
+                                       GFP_KERNEL);
+               if (!connector_set) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
+
+               /* Resolve each userspace connector id to a kernel object. */
+               for (i = 0; i < crtc_req->count_connectors; i++) {
+                       set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
+                       if (get_user(out_id, &set_connectors_ptr[i])) {
+                               ret = -EFAULT;
+                               goto out;
+                       }
+
+                       obj = drm_mode_object_find(dev, out_id,
+                                                  DRM_MODE_OBJECT_CONNECTOR);
+                       if (!obj) {
+                               DRM_DEBUG_KMS("Connector id %d unknown\n",
+                                               out_id);
+                               ret = -EINVAL;
+                               goto out;
+                       }
+                       connector = obj_to_connector(obj);
+
+                       connector_set[i] = connector;
+               }
+       }
+
+       set.crtc = crtc;
+       set.x = crtc_req->x;
+       set.y = crtc_req->y;
+       set.mode = mode;
+       set.connectors = connector_set;
+       set.num_connectors = crtc_req->count_connectors;
+       set.fb = fb;
+       ret = crtc->funcs->set_config(&set);
+
+out:
+       kfree(connector_set);
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+/**
+ * drm_mode_cursor_ioctl - set or move the hardware cursor on a CRTC
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Depending on req->flags, sets the cursor buffer object (handle 0 turns
+ * the cursor off) and/or moves the cursor, via the CRTC's driver hooks.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_cursor_ioctl(struct drm_device *dev,
+                       void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_cursor *req = data;
+       struct drm_mode_object *obj;
+       struct drm_crtc *crtc;
+       int ret = 0;
+
+       /* At least one of DRM_MODE_CURSOR_BO/_MOVE must be requested. */
+       if (!req->flags) {
+               DRM_ERROR("no operation set\n");
+               return -EINVAL;
+       }
+
+       mutex_lock(&dev->mode_config.mutex);
+       obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC);
+       if (!obj) {
+               DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id);
+               ret = -EINVAL;
+               goto out;
+       }
+       crtc = obj_to_crtc(obj);
+
+       if (req->flags & DRM_MODE_CURSOR_BO) {
+               if (!crtc->funcs->cursor_set) {
+                       DRM_ERROR("crtc does not support cursor\n");
+                       ret = -ENXIO;
+                       goto out;
+               }
+               /* Turns off the cursor if handle is 0 */
+               ret = crtc->funcs->cursor_set(crtc, file_priv, req->handle,
+                                             req->width, req->height);
+       }
+
+       if (req->flags & DRM_MODE_CURSOR_MOVE) {
+               if (crtc->funcs->cursor_move) {
+                       ret = crtc->funcs->cursor_move(crtc, req->x, req->y);
+               } else {
+                       DRM_ERROR("crtc does not support cursor\n");
+                       /* NOTE(review): -EFAULT is an odd errno for a missing
+                        * hook; -ENXIO (as in the BO branch above) looks
+                        * intended — confirm before changing. */
+                       ret = -EFAULT;
+                       goto out;
+               }
+       }
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+/**
+ * drm_mode_addfb - add an FB to the graphics configuration
+ * @inode: inode from the ioctl
+ * @filp: file * from the ioctl
+ * @cmd: cmd from ioctl
+ * @arg: arg from ioctl
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Add a new FB to the specified CRTC, given a user request.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_addfb(struct drm_device *dev,
+                  void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_fb_cmd *r = data;
+       struct drm_mode_config *config = &dev->mode_config;
+       struct drm_framebuffer *fb;
+       int ret = 0;
+
+       /* Reject dimensions outside the device's advertised limits. */
+       if ((config->min_width > r->width) || (r->width > config->max_width)) {
+               DRM_ERROR("mode new framebuffer width not within limits\n");
+               return -EINVAL;
+       }
+       if ((config->min_height > r->height) || (r->height > config->max_height)) {
+               DRM_ERROR("mode new framebuffer height not within limits\n");
+               return -EINVAL;
+       }
+
+       mutex_lock(&dev->mode_config.mutex);
+
+       /* TODO check buffer is sufficently large */
+       /* TODO setup destructor callback */
+
+       /* Driver hook allocates the fb; NULL signals failure here. */
+       fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
+       if (!fb) {
+               DRM_ERROR("could not create framebuffer\n");
+               ret = -EINVAL;
+               goto out;
+       }
+
+       /* Return the new id and track ownership on this file for cleanup. */
+       r->fb_id = fb->base.id;
+       list_add(&fb->filp_head, &file_priv->fbs);
+
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+/**
+ * drm_mode_rmfb - remove an FB from the configuration
+ * @inode: inode from the ioctl
+ * @filp: file * from the ioctl
+ * @cmd: cmd from ioctl
+ * @arg: arg from ioctl
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Remove the FB specified by the user.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_rmfb(struct drm_device *dev,
+                  void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_object *obj;
+       struct drm_framebuffer *fb = NULL;
+       struct drm_framebuffer *fbl = NULL;
+       uint32_t *id = data;
+       int ret = 0;
+       int found = 0;
+
+       mutex_lock(&dev->mode_config.mutex);
+       obj = drm_mode_object_find(dev, *id, DRM_MODE_OBJECT_FB);
+       /* TODO check that we really get a framebuffer back. */
+       if (!obj) {
+               DRM_ERROR("mode invalid framebuffer id\n");
+               ret = -EINVAL;
+               goto out;
+       }
+       fb = obj_to_fb(obj);
+
+       /* Only allow removal of fbs this file actually owns. */
+       list_for_each_entry(fbl, &file_priv->fbs, filp_head)
+               if (fb == fbl)
+                       found = 1;
+
+       if (!found) {
+               DRM_ERROR("tried to remove a fb that we didn't own\n");
+               ret = -EINVAL;
+               goto out;
+       }
+
+       /* TODO release all crtc connected to the framebuffer */
+       /* TODO unhock the destructor from the buffer object */
+
+       list_del(&fb->filp_head);
+       fb->funcs->destroy(fb);
+
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+/**
+ * drm_mode_getfb - get FB info
+ * @inode: inode from the ioctl
+ * @filp: file * from the ioctl
+ * @cmd: cmd from ioctl
+ * @arg: arg from ioctl
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Lookup the FB given its ID and return info about it.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_getfb(struct drm_device *dev,
+                  void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_fb_cmd *r = data;
+       struct drm_mode_object *obj;
+       struct drm_framebuffer *fb;
+       int ret = 0;
+
+       mutex_lock(&dev->mode_config.mutex);
+       obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
+       if (!obj) {
+               DRM_ERROR("invalid framebuffer id\n");
+               ret = -EINVAL;
+               goto out;
+       }
+       fb = obj_to_fb(obj);
+
+       r->height = fb->height;
+       r->width = fb->width;
+       r->depth = fb->depth;
+       r->bpp = fb->bits_per_pixel;
+       r->pitch = fb->pitch;
+       /* NOTE(review): create_handle()'s return value is ignored — confirm
+        * it cannot fail for the drivers this is built against. */
+       fb->funcs->create_handle(fb, file_priv, &r->handle);
+
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+/**
+ * drm_mode_dirtyfb_ioctl - flush dirty regions of an FB to the device
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Copies the user-supplied clip rectangles (if any) and forwards them to
+ * the framebuffer's dirty() hook. -ENOSYS if the driver has no hook.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
+                          void *data, struct drm_file *file_priv)
+{
+       struct drm_clip_rect __user *clips_ptr;
+       struct drm_clip_rect *clips = NULL;
+       struct drm_mode_fb_dirty_cmd *r = data;
+       struct drm_mode_object *obj;
+       struct drm_framebuffer *fb;
+       unsigned flags;
+       int num_clips;
+       int ret = 0;
+
+       mutex_lock(&dev->mode_config.mutex);
+       obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
+       if (!obj) {
+               DRM_ERROR("invalid framebuffer id\n");
+               ret = -EINVAL;
+               goto out_err1;
+       }
+       fb = obj_to_fb(obj);
+
+       num_clips = r->num_clips;
+       clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
+
+       /* A clip count and a clip pointer must be supplied together. */
+       if (!num_clips != !clips_ptr) {
+               ret = -EINVAL;
+               goto out_err1;
+       }
+
+       flags = DRM_MODE_FB_DIRTY_FLAGS & r->flags;
+
+       /* If userspace annotates copy, clips must come in pairs */
+       if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY && (num_clips % 2)) {
+               ret = -EINVAL;
+               goto out_err1;
+       }
+
+       if (num_clips && clips_ptr) {
+               /* NOTE(review): num_clips comes from userspace; an explicit
+                * upper bound before this allocation would be safer. */
+               clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
+               if (!clips) {
+                       ret = -ENOMEM;
+                       goto out_err2;
+               }
+
+               /* copy_from_user() returns the number of bytes NOT copied,
+                * not an errno — map any failure to -EFAULT instead of
+                * leaking that positive count back to userspace. */
+               if (copy_from_user(clips, clips_ptr,
+                                  num_clips * sizeof(*clips))) {
+                       ret = -EFAULT;
+                       goto out_err2;
+               }
+       }
+
+       if (fb->funcs->dirty) {
+               ret = fb->funcs->dirty(fb, flags, r->color, clips, num_clips);
+       } else {
+               ret = -ENOSYS;
+               goto out_err2;
+       }
+
+out_err2:
+       kfree(clips);
+out_err1:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+
+/**
+ * drm_fb_release - remove and free the FBs on this file
+ * @priv: DRM file private whose framebuffers are torn down
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Destroy all the FBs associated with @priv. Unlike the ioctls above this
+ * is a release-time cleanup hook and returns nothing.
+ */
+void drm_fb_release(struct drm_file *priv)
+{
+       struct drm_device *dev = priv->minor->dev;
+       struct drm_framebuffer *fb, *tfb;
+
+       mutex_lock(&dev->mode_config.mutex);
+       /* _safe variant: destroy() frees the node we just unlinked. */
+       list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
+               list_del(&fb->filp_head);
+               fb->funcs->destroy(fb);
+       }
+       mutex_unlock(&dev->mode_config.mutex);
+}
+
+/**
+ * drm_mode_attachmode - add a mode to the user mode list
+ * @dev: DRM device
+ * @connector: connector to add the mode to
+ * @mode: mode to add
+ *
+ * Add @mode to @connector's user mode list. list_add_tail() cannot fail,
+ * so this currently always returns 0.
+ */
+static int drm_mode_attachmode(struct drm_device *dev,
+                              struct drm_connector *connector,
+                              struct drm_display_mode *mode)
+{
+       int ret = 0;
+
+       list_add_tail(&mode->head, &connector->user_modes);
+       return ret;
+}
+
+/**
+ * drm_mode_attachmode_crtc - attach a mode to every connector on a CRTC
+ * @dev: DRM device
+ * @crtc: CRTC whose connectors receive @mode
+ * @mode: mode to attach; the first attach uses @mode itself, subsequent
+ *        attaches use duplicates so each connector owns its own copy
+ */
+int drm_mode_attachmode_crtc(struct drm_device *dev, struct drm_crtc *crtc,
+                            struct drm_display_mode *mode)
+{
+       struct drm_connector *connector;
+       int ret = 0;
+       struct drm_display_mode *dup_mode;
+       int need_dup = 0;
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               /* NOTE(review): 'break' abandons the scan at the first
+                * connector without an encoder; 'continue' looks intended —
+                * confirm before changing. */
+               if (!connector->encoder)
+                       break;
+               if (connector->encoder->crtc == crtc) {
+                       if (need_dup)
+                               /* NOTE(review): drm_mode_duplicate() may
+                                * return NULL; not checked here — confirm. */
+                               dup_mode = drm_mode_duplicate(dev, mode);
+                       else
+                               dup_mode = mode;
+                       ret = drm_mode_attachmode(dev, connector, dup_mode);
+                       if (ret)
+                               return ret;
+                       need_dup = 1;
+               }
+       }
+       return 0;
+}
+EXPORT_SYMBOL(drm_mode_attachmode_crtc);
+
+/**
+ * drm_mode_detachmode - remove a matching mode from a connector's user list
+ * @dev: DRM device
+ * @connector: connector to search
+ * @mode: mode to match (compared with drm_mode_equal(), not by pointer)
+ *
+ * Removes and destroys the first matching user mode. Returns -EINVAL if
+ * no match is found.
+ */
+static int drm_mode_detachmode(struct drm_device *dev,
+                              struct drm_connector *connector,
+                              struct drm_display_mode *mode)
+{
+       int found = 0;
+       int ret = 0;
+       struct drm_display_mode *match_mode, *t;
+
+       list_for_each_entry_safe(match_mode, t, &connector->user_modes, head) {
+               if (drm_mode_equal(match_mode, mode)) {
+                       list_del(&match_mode->head);
+                       drm_mode_destroy(dev, match_mode);
+                       found = 1;
+                       break;
+               }
+       }
+
+       if (!found)
+               ret = -EINVAL;
+
+       return ret;
+}
+
+/**
+ * drm_mode_detachmode_crtc - detach a mode from every connector
+ * @dev: DRM device
+ * @mode: mode to detach
+ *
+ * Best-effort: per-connector failures are ignored; always returns 0.
+ */
+int drm_mode_detachmode_crtc(struct drm_device *dev, struct drm_display_mode *mode)
+{
+       struct drm_connector *connector;
+
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               drm_mode_detachmode(dev, connector, mode);
+       }
+       return 0;
+}
+EXPORT_SYMBOL(drm_mode_detachmode_crtc);
+
+/**
+ * drm_mode_attachmode_ioctl - Attach a user mode to a connector
+ * @inode: inode from the ioctl
+ * @filp: file * from the ioctl
+ * @cmd: cmd from ioctl
+ * @arg: arg from ioctl
+ *
+ * This attaches a user specified mode to a connector.
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_attachmode_ioctl(struct drm_device *dev,
+                             void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_mode_cmd *mode_cmd = data;
+       struct drm_connector *connector;
+       struct drm_display_mode *mode;
+       struct drm_mode_object *obj;
+       struct drm_mode_modeinfo *umode = &mode_cmd->mode;
+       int ret = 0;
+
+       mutex_lock(&dev->mode_config.mutex);
+
+       obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR);
+       if (!obj) {
+               ret = -EINVAL;
+               goto out;
+       }
+       connector = obj_to_connector(obj);
+
+       /* Allocate a kernel mode and fill it from the userspace template;
+        * the connector's user mode list takes ownership on success. */
+       mode = drm_mode_create(dev);
+       if (!mode) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       drm_crtc_convert_umode(mode, umode);
+
+       ret = drm_mode_attachmode(dev, connector, mode);
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+
+/**
+ * drm_mode_detachmode_ioctl - Detach a user specified mode from a connector
+ * @inode: inode from the ioctl
+ * @filp: file * from the ioctl
+ * @cmd: cmd from ioctl
+ * @arg: arg from ioctl
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_detachmode_ioctl(struct drm_device *dev,
+                             void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_object *obj;
+       struct drm_mode_mode_cmd *mode_cmd = data;
+       struct drm_connector *connector;
+       struct drm_display_mode mode;
+       struct drm_mode_modeinfo *umode = &mode_cmd->mode;
+       int ret = 0;
+
+       mutex_lock(&dev->mode_config.mutex);
+
+       obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR);
+       if (!obj) {
+               ret = -EINVAL;
+               goto out;
+       }
+       connector = obj_to_connector(obj);
+
+       /* Stack-local template is fine: detach matches by drm_mode_equal(),
+        * not by pointer identity. */
+       drm_crtc_convert_umode(&mode, umode);
+       ret = drm_mode_detachmode(dev, connector, &mode);
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+/**
+ * drm_property_create - allocate and register a new property
+ * @dev: DRM device
+ * @flags: DRM_MODE_PROP_* flags
+ * @name: property name, copied (truncated to DRM_PROP_NAME_LEN-1 chars)
+ * @num_values: number of entries in the values array (may be 0)
+ *
+ * RETURNS:
+ * New property, or NULL on allocation failure.
+ */
+struct drm_property *drm_property_create(struct drm_device *dev, int flags,
+                                        const char *name, int num_values)
+{
+       struct drm_property *property = NULL;
+
+       property = kzalloc(sizeof(struct drm_property), GFP_KERNEL);
+       if (!property)
+               return NULL;
+
+       if (num_values) {
+               property->values = kzalloc(sizeof(uint64_t)*num_values, GFP_KERNEL);
+               if (!property->values)
+                       goto fail;
+       }
+
+       drm_mode_object_get(dev, &property->base, DRM_MODE_OBJECT_PROPERTY);
+       property->flags = flags;
+       property->num_values = num_values;
+       INIT_LIST_HEAD(&property->enum_blob_list);
+
+       if (name) {
+               strncpy(property->name, name, DRM_PROP_NAME_LEN);
+               /* strncpy() does not NUL-terminate when the source fills the
+                * buffer; force termination, matching drm_property_add_enum(). */
+               property->name[DRM_PROP_NAME_LEN-1] = '\0';
+       }
+
+       list_add_tail(&property->head, &dev->mode_config.property_list);
+       return property;
+fail:
+       kfree(property);
+       return NULL;
+}
+EXPORT_SYMBOL(drm_property_create);
+
+/**
+ * drm_property_add_enum - add an enum entry to an enum property
+ * @property: property to extend (must have DRM_MODE_PROP_ENUM set)
+ * @index: slot in property->values to record @value in
+ * @value: numeric value of the enum entry
+ * @name: human-readable name, copied and NUL-terminated
+ *
+ * If @value already exists, its name is updated in place. Otherwise a new
+ * entry is appended. NOTE(review): @index is not range-checked against
+ * property->num_values — callers must pass a valid slot; confirm.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_property_add_enum(struct drm_property *property, int index,
+                         uint64_t value, const char *name)
+{
+       struct drm_property_enum *prop_enum;
+
+       if (!(property->flags & DRM_MODE_PROP_ENUM))
+               return -EINVAL;
+
+       if (!list_empty(&property->enum_blob_list)) {
+               list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
+                       if (prop_enum->value == value) {
+                               strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
+                               prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
+                               return 0;
+                       }
+               }
+       }
+
+       prop_enum = kzalloc(sizeof(struct drm_property_enum), GFP_KERNEL);
+       if (!prop_enum)
+               return -ENOMEM;
+
+       strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
+       prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
+       prop_enum->value = value;
+
+       property->values[index] = value;
+       list_add_tail(&prop_enum->head, &property->enum_blob_list);
+       return 0;
+}
+EXPORT_SYMBOL(drm_property_add_enum);
+
+/**
+ * drm_property_destroy - free a property and all of its enum entries
+ * @dev: DRM device
+ * @property: property to destroy
+ */
+void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
+{
+       struct drm_property_enum *prop_enum, *pt;
+
+       list_for_each_entry_safe(prop_enum, pt, &property->enum_blob_list, head) {
+               list_del(&prop_enum->head);
+               kfree(prop_enum);
+       }
+
+       /* The num_values guard is redundant (kfree(NULL) is a no-op) but
+        * harmless; kept as-is. */
+       if (property->num_values)
+               kfree(property->values);
+       drm_mode_object_put(dev, &property->base);
+       list_del(&property->head);
+       kfree(property);
+}
+EXPORT_SYMBOL(drm_property_destroy);
+
+/**
+ * drm_connector_attach_property - bind a property to a connector
+ * @connector: connector to attach to
+ * @property: property to attach
+ * @init_val: initial property value for this connector
+ *
+ * Records the property id/value in the first free slot (id 0 = free).
+ *
+ * RETURNS:
+ * Zero on success, -EINVAL if all slots are taken.
+ */
+int drm_connector_attach_property(struct drm_connector *connector,
+                              struct drm_property *property, uint64_t init_val)
+{
+       int i;
+
+       for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
+               if (connector->property_ids[i] == 0) {
+                       connector->property_ids[i] = property->base.id;
+                       connector->property_values[i] = init_val;
+                       break;
+               }
+       }
+
+       if (i == DRM_CONNECTOR_MAX_PROPERTY)
+               return -EINVAL;
+       return 0;
+}
+EXPORT_SYMBOL(drm_connector_attach_property);
+
+/**
+ * drm_connector_property_set_value - update a property value on a connector
+ * @connector: connector to update
+ * @property: property to look up by id
+ * @value: new value
+ *
+ * RETURNS:
+ * Zero on success, -EINVAL if the property is not attached.
+ */
+int drm_connector_property_set_value(struct drm_connector *connector,
+                                 struct drm_property *property, uint64_t value)
+{
+       int i;
+
+       for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
+               if (connector->property_ids[i] == property->base.id) {
+                       connector->property_values[i] = value;
+                       break;
+               }
+       }
+
+       if (i == DRM_CONNECTOR_MAX_PROPERTY)
+               return -EINVAL;
+       return 0;
+}
+EXPORT_SYMBOL(drm_connector_property_set_value);
+
+/**
+ * drm_connector_property_get_value - read a property value from a connector
+ * @connector: connector to query
+ * @property: property to look up by id
+ * @val: out-parameter; untouched if the property is not attached
+ *
+ * RETURNS:
+ * Zero on success, -EINVAL if the property is not attached.
+ */
+int drm_connector_property_get_value(struct drm_connector *connector,
+                                 struct drm_property *property, uint64_t *val)
+{
+       int i;
+
+       for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
+               if (connector->property_ids[i] == property->base.id) {
+                       *val = connector->property_values[i];
+                       break;
+               }
+       }
+
+       if (i == DRM_CONNECTOR_MAX_PROPERTY)
+               return -EINVAL;
+       return 0;
+}
+EXPORT_SYMBOL(drm_connector_property_get_value);
+
+/**
+ * drm_mode_getproperty_ioctl - return a property's metadata to userspace
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Two-pass ioctl like the others here: userspace first calls with small
+ * counts to learn the required sizes, then again with buffers. Note that
+ * enum_blob_list holds enum entries for ENUM properties and blobs for
+ * BLOB properties, and count_enum_blobs serves both cases.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_getproperty_ioctl(struct drm_device *dev,
+                              void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_object *obj;
+       struct drm_mode_get_property *out_resp = data;
+       struct drm_property *property;
+       int enum_count = 0;
+       int blob_count = 0;
+       int value_count = 0;
+       int ret = 0, i;
+       int copied;
+       struct drm_property_enum *prop_enum;
+       struct drm_mode_property_enum __user *enum_ptr;
+       struct drm_property_blob *prop_blob;
+       uint32_t *blob_id_ptr;
+       uint64_t __user *values_ptr;
+       uint32_t __user *blob_length_ptr;
+
+       mutex_lock(&dev->mode_config.mutex);
+       obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
+       if (!obj) {
+               ret = -EINVAL;
+               goto done;
+       }
+       property = obj_to_property(obj);
+
+       if (property->flags & DRM_MODE_PROP_ENUM) {
+               list_for_each_entry(prop_enum, &property->enum_blob_list, head)
+                       enum_count++;
+       } else if (property->flags & DRM_MODE_PROP_BLOB) {
+               list_for_each_entry(prop_blob, &property->enum_blob_list, head)
+                       blob_count++;
+       }
+
+       value_count = property->num_values;
+
+       strncpy(out_resp->name, property->name, DRM_PROP_NAME_LEN);
+       out_resp->name[DRM_PROP_NAME_LEN-1] = 0;
+       out_resp->flags = property->flags;
+
+       if ((out_resp->count_values >= value_count) && value_count) {
+               values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
+               for (i = 0; i < value_count; i++) {
+                       if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
+                               ret = -EFAULT;
+                               goto done;
+                       }
+               }
+       }
+       out_resp->count_values = value_count;
+
+       if (property->flags & DRM_MODE_PROP_ENUM) {
+               if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
+                       copied = 0;
+                       enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
+                       list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
+
+                               if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
+                                       ret = -EFAULT;
+                                       goto done;
+                               }
+
+                               if (copy_to_user(&enum_ptr[copied].name,
+                                                &prop_enum->name, DRM_PROP_NAME_LEN)) {
+                                       ret = -EFAULT;
+                                       goto done;
+                               }
+                               copied++;
+                       }
+               }
+               out_resp->count_enum_blobs = enum_count;
+       }
+
+       if (property->flags & DRM_MODE_PROP_BLOB) {
+               if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
+                       copied = 0;
+                       /* For blobs, values_ptr is reused to carry lengths. */
+                       blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
+                       blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
+
+                       list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
+                               if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
+                                       ret = -EFAULT;
+                                       goto done;
+                               }
+
+                               if (put_user(prop_blob->length, blob_length_ptr + copied)) {
+                                       ret = -EFAULT;
+                                       goto done;
+                               }
+
+                               copied++;
+                       }
+               }
+               out_resp->count_enum_blobs = blob_count;
+       }
+done:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+/*
+ * Allocate a blob property object of @length bytes and copy @data into it.
+ * Header and payload share a single allocation; blob->data points just past
+ * the struct.  Returns NULL on bad arguments or allocation failure.
+ */
+static struct drm_property_blob *drm_property_create_blob(struct drm_device *dev, int length,
+                                                         void *data)
+{
+       struct drm_property_blob *blob;
+
+       if (!length || !data)
+               return NULL;
+
+       blob = kzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL);
+       if (!blob)
+               return NULL;
+
+       /* Payload lives immediately after the header in the same allocation. */
+       blob->data = (void *)((char *)blob + sizeof(struct drm_property_blob));
+       blob->length = length;
+
+       memcpy(blob->data, data, length);
+
+       /* Register an id so userspace can look the blob up (see getblob ioctl). */
+       drm_mode_object_get(dev, &blob->base, DRM_MODE_OBJECT_BLOB);
+
+       list_add_tail(&blob->head, &dev->mode_config.property_blob_list);
+       return blob;
+}
+
+/* Unregister @blob's mode-object id, unlink it and free its storage. */
+static void drm_property_destroy_blob(struct drm_device *dev,
+                              struct drm_property_blob *blob)
+{
+       drm_mode_object_put(dev, &blob->base);
+       list_del(&blob->head);
+       kfree(blob);
+}
+
+/*
+ * GETPROPBLOB ioctl: copy a blob property's payload to userspace.
+ *
+ * Two-pass protocol: the payload is copied only when the caller-supplied
+ * length already equals blob->length; the real length is always written
+ * back so userspace can size its buffer and call again.
+ */
+int drm_mode_getblob_ioctl(struct drm_device *dev,
+                          void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_object *obj;
+       struct drm_mode_get_blob *out_resp = data;
+       struct drm_property_blob *blob;
+       int ret = 0;
+       void *blob_ptr;
+
+       mutex_lock(&dev->mode_config.mutex);
+       obj = drm_mode_object_find(dev, out_resp->blob_id, DRM_MODE_OBJECT_BLOB);
+       if (!obj) {
+               ret = -EINVAL;
+               goto done;
+       }
+       blob = obj_to_blob(obj);
+
+       if (out_resp->length == blob->length) {
+               blob_ptr = (void *)(unsigned long)out_resp->data;
+               if (copy_to_user(blob_ptr, blob->data, blob->length)){
+                       ret = -EFAULT;
+                       goto done;
+               }
+       }
+       out_resp->length = blob->length;
+
+done:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+/*
+ * Replace the connector's EDID blob property with @edid (or clear it when
+ * @edid is NULL).  The previous blob, if any, is destroyed first.
+ * Returns 0 on success or a negative error code.
+ */
+int drm_mode_connector_update_edid_property(struct drm_connector *connector,
+                                           struct edid *edid)
+{
+       struct drm_device *dev = connector->dev;
+       int ret = 0;
+
+       if (connector->edid_blob_ptr)
+               drm_property_destroy_blob(dev, connector->edid_blob_ptr);
+
+       /* Delete edid, when there is none. */
+       if (!edid) {
+               connector->edid_blob_ptr = NULL;
+               ret = drm_connector_property_set_value(connector, dev->mode_config.edid_property, 0);
+               return ret;
+       }
+
+       /* 128 bytes == one base EDID block. */
+       connector->edid_blob_ptr = drm_property_create_blob(connector->dev, 128, edid);
+       if (!connector->edid_blob_ptr) {
+               /*
+                * Blob allocation failed: the old code dereferenced the NULL
+                * pointer below.  Leave the property cleared instead.
+                */
+               drm_connector_property_set_value(connector,
+                                                dev->mode_config.edid_property, 0);
+               return -EINVAL;
+       }
+
+       ret = drm_connector_property_set_value(connector,
+                                              dev->mode_config.edid_property,
+                                              connector->edid_blob_ptr->base.id);
+
+       return ret;
+}
+EXPORT_SYMBOL(drm_mode_connector_update_edid_property);
+
+/*
+ * SETPROPERTY ioctl: set one property on a connector.
+ *
+ * Validates that the property is actually attached to the connector, is not
+ * immutable, and that the value is inside the declared range (RANGE) or is
+ * one of the declared values (enum/other).  DPMS is handled in the core;
+ * everything else goes through the connector's set_property hook.
+ */
+int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
+                                      void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_connector_set_property *out_resp = data;
+       struct drm_mode_object *obj;
+       struct drm_property *property;
+       struct drm_connector *connector;
+       int ret = -EINVAL;
+       int i;
+
+       mutex_lock(&dev->mode_config.mutex);
+
+       obj = drm_mode_object_find(dev, out_resp->connector_id, DRM_MODE_OBJECT_CONNECTOR);
+       if (!obj) {
+               goto out;
+       }
+       connector = obj_to_connector(obj);
+
+       /* The property must be attached to this connector. */
+       for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
+               if (connector->property_ids[i] == out_resp->prop_id)
+                       break;
+       }
+
+       if (i == DRM_CONNECTOR_MAX_PROPERTY) {
+               goto out;
+       }
+
+       obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
+       if (!obj) {
+               goto out;
+       }
+       property = obj_to_property(obj);
+
+       if (property->flags & DRM_MODE_PROP_IMMUTABLE)
+               goto out;
+
+       /* values[0]/values[1] are min/max for RANGE properties. */
+       if (property->flags & DRM_MODE_PROP_RANGE) {
+               if (out_resp->value < property->values[0])
+                       goto out;
+
+               if (out_resp->value > property->values[1])
+                       goto out;
+       } else {
+               int found = 0;
+               for (i = 0; i < property->num_values; i++) {
+                       if (property->values[i] == out_resp->value) {
+                               found = 1;
+                               break;
+                       }
+               }
+               if (!found) {
+                       goto out;
+               }
+       }
+
+       /* Do DPMS ourselves */
+       if (property == connector->dev->mode_config.dpms_property) {
+               if (connector->funcs->dpms)
+                       (*connector->funcs->dpms)(connector, (int) out_resp->value);
+               ret = 0;
+       } else if (connector->funcs->set_property)
+               ret = connector->funcs->set_property(connector, property, out_resp->value);
+
+       /* store the property value if successful */
+       if (!ret)
+               drm_connector_property_set_value(connector, property, out_resp->value);
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+/*
+ * Record @encoder's id in the connector's first free encoder slot.
+ * Returns 0 on success, -ENOMEM when all DRM_CONNECTOR_MAX_ENCODER
+ * slots are already taken.
+ */
+int drm_mode_connector_attach_encoder(struct drm_connector *connector,
+                                     struct drm_encoder *encoder)
+{
+       int i;
+
+       for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+               if (connector->encoder_ids[i] == 0) {
+                       connector->encoder_ids[i] = encoder->base.id;
+                       return 0;
+               }
+       }
+       return -ENOMEM;
+}
+EXPORT_SYMBOL(drm_mode_connector_attach_encoder);
+
+/*
+ * Remove @encoder's id from the connector's encoder table and drop the
+ * current-encoder pointer if it referenced @encoder.  No-op when the
+ * encoder was never attached.
+ */
+void drm_mode_connector_detach_encoder(struct drm_connector *connector,
+                                   struct drm_encoder *encoder)
+{
+       int i;
+       for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+               if (connector->encoder_ids[i] == encoder->base.id) {
+                       connector->encoder_ids[i] = 0;
+                       if (connector->encoder == encoder)
+                               connector->encoder = NULL;
+                       break;
+               }
+       }
+}
+EXPORT_SYMBOL(drm_mode_connector_detach_encoder);
+
+/*
+ * Allocate the CRTC's gamma LUT storage: @gamma_size u16 entries for each
+ * of the three (R, G, B) channels, laid out channel after channel.
+ * Returns false (and resets gamma_size to 0) on allocation failure.
+ */
+bool drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
+                                 int gamma_size)
+{
+       crtc->gamma_size = gamma_size;
+
+       crtc->gamma_store = kzalloc(gamma_size * sizeof(uint16_t) * 3, GFP_KERNEL);
+       if (!crtc->gamma_store) {
+               crtc->gamma_size = 0;
+               return false;
+       }
+
+       return true;
+}
+EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size);
+
+/*
+ * GAMMA_SET ioctl: copy the three userspace LUT channels into the CRTC's
+ * gamma store (R, then G, then B, each @gamma_size u16 entries) and push
+ * them to hardware via the crtc gamma_set hook.  The caller's gamma_size
+ * must match the size allocated by drm_mode_crtc_set_gamma_size().
+ */
+int drm_mode_gamma_set_ioctl(struct drm_device *dev,
+                            void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_crtc_lut *crtc_lut = data;
+       struct drm_mode_object *obj;
+       struct drm_crtc *crtc;
+       void *r_base, *g_base, *b_base;
+       int size;
+       int ret = 0;
+
+       mutex_lock(&dev->mode_config.mutex);
+       obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
+       if (!obj) {
+               ret = -EINVAL;
+               goto out;
+       }
+       crtc = obj_to_crtc(obj);
+
+       /* memcpy into gamma store */
+       if (crtc_lut->gamma_size != crtc->gamma_size) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       /* size is per-channel; channels are stored back to back. */
+       size = crtc_lut->gamma_size * (sizeof(uint16_t));
+       r_base = crtc->gamma_store;
+       if (copy_from_user(r_base, (void __user *)(unsigned long)crtc_lut->red, size)) {
+               ret = -EFAULT;
+               goto out;
+       }
+
+       g_base = r_base + size;
+       if (copy_from_user(g_base, (void __user *)(unsigned long)crtc_lut->green, size)) {
+               ret = -EFAULT;
+               goto out;
+       }
+
+       b_base = g_base + size;
+       if (copy_from_user(b_base, (void __user *)(unsigned long)crtc_lut->blue, size)) {
+               ret = -EFAULT;
+               goto out;
+       }
+
+       crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, crtc->gamma_size);
+
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+
+}
+
+/*
+ * GAMMA_GET ioctl: mirror of drm_mode_gamma_set_ioctl() — copies the
+ * CRTC's stored R/G/B LUT channels back out to the userspace buffers.
+ * Reads the software gamma store only; the hardware is not touched.
+ */
+int drm_mode_gamma_get_ioctl(struct drm_device *dev,
+                            void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_crtc_lut *crtc_lut = data;
+       struct drm_mode_object *obj;
+       struct drm_crtc *crtc;
+       void *r_base, *g_base, *b_base;
+       int size;
+       int ret = 0;
+
+       mutex_lock(&dev->mode_config.mutex);
+       obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
+       if (!obj) {
+               ret = -EINVAL;
+               goto out;
+       }
+       crtc = obj_to_crtc(obj);
+
+       /* memcpy into gamma store */
+       if (crtc_lut->gamma_size != crtc->gamma_size) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       /* size is per-channel; channels are stored back to back. */
+       size = crtc_lut->gamma_size * (sizeof(uint16_t));
+       r_base = crtc->gamma_store;
+       if (copy_to_user((void __user *)(unsigned long)crtc_lut->red, r_base, size)) {
+               ret = -EFAULT;
+               goto out;
+       }
+
+       g_base = r_base + size;
+       if (copy_to_user((void __user *)(unsigned long)crtc_lut->green, g_base, size)) {
+               ret = -EFAULT;
+               goto out;
+       }
+
+       b_base = g_base + size;
+       if (copy_to_user((void __user *)(unsigned long)crtc_lut->blue, b_base, size)) {
+               ret = -EFAULT;
+               goto out;
+       }
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+/*
+ * PAGE_FLIP ioctl: queue a buffer flip on a CRTC, optionally delivering a
+ * DRM_EVENT_FLIP_COMPLETE event to the caller when the flip lands.
+ *
+ * Fix over the previous version: the error path after a failed
+ * crtc->funcs->page_flip() unconditionally refunded event_space and freed
+ * 'e', even when DRM_MODE_PAGE_FLIP_EVENT was never requested (e == NULL),
+ * silently inflating the file's event budget on every failed no-event flip.
+ */
+int drm_mode_page_flip_ioctl(struct drm_device *dev,
+                            void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_crtc_page_flip *page_flip = data;
+       struct drm_mode_object *obj;
+       struct drm_crtc *crtc;
+       struct drm_framebuffer *fb;
+       struct drm_pending_vblank_event *e = NULL;
+       unsigned long flags;
+       int ret = -EINVAL;
+
+       /* Reject unknown flags and non-zero padding. */
+       if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
+           page_flip->reserved != 0)
+               return -EINVAL;
+
+       mutex_lock(&dev->mode_config.mutex);
+       obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC);
+       if (!obj)
+               goto out;
+       crtc = obj_to_crtc(obj);
+
+       if (crtc->funcs->page_flip == NULL)
+               goto out;
+
+       obj = drm_mode_object_find(dev, page_flip->fb_id, DRM_MODE_OBJECT_FB);
+       if (!obj)
+               goto out;
+       fb = obj_to_fb(obj);
+
+       if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
+               ret = -ENOMEM;
+               /* Reserve space in the file's event budget before allocating. */
+               spin_lock_irqsave(&dev->event_lock, flags);
+               if (file_priv->event_space < sizeof e->event) {
+                       spin_unlock_irqrestore(&dev->event_lock, flags);
+                       goto out;
+               }
+               file_priv->event_space -= sizeof e->event;
+               spin_unlock_irqrestore(&dev->event_lock, flags);
+
+               e = kzalloc(sizeof *e, GFP_KERNEL);
+               if (e == NULL) {
+                       spin_lock_irqsave(&dev->event_lock, flags);
+                       file_priv->event_space += sizeof e->event;
+                       spin_unlock_irqrestore(&dev->event_lock, flags);
+                       goto out;
+               }
+
+               e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
+               e->event.base.length = sizeof e->event;
+               e->event.user_data = page_flip->user_data;
+               e->base.event = &e->event.base;
+               e->base.file_priv = file_priv;
+               e->base.destroy =
+                       (void (*) (struct drm_pending_event *)) kfree;
+       }
+
+       ret = crtc->funcs->page_flip(crtc, fb, e);
+       if (ret) {
+               /* Undo the event reservation only if one was actually made. */
+               if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
+                       spin_lock_irqsave(&dev->event_lock, flags);
+                       file_priv->event_space += sizeof e->event;
+                       spin_unlock_irqrestore(&dev->event_lock, flags);
+                       kfree(e);
+               }
+       }
+
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_debugfs.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_debugfs.c
new file mode 100644 (file)
index 0000000..9903f27
--- /dev/null
@@ -0,0 +1,236 @@
+/**
+ * \file drm_debugfs.c
+ * debugfs support for DRM
+ *
+ * \author Ben Gamari <bgamari@gmail.com>
+ */
+
+/*
+ * Created: Sun Dec 21 13:08:50 2008 by bgamari@gmail.com
+ *
+ * Copyright 2008 Ben Gamari <bgamari@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include "drmP.h"
+
+#if defined(CONFIG_DEBUG_FS)
+
+/***************************************************
+ * Initialization, etc.
+ **************************************************/
+
+/*
+ * Core debugfs entries created for every DRM minor.  The third field is a
+ * driver-feature mask: GEM entries are created only for DRIVER_GEM drivers
+ * (checked in drm_debugfs_create_files()).
+ */
+static struct drm_info_list drm_debugfs_list[] = {
+       {"name", drm_name_info, 0},
+       {"vm", drm_vm_info, 0},
+       {"clients", drm_clients_info, 0},
+       {"queues", drm_queues_info, 0},
+       {"bufs", drm_bufs_info, 0},
+       {"gem_names", drm_gem_name_info, DRIVER_GEM},
+       {"gem_objects", drm_gem_object_info, DRIVER_GEM},
+#if DRM_DEBUG_CODE
+       {"vma", drm_vma_info, 0},
+#endif
+};
+#define DRM_DEBUGFS_ENTRIES ARRAY_SIZE(drm_debugfs_list)
+
+
+/*
+ * Open hook for all DRM debugfs files: the drm_info_node stashed in
+ * i_private provides the per-entry show callback for seq_file.
+ */
+static int drm_debugfs_open(struct inode *inode, struct file *file)
+{
+       struct drm_info_node *node = inode->i_private;
+
+       return single_open(file, node->info_ent->show, node);
+}
+
+
+/* Shared read-only seq_file fops for every DRM debugfs entry. */
+static const struct file_operations drm_debugfs_fops = {
+       .owner = THIS_MODULE,
+       .open = drm_debugfs_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
+
+
+/**
+ * Initialize a given set of debugfs files for a device
+ *
+ * \param files The array of files to create
+ * \param count The number of files given
+ * \param root DRI debugfs dir entry.
+ * \param minor device minor number
+ * \return Zero on success, non-zero on failure
+ *
+ * Create a given set of debugfs files represented by an array of
+ * drm_debugfs_lists in the given root directory.
+ */
+int drm_debugfs_create_files(struct drm_info_list *files, int count,
+                            struct dentry *root, struct drm_minor *minor)
+{
+       struct drm_device *dev = minor->dev;
+       struct dentry *ent;
+       struct drm_info_node *tmp;
+       int i, ret;
+
+       for (i = 0; i < count; i++) {
+               u32 features = files[i].driver_features;
+
+               /* Skip entries whose required driver features are absent. */
+               if (features != 0 &&
+                   (dev->driver->driver_features & features) != features)
+                       continue;
+
+               tmp = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
+               if (tmp == NULL) {
+                       ret = -1;
+                       goto fail;
+               }
+               ent = debugfs_create_file(files[i].name, S_IFREG | S_IRUGO,
+                                         root, tmp, &drm_debugfs_fops);
+               if (!ent) {
+                       /*
+                        * The old message printed a local 'name' buffer that
+                        * was never initialized in this function; report only
+                        * the entry name, which is known here.
+                        */
+                       DRM_ERROR("Cannot create /sys/kernel/debug/dri entry %s\n",
+                                 files[i].name);
+                       kfree(tmp);
+                       ret = -1;
+                       goto fail;
+               }
+
+               tmp->minor = minor;
+               tmp->dent = ent;
+               tmp->info_ent = &files[i];
+               list_add(&(tmp->list), &(minor->debugfs_nodes.list));
+       }
+       return 0;
+
+fail:
+       /* Tear down any entries created before the failure. */
+       drm_debugfs_remove_files(files, count, minor);
+       return ret;
+}
+EXPORT_SYMBOL(drm_debugfs_create_files);
+
+/**
+ * Initialize the DRI debugfs filesystem for a device
+ *
+ * \param dev DRM device
+ * \param minor device minor number
+ * \param root DRI debugfs dir entry.
+ *
+ * Create the DRI debugfs root entry "/sys/kernel/debug/dri", the device debugfs root entry
+ * "/sys/kernel/debug/dri/%minor%/", and each entry in debugfs_list as
+ * "/sys/kernel/debug/dri/%minor%/%name%".
+ */
+int drm_debugfs_init(struct drm_minor *minor, int minor_id,
+                    struct dentry *root)
+{
+       struct drm_device *dev = minor->dev;
+       char name[64];
+       int ret;
+
+       INIT_LIST_HEAD(&minor->debugfs_nodes.list);
+       /* Per-minor directory is named after the minor number. */
+       sprintf(name, "%d", minor_id);
+       minor->debugfs_root = debugfs_create_dir(name, root);
+       if (!minor->debugfs_root) {
+               DRM_ERROR("Cannot create /sys/kernel/debug/dri/%s\n", name);
+               return -1;
+       }
+
+       ret = drm_debugfs_create_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES,
+                                      minor->debugfs_root, minor);
+       if (ret) {
+               debugfs_remove(minor->debugfs_root);
+               minor->debugfs_root = NULL;
+               DRM_ERROR("Failed to create core drm debugfs files\n");
+               return ret;
+       }
+
+       /* Give the driver a chance to add its own entries. */
+       if (dev->driver->debugfs_init) {
+               ret = dev->driver->debugfs_init(minor);
+               if (ret) {
+                       DRM_ERROR("DRM: Driver failed to initialize "
+                                 "/sys/kernel/debug/dri.\n");
+                       return ret;
+               }
+       }
+       return 0;
+}
+
+
+/**
+ * Remove a list of debugfs files
+ *
+ * \param files The list of files
+ * \param count The number of files
+ * \param minor The minor of which we should remove the files
+ * \return always zero.
+ *
+ * Remove all debugfs entries created by debugfs_init().
+ */
+int drm_debugfs_remove_files(struct drm_info_list *files, int count,
+                            struct drm_minor *minor)
+{
+       struct list_head *pos, *q;
+       struct drm_info_node *tmp;
+       int i;
+
+       /* Safe iteration: nodes are unlinked and freed while walking. */
+       for (i = 0; i < count; i++) {
+               list_for_each_safe(pos, q, &minor->debugfs_nodes.list) {
+                       tmp = list_entry(pos, struct drm_info_node, list);
+                       if (tmp->info_ent == &files[i]) {
+                               debugfs_remove(tmp->dent);
+                               list_del(pos);
+                               kfree(tmp);
+                       }
+               }
+       }
+       return 0;
+}
+EXPORT_SYMBOL(drm_debugfs_remove_files);
+
+/**
+ * Cleanup the debugfs filesystem resources.
+ *
+ * \param minor device minor number.
+ * \return always zero.
+ *
+ * Remove all debugfs entries created by debugfs_init().
+ */
+int drm_debugfs_cleanup(struct drm_minor *minor)
+{
+       struct drm_device *dev = minor->dev;
+
+       /* Nothing was ever created for this minor. */
+       if (!minor->debugfs_root)
+               return 0;
+
+       /* Driver entries first, then core entries, then the directory. */
+       if (dev->driver->debugfs_cleanup)
+               dev->driver->debugfs_cleanup(minor);
+
+       drm_debugfs_remove_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES, minor);
+
+       debugfs_remove(minor->debugfs_root);
+       minor->debugfs_root = NULL;
+
+       return 0;
+}
+
+#endif /* CONFIG_DEBUG_FS */
+
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_dma.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_dma.c
new file mode 100644 (file)
index 0000000..13f1537
--- /dev/null
@@ -0,0 +1,163 @@
+/**
+ * \file drm_dma.c
+ * DMA IOCTL and function support
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+
+/**
+ * Initialize the DMA data.
+ *
+ * \param dev DRM device.
+ * \return zero on success or a negative value on failure.
+ *
+ * Allocate and initialize a drm_device_dma structure.
+ */
+int drm_dma_setup(struct drm_device *dev)
+{
+       /*
+        * kzalloc() zero-fills the whole drm_device_dma structure,
+        * including the per-order bufs[] array, so the previous explicit
+        * memset() plus per-entry clearing loop was redundant work.
+        */
+       dev->dma = kzalloc(sizeof(*dev->dma), GFP_KERNEL);
+       if (!dev->dma)
+               return -ENOMEM;
+
+       return 0;
+}
+
+/**
+ * Cleanup the DMA resources.
+ *
+ * \param dev DRM device.
+ *
+ * Free all pages associated with DMA buffers, the buffers and pages lists, and
+ * finally the drm_device::dma structure itself.
+ */
+void drm_dma_takedown(struct drm_device *dev)
+{
+       struct drm_device_dma *dma = dev->dma;
+       int i, j;
+
+       /* DMA was never set up (or already torn down). */
+       if (!dma)
+               return;
+
+       /* Clear dma buffers */
+       for (i = 0; i <= DRM_MAX_ORDER; i++) {
+               if (dma->bufs[i].seg_count) {
+                       DRM_DEBUG("order %d: buf_count = %d,"
+                                 " seg_count = %d\n",
+                                 i,
+                                 dma->bufs[i].buf_count,
+                                 dma->bufs[i].seg_count);
+                       /* Free the PCI-consistent segments backing this order. */
+                       for (j = 0; j < dma->bufs[i].seg_count; j++) {
+                               if (dma->bufs[i].seglist[j]) {
+                                       drm_pci_free(dev, dma->bufs[i].seglist[j]);
+                               }
+                       }
+                       kfree(dma->bufs[i].seglist);
+               }
+               if (dma->bufs[i].buf_count) {
+                       for (j = 0; j < dma->bufs[i].buf_count; j++) {
+                               kfree(dma->bufs[i].buflist[j].dev_private);
+                       }
+                       kfree(dma->bufs[i].buflist);
+               }
+       }
+
+       kfree(dma->buflist);
+       kfree(dma->pagelist);
+       kfree(dev->dma);
+       dev->dma = NULL;
+}
+
+/**
+ * Free a buffer.
+ *
+ * \param dev DRM device.
+ * \param buf buffer to free.
+ *
+ * Resets the fields of \p buf.
+ */
+void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf)
+{
+       if (!buf)
+               return;
+
+       /* Reset the buffer to an idle, unowned state. */
+       buf->waiting = 0;
+       buf->pending = 0;
+       buf->file_priv = NULL;
+       buf->used = 0;
+
+       /* Wake anyone blocked waiting for a free DMA buffer. */
+       if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE)
+           && waitqueue_active(&buf->dma_wait)) {
+               wake_up_interruptible(&buf->dma_wait);
+       }
+}
+
+/**
+ * Reclaim the buffers.
+ *
+ * \param file_priv DRM file private.
+ *
+ * Frees each buffer associated with \p file_priv not already on the hardware.
+ */
+void drm_core_reclaim_buffers(struct drm_device *dev,
+                             struct drm_file *file_priv)
+{
+       struct drm_device_dma *dma = dev->dma;
+       int i;
+
+       if (!dma)
+               return;
+       for (i = 0; i < dma->buf_count; i++) {
+               /* Only touch buffers owned by the closing file. */
+               if (dma->buflist[i]->file_priv == file_priv) {
+                       switch (dma->buflist[i]->list) {
+                       case DRM_LIST_NONE:
+                               /* Idle buffer: free it immediately. */
+                               drm_free_buffer(dev, dma->buflist[i]);
+                               break;
+                       case DRM_LIST_WAIT:
+                               /* Queued buffer: mark for later reclaim. */
+                               dma->buflist[i]->list = DRM_LIST_RECLAIM;
+                               break;
+                       default:
+                               /* Buffer already on hardware. */
+                               break;
+                       }
+               }
+       }
+}
+
+EXPORT_SYMBOL(drm_core_reclaim_buffers);
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_drawable.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_drawable.c
new file mode 100644 (file)
index 0000000..c53c976
--- /dev/null
@@ -0,0 +1,198 @@
+/**
+ * \file drm_drawable.c
+ * IOCTLs for drawables
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ * \author Michel Dänzer <michel@tungstengraphics.com>
+ */
+
+/*
+ * Created: Tue Feb  2 08:37:54 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, North Dakota.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+
+/**
+ * Allocate drawable ID and memory to store information about it.
+ */
+/**
+ * Allocate drawable ID and memory to store information about it.
+ */
+int drm_adddraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       unsigned long irqflags;
+       struct drm_draw *draw = data;
+       int new_id = 0;
+       int ret;
+
+again:
+       /* Preallocate idr memory outside the spinlock (GFP_KERNEL may sleep). */
+       if (idr_pre_get(&dev->drw_idr, GFP_KERNEL) == 0) {
+               DRM_ERROR("Out of memory expanding drawable idr\n");
+               return -ENOMEM;
+       }
+
+       spin_lock_irqsave(&dev->drw_lock, irqflags);
+       /* NULL placeholder; drm_update_drawable_info() fills it in later. */
+       ret = idr_get_new_above(&dev->drw_idr, NULL, 1, &new_id);
+       if (ret == -EAGAIN) {
+               /* Preallocation was consumed by a racing caller; retry. */
+               spin_unlock_irqrestore(&dev->drw_lock, irqflags);
+               goto again;
+       }
+
+       spin_unlock_irqrestore(&dev->drw_lock, irqflags);
+
+       draw->handle = new_id;
+
+       DRM_DEBUG("%d\n", draw->handle);
+
+       return 0;
+}
+
+/**
+ * Free drawable ID and memory to store information about it.
+ */
+/**
+ * Free drawable ID and memory to store information about it.
+ */
+int drm_rmdraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       struct drm_draw *draw = data;
+       unsigned long irqflags;
+       struct drm_drawable_info *info;
+
+       spin_lock_irqsave(&dev->drw_lock, irqflags);
+
+       info = drm_get_drawable_info(dev, draw->handle);
+       if (info == NULL) {
+               spin_unlock_irqrestore(&dev->drw_lock, irqflags);
+               return -EINVAL;
+       }
+       /* Free the cliprect array before the info struct that owns it. */
+       kfree(info->rects);
+       kfree(info);
+
+       idr_remove(&dev->drw_idr, draw->handle);
+
+       spin_unlock_irqrestore(&dev->drw_lock, irqflags);
+       DRM_DEBUG("%d\n", draw->handle);
+       return 0;
+}
+
+/*
+ * UPDATE_DRAW ioctl: attach or replace a drawable's cliprect list.
+ *
+ * The drawable id is created by drm_adddraw() with a NULL payload; the
+ * first update here allocates the drm_drawable_info and swaps it in via
+ * idr_replace() (which also validates that the handle exists).  Cliprects
+ * are copied from userspace before taking drw_lock, then published under
+ * the lock so readers always see a consistent rects/num_rects pair.
+ */
+int drm_update_drawable_info(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       struct drm_update_draw *update = data;
+       unsigned long irqflags;
+       struct drm_clip_rect *rects;
+       struct drm_drawable_info *info;
+       int err;
+
+       info = idr_find(&dev->drw_idr, update->handle);
+       if (!info) {
+               info = kzalloc(sizeof(*info), GFP_KERNEL);
+               if (!info)
+                       return -ENOMEM;
+               /* idr_replace() errors iff the handle was never allocated. */
+               if (IS_ERR(idr_replace(&dev->drw_idr, info, update->handle))) {
+                       DRM_ERROR("No such drawable %d\n", update->handle);
+                       kfree(info);
+                       return -EINVAL;
+               }
+       }
+
+       switch (update->type) {
+       case DRM_DRAWABLE_CLIPRECTS:
+               /* Reuse the existing array when the count is unchanged. */
+               if (update->num == 0)
+                       rects = NULL;
+               else if (update->num != info->num_rects) {
+                       rects = kmalloc(update->num *
+                                       sizeof(struct drm_clip_rect),
+                                       GFP_KERNEL);
+               } else
+                       rects = info->rects;
+
+               if (update->num && !rects) {
+                       DRM_ERROR("Failed to allocate cliprect memory\n");
+                       err = -ENOMEM;
+                       goto error;
+               }
+
+               if (update->num && DRM_COPY_FROM_USER(rects,
+                                                    (struct drm_clip_rect __user *)
+                                                    (unsigned long)update->data,
+                                                    update->num *
+                                                    sizeof(*rects))) {
+                       DRM_ERROR("Failed to copy cliprects from userspace\n");
+                       err = -EFAULT;
+                       goto error;
+               }
+
+               spin_lock_irqsave(&dev->drw_lock, irqflags);
+
+               /* Drop the old array only if a new one was allocated. */
+               if (rects != info->rects) {
+                       kfree(info->rects);
+               }
+
+               info->rects = rects;
+               info->num_rects = update->num;
+
+               spin_unlock_irqrestore(&dev->drw_lock, irqflags);
+
+               DRM_DEBUG("Updated %d cliprects for drawable %d\n",
+                         info->num_rects, update->handle);
+               break;
+       default:
+               DRM_ERROR("Invalid update type %d\n", update->type);
+               return -EINVAL;
+       }
+
+       return 0;
+
+error:
+       /* Free the new array on failure; never the one still published. */
+       if (rects != info->rects)
+               kfree(rects);
+
+       return err;
+}
+
+/**
+ * Caller must hold the drawable spinlock!
+ */
+/**
+ * Caller must hold the drawable spinlock!
+ */
+struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev, drm_drawable_t id)
+{
+       return idr_find(&dev->drw_idr, id);
+}
+EXPORT_SYMBOL(drm_get_drawable_info);
+
+/* idr_for_each() callback: free one drawable's cliprects and info struct. */
+static int drm_drawable_free(int idr, void *p, void *data)
+{
+       struct drm_drawable_info *info = p;
+
+       /* p may be the NULL placeholder left by drm_adddraw(). */
+       if (info) {
+               kfree(info->rects);
+               kfree(info);
+       }
+
+       return 0;
+}
+
+/* Free every drawable and reset the idr (device teardown path). */
+void drm_drawable_free_all(struct drm_device *dev)
+{
+       idr_for_each(&dev->drw_idr, drm_drawable_free, NULL);
+       idr_remove_all(&dev->drw_idr);
+}
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_drv.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_drv.c
new file mode 100644 (file)
index 0000000..ff2f104
--- /dev/null
@@ -0,0 +1,536 @@
+/**
+ * \file drm_drv.c
+ * Generic driver template
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ *
+ * To use this template, you must at least define the following (samples
+ * given for the MGA driver):
+ *
+ * \code
+ * #define DRIVER_AUTHOR       "VA Linux Systems, Inc."
+ *
+ * #define DRIVER_NAME         "mga"
+ * #define DRIVER_DESC         "Matrox G200/G400"
+ * #define DRIVER_DATE         "20001127"
+ *
+ * #define drm_x               mga_##x
+ * \endcode
+ */
+
+/*
+ * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/debugfs.h>
+#include "drmP.h"
+#include "drm_core.h"
+
+
+static int drm_version(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv);
+
+/** Ioctl table */
+static struct drm_ioctl_desc drm_ioctls[] = {
+       DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0),
+       DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
+       DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
+       DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0),
+       DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
+       DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
+       DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
+       /* The DRM_IOCTL_DMA ioctl should be defined by the driver. */
+       DRM_IOCTL_DEF(DRM_IOCTL_DMA, NULL, DRM_AUTH),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+
+#if __OS_HAS_AGP
+       DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+#endif
+
+       DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0),
+       DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW)
+};
+
+#define DRM_CORE_IOCTL_COUNT   ARRAY_SIZE( drm_ioctls )
+
+/**
+ * Take down the DRM device.
+ *
+ * \param dev DRM device structure.
+ *
+ * Frees every resource in \p dev.
+ *
+ * \sa drm_device
+ */
+int drm_lastclose(struct drm_device * dev)
+{
+       struct drm_vma_entry *vma, *vma_temp;
+       int i;
+
+       DRM_DEBUG("\n");
+
+       if (dev->driver->lastclose)
+               dev->driver->lastclose(dev);
+       DRM_DEBUG("driver lastclose completed\n");
+
+       if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET))
+               drm_irq_uninstall(dev);
+
+       mutex_lock(&dev->struct_mutex);
+
+       /* Free drawable information memory */
+       drm_drawable_free_all(dev);
+       del_timer(&dev->timer);
+
+       /* Clear AGP information */
+       if (drm_core_has_AGP(dev) && dev->agp &&
+                       !drm_core_check_feature(dev, DRIVER_MODESET)) {
+               struct drm_agp_mem *entry, *tempe;
+
+               /* Remove AGP resources, but leave dev->agp
+                  intact until drv_cleanup is called. */
+               list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
+                       if (entry->bound)
+                               drm_unbind_agp(entry->memory);
+                       drm_free_agp(entry->memory, entry->pages);
+                       kfree(entry);
+               }
+               INIT_LIST_HEAD(&dev->agp->memory);
+
+               if (dev->agp->acquired)
+                       drm_agp_release(dev);
+
+               dev->agp->acquired = 0;
+               dev->agp->enabled = 0;
+       }
+       if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg &&
+           !drm_core_check_feature(dev, DRIVER_MODESET)) {
+               drm_sg_cleanup(dev->sg);
+               dev->sg = NULL;
+       }
+
+       /* Clear vma list (only built for debugging) */
+       list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
+               list_del(&vma->head);
+               kfree(vma);
+       }
+
+       if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist) {
+               for (i = 0; i < dev->queue_count; i++) {
+                       kfree(dev->queuelist[i]);
+                       dev->queuelist[i] = NULL;
+               }
+               kfree(dev->queuelist);
+               dev->queuelist = NULL;
+       }
+       dev->queue_count = 0;
+
+       if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
+           !drm_core_check_feature(dev, DRIVER_MODESET))
+               drm_dma_takedown(dev);
+
+       dev->dev_mapping = NULL;
+       mutex_unlock(&dev->struct_mutex);
+
+       DRM_DEBUG("lastclose completed\n");
+       return 0;
+}
+
+/**
+ * Module initialization. Called via init_module at module load time, or via
+ * linux/init/main.c (this is not currently supported).
+ *
+ * \return zero on success or a negative number on failure.
+ *
+ * Initializes an array of drm_device structures, and attempts to
+ * initialize all available devices, using consecutive minors, registering the
+ * stubs and initializing the AGP device.
+ *
+ * Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
+ * after the initialization for driver customization.
+ */
+int drm_init(struct drm_driver *driver)
+{
+       struct pci_dev *pdev = NULL;
+       const struct pci_device_id *pid;
+       int i;
+
+       DRM_DEBUG("\n");
+
+       INIT_LIST_HEAD(&driver->device_list);
+
+       if (driver->driver_features & DRIVER_MODESET)
+               return pci_register_driver(&driver->pci_driver);
+
+       /* If not using KMS, fall back to stealth mode manual scanning. */
+       for (i = 0; driver->pci_driver.id_table[i].vendor != 0; i++) {
+               pid = &driver->pci_driver.id_table[i];
+
+               /* Loop around setting up a DRM device for each PCI device
+                * matching our ID and device class.  If we had the internal
+                * function that pci_get_subsys and pci_get_class used, we'd
+                * be able to just pass pid in instead of doing a two-stage
+                * thing.
+                */
+               pdev = NULL;
+               while ((pdev =
+                       pci_get_subsys(pid->vendor, pid->device, pid->subvendor,
+                                      pid->subdevice, pdev)) != NULL) {
+                       if ((pdev->class & pid->class_mask) != pid->class)
+                               continue;
+
+                       /* stealth mode requires a manual probe */
+                       pci_dev_get(pdev);
+                       drm_get_dev(pdev, pid, driver);
+               }
+       }
+       return 0;
+}
+
+EXPORT_SYMBOL(drm_init);
+
+void drm_exit(struct drm_driver *driver)
+{
+       struct drm_device *dev, *tmp;
+       DRM_DEBUG("\n");
+
+       if (driver->driver_features & DRIVER_MODESET) {
+               pci_unregister_driver(&driver->pci_driver);
+       } else {
+               list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
+                       drm_put_dev(dev);
+       }
+
+       DRM_INFO("Module unloaded\n");
+}
+
+EXPORT_SYMBOL(drm_exit);
+
+/** File operations structure */
+static const struct file_operations drm_stub_fops = {
+       .owner = THIS_MODULE,
+       .open = drm_stub_open
+};
+
+static int __init drm_core_init(void)
+{
+       int ret = -ENOMEM;
+
+       idr_init(&drm_minors_idr);
+
+       if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
+               goto err_p1;
+
+       drm_class = drm_sysfs_create(THIS_MODULE, "drm");
+       if (IS_ERR(drm_class)) {
+               printk(KERN_ERR "DRM: Error creating drm class.\n");
+               ret = PTR_ERR(drm_class);
+               goto err_p2;
+       }
+
+       drm_proc_root = proc_mkdir("dri", NULL);
+       if (!drm_proc_root) {
+               DRM_ERROR("Cannot create /proc/dri\n");
+               ret = -1;
+               goto err_p3;
+       }
+
+       drm_debugfs_root = debugfs_create_dir("dri", NULL);
+       if (!drm_debugfs_root) {
+               DRM_ERROR("Cannot create /sys/kernel/debug/dri\n");
+               ret = -1;
+               goto err_p3;
+       }
+
+       DRM_INFO("Initialized %s %d.%d.%d %s\n",
+                CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
+       return 0;
+err_p3:
+       drm_sysfs_destroy();
+err_p2:
+       unregister_chrdev(DRM_MAJOR, "drm");
+
+       idr_destroy(&drm_minors_idr);
+err_p1:
+       return ret;
+}
+
+static void __exit drm_core_exit(void)
+{
+       remove_proc_entry("dri", NULL);
+       debugfs_remove(drm_debugfs_root);
+       drm_sysfs_destroy();
+
+       unregister_chrdev(DRM_MAJOR, "drm");
+
+       idr_destroy(&drm_minors_idr);
+}
+
+module_init(drm_core_init);
+module_exit(drm_core_exit);
+
+/**
+ * Copy an IOCTL return string to user space
+ */
+static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
+{
+       int len;
+
+       /* don't overflow userbuf */
+       len = strlen(value);
+       if (len > *buf_len)
+               len = *buf_len;
+
+       /* let userspace know exact length of driver value (which could be
+        * larger than the userspace-supplied buffer) */
+       *buf_len = strlen(value);
+
+       /* finally, try filling in the userbuf */
+       if (len && buf)
+               if (copy_to_user(buf, value, len))
+                       return -EFAULT;
+       return 0;
+}
+
+/**
+ * Get version information
+ *
+ * \param inode device inode.
+ * \param filp file pointer.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_version structure.
+ * \return zero on success or negative number on failure.
+ *
+ * Fills in the version information in \p arg.
+ */
+static int drm_version(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv)
+{
+       struct drm_version *version = data;
+       int err;
+
+       version->version_major = dev->driver->major;
+       version->version_minor = dev->driver->minor;
+       version->version_patchlevel = dev->driver->patchlevel;
+       err = drm_copy_field(version->name, &version->name_len,
+                       dev->driver->name);
+       if (!err)
+               err = drm_copy_field(version->date, &version->date_len,
+                               dev->driver->date);
+       if (!err)
+               err = drm_copy_field(version->desc, &version->desc_len,
+                               dev->driver->desc);
+
+       return err;
+}
+
+/**
+ * Called whenever a process performs an ioctl on /dev/drm.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument.
+ * \return zero on success or negative number on failure.
+ *
+ * Looks up the ioctl function in the ::ioctls table, checking for root
+ * privileges if so required, and dispatches to the respective function.
+ */
+int drm_ioctl(struct inode *inode, struct file *filp,
+             unsigned int cmd, unsigned long arg)
+{
+       struct drm_file *file_priv = filp->private_data;
+       struct drm_device *dev = file_priv->minor->dev;
+       struct drm_ioctl_desc *ioctl;
+       drm_ioctl_t *func;
+       unsigned int nr = DRM_IOCTL_NR(cmd);
+       int retcode = -EINVAL;
+       char stack_kdata[128];
+       char *kdata = NULL;
+
+       atomic_inc(&dev->ioctl_count);
+       atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
+       ++file_priv->ioctl_count;
+
+       DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
+                 task_pid_nr(current), cmd, nr,
+                 (long)old_encode_dev(file_priv->minor->device),
+                 file_priv->authenticated);
+
+       if ((nr >= DRM_CORE_IOCTL_COUNT) &&
+           ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
+               goto err_i1;
+       if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) &&
+           (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls))
+               ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
+       else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
+               ioctl = &drm_ioctls[nr];
+               cmd = ioctl->cmd;
+       } else
+               goto err_i1;
+
+       /* Do not trust userspace, use our own definition */
+       func = ioctl->func;
+       /* is there a local override? */
+       if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl)
+               func = dev->driver->dma_ioctl;
+
+       if (!func) {
+               DRM_DEBUG("no function\n");
+               retcode = -EINVAL;
+       } else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) ||
+                  ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) ||
+                  ((ioctl->flags & DRM_MASTER) && !file_priv->is_master) ||
+                  (!(ioctl->flags & DRM_CONTROL_ALLOW) && (file_priv->minor->type == DRM_MINOR_CONTROL))) {
+               retcode = -EACCES;
+       } else {
+               if (cmd & (IOC_IN | IOC_OUT)) {
+                       if (_IOC_SIZE(cmd) <= sizeof(stack_kdata)) {
+                               kdata = stack_kdata;
+                       } else {
+                               kdata = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL);
+                               if (!kdata) {
+                                       retcode = -ENOMEM;
+                                       goto err_i1;
+                               }
+                       }
+               }
+
+               if (cmd & IOC_IN) {
+                       if (copy_from_user(kdata, (void __user *)arg,
+                                          _IOC_SIZE(cmd)) != 0) {
+                               retcode = -EFAULT;
+                               goto err_i1;
+                       }
+               }
+               retcode = func(dev, kdata, file_priv);
+
+               if (cmd & IOC_OUT) {
+                       if (copy_to_user((void __user *)arg, kdata,
+                                        _IOC_SIZE(cmd)) != 0)
+                               retcode = -EFAULT;
+               }
+       }
+
+      err_i1:
+       if (kdata != stack_kdata)
+               kfree(kdata);
+       atomic_dec(&dev->ioctl_count);
+       if (retcode)
+               DRM_DEBUG("ret = %x\n", retcode);
+       return retcode;
+}
+
+EXPORT_SYMBOL(drm_ioctl);
+
+struct drm_local_map *drm_getsarea(struct drm_device *dev)
+{
+       struct drm_map_list *entry;
+
+       list_for_each_entry(entry, &dev->maplist, head) {
+               if (entry->map && entry->map->type == _DRM_SHM &&
+                   (entry->map->flags & _DRM_CONTAINS_LOCK)) {
+                       return entry->map;
+               }
+       }
+       return NULL;
+}
+EXPORT_SYMBOL(drm_getsarea);
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_edid.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_edid.c
new file mode 100644 (file)
index 0000000..c39b26f
--- /dev/null
@@ -0,0 +1,1387 @@
+/*
+ * Copyright (c) 2006 Luc Verhaegen (quirks list)
+ * Copyright (c) 2007-2008 Intel Corporation
+ *   Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * DDC probing routines (drm_ddc_read & drm_do_probe_ddc_edid) originally from
+ * FB layer.
+ *   Copyright (C) 2006 Dennis Munsie <dmunsie@cecropia.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include <linux/kernel.h>
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+#include "drmP.h"
+#include "drm_edid.h"
+
+/*
+ * TODO:
+ *   - support EDID 1.4 (incl. CE blocks)
+ */
+
+/*
+ * EDID blocks out in the wild have a variety of bugs, try to collect
+ * them here (note that userspace may work around broken monitors first,
+ * but fixes should make their way here so that the kernel "just works"
+ * on as many displays as possible).
+ */
+
+/* First detailed mode wrong, use largest 60Hz mode */
+#define EDID_QUIRK_PREFER_LARGE_60             (1 << 0)
+/* Reported 135MHz pixel clock is too high, needs adjustment */
+#define EDID_QUIRK_135_CLOCK_TOO_HIGH          (1 << 1)
+/* Prefer the largest mode at 75 Hz */
+#define EDID_QUIRK_PREFER_LARGE_75             (1 << 2)
+/* Detail timing is in cm not mm */
+#define EDID_QUIRK_DETAILED_IN_CM              (1 << 3)
+/* Detailed timing descriptors have bogus size values, so just take the
+ * maximum size and use that.
+ */
+#define EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE   (1 << 4)
+/* Monitor forgot to set the first detailed is preferred bit. */
+#define EDID_QUIRK_FIRST_DETAILED_PREFERRED    (1 << 5)
+/* use +hsync +vsync for detailed mode */
+#define EDID_QUIRK_DETAILED_SYNC_PP            (1 << 6)
+/* define the number of Extension EDID block */
+#define MAX_EDID_EXT_NUM 4
+
+#define LEVEL_DMT      0
+#define LEVEL_GTF      1
+#define LEVEL_CVT      2
+
+static struct edid_quirk {
+       char *vendor;
+       int product_id;
+       u32 quirks;
+} edid_quirk_list[] = {
+       /* Acer AL1706 */
+       { "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 },
+       /* Acer F51 */
+       { "API", 0x7602, EDID_QUIRK_PREFER_LARGE_60 },
+       /* Unknown Acer */
+       { "ACR", 2423, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
+
+       /* Belinea 10 15 55 */
+       { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
+       { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },
+
+       /* Envision Peripherals, Inc. EN-7100e */
+       { "EPI", 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH },
+
+       /* Funai Electronics PM36B */
+       { "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 |
+         EDID_QUIRK_DETAILED_IN_CM },
+
+       /* LG Philips LCD LP154W01-A5 */
+       { "LPL", 0, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
+       { "LPL", 0x2a00, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
+
+       /* Philips 107p5 CRT */
+       { "PHL", 57364, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
+
+       /* Proview AY765C */
+       { "PTS", 765, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
+
+       /* Samsung SyncMaster 205BW.  Note: irony */
+       { "SAM", 541, EDID_QUIRK_DETAILED_SYNC_PP },
+       /* Samsung SyncMaster 22[5-6]BW */
+       { "SAM", 596, EDID_QUIRK_PREFER_LARGE_60 },
+       { "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 },
+};
+
+
+/* Valid EDID header has these bytes */
+static const u8 edid_header[] = {
+       0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
+};
+
+/**
+ * edid_is_valid - sanity check EDID data
+ * @edid: EDID data
+ *
+ * Sanity check the EDID block by looking at the header, the version number
+ * and the checksum.  Return 0 if the EDID doesn't check out, or 1 if it's
+ * valid.
+ */
+static bool edid_is_valid(struct edid *edid)
+{
+       int i, score = 0;
+       u8 csum = 0;
+       u8 *raw_edid = (u8 *)edid;
+
+       for (i = 0; i < sizeof(edid_header); i++)
+               if (raw_edid[i] == edid_header[i])
+                       score++;
+
+       if (score == 8) ;
+       else if (score >= 6) {
+               DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
+               memcpy(raw_edid, edid_header, sizeof(edid_header));
+       } else
+               goto bad;
+
+       for (i = 0; i < EDID_LENGTH; i++)
+               csum += raw_edid[i];
+       if (csum) {
+               DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum);
+               goto bad;
+       }
+
+       if (edid->version != 1) {
+               DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
+               goto bad;
+       }
+
+       if (edid->revision > 4)
+               DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n");
+
+       return 1;
+
+bad:
+       if (raw_edid) {
+               DRM_ERROR("Raw EDID:\n");
+               print_hex_dump_bytes(KERN_ERR, DUMP_PREFIX_NONE, raw_edid, EDID_LENGTH);
+               printk("\n");
+       }
+       return 0;
+}
+
+/**
+ * edid_vendor - match a string against EDID's obfuscated vendor field
+ * @edid: EDID to match
+ * @vendor: vendor string
+ *
+ * Returns true if @vendor is in @edid, false otherwise
+ */
+static bool edid_vendor(struct edid *edid, char *vendor)
+{
+       char edid_vendor[3];
+
+       edid_vendor[0] = ((edid->mfg_id[0] & 0x7c) >> 2) + '@';
+       edid_vendor[1] = (((edid->mfg_id[0] & 0x3) << 3) |
+                         ((edid->mfg_id[1] & 0xe0) >> 5)) + '@';
+       edid_vendor[2] = (edid->mfg_id[1] & 0x1f) + '@';
+
+       return !strncmp(edid_vendor, vendor, 3);
+}
+
+/**
+ * edid_get_quirks - return quirk flags for a given EDID
+ * @edid: EDID to process
+ *
+ * This tells subsequent routines what fixes they need to apply.
+ */
+static u32 edid_get_quirks(struct edid *edid)
+{
+       struct edid_quirk *quirk;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(edid_quirk_list); i++) {
+               quirk = &edid_quirk_list[i];
+
+               if (edid_vendor(edid, quirk->vendor) &&
+                   (EDID_PRODUCT_ID(edid) == quirk->product_id))
+                       return quirk->quirks;
+       }
+
+       return 0;
+}
+
+#define MODE_SIZE(m) ((m)->hdisplay * (m)->vdisplay)
+#define MODE_REFRESH_DIFF(m,r) (abs((m)->vrefresh - target_refresh))
+
+
+/**
+ * edid_fixup_preferred - set preferred modes based on quirk list
+ * @connector: has mode list to fix up
+ * @quirks: quirks list
+ *
+ * Walk the mode list for @connector, clearing the preferred status
+ * on existing modes and setting it anew for the right mode ala @quirks.
+ */
+static void edid_fixup_preferred(struct drm_connector *connector,
+                                u32 quirks)
+{
+       struct drm_display_mode *t, *cur_mode, *preferred_mode;
+       int target_refresh = 0;
+
+       if (list_empty(&connector->probed_modes))
+               return;
+
+       if (quirks & EDID_QUIRK_PREFER_LARGE_60)
+               target_refresh = 60;
+       if (quirks & EDID_QUIRK_PREFER_LARGE_75)
+               target_refresh = 75;
+
+       preferred_mode = list_first_entry(&connector->probed_modes,
+                                         struct drm_display_mode, head);
+
+       list_for_each_entry_safe(cur_mode, t, &connector->probed_modes, head) {
+               cur_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
+
+               if (cur_mode == preferred_mode)
+                       continue;
+
+               /* Largest mode is preferred */
+               if (MODE_SIZE(cur_mode) > MODE_SIZE(preferred_mode))
+                       preferred_mode = cur_mode;
+
+               /* At a given size, try to get closest to target refresh */
+               if ((MODE_SIZE(cur_mode) == MODE_SIZE(preferred_mode)) &&
+                   MODE_REFRESH_DIFF(cur_mode, target_refresh) <
+                   MODE_REFRESH_DIFF(preferred_mode, target_refresh)) {
+                       preferred_mode = cur_mode;
+               }
+       }
+
+       preferred_mode->type |= DRM_MODE_TYPE_PREFERRED;
+}
+
+/*
+ * Autogenerated modes from the DMT spec.
+ * This table is copied from xfree86/modes/xf86EdidModes.c.
+ * But the mode with Reduced blank feature is deleted.
+ */
+static struct drm_display_mode drm_dmt_modes[] = {
+       /* 640x350@85Hz */
+       { DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
+                  736, 832, 0, 350, 382, 385, 445, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 640x400@85Hz */
+       { DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
+                  736, 832, 0, 400, 401, 404, 445, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 720x400@85Hz */
+       { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 756,
+                  828, 936, 0, 400, 401, 404, 446, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 640x480@60Hz */
+       { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
+                  752, 800, 0, 480, 489, 492, 525, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 640x480@72Hz */
+       { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
+                  704, 832, 0, 480, 489, 492, 520, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 640x480@75Hz */
+       { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
+                  720, 840, 0, 480, 481, 484, 500, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 640x480@85Hz */
+       { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 36000, 640, 696,
+                  752, 832, 0, 480, 481, 484, 509, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 800x600@56Hz */
+       { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
+                  896, 1024, 0, 600, 601, 603, 625, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 800x600@60Hz */
+       { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
+                  968, 1056, 0, 600, 601, 605, 628, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 800x600@72Hz */
+       { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
+                  976, 1040, 0, 600, 637, 643, 666, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 800x600@75Hz */
+       { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
+                  896, 1056, 0, 600, 601, 604, 625, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 800x600@85Hz */
+       { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 56250, 800, 832,
+                  896, 1048, 0, 600, 601, 604, 631, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 848x480@60Hz */
+       { DRM_MODE("848x480", DRM_MODE_TYPE_DRIVER, 33750, 848, 864,
+                  976, 1088, 0, 480, 486, 494, 517, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1024x768@43Hz, interlace */
+       { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
+                  1208, 1264, 0, 768, 768, 772, 817, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+                       DRM_MODE_FLAG_INTERLACE) },
+       /* 1024x768@60Hz */
+       { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
+                  1184, 1344, 0, 768, 771, 777, 806, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 1024x768@70Hz */
+       { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
+                  1184, 1328, 0, 768, 771, 777, 806, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 1024x768@75Hz */
+       { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78750, 1024, 1040,
+                  1136, 1312, 0, 768, 769, 772, 800, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1024x768@85Hz */
+       { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072,
+                  1072, 1376, 0, 768, 769, 772, 808, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1152x864@75Hz */
+       { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
+                  1344, 1600, 0, 864, 865, 868, 900, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1280x768@60Hz */
+       { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
+                  1472, 1664, 0, 768, 771, 778, 798, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1280x768@75Hz */
+       { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 102250, 1280, 1360,
+                  1488, 1696, 0, 768, 771, 778, 805, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 1280x768@85Hz */
+       { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 117500, 1280, 1360,
+                  1496, 1712, 0, 768, 771, 778, 809, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1280x800@60Hz */
+       { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
+                  1480, 1680, 0, 800, 803, 809, 831, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 1280x800@75Hz */
+       { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 106500, 1280, 1360,
+                  1488, 1696, 0, 800, 803, 809, 838, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1280x800@85Hz */
+       { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 122500, 1280, 1360,
+                  1496, 1712, 0, 800, 803, 809, 843, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1280x960@60Hz */
+       { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
+                  1488, 1800, 0, 960, 961, 964, 1000, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1280x960@85Hz */
+       { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1344,
+                  1504, 1728, 0, 960, 961, 964, 1011, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1280x1024@60Hz */
+       { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
+                  1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1280x1024@75Hz */
+       { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
+                  1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1280x1024@85Hz */
+       { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 157500, 1280, 1344,
+                  1504, 1728, 0, 1024, 1025, 1028, 1072, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1360x768@60Hz */
+       { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
+                  1536, 1792, 0, 768, 771, 777, 795, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1440x1050@60Hz */
+       { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
+                  1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1440x1050@75Hz */
+       { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 156000, 1400, 1504,
+                  1648, 1896, 0, 1050, 1053, 1057, 1099, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1440x1050@85Hz */
+       { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 179500, 1400, 1504,
+                  1656, 1912, 0, 1050, 1053, 1057, 1105, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1440x900@60Hz */
+       { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
+                  1672, 1904, 0, 900, 903, 909, 934, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1440x900@75Hz */
+       { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 136750, 1440, 1536,
+                  1688, 1936, 0, 900, 903, 909, 942, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1440x900@85Hz */
+       { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 157000, 1440, 1544,
+                  1696, 1952, 0, 900, 903, 909, 948, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1600x1200@60Hz */
+       { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
+                  1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1600x1200@65Hz */
+       { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 175500, 1600, 1664,
+                  1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1600x1200@70Hz */
+       { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 189000, 1600, 1664,
+                  1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1600x1200@75Hz */
+       { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 2025000, 1600, 1664,
+                  1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1600x1200@85Hz */
+       { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 229500, 1600, 1664,
+                  1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1680x1050@60Hz */
+       { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
+                  1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1680x1050@75Hz */
+       { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 187000, 1680, 1800,
+                  1976, 2272, 0, 1050, 1053, 1059, 1099, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1680x1050@85Hz */
+       { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 214750, 1680, 1808,
+                  1984, 2288, 0, 1050, 1053, 1059, 1105, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1792x1344@60Hz */
+       { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
+                  2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1729x1344@75Hz */
+       { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 261000, 1792, 1888,
+                  2104, 2456, 0, 1344, 1345, 1348, 1417, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1853x1392@60Hz */
+       { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
+                  2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1856x1392@75Hz */
+       { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 288000, 1856, 1984,
+                  2208, 2560, 0, 1392, 1395, 1399, 1500, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1920x1200@60Hz */
+       { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
+                  2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1920x1200@75Hz */
+       { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 245250, 1920, 2056,
+                  2264, 2608, 0, 1200, 1203, 1209, 1255, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1920x1200@85Hz */
+       { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 281250, 1920, 2064,
+                  2272, 2624, 0, 1200, 1203, 1209, 1262, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1920x1440@60Hz */
+       { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
+                  2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1920x1440@75Hz */
+       { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2064,
+                  2288, 2640, 0, 1440, 1441, 1444, 1500, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 2560x1600@60Hz */
+       { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
+                  3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 2560x1600@75HZ */
+       { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 443250, 2560, 2768,
+                  3048, 3536, 0, 1600, 1603, 1609, 1672, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 2560x1600@85HZ */
+       { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 505250, 2560, 2768,
+                  3048, 3536, 0, 1600, 1603, 1609, 1682, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+};
+static const int drm_num_dmt_modes =
+       sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
+
+static struct drm_display_mode *drm_find_dmt(struct drm_device *dev,
+                       int hsize, int vsize, int fresh)
+{
+       int idx;
+
+       /* Scan the DMT table for an exact width/height/refresh match. */
+       for (idx = 0; idx < drm_num_dmt_modes; idx++) {
+               struct drm_display_mode *candidate = &drm_dmt_modes[idx];
+
+               if (candidate->hdisplay != hsize ||
+                   candidate->vdisplay != vsize ||
+                   drm_mode_vrefresh(candidate) != fresh)
+                       continue;
+               /* Found it: hand back a device-owned copy of the entry. */
+               return drm_mode_duplicate(dev, candidate);
+       }
+
+       /* No matching DMT mode. */
+       return NULL;
+}
+
+/*
+ * 0 is reserved.  The spec says 0x01 fill for unused timings.  Some old
+ * monitors fill with ascii space (0x20) instead.
+ */
+static int
+bad_std_timing(u8 a, u8 b)
+{
+       if (a != b)
+               return 0;
+       /* Equal byte pair: reject the three known filler patterns. */
+       return a == 0x00 || a == 0x01 || a == 0x20;
+}
+
+/**
+ * drm_mode_std - convert standard mode info (width, height, refresh) into mode
+ * @dev: DRM device (needed to create the new mode)
+ * @t: standard timing params
+ * @revision: EDID minor revision (affects the aspect == 0 interpretation)
+ * @timing_level: standard timing level (LEVEL_DMT/LEVEL_GTF/LEVEL_CVT)
+ *
+ * Take the standard timing params (in this case width, aspect, and refresh)
+ * and convert them into a real mode using CVT/GTF/DMT.
+ *
+ * Returns a newly allocated mode, or NULL on bad input / allocation failure.
+ */
+struct drm_display_mode *drm_mode_std(struct drm_device *dev,
+                                     struct std_timing *t,
+                                     int revision,
+                                     int timing_level)
+{
+       struct drm_display_mode *mode;
+       int hsize, vsize;
+       int vrefresh_rate;
+       unsigned aspect_ratio = (t->vfreq_aspect & EDID_TIMING_ASPECT_MASK)
+               >> EDID_TIMING_ASPECT_SHIFT;
+       unsigned vfreq = (t->vfreq_aspect & EDID_TIMING_VFREQ_MASK)
+               >> EDID_TIMING_VFREQ_SHIFT;
+
+       if (bad_std_timing(t->hsize, t->vfreq_aspect))
+               return NULL;
+
+       /* According to the EDID spec, the hdisplay = hsize * 8 + 248 */
+       hsize = t->hsize * 8 + 248;
+       /* vrefresh_rate = vfreq + 60 */
+       vrefresh_rate = vfreq + 60;
+       /* the vdisplay is calculated based on the aspect ratio */
+       if (aspect_ratio == 0) {
+               /* EDID < 1.3 used 0 for 1:1; 1.3+ redefined it as 16:10 */
+               if (revision < 3)
+                       vsize = hsize;
+               else
+                       vsize = (hsize * 10) / 16;
+       } else if (aspect_ratio == 1)
+               vsize = (hsize * 3) / 4;
+       else if (aspect_ratio == 2)
+               vsize = (hsize * 4) / 5;
+       else
+               vsize = (hsize * 9) / 16;
+       /* HDTV hack: 1360x765@60 really means 1366x768@60 */
+       if (hsize == 1360 && vsize == 765 && vrefresh_rate == 60) {
+               mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0,
+                                   false);
+               /* drm_cvt_mode() allocates; guard against NULL deref */
+               if (!mode)
+                       return NULL;
+               mode->hdisplay = 1366;
+               mode->vsync_start = mode->vsync_start - 1;
+               mode->vsync_end = mode->vsync_end - 1;
+               return mode;
+       }
+
+       /* check whether it can be found in default mode table */
+       mode = drm_find_dmt(dev, hsize, vsize, vrefresh_rate);
+       if (mode)
+               return mode;
+
+       /* not a DMT mode: synthesize one at the EDID's timing level */
+       mode = NULL;
+       switch (timing_level) {
+       case LEVEL_DMT:
+               break;
+       case LEVEL_GTF:
+               mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
+               break;
+       case LEVEL_CVT:
+               mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0,
+                                   false);
+               break;
+       }
+       return mode;
+}
+
+/**
+ * drm_mode_detailed - create a new mode from an EDID detailed timing section
+ * @dev: DRM device (needed to create new mode)
+ * @edid: EDID block
+ * @timing: EDID detailed timing info
+ * @quirks: quirks to apply
+ *
+ * An EDID detailed timing block contains enough info for us to create and
+ * return a new struct drm_display_mode.  Returns NULL for tiny, stereo,
+ * composite-sync, or otherwise malformed timings.
+ */
+static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
+                                                 struct edid *edid,
+                                                 struct detailed_timing *timing,
+                                                 u32 quirks)
+{
+       struct drm_display_mode *mode;
+       struct detailed_pixel_timing *pt = &timing->data.pixel_data;
+       /* Reassemble the 10/12-bit fields split across lo bytes and hi nibbles */
+       unsigned hactive = (pt->hactive_hblank_hi & 0xf0) << 4 | pt->hactive_lo;
+       unsigned vactive = (pt->vactive_vblank_hi & 0xf0) << 4 | pt->vactive_lo;
+       unsigned hblank = (pt->hactive_hblank_hi & 0xf) << 8 | pt->hblank_lo;
+       unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo;
+       unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) << 2 | pt->hsync_offset_lo;
+       unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) << 4 | pt->hsync_pulse_width_lo;
+       unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) >> 2 | pt->vsync_offset_pulse_width_lo >> 4;
+       unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 4 | (pt->vsync_offset_pulse_width_lo & 0xf);
+
+       /* ignore tiny modes */
+       if (hactive < 64 || vactive < 64)
+               return NULL;
+
+       if (pt->misc & DRM_EDID_PT_STEREO) {
+               printk(KERN_WARNING "stereo mode not supported\n");
+               return NULL;
+       }
+       if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) {
+               printk(KERN_WARNING "integrated sync not supported\n");
+               return NULL;
+       }
+
+       /* it is incorrect if hsync/vsync width is zero */
+       if (!hsync_pulse_width || !vsync_pulse_width) {
+               DRM_DEBUG_KMS("Incorrect Detailed timing. "
+                               "Wrong Hsync/Vsync pulse width\n");
+               return NULL;
+       }
+       mode = drm_mode_create(dev);
+       if (!mode)
+               return NULL;
+
+       mode->type = DRM_MODE_TYPE_DRIVER;
+
+       /* Quirked panels report a bogus clock; force the known-good value */
+       if (quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH)
+               timing->pixel_clock = cpu_to_le16(1088);
+
+       /* EDID stores the pixel clock in units of 10 kHz; mode->clock is kHz */
+       mode->clock = le16_to_cpu(timing->pixel_clock) * 10;
+
+       mode->hdisplay = hactive;
+       mode->hsync_start = mode->hdisplay + hsync_offset;
+       mode->hsync_end = mode->hsync_start + hsync_pulse_width;
+       mode->htotal = mode->hdisplay + hblank;
+
+       mode->vdisplay = vactive;
+       mode->vsync_start = mode->vdisplay + vsync_offset;
+       mode->vsync_end = mode->vsync_start + vsync_pulse_width;
+       mode->vtotal = mode->vdisplay + vblank;
+
+       /* perform the basic check for the detailed timing */
+       if (mode->hsync_end > mode->htotal ||
+               mode->vsync_end > mode->vtotal) {
+               drm_mode_destroy(dev, mode);
+               DRM_DEBUG_KMS("Incorrect detailed timing. "
+                               "Sync is beyond the blank.\n");
+               return NULL;
+       }
+
+       /* Some EDIDs have bogus h/vtotal values */
+       /* NOTE(review): dead code — the reject above already returned for
+        * sync-past-total modes, so these fixups can never trigger.  Decide
+        * whether the strict reject or the lenient fixup is intended. */
+       if (mode->hsync_end > mode->htotal)
+               mode->htotal = mode->hsync_end + 1;
+       if (mode->vsync_end > mode->vtotal)
+               mode->vtotal = mode->vsync_end + 1;
+
+       drm_mode_set_name(mode);
+
+       if (pt->misc & DRM_EDID_PT_INTERLACED)
+               mode->flags |= DRM_MODE_FLAG_INTERLACE;
+
+       if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
+               pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
+       }
+
+       mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ?
+               DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
+       mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
+               DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
+
+       /* Physical size: lo bytes plus hi nibbles, in millimetres */
+       mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4;
+       mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf) << 8;
+
+       if (quirks & EDID_QUIRK_DETAILED_IN_CM) {
+               mode->width_mm *= 10;
+               mode->height_mm *= 10;
+       }
+
+       if (quirks & EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE) {
+               mode->width_mm = edid->width_cm * 10;
+               mode->height_mm = edid->height_cm * 10;
+       }
+
+       return mode;
+}
+
+/*
+ * Detailed mode info for the EDID "established modes" data to use.
+ * Entry order matters: add_established_modes() indexes this table directly
+ * with the established-timings bit number (t1 bit 0 first, then t2, then
+ * bit 7 of the manufacturer byte).
+ */
+static struct drm_display_mode edid_est_modes[] = {
+       { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
+                  968, 1056, 0, 600, 601, 605, 628, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@60Hz */
+       { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
+                  896, 1024, 0, 600, 601, 603,  625, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@56Hz */
+       { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
+                  720, 840, 0, 480, 481, 484, 500, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */
+       { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
+                  704,  832, 0, 480, 489, 491, 520, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */
+       { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704,
+                  768,  864, 0, 480, 483, 486, 525, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@67Hz */
+       { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25200, 640, 656,
+                  752, 800, 0, 480, 490, 492, 525, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@60Hz */
+       { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738,
+                  846, 900, 0, 400, 421, 423,  449, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 720x400@88Hz */
+       { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 28320, 720, 738,
+                  846,  900, 0, 400, 412, 414, 449, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 720x400@70Hz */
+       { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
+                  1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@75Hz */
+       { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78800, 1024, 1040,
+                  1136, 1312, 0,  768, 769, 772, 800, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@75Hz */
+       { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
+                  1184, 1328, 0,  768, 771, 777, 806, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@70Hz */
+       { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
+                  1184, 1344, 0,  768, 771, 777, 806, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@60Hz */
+       { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032,
+                  1208, 1264, 0, 768, 768, 776, 817, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 1024x768@43Hz */
+       { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 57284, 832, 864,
+                  928, 1152, 0, 624, 625, 628, 667, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 832x624@75Hz */
+       { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
+                  896, 1056, 0, 600, 601, 604,  625, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@75Hz */
+       { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
+                  976, 1040, 0, 600, 637, 643, 666, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@72Hz */
+       { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
+                  1344, 1600, 0,  864, 865, 868, 900, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */
+};
+
+/* Counts fixed by the EDID 1.x block layout (used as loop bounds below). */
+#define EDID_EST_TIMINGS 16
+#define EDID_STD_TIMINGS 8
+#define EDID_DETAILED_TIMINGS 4
+
+/**
+ * add_established_modes - get est. modes from EDID and add them
+ * @connector: connector the new modes get attached to
+ * @edid: EDID block to scan
+ *
+ * Each EDID block contains a bitmap of the supported "established modes"
+ * (defined above).  Tease them out and add them to the probed modes list.
+ * Returns the number of modes added.
+ */
+static int add_established_modes(struct drm_connector *connector, struct edid *edid)
+{
+       struct drm_device *dev = connector->dev;
+       unsigned long est_bits = edid->established_timings.t1 |
+               (edid->established_timings.t2 << 8) |
+               ((edid->established_timings.mfg_rsvd & 0x80) << 9);
+       int bit, count = 0;
+
+       /* 17 bits total: t1 (8), t2 (8), plus bit 7 of the mfg byte. */
+       for (bit = 0; bit <= EDID_EST_TIMINGS; bit++) {
+               struct drm_display_mode *dup;
+
+               if (!(est_bits & (1 << bit)))
+                       continue;
+               dup = drm_mode_duplicate(dev, &edid_est_modes[bit]);
+               if (!dup)
+                       continue;
+               drm_mode_probed_add(connector, dup);
+               count++;
+       }
+
+       return count;
+}
+/**
+ * standard_timing_level - pick the std. timing generator (CVT/GTF/DMT)
+ * @edid: EDID block to scan
+ */
+static int standard_timing_level(struct edid *edid)
+{
+       /* Pre-1.2 EDIDs only get DMT modes. */
+       if (edid->revision < 2)
+               return LEVEL_DMT;
+       /* 1.4 with the default-GTF feature bit means CVT. */
+       if (edid->revision >= 4 && (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF))
+               return LEVEL_CVT;
+       return LEVEL_GTF;
+}
+
+/**
+ * add_standard_modes - get std. modes from EDID and add them
+ * @connector: connector the new modes get attached to
+ * @edid: EDID block to scan
+ *
+ * Standard modes can be calculated using the appropriate standard (CVT,
+ * GTF or DMT).  Grab them from @edid, calculate them, and add them to the
+ * probed modes list.  Returns the number of modes added.
+ */
+static int add_standard_modes(struct drm_connector *connector, struct edid *edid)
+{
+       struct drm_device *dev = connector->dev;
+       int level = standard_timing_level(edid);
+       int idx, count = 0;
+
+       for (idx = 0; idx < EDID_STD_TIMINGS; idx++) {
+               struct std_timing *st = &edid->standard_timings[idx];
+               struct drm_display_mode *dup;
+
+               /* A 0x0101 byte pair marks an unused standard timing slot. */
+               if (st->hsize == 1 && st->vfreq_aspect == 1)
+                       continue;
+
+               dup = drm_mode_std(dev, st, edid->revision, level);
+               if (!dup)
+                       continue;
+               drm_mode_probed_add(connector, dup);
+               count++;
+       }
+
+       return count;
+}
+
+/*
+ * Check a mode against a monitor range-limits descriptor.
+ *
+ * XXX fix this for:
+ * - GTF secondary curve formula
+ * - EDID 1.4 range offsets
+ * - CVT extended bits
+ */
+static bool
+mode_in_range(struct drm_display_mode *mode, struct detailed_timing *timing)
+{
+       struct detailed_data_monitor_range *range =
+               &timing->data.other_data.data.range;
+       int hsync = drm_mode_hsync(mode);
+       int vrefresh = drm_mode_vrefresh(mode);
+
+       if (hsync < range->min_hfreq_khz || hsync > range->max_hfreq_khz)
+               return false;
+
+       if (vrefresh < range->min_vfreq || vrefresh > range->max_vfreq)
+               return false;
+
+       /* 0 and 0xff mean the descriptor carries no pixel clock limit. */
+       if (range->pixel_clock_mhz && range->pixel_clock_mhz != 0xff) {
+               /* be forgiving since it's in units of 10MHz */
+               int max_clock = (range->pixel_clock_mhz * 10 + 9) * 1000;
+
+               if (mode->clock > max_clock)
+                       return false;
+       }
+
+       return true;
+}
+
+/*
+ * Add every DMT mode that falls inside the monitor's range descriptor.
+ *
+ * XXX If drm_dmt_modes ever regrows the CVT-R modes (and it will) this will
+ * need to account for them.
+ */
+static int drm_gtf_modes_for_range(struct drm_connector *connector,
+                                  struct detailed_timing *timing)
+{
+       struct drm_device *dev = connector->dev;
+       int idx, count = 0;
+
+       for (idx = 0; idx < drm_num_dmt_modes; idx++) {
+               struct drm_display_mode *dup;
+
+               if (!mode_in_range(&drm_dmt_modes[idx], timing))
+                       continue;
+               dup = drm_mode_duplicate(dev, &drm_dmt_modes[idx]);
+               if (!dup)
+                       continue;
+               drm_mode_probed_add(connector, dup);
+               count++;
+       }
+
+       return count;
+}
+
+/*
+ * drm_cvt_modes - parse the CVT 3-byte codes of an EDID_DETAIL_CVT_3BYTE
+ * descriptor and add the resulting modes.  Returns the number added.
+ */
+static int drm_cvt_modes(struct drm_connector *connector,
+                        struct detailed_timing *timing)
+{
+       int i, j, modes = 0;
+       struct drm_display_mode *newmode;
+       struct drm_device *dev = connector->dev;
+       struct cvt_timing *cvt;
+       const int rates[] = { 60, 85, 75, 60, 50 };
+
+       for (i = 0; i < 4; i++) {
+               int width, height;
+               cvt = &(timing->data.other_data.data.cvt[i]);
+
+               /*
+                * Byte 0 holds the low 8 bits of (lines/2 - 1); byte 1 bits
+                * 7:4 hold the high 4 bits, so the nibble shifts up by 4
+                * (the old << 8 put it in the wrong place).
+                */
+               height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 4) + 1) * 2;
+               /* Aspect ratio is byte 1 bits 3:2, not bits 7:6. */
+               switch (cvt->code[1] & 0x0c) {
+               case 0x00:
+               default: /* unreachable; keeps width initialized */
+                       width = height * 4 / 3;
+                       break;
+               case 0x04:
+                       width = height * 16 / 9;
+                       break;
+               case 0x08:
+                       width = height * 16 / 10;
+                       break;
+               case 0x0c:
+                       width = height * 15 / 9;
+                       break;
+               }
+
+               /* Byte 2 bits 4:1 flag the supported refresh rates. */
+               for (j = 1; j < 5; j++) {
+                       if (cvt->code[2] & (1 << j)) {
+                               newmode = drm_cvt_mode(dev, width, height,
+                                                      rates[j], j == 0,
+                                                      false, false);
+                               if (newmode) {
+                                       drm_mode_probed_add(connector, newmode);
+                                       modes++;
+                               }
+                       }
+               }
+       }
+
+       return modes;
+}
+
+/*
+ * add_detailed_modes - parse one 18-byte detailed timing descriptor
+ * @connector: connector the resulting modes get attached to
+ * @timing: descriptor to parse
+ * @edid: parent EDID block (for revision/feature checks)
+ * @quirks: quirks to apply
+ * @preferred: mark the resulting detailed mode as the preferred one
+ *
+ * A descriptor with a non-zero pixel clock is a real detailed timing;
+ * otherwise it is one of the "other data" descriptor types.  Returns the
+ * number of modes added to the probed list.
+ */
+static int add_detailed_modes(struct drm_connector *connector,
+                             struct detailed_timing *timing,
+                             struct edid *edid, u32 quirks, int preferred)
+{
+       int i, modes = 0;
+       struct detailed_non_pixel *data = &timing->data.other_data;
+       int timing_level = standard_timing_level(edid);
+       int gtf = (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF);
+       struct drm_display_mode *newmode;
+       struct drm_device *dev = connector->dev;
+
+       /* Non-zero pixel clock => a real detailed timing descriptor. */
+       if (timing->pixel_clock) {
+               newmode = drm_mode_detailed(dev, edid, timing, quirks);
+               if (!newmode)
+                       return 0;
+
+               if (preferred)
+                       newmode->type |= DRM_MODE_TYPE_PREFERRED;
+
+               drm_mode_probed_add(connector, newmode);
+               return 1;
+       }
+
+       /* other timing types */
+       switch (data->type) {
+       case EDID_DETAIL_MONITOR_RANGE:
+               /* Range limits only generate modes when GTF is the default. */
+               if (gtf)
+                       modes += drm_gtf_modes_for_range(connector, timing);
+               break;
+       case EDID_DETAIL_STD_MODES:
+               /* Six modes per detailed section */
+               for (i = 0; i < 6; i++) {
+                       struct std_timing *std;
+                       /* Shadows the outer newmode on purpose: loop-local. */
+                       struct drm_display_mode *newmode;
+
+                       std = &data->data.timings[i];
+                       newmode = drm_mode_std(dev, std, edid->revision,
+                                              timing_level);
+                       if (newmode) {
+                               drm_mode_probed_add(connector, newmode);
+                               modes++;
+                       }
+               }
+               break;
+       case EDID_DETAIL_CVT_3BYTE:
+               modes += drm_cvt_modes(connector, timing);
+               break;
+       default:
+               break;
+       }
+
+       return modes;
+}
+
+/**
+ * add_detailed_info - get detailed mode info from EDID data
+ * @connector: attached connector
+ * @edid: EDID block to scan
+ * @quirks: quirks to apply
+ *
+ * Some of the detailed timing sections may contain mode information.  Grab
+ * it and add it to the list.  Returns the number of modes added.
+ */
+static int add_detailed_info(struct drm_connector *connector,
+                            struct edid *edid, u32 quirks)
+{
+       int idx, count = 0;
+
+       for (idx = 0; idx < EDID_DETAILED_TIMINGS; idx++) {
+               struct detailed_timing *timing = &edid->detailed_timings[idx];
+               int preferred = (idx == 0) && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);
+
+               /* EDID 1.0 allows only real timings in these descriptors. */
+               if (edid->version == 1 && edid->revision == 0 &&
+                       !timing->pixel_clock)
+                       continue;
+
+               count += add_detailed_modes(connector, timing, edid, quirks,
+                                           preferred);
+       }
+
+       return count;
+}
+
+/**
+ * add_detailed_info_eedid - get detailed mode info from the additional
+ *                     timing EDID block
+ * @connector: attached connector
+ * @edid: EDID block to scan (only used to locate the additional timing block)
+ * @quirks: quirks to apply
+ *
+ * Some of the detailed timing sections may contain mode information.  Grab
+ * it and add it to the list.
+ */
+static int add_detailed_info_eedid(struct drm_connector *connector,
+                            struct edid *edid, u32 quirks)
+{
+       int i, modes = 0;
+       char *edid_ext = NULL;
+       struct detailed_timing *timing;
+       int edid_ext_num;
+       int start_offset, end_offset;
+       int timing_level;
+
+       if (edid->version == 1 && edid->revision < 3) {
+               /* If the EDID version is less than 1.3, there is no
+                * extension EDID.
+                */
+               return 0;
+       }
+       if (!edid->extensions) {
+               /* if there is no extension EDID, it is unnecessary to
+                * parse the E-EDID to get detailed info
+                */
+               return 0;
+       }
+
+       /* Choose the real EDID extension number */
+       edid_ext_num = edid->extensions > MAX_EDID_EXT_NUM ?
+                      MAX_EDID_EXT_NUM : edid->extensions;
+
+       /* Find CEA extension */
+       for (i = 0; i < edid_ext_num; i++) {
+               edid_ext = (char *)edid + EDID_LENGTH * (i + 1);
+               /* This block is CEA extension */
+               if (edid_ext[0] == 0x02)
+                       break;
+       }
+
+       if (i == edid_ext_num) {
+               /* if there is no additional timing EDID block, return */
+               return 0;
+       }
+
+       /* Get the start offset of detailed timing block */
+       start_offset = edid_ext[2];
+       if (start_offset == 0) {
+               /* If the start_offset is zero, it means that neither detailed
+                * info nor data block exist. In such case it is also
+                * unnecessary to parse the detailed timing info.
+                */
+               return 0;
+       }
+
+       timing_level = standard_timing_level(edid);
+       end_offset = EDID_LENGTH;
+       end_offset -= sizeof(struct detailed_timing);
+       for (i = start_offset; i < end_offset;
+                       i += sizeof(struct detailed_timing)) {
+               timing = (struct detailed_timing *)(edid_ext + i);
+               modes += add_detailed_modes(connector, timing, edid, quirks, 0);
+       }
+
+       return modes;
+}
+
+#define DDC_ADDR 0x50
+/**
+ * Get EDID information via I2C.
+ *
+ * \param adapter : i2c device adaptor
+ * \param buf     : EDID data buffer to be filled
+ * \param len     : EDID data buffer length
+ * \return 0 on success or -1 on failure.
+ *
+ * Try to fetch EDID information by calling i2c driver function.
+ */
+int drm_do_probe_ddc_edid(struct i2c_adapter *adapter,
+                         unsigned char *buf, int len)
+{
+       unsigned char start = 0x0;
+       struct i2c_msg msgs[] = {
+               {
+                       .addr   = DDC_ADDR,
+                       .flags  = 0,
+                       .len    = 1,
+                       .buf    = &start,
+               }, {
+                       .addr   = DDC_ADDR,
+                       .flags  = I2C_M_RD,
+                       .len    = len,
+                       .buf    = buf,
+               }
+       };
+
+       if (i2c_transfer(adapter, msgs, 2) == 2)
+               return 0;
+
+       return -1;
+}
+EXPORT_SYMBOL(drm_do_probe_ddc_edid);
+
+static int drm_ddc_read_edid(struct drm_connector *connector,
+                            struct i2c_adapter *adapter,
+                            char *buf, int len)
+{
+       int i;
+
+       for (i = 0; i < 4; i++) {
+               if (drm_do_probe_ddc_edid(adapter, buf, len))
+                       return -1;
+               if (edid_is_valid((struct edid *)buf))
+                       return 0;
+       }
+
+       /* repeated checksum failures; warn, but carry on */
+       dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
+                drm_get_connector_name(connector));
+       return -1;
+}
+
+/**
+ * drm_get_edid - get EDID data, if available
+ * @connector: connector we're probing
+ * @adapter: i2c adapter to use for DDC
+ *
+ * Poke the given connector's i2c channel to grab EDID data if possible.
+ *
+ * Return edid data or NULL if we couldn't find any.
+ */
+struct edid *drm_get_edid(struct drm_connector *connector,
+                         struct i2c_adapter *adapter)
+{
+       int ret;
+       struct edid *edid;
+
+       edid = kmalloc(EDID_LENGTH * (MAX_EDID_EXT_NUM + 1),
+                      GFP_KERNEL);
+       if (edid == NULL) {
+               dev_warn(&connector->dev->pdev->dev,
+                        "Failed to allocate EDID\n");
+               goto end;
+       }
+
+       /* Read first EDID block */
+       ret = drm_ddc_read_edid(connector, adapter,
+                               (unsigned char *)edid, EDID_LENGTH);
+       if (ret != 0)
+               goto clean_up;
+
+       /* There are EDID extensions to be read */
+       if (edid->extensions != 0) {
+               int edid_ext_num = edid->extensions;
+
+               if (edid_ext_num > MAX_EDID_EXT_NUM) {
+                       dev_warn(&connector->dev->pdev->dev,
+                                "The number of extension(%d) is "
+                                "over max (%d), actually read number (%d)\n",
+                                edid_ext_num, MAX_EDID_EXT_NUM,
+                                MAX_EDID_EXT_NUM);
+                       /* Reset EDID extension number to be read */
+                       edid_ext_num = MAX_EDID_EXT_NUM;
+               }
+               /* Read EDID including extensions too */
+               ret = drm_ddc_read_edid(connector, adapter, (char *)edid,
+                                       EDID_LENGTH * (edid_ext_num + 1));
+               if (ret != 0)
+                       goto clean_up;
+
+       }
+
+       connector->display_info.raw_edid = (char *)edid;
+       goto end;
+
+clean_up:
+       kfree(edid);
+       edid = NULL;
+end:
+       return edid;
+
+}
+EXPORT_SYMBOL(drm_get_edid);
+
+#define HDMI_IDENTIFIER 0x000C03
+#define VENDOR_BLOCK    0x03
+/**
+ * drm_detect_hdmi_monitor - detect whether monitor is hdmi.
+ * @edid: monitor EDID information
+ *
+ * Parse the CEA extension according to CEA-861-B.
+ * Return true if HDMI, false if not or unknown.
+ */
+bool drm_detect_hdmi_monitor(struct edid *edid)
+{
+       char *edid_ext = NULL;
+       int i, hdmi_id, edid_ext_num;
+       int start_offset, end_offset;
+       bool is_hdmi = false;
+
+       /* No EDID or EDID extensions */
+       if (edid == NULL || edid->extensions == 0)
+               goto end;
+
+       /* Choose the real EDID extension number */
+       edid_ext_num = edid->extensions > MAX_EDID_EXT_NUM ?
+                      MAX_EDID_EXT_NUM : edid->extensions;
+
+       /* Find CEA extension */
+       for (i = 0; i < edid_ext_num; i++) {
+               edid_ext = (char *)edid + EDID_LENGTH * (i + 1);
+               /* This block is CEA extension */
+               if (edid_ext[0] == 0x02)
+                       break;
+       }
+
+       if (i == edid_ext_num)
+               goto end;
+
+       /* Data block offset in CEA extension block */
+       start_offset = 4;
+       end_offset = edid_ext[2];
+
+       /*
+        * Because HDMI identifier is in Vendor Specific Block,
+        * search it from all data blocks of CEA extension.
+        */
+       for (i = start_offset; i < end_offset;
+               /* Increased by data block len */
+               i += ((edid_ext[i] & 0x1f) + 1)) {
+               /* Find vendor specific block */
+               if ((edid_ext[i] >> 5) == VENDOR_BLOCK) {
+                       hdmi_id = edid_ext[i + 1] | (edid_ext[i + 2] << 8) |
+                                 edid_ext[i + 3] << 16;
+                       /* Find HDMI identifier */
+                       if (hdmi_id == HDMI_IDENTIFIER)
+                               is_hdmi = true;
+                       break;
+               }
+       }
+
+end:
+       return is_hdmi;
+}
+EXPORT_SYMBOL(drm_detect_hdmi_monitor);
+
+/**
+ * drm_add_edid_modes - add modes from EDID data, if available
+ * @connector: connector we're probing
+ * @edid: edid data
+ *
+ * Add the specified modes to the connector's mode list.
+ *
+ * Return number of modes added or 0 if we couldn't find any.
+ */
+int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
+{
+       int num_modes = 0;
+       u32 quirks;
+
+       if (edid == NULL) {
+               return 0;
+       }
+       if (!edid_is_valid(edid)) {
+               dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
+                        drm_get_connector_name(connector));
+               return 0;
+       }
+
+       quirks = edid_get_quirks(edid);
+
+       num_modes += add_established_modes(connector, edid);
+       num_modes += add_standard_modes(connector, edid);
+       num_modes += add_detailed_info(connector, edid, quirks);
+       num_modes += add_detailed_info_eedid(connector, edid, quirks);
+
+       if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
+               edid_fixup_preferred(connector, quirks);
+
+       connector->display_info.serration_vsync = (edid->input & DRM_EDID_INPUT_SERRATION_VSYNC) ? 1 : 0;
+       connector->display_info.sync_on_green = (edid->input & DRM_EDID_INPUT_SYNC_ON_GREEN) ? 1 : 0;
+       connector->display_info.composite_sync = (edid->input & DRM_EDID_INPUT_COMPOSITE_SYNC) ? 1 : 0;
+       connector->display_info.separate_syncs = (edid->input & DRM_EDID_INPUT_SEPARATE_SYNCS) ? 1 : 0;
+       connector->display_info.blank_to_black = (edid->input & DRM_EDID_INPUT_BLANK_TO_BLACK) ? 1 : 0;
+       connector->display_info.video_level = (edid->input & DRM_EDID_INPUT_VIDEO_LEVEL) >> 5;
+       connector->display_info.digital = (edid->input & DRM_EDID_INPUT_DIGITAL) ? 1 : 0;
+       connector->display_info.width_mm = edid->width_cm * 10;
+       connector->display_info.height_mm = edid->height_cm * 10;
+       connector->display_info.gamma = edid->gamma;
+       connector->display_info.gtf_supported = (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF) ? 1 : 0;
+       connector->display_info.standard_color = (edid->features & DRM_EDID_FEATURE_STANDARD_COLOR) ? 1 : 0;
+       connector->display_info.display_type = (edid->features & DRM_EDID_FEATURE_DISPLAY_TYPE) >> 3;
+       connector->display_info.active_off_supported = (edid->features & DRM_EDID_FEATURE_PM_ACTIVE_OFF) ? 1 : 0;
+       connector->display_info.suspend_supported = (edid->features & DRM_EDID_FEATURE_PM_SUSPEND) ? 1 : 0;
+       connector->display_info.standby_supported = (edid->features & DRM_EDID_FEATURE_PM_STANDBY) ? 1 : 0;
+       connector->display_info.gamma = edid->gamma;
+
+       return num_modes;
+}
+EXPORT_SYMBOL(drm_add_edid_modes);
+
+/**
+ * drm_add_modes_noedid - add modes for the connectors without EDID
+ * @connector: connector we're probing
+ * @hdisplay: the horizontal display limit
+ * @vdisplay: the vertical display limit
+ *
+ * Add the specified modes to the connector's mode list. Only when the
+ * hdisplay/vdisplay is not beyond the given limit, it will be added.
+ *
+ * Return number of modes added or 0 if we couldn't find any.
+ */
+int drm_add_modes_noedid(struct drm_connector *connector,
+                       int hdisplay, int vdisplay)
+{
+       int i, count, num_modes = 0;
+       struct drm_display_mode *mode, *ptr;
+       struct drm_device *dev = connector->dev;
+
+       count = sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
+       if (hdisplay < 0)
+               hdisplay = 0;
+       if (vdisplay < 0)
+               vdisplay = 0;
+
+       for (i = 0; i < count; i++) {
+               ptr = &drm_dmt_modes[i];
+               if (hdisplay && vdisplay) {
+                       /*
+                        * Only when two are valid, they will be used to check
+                        * whether the mode should be added to the mode list of
+                        * the connector.
+                        */
+                       if (ptr->hdisplay > hdisplay ||
+                                       ptr->vdisplay > vdisplay)
+                               continue;
+               }
+               if (drm_mode_vrefresh(ptr) > 61)
+                       continue;
+               mode = drm_mode_duplicate(dev, ptr);
+               if (mode) {
+                       drm_mode_probed_add(connector, mode);
+                       num_modes++;
+               }
+       }
+       return num_modes;
+}
+EXPORT_SYMBOL(drm_add_modes_noedid);
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_encoder_slave.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_encoder_slave.c
new file mode 100644 (file)
index 0000000..f018469
--- /dev/null
@@ -0,0 +1,116 @@
+/*
+ * Copyright (C) 2009 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drm_encoder_slave.h"
+
+/**
+ * drm_i2c_encoder_init - Initialize an I2C slave encoder
+ * @dev:       DRM device.
+ * @encoder:    Encoder to be attached to the I2C device. You aren't
+ *             required to have called drm_encoder_init() before.
+ * @adap:      I2C adapter that will be used to communicate with
+ *             the device.
+ * @info:      Information that will be used to create the I2C device.
+ *             Required fields are @addr and @type.
+ *
+ * Create an I2C device on the specified bus (the module containing its
+ * driver is transparently loaded) and attach it to the specified
+ * &drm_encoder_slave. The @slave_funcs field will be initialized with
+ * the hooks provided by the slave driver.
+ *
+ * Returns 0 on success or a negative errno on failure, in particular,
+ * -ENODEV is returned when no matching driver is found.
+ */
+int drm_i2c_encoder_init(struct drm_device *dev,
+                        struct drm_encoder_slave *encoder,
+                        struct i2c_adapter *adap,
+                        const struct i2c_board_info *info)
+{
+       char modalias[sizeof(I2C_MODULE_PREFIX)
+                     + I2C_NAME_SIZE];
+       struct module *module = NULL;
+       struct i2c_client *client;
+       struct drm_i2c_encoder_driver *encoder_drv;
+       int err = 0;
+
+       snprintf(modalias, sizeof(modalias),
+                "%s%s", I2C_MODULE_PREFIX, info->type);
+       request_module(modalias);
+
+       client = i2c_new_device(adap, info);
+       if (!client) {
+               err = -ENOMEM;
+               goto fail;
+       }
+
+       if (!client->driver) {
+               err = -ENODEV;
+               goto fail_unregister;
+       }
+
+       module = client->driver->driver.owner;
+       if (!try_module_get(module)) {
+               err = -ENODEV;
+               goto fail_unregister;
+       }
+
+       encoder->bus_priv = client;
+
+       encoder_drv = to_drm_i2c_encoder_driver(client->driver);
+
+       err = encoder_drv->encoder_init(client, dev, encoder);
+       if (err)
+               goto fail_unregister;
+
+       return 0;
+
+fail_unregister:
+       i2c_unregister_device(client);
+       module_put(module);
+fail:
+       return err;
+}
+EXPORT_SYMBOL(drm_i2c_encoder_init);
+
+/**
+ * drm_i2c_encoder_destroy - Unregister the I2C device backing an encoder
+ * @drm_encoder:       Encoder to be unregistered.
+ *
+ * This should be called from the @destroy method of an I2C slave
+ * encoder driver once I2C access is no longer needed.
+ */
+void drm_i2c_encoder_destroy(struct drm_encoder *drm_encoder)
+{
+       struct drm_encoder_slave *encoder = to_encoder_slave(drm_encoder);
+       struct i2c_client *client = drm_i2c_encoder_get_client(drm_encoder);
+       struct module *module = client->driver->driver.owner;
+
+       i2c_unregister_device(client);
+       encoder->bus_priv = NULL;
+
+       module_put(module);
+}
+EXPORT_SYMBOL(drm_i2c_encoder_destroy);
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_fops.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_fops.c
new file mode 100644 (file)
index 0000000..08d14df
--- /dev/null
@@ -0,0 +1,660 @@
+/**
+ * \file drm_fops.c
+ * File operations for DRM
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Daryll Strauss <daryll@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include <linux/poll.h>
+#include <linux/smp_lock.h>
+
+static int drm_open_helper(struct inode *inode, struct file *filp,
+                          struct drm_device * dev);
+
+static int drm_setup(struct drm_device * dev)
+{
+       int i;
+       int ret;
+
+       if (dev->driver->firstopen) {
+               ret = dev->driver->firstopen(dev);
+               if (ret != 0)
+                       return ret;
+       }
+
+       atomic_set(&dev->ioctl_count, 0);
+       atomic_set(&dev->vma_count, 0);
+
+       if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
+           !drm_core_check_feature(dev, DRIVER_MODESET)) {
+               dev->buf_use = 0;
+               atomic_set(&dev->buf_alloc, 0);
+
+               i = drm_dma_setup(dev);
+               if (i < 0)
+                       return i;
+       }
+
+       for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
+               atomic_set(&dev->counts[i], 0);
+
+       dev->sigdata.lock = NULL;
+
+       dev->queue_count = 0;
+       dev->queue_reserved = 0;
+       dev->queue_slots = 0;
+       dev->queuelist = NULL;
+       dev->context_flag = 0;
+       dev->interrupt_flag = 0;
+       dev->dma_flag = 0;
+       dev->last_context = 0;
+       dev->last_switch = 0;
+       dev->last_checked = 0;
+       init_waitqueue_head(&dev->context_wait);
+       dev->if_version = 0;
+
+       dev->ctx_start = 0;
+       dev->lck_start = 0;
+
+       dev->buf_async = NULL;
+       init_waitqueue_head(&dev->buf_readers);
+       init_waitqueue_head(&dev->buf_writers);
+
+       DRM_DEBUG("\n");
+
+       /*
+        * The kernel's context could be created here, but is now created
+        * in drm_dma_enqueue.  This is more resource-efficient for
+        * hardware that does not do DMA, but may mean that
+        * drm_select_queue fails between the time the interrupt is
+        * initialized and the time the queues are initialized.
+        */
+
+       return 0;
+}
+
+/**
+ * Open file.
+ *
+ * \param inode device inode
+ * \param filp file pointer.
+ * \return zero on success or a negative number on failure.
+ *
+ * Searches for the DRM device with the same minor number, calls open_helper(),
+ * and increments the device open count. If the open count was previously zero,
+ * i.e., it's the first time the device is opened, then calls setup().
+ */
+int drm_open(struct inode *inode, struct file *filp)
+{
+       struct drm_device *dev = NULL;
+       int minor_id = iminor(inode);
+       struct drm_minor *minor;
+       int retcode = 0;
+
+       minor = idr_find(&drm_minors_idr, minor_id);
+       if (!minor)
+               return -ENODEV;
+
+       if (!(dev = minor->dev))
+               return -ENODEV;
+
+       retcode = drm_open_helper(inode, filp, dev);
+       if (!retcode) {
+               atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
+               spin_lock(&dev->count_lock);
+               if (!dev->open_count++) {
+                       spin_unlock(&dev->count_lock);
+                       retcode = drm_setup(dev);
+                       goto out;
+               }
+               spin_unlock(&dev->count_lock);
+       }
+out:
+       mutex_lock(&dev->struct_mutex);
+       if (minor->type == DRM_MINOR_LEGACY) {
+               BUG_ON((dev->dev_mapping != NULL) &&
+                       (dev->dev_mapping != inode->i_mapping));
+               if (dev->dev_mapping == NULL)
+                       dev->dev_mapping = inode->i_mapping;
+       }
+       mutex_unlock(&dev->struct_mutex);
+
+       return retcode;
+}
+EXPORT_SYMBOL(drm_open);
+
+/**
+ * File \c open operation.
+ *
+ * \param inode device inode.
+ * \param filp file pointer.
+ *
+ * Puts the dev->fops corresponding to the device minor number into
+ * \p filp, call the \c open method, and restore the file operations.
+ */
+int drm_stub_open(struct inode *inode, struct file *filp)
+{
+       struct drm_device *dev = NULL;
+       struct drm_minor *minor;
+       int minor_id = iminor(inode);
+       int err = -ENODEV;
+       const struct file_operations *old_fops;
+
+       DRM_DEBUG("\n");
+
+       /* BKL pushdown: note that nothing else serializes idr_find() */
+       lock_kernel();
+       minor = idr_find(&drm_minors_idr, minor_id);
+       if (!minor)
+               goto out;
+
+       if (!(dev = minor->dev))
+               goto out;
+
+       old_fops = filp->f_op;
+       filp->f_op = fops_get(&dev->driver->fops);
+       if (filp->f_op == NULL) {
+               filp->f_op = old_fops;
+               goto out;
+       }
+       if (filp->f_op->open && (err = filp->f_op->open(inode, filp))) {
+               fops_put(filp->f_op);
+               filp->f_op = fops_get(old_fops);
+       }
+       fops_put(old_fops);
+
+out:
+       unlock_kernel();
+       return err;
+}
+
+/**
+ * Check whether DRI will run on this CPU.
+ *
+ * \return non-zero if the DRI will run on this CPU, or zero otherwise.
+ */
+static int drm_cpu_valid(void)
+{
+#if defined(__i386__)
+       if (boot_cpu_data.x86 == 3)
+               return 0;       /* No cmpxchg on a 386 */
+#endif
+#if defined(__sparc__) && !defined(__sparc_v9__)
+       return 0;               /* No cmpxchg before v9 sparc. */
+#endif
+       return 1;
+}
+
+/**
+ * Called whenever a process opens /dev/drm.
+ *
+ * \param inode device inode.
+ * \param filp file pointer.
+ * \param dev device.
+ * \return zero on success or a negative number on failure.
+ *
+ * Creates and initializes a drm_file structure for the file private data in \p
+ * filp and add it into the double linked list in \p dev.
+ */
+static int drm_open_helper(struct inode *inode, struct file *filp,
+                          struct drm_device * dev)
+{
+       int minor_id = iminor(inode);
+       struct drm_file *priv;
+       int ret;
+
+       if (filp->f_flags & O_EXCL)
+               return -EBUSY;  /* No exclusive opens */
+       if (!drm_cpu_valid())
+               return -EINVAL;
+
+       DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id);
+
+       priv = kmalloc(sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       memset(priv, 0, sizeof(*priv));
+       filp->private_data = priv;
+       priv->filp = filp;
+       priv->uid = current_euid();
+       priv->pid = task_pid_nr(current);
+       priv->minor = idr_find(&drm_minors_idr, minor_id);
+       priv->ioctl_count = 0;
+       /* for compatibility root is always authenticated */
+       priv->authenticated = capable(CAP_SYS_ADMIN);
+       priv->lock_count = 0;
+
+       INIT_LIST_HEAD(&priv->lhead);
+       INIT_LIST_HEAD(&priv->fbs);
+       INIT_LIST_HEAD(&priv->event_list);
+       init_waitqueue_head(&priv->event_wait);
+       priv->event_space = 4096; /* set aside 4k for event buffer */
+
+       if (dev->driver->driver_features & DRIVER_GEM)
+               drm_gem_open(dev, priv);
+
+       if (dev->driver->open) {
+               ret = dev->driver->open(dev, priv);
+               if (ret < 0)
+                       goto out_free;
+       }
+
+
+       /* if there is no current master make this fd it */
+       mutex_lock(&dev->struct_mutex);
+       if (!priv->minor->master) {
+               /* create a new master */
+               priv->minor->master = drm_master_create(priv->minor);
+               if (!priv->minor->master) {
+                       mutex_unlock(&dev->struct_mutex);
+                       ret = -ENOMEM;
+                       goto out_free;
+               }
+
+               priv->is_master = 1;
+               /* take another reference for the copy in the local file priv */
+               priv->master = drm_master_get(priv->minor->master);
+
+               priv->authenticated = 1;
+
+               mutex_unlock(&dev->struct_mutex);
+               if (dev->driver->master_create) {
+                       ret = dev->driver->master_create(dev, priv->master);
+                       if (ret) {
+                               mutex_lock(&dev->struct_mutex);
+                               /* drop both references if this fails */
+                               drm_master_put(&priv->minor->master);
+                               drm_master_put(&priv->master);
+                               mutex_unlock(&dev->struct_mutex);
+                               goto out_free;
+                       }
+               }
+               mutex_lock(&dev->struct_mutex);
+               if (dev->driver->master_set) {
+                       ret = dev->driver->master_set(dev, priv, true);
+                       if (ret) {
+                               /* drop both references if this fails */
+                               drm_master_put(&priv->minor->master);
+                               drm_master_put(&priv->master);
+                               mutex_unlock(&dev->struct_mutex);
+                               goto out_free;
+                       }
+               }
+               mutex_unlock(&dev->struct_mutex);
+       } else {
+               /* get a reference to the master */
+               priv->master = drm_master_get(priv->minor->master);
+               mutex_unlock(&dev->struct_mutex);
+       }
+
+       mutex_lock(&dev->struct_mutex);
+       list_add(&priv->lhead, &dev->filelist);
+       mutex_unlock(&dev->struct_mutex);
+
+#ifdef __alpha__
+       /*
+        * Default the hose
+        */
+       if (!dev->hose) {
+               struct pci_dev *pci_dev;
+               pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
+               if (pci_dev) {
+                       dev->hose = pci_dev->sysdata;
+                       pci_dev_put(pci_dev);
+               }
+               if (!dev->hose) {
+                       struct pci_bus *b = pci_bus_b(pci_root_buses.next);
+                       if (b)
+                               dev->hose = b->sysdata;
+               }
+       }
+#endif
+
+       return 0;
+      out_free:
+       kfree(priv);
+       filp->private_data = NULL;
+       return ret;
+}
+
+/** No-op. */
+int drm_fasync(int fd, struct file *filp, int on)
+{
+       struct drm_file *priv = filp->private_data;
+       struct drm_device *dev = priv->minor->dev;
+
+       DRM_DEBUG("fd = %d, device = 0x%lx\n", fd,
+                 (long)old_encode_dev(priv->minor->device));
+       return fasync_helper(fd, filp, on, &dev->buf_async);
+}
+EXPORT_SYMBOL(drm_fasync);
+
+/*
+ * Reclaim locked buffers; note that this may be a bad idea if the current
+ * context doesn't have the hw lock...
+ */
+static void drm_reclaim_locked_buffers(struct drm_device *dev, struct file *f)
+{
+       struct drm_file *file_priv = f->private_data;
+
+       if (drm_i_have_hw_lock(dev, file_priv)) {
+               dev->driver->reclaim_buffers_locked(dev, file_priv);
+       } else {
+               unsigned long _end = jiffies + 3 * DRM_HZ;
+               int locked = 0;
+
+               drm_idlelock_take(&file_priv->master->lock);
+
+               /*
+                * Wait for a while.
+                */
+               do {
+                       spin_lock_bh(&file_priv->master->lock.spinlock);
+                       locked = file_priv->master->lock.idle_has_lock;
+                       spin_unlock_bh(&file_priv->master->lock.spinlock);
+                       if (locked)
+                               break;
+                       schedule();
+               } while (!time_after_eq(jiffies, _end));
+
+               if (!locked) {
+                       DRM_ERROR("reclaim_buffers_locked() deadlock. Please rework this\n"
+                                 "\tdriver to use reclaim_buffers_idlelocked() instead.\n"
+                                 "\tI will go on reclaiming the buffers anyway.\n");
+               }
+
+               dev->driver->reclaim_buffers_locked(dev, file_priv);
+               drm_idlelock_release(&file_priv->master->lock);
+       }
+}
+
+static void drm_master_release(struct drm_device *dev, struct file *filp)
+{
+       struct drm_file *file_priv = filp->private_data;
+
+       if (dev->driver->reclaim_buffers_locked &&
+           file_priv->master->lock.hw_lock)
+               drm_reclaim_locked_buffers(dev, filp);
+
+       if (dev->driver->reclaim_buffers_idlelocked &&
+           file_priv->master->lock.hw_lock) {
+               drm_idlelock_take(&file_priv->master->lock);
+               dev->driver->reclaim_buffers_idlelocked(dev, file_priv);
+               drm_idlelock_release(&file_priv->master->lock);
+       }
+
+
+       if (drm_i_have_hw_lock(dev, file_priv)) {
+               DRM_DEBUG("File %p released, freeing lock for context %d\n",
+                         filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
+               drm_lock_free(&file_priv->master->lock,
+                             _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
+       }
+
+       if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
+           !dev->driver->reclaim_buffers_locked) {
+               dev->driver->reclaim_buffers(dev, file_priv);
+       }
+}
+
+static void drm_events_release(struct drm_file *file_priv)
+{
+       struct drm_device *dev = file_priv->minor->dev;
+       struct drm_pending_event *e, *et;
+       struct drm_pending_vblank_event *v, *vt;
+       unsigned long flags;
+
+       spin_lock_irqsave(&dev->event_lock, flags);
+
+       /* Remove pending flips */
+       list_for_each_entry_safe(v, vt, &dev->vblank_event_list, base.link)
+               if (v->base.file_priv == file_priv) {
+                       list_del(&v->base.link);
+                       drm_vblank_put(dev, v->pipe);
+                       v->base.destroy(&v->base);
+               }
+
+       /* Remove unconsumed events */
+       list_for_each_entry_safe(e, et, &file_priv->event_list, link)
+               e->destroy(e);
+
+       spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+/**
+ * Release file.
+ *
+ * \param inode device inode
+ * \param file_priv DRM file private.
+ * \return zero on success or a negative number on failure.
+ *
+ * If the hardware lock is held then free it, and take it again for the kernel
+ * context since it's necessary to reclaim buffers. Unlink the file private
+ * data from its list and free it. Decreases the open count and if it reaches
+ * zero calls drm_lastclose().
+ */
+int drm_release(struct inode *inode, struct file *filp)
+{
+       struct drm_file *file_priv = filp->private_data;
+       struct drm_device *dev = file_priv->minor->dev;
+       int retcode = 0;
+
+       lock_kernel();
+
+       DRM_DEBUG("open_count = %d\n", dev->open_count);
+
+       if (dev->driver->preclose)
+               dev->driver->preclose(dev, file_priv);
+
+       /* ========================================================
+        * Begin inline drm_release
+        */
+
+       DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
+                 task_pid_nr(current),
+                 (long)old_encode_dev(file_priv->minor->device),
+                 dev->open_count);
+
+       /* if the master has gone away we can't do anything with the lock */
+       if (file_priv->minor->master)
+               drm_master_release(dev, filp);
+
+       drm_events_release(file_priv);
+
+       if (dev->driver->driver_features & DRIVER_GEM)
+               drm_gem_release(dev, file_priv);
+
+       if (dev->driver->driver_features & DRIVER_MODESET)
+               drm_fb_release(file_priv);
+
+       mutex_lock(&dev->ctxlist_mutex);
+       if (!list_empty(&dev->ctxlist)) {
+               struct drm_ctx_list *pos, *n;
+
+               list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
+                       if (pos->tag == file_priv &&
+                           pos->handle != DRM_KERNEL_CONTEXT) {
+                               if (dev->driver->context_dtor)
+                                       dev->driver->context_dtor(dev,
+                                                                 pos->handle);
+
+                               drm_ctxbitmap_free(dev, pos->handle);
+
+                               list_del(&pos->head);
+                               kfree(pos);
+                               --dev->ctx_count;
+                       }
+               }
+       }
+       mutex_unlock(&dev->ctxlist_mutex);
+
+       mutex_lock(&dev->struct_mutex);
+
+       if (file_priv->is_master) {
+               struct drm_master *master = file_priv->master;
+               struct drm_file *temp;
+               list_for_each_entry(temp, &dev->filelist, lhead) {
+                       if ((temp->master == file_priv->master) &&
+                           (temp != file_priv))
+                               temp->authenticated = 0;
+               }
+
+               /**
+                * Since the master is disappearing, so is the
+                * possibility to lock.
+                */
+
+               if (master->lock.hw_lock) {
+                       if (dev->sigdata.lock == master->lock.hw_lock)
+                               dev->sigdata.lock = NULL;
+                       master->lock.hw_lock = NULL;
+                       master->lock.file_priv = NULL;
+                       wake_up_interruptible_all(&master->lock.lock_queue);
+               }
+
+               if (file_priv->minor->master == file_priv->master) {
+                       /* drop the reference held by the minor */
+                       if (dev->driver->master_drop)
+                               dev->driver->master_drop(dev, file_priv, true);
+                       drm_master_put(&file_priv->minor->master);
+               }
+       }
+
+       /* drop the reference held by the file priv */
+       drm_master_put(&file_priv->master);
+       file_priv->is_master = 0;
+       list_del(&file_priv->lhead);
+       mutex_unlock(&dev->struct_mutex);
+
+       if (dev->driver->postclose)
+               dev->driver->postclose(dev, file_priv);
+       kfree(file_priv);
+
+       /* ========================================================
+        * End inline drm_release
+        */
+
+       atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
+       spin_lock(&dev->count_lock);
+       if (!--dev->open_count) {
+               if (atomic_read(&dev->ioctl_count)) {
+                       DRM_ERROR("Device busy: %d\n",
+                                 atomic_read(&dev->ioctl_count));
+                       spin_unlock(&dev->count_lock);
+                       unlock_kernel();
+                       return -EBUSY;
+               }
+               spin_unlock(&dev->count_lock);
+               unlock_kernel();
+               return drm_lastclose(dev);
+       }
+       spin_unlock(&dev->count_lock);
+
+       unlock_kernel();
+
+       return retcode;
+}
+EXPORT_SYMBOL(drm_release);
+
+static bool
+drm_dequeue_event(struct drm_file *file_priv,
+                 size_t total, size_t max, struct drm_pending_event **out)
+{
+       struct drm_device *dev = file_priv->minor->dev;
+       struct drm_pending_event *e;
+       unsigned long flags;
+       bool ret = false;
+
+       spin_lock_irqsave(&dev->event_lock, flags);
+
+       *out = NULL;
+       if (list_empty(&file_priv->event_list))
+               goto out;
+       e = list_first_entry(&file_priv->event_list,
+                            struct drm_pending_event, link);
+       if (e->event->length + total > max)
+               goto out;
+
+       file_priv->event_space += e->event->length;
+       list_del(&e->link);
+       *out = e;
+       ret = true;
+
+out:
+       spin_unlock_irqrestore(&dev->event_lock, flags);
+       return ret;
+}
+
+ssize_t drm_read(struct file *filp, char __user *buffer,
+                size_t count, loff_t *offset)
+{
+       struct drm_file *file_priv = filp->private_data;
+       struct drm_pending_event *e;
+       size_t total;
+       ssize_t ret;
+
+       ret = wait_event_interruptible(file_priv->event_wait,
+                                      !list_empty(&file_priv->event_list));
+       if (ret < 0)
+               return ret;
+
+       total = 0;
+       while (drm_dequeue_event(file_priv, total, count, &e)) {
+               if (copy_to_user(buffer + total,
+                                e->event, e->event->length)) {
+                       total = -EFAULT;
+                       break;
+               }
+
+               total += e->event->length;
+               e->destroy(e);
+       }
+
+       return total;
+}
+EXPORT_SYMBOL(drm_read);
+
+unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)
+{
+       struct drm_file *file_priv = filp->private_data;
+       unsigned int mask = 0;
+
+       poll_wait(filp, &file_priv->event_wait, wait);
+
+       if (!list_empty(&file_priv->event_list))
+               mask |= POLLIN | POLLRDNORM;
+
+       return mask;
+}
+EXPORT_SYMBOL(drm_poll);
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_gem.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_gem.c
new file mode 100644 (file)
index 0000000..e9dbb48
--- /dev/null
@@ -0,0 +1,573 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/module.h>
+#include <linux/mman.h>
+#include <linux/pagemap.h>
+#include "drmP.h"
+
+/** @file drm_gem.c
+ *
+ * This file provides some of the base ioctls and library routines for
+ * the graphics memory manager implemented by each device driver.
+ *
+ * Because various devices have different requirements in terms of
+ * synchronization and migration strategies, implementing that is left up to
+ * the driver, and all that the general API provides should be generic --
+ * allocating objects, reading/writing data with the cpu, freeing objects.
+ * Even there, platform-dependent optimizations for reading/writing data with
+ * the CPU mean we'll likely hook those out to driver-specific calls.  However,
+ * the DRI2 implementation wants to have at least allocate/mmap be generic.
+ *
+ * The goal was to have swap-backed object allocation managed through
+ * struct file.  However, file descriptors as handles to a struct file have
+ * two major failings:
+ * - Process limits prevent more than 1024 or so being used at a time by
+ *   default.
+ * - Inability to allocate high fds will aggravate the X Server's select()
+ *   handling, and likely that of many GL client applications as well.
+ *
+ * This led to a plan of using our own integer IDs (called handles, following
+ * DRM terminology) to mimic fds, and implement the fd syscalls we need as
+ * ioctls.  The objects themselves will still include the struct file so
+ * that we can transition to fds if the required kernel infrastructure shows
+ * up at a later date, and as our interface with shmfs for memory allocation.
+ */
+
+/*
+ * We make up offsets for buffer objects so we can recognize them at
+ * mmap time.
+ */
+#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
+#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
+
+/**
+ * Initialize the GEM device fields
+ */
+
+int
+drm_gem_init(struct drm_device *dev)
+{
+       struct drm_gem_mm *mm;
+
+       spin_lock_init(&dev->object_name_lock);
+       idr_init(&dev->object_name_idr);
+       atomic_set(&dev->object_count, 0);
+       atomic_set(&dev->object_memory, 0);
+       atomic_set(&dev->pin_count, 0);
+       atomic_set(&dev->pin_memory, 0);
+       atomic_set(&dev->gtt_count, 0);
+       atomic_set(&dev->gtt_memory, 0);
+
+       mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
+       if (!mm) {
+               DRM_ERROR("out of memory\n");
+               return -ENOMEM;
+       }
+
+       dev->mm_private = mm;
+
+       if (drm_ht_create(&mm->offset_hash, 19)) {
+               kfree(mm);
+               return -ENOMEM;
+       }
+
+       if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
+                       DRM_FILE_PAGE_OFFSET_SIZE)) {
+               drm_ht_remove(&mm->offset_hash);
+               kfree(mm);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+void
+drm_gem_destroy(struct drm_device *dev)
+{
+       struct drm_gem_mm *mm = dev->mm_private;
+
+       drm_mm_takedown(&mm->offset_manager);
+       drm_ht_remove(&mm->offset_hash);
+       kfree(mm);
+       dev->mm_private = NULL;
+}
+
+/**
+ * Allocate a GEM object of the specified size with shmfs backing store
+ */
+struct drm_gem_object *
+drm_gem_object_alloc(struct drm_device *dev, size_t size)
+{
+       struct drm_gem_object *obj;
+
+       BUG_ON((size & (PAGE_SIZE - 1)) != 0);
+
+       obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+       if (!obj)
+               goto free;
+
+       obj->dev = dev;
+       obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
+       if (IS_ERR(obj->filp))
+               goto free;
+
+       /* Basically we want to disable the OOM killer and handle ENOMEM
+        * ourselves by sacrificing pages from cached buffers.
+        * XXX shmem_file_[gs]et_gfp_mask()
+        */
+       mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping,
+                            GFP_HIGHUSER |
+                            __GFP_COLD |
+                            __GFP_FS |
+                            __GFP_RECLAIMABLE |
+                            __GFP_NORETRY |
+                            __GFP_NOWARN |
+                            __GFP_NOMEMALLOC);
+
+       kref_init(&obj->refcount);
+       kref_init(&obj->handlecount);
+       obj->size = size;
+       if (dev->driver->gem_init_object != NULL &&
+           dev->driver->gem_init_object(obj) != 0) {
+               goto fput;
+       }
+       atomic_inc(&dev->object_count);
+       atomic_add(obj->size, &dev->object_memory);
+       return obj;
+fput:
+       fput(obj->filp);
+free:
+       kfree(obj);
+       return NULL;
+}
+EXPORT_SYMBOL(drm_gem_object_alloc);
+
+/**
+ * Removes the mapping from handle to filp for this object.
+ */
+static int
+drm_gem_handle_delete(struct drm_file *filp, u32 handle)
+{
+       struct drm_device *dev;
+       struct drm_gem_object *obj;
+
+       /* This is gross. The idr system doesn't let us try a delete and
+        * return an error code.  It just spews if you fail at deleting.
+        * So, we have to grab a lock around finding the object and then
+        * doing the delete on it and dropping the refcount, or the user
+        * could race us to double-decrement the refcount and cause a
+        * use-after-free later.  Given the frequency of our handle lookups,
+        * we may want to use ida for number allocation and a hash table
+        * for the pointers, anyway.
+        */
+       spin_lock(&filp->table_lock);
+
+       /* Check if we currently have a reference on the object */
+       obj = idr_find(&filp->object_idr, handle);
+       if (obj == NULL) {
+               spin_unlock(&filp->table_lock);
+               return -EINVAL;
+       }
+       dev = obj->dev;
+
+       /* Release reference and decrement refcount. */
+       idr_remove(&filp->object_idr, handle);
+       spin_unlock(&filp->table_lock);
+
+       mutex_lock(&dev->struct_mutex);
+       drm_gem_object_handle_unreference(obj);
+       mutex_unlock(&dev->struct_mutex);
+
+       return 0;
+}
+
+/**
+ * Create a handle for this object. This adds a handle reference
+ * to the object, which includes a regular reference count. Callers
+ * will likely want to dereference the object afterwards.
+ */
+int
+drm_gem_handle_create(struct drm_file *file_priv,
+                      struct drm_gem_object *obj,
+                      u32 *handlep)
+{
+       int     ret;
+
+       /*
+        * Get the user-visible handle using idr.
+        */
+again:
+       /* ensure there is space available to allocate a handle */
+       if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
+               return -ENOMEM;
+
+       /* do the allocation under our spinlock */
+       spin_lock(&file_priv->table_lock);
+       ret = idr_get_new_above(&file_priv->object_idr, obj, 1, (int *)handlep);
+       spin_unlock(&file_priv->table_lock);
+       if (ret == -EAGAIN)
+               goto again;
+
+       if (ret != 0)
+               return ret;
+
+       drm_gem_object_handle_reference(obj);
+       return 0;
+}
+EXPORT_SYMBOL(drm_gem_handle_create);
+
+/** Returns a reference to the object named by the handle. */
+struct drm_gem_object *
+drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
+                     u32 handle)
+{
+       struct drm_gem_object *obj;
+
+       spin_lock(&filp->table_lock);
+
+       /* Check if we currently have a reference on the object */
+       obj = idr_find(&filp->object_idr, handle);
+       if (obj == NULL) {
+               spin_unlock(&filp->table_lock);
+               return NULL;
+       }
+
+       drm_gem_object_reference(obj);
+
+       spin_unlock(&filp->table_lock);
+
+       return obj;
+}
+EXPORT_SYMBOL(drm_gem_object_lookup);
+
+/**
+ * Releases the handle to an mm object.
+ */
+int
+drm_gem_close_ioctl(struct drm_device *dev, void *data,
+                   struct drm_file *file_priv)
+{
+       struct drm_gem_close *args = data;
+       int ret;
+
+       if (!(dev->driver->driver_features & DRIVER_GEM))
+               return -ENODEV;
+
+       ret = drm_gem_handle_delete(file_priv, args->handle);
+
+       return ret;
+}
+
+/**
+ * Create a global name for an object, returning the name.
+ *
+ * Note that the name does not hold a reference; when the object
+ * is freed, the name goes away.
+ */
+int
+drm_gem_flink_ioctl(struct drm_device *dev, void *data,
+                   struct drm_file *file_priv)
+{
+       struct drm_gem_flink *args = data;
+       struct drm_gem_object *obj;
+       int ret;
+
+       if (!(dev->driver->driver_features & DRIVER_GEM))
+               return -ENODEV;
+
+       obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+       if (obj == NULL)
+               return -EBADF;
+
+again:
+       if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) {
+               ret = -ENOMEM;
+               goto err;
+       }
+
+       spin_lock(&dev->object_name_lock);
+       if (!obj->name) {
+               ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
+                                       &obj->name);
+               args->name = (uint64_t) obj->name;
+               spin_unlock(&dev->object_name_lock);
+
+               if (ret == -EAGAIN)
+                       goto again;
+
+               if (ret != 0)
+                       goto err;
+
+               /* Allocate a reference for the name table.  */
+               drm_gem_object_reference(obj);
+       } else {
+               args->name = (uint64_t) obj->name;
+               spin_unlock(&dev->object_name_lock);
+               ret = 0;
+       }
+
+err:
+       mutex_lock(&dev->struct_mutex);
+       drm_gem_object_unreference(obj);
+       mutex_unlock(&dev->struct_mutex);
+       return ret;
+}
+
+/**
+ * Open an object using the global name, returning a handle and the size.
+ *
+ * This handle (of course) holds a reference to the object, so the object
+ * will not go away until the handle is deleted.
+ */
+int
+drm_gem_open_ioctl(struct drm_device *dev, void *data,
+                  struct drm_file *file_priv)
+{
+       struct drm_gem_open *args = data;
+       struct drm_gem_object *obj;
+       int ret;
+       u32 handle;
+
+       if (!(dev->driver->driver_features & DRIVER_GEM))
+               return -ENODEV;
+
+       spin_lock(&dev->object_name_lock);
+       obj = idr_find(&dev->object_name_idr, (int) args->name);
+       if (obj)
+               drm_gem_object_reference(obj);
+       spin_unlock(&dev->object_name_lock);
+       if (!obj)
+               return -ENOENT;
+
+       ret = drm_gem_handle_create(file_priv, obj, &handle);
+       mutex_lock(&dev->struct_mutex);
+       drm_gem_object_unreference(obj);
+       mutex_unlock(&dev->struct_mutex);
+       if (ret)
+               return ret;
+
+       args->handle = handle;
+       args->size = obj->size;
+
+       return 0;
+}
+
+/**
+ * Called at device open time, sets up the structure for handling refcounting
+ * of mm objects.
+ */
+void
+drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
+{
+       idr_init(&file_private->object_idr);
+       spin_lock_init(&file_private->table_lock);
+}
+
+/**
+ * Called at device close to release the file's
+ * handle references on objects.
+ */
+static int
+drm_gem_object_release_handle(int id, void *ptr, void *data)
+{
+       struct drm_gem_object *obj = ptr;
+
+       drm_gem_object_handle_unreference(obj);
+
+       return 0;
+}
+
+/**
+ * Called at close time when the filp is going away.
+ *
+ * Releases any remaining references on objects by this filp.
+ */
+void
+drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
+{
+       mutex_lock(&dev->struct_mutex);
+       idr_for_each(&file_private->object_idr,
+                    &drm_gem_object_release_handle, NULL);
+
+       idr_destroy(&file_private->object_idr);
+       mutex_unlock(&dev->struct_mutex);
+}
+
+/**
+ * Called after the last reference to the object has been lost.
+ *
+ * Frees the object
+ */
+void
+drm_gem_object_free(struct kref *kref)
+{
+       struct drm_gem_object *obj = (struct drm_gem_object *) kref;
+       struct drm_device *dev = obj->dev;
+
+       BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+
+       if (dev->driver->gem_free_object != NULL)
+               dev->driver->gem_free_object(obj);
+
+       fput(obj->filp);
+       atomic_dec(&dev->object_count);
+       atomic_sub(obj->size, &dev->object_memory);
+       kfree(obj);
+}
+EXPORT_SYMBOL(drm_gem_object_free);
+
+/**
+ * Called after the last handle to the object has been closed
+ *
+ * Removes any name for the object. Note that this must be
+ * called before drm_gem_object_free or we'll be touching
+ * freed memory
+ */
+void
+drm_gem_object_handle_free(struct kref *kref)
+{
+       struct drm_gem_object *obj = container_of(kref,
+                                                 struct drm_gem_object,
+                                                 handlecount);
+       struct drm_device *dev = obj->dev;
+
+       /* Remove any name for this object */
+       spin_lock(&dev->object_name_lock);
+       if (obj->name) {
+               idr_remove(&dev->object_name_idr, obj->name);
+               obj->name = 0;
+               spin_unlock(&dev->object_name_lock);
+               /*
+                * The object name held a reference to this object, drop
+                * that now.
+                */
+               drm_gem_object_unreference(obj);
+       } else
+               spin_unlock(&dev->object_name_lock);
+
+}
+EXPORT_SYMBOL(drm_gem_object_handle_free);
+
+void drm_gem_vm_open(struct vm_area_struct *vma)
+{
+       struct drm_gem_object *obj = vma->vm_private_data;
+
+       drm_gem_object_reference(obj);
+}
+EXPORT_SYMBOL(drm_gem_vm_open);
+
+void drm_gem_vm_close(struct vm_area_struct *vma)
+{
+       struct drm_gem_object *obj = vma->vm_private_data;
+       struct drm_device *dev = obj->dev;
+
+       mutex_lock(&dev->struct_mutex);
+       drm_gem_object_unreference(obj);
+       mutex_unlock(&dev->struct_mutex);
+}
+EXPORT_SYMBOL(drm_gem_vm_close);
+
+
+/**
+ * drm_gem_mmap - memory map routine for GEM objects
+ * @filp: DRM file pointer
+ * @vma: VMA for the area to be mapped
+ *
+ * If a driver supports GEM object mapping, mmap calls on the DRM file
+ * descriptor will end up here.
+ *
+ * If we find the object based on the offset passed in (vma->vm_pgoff will
+ * contain the fake offset we created when the GTT map ioctl was called on
+ * the object), we set up the driver fault handler so that any accesses
+ * to the object can be trapped, to perform migration, GTT binding, surface
+ * register allocation, or performance monitoring.
+ */
+int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+       struct drm_file *priv = filp->private_data;
+       struct drm_device *dev = priv->minor->dev;
+       struct drm_gem_mm *mm = dev->mm_private;
+       struct drm_local_map *map = NULL;
+       struct drm_gem_object *obj;
+       struct drm_hash_item *hash;
+       int ret = 0;
+
+       mutex_lock(&dev->struct_mutex);
+
+       if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
+               mutex_unlock(&dev->struct_mutex);
+               return drm_mmap(filp, vma);
+       }
+
+       map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
+       if (!map ||
+           ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
+               ret =  -EPERM;
+               goto out_unlock;
+       }
+
+       /* Check for valid size. */
+       if (map->size < vma->vm_end - vma->vm_start) {
+               ret = -EINVAL;
+               goto out_unlock;
+       }
+
+       obj = map->handle;
+       if (!obj->dev->driver->gem_vm_ops) {
+               ret = -EINVAL;
+               goto out_unlock;
+       }
+
+       vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
+       vma->vm_ops = obj->dev->driver->gem_vm_ops;
+       vma->vm_private_data = map->handle;
+       vma->vm_page_prot =  pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+
+       /* Take a ref for this mapping of the object, so that the fault
+        * handler can dereference the mmap offset's pointer to the object.
+        * This reference is cleaned up by the corresponding vm_close
+        * (which should happen whether the vma was created by this call, or
+        * by a vm_open due to mremap or partial unmap or whatever).
+        */
+       drm_gem_object_reference(obj);
+
+       vma->vm_file = filp;    /* Needed for drm_vm_open() */
+       drm_vm_open_locked(vma);
+
+out_unlock:
+       mutex_unlock(&dev->struct_mutex);
+
+       return ret;
+}
+EXPORT_SYMBOL(drm_gem_mmap);
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_hashtab.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_hashtab.c
new file mode 100644 (file)
index 0000000..f36b21c
--- /dev/null
@@ -0,0 +1,206 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ **************************************************************************/
+/*
+ * Simple open hash tab implementation.
+ *
+ * Authors:
+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include "drmP.h"
+#include "drm_hashtab.h"
+#include <linux/hash.h>
+
+int drm_ht_create(struct drm_open_hash *ht, unsigned int order)
+{
+       unsigned int i;
+
+       ht->size = 1 << order;
+       ht->order = order;
+       ht->fill = 0;
+       ht->table = NULL;
+       ht->use_vmalloc = ((ht->size * sizeof(*ht->table)) > PAGE_SIZE);
+       if (!ht->use_vmalloc) {
+               ht->table = kcalloc(ht->size, sizeof(*ht->table), GFP_KERNEL);
+       }
+       if (!ht->table) {
+               ht->use_vmalloc = 1;
+               ht->table = vmalloc(ht->size*sizeof(*ht->table));
+       }
+       if (!ht->table) {
+               DRM_ERROR("Out of memory for hash table\n");
+               return -ENOMEM;
+       }
+       for (i=0; i< ht->size; ++i) {
+               INIT_HLIST_HEAD(&ht->table[i]);
+       }
+       return 0;
+}
+EXPORT_SYMBOL(drm_ht_create);
+
+void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
+{
+       struct drm_hash_item *entry;
+       struct hlist_head *h_list;
+       struct hlist_node *list;
+       unsigned int hashed_key;
+       int count = 0;
+
+       hashed_key = hash_long(key, ht->order);
+       DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key);
+       h_list = &ht->table[hashed_key];
+       hlist_for_each(list, h_list) {
+               entry = hlist_entry(list, struct drm_hash_item, head);
+               DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key);
+       }
+}
+
+static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
+                                         unsigned long key)
+{
+       struct drm_hash_item *entry;
+       struct hlist_head *h_list;
+       struct hlist_node *list;
+       unsigned int hashed_key;
+
+       hashed_key = hash_long(key, ht->order);
+       h_list = &ht->table[hashed_key];
+       hlist_for_each(list, h_list) {
+               entry = hlist_entry(list, struct drm_hash_item, head);
+               if (entry->key == key)
+                       return list;
+               if (entry->key > key)
+                       break;
+       }
+       return NULL;
+}
+
+
+int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
+{
+       struct drm_hash_item *entry;
+       struct hlist_head *h_list;
+       struct hlist_node *list, *parent;
+       unsigned int hashed_key;
+       unsigned long key = item->key;
+
+       hashed_key = hash_long(key, ht->order);
+       h_list = &ht->table[hashed_key];
+       parent = NULL;
+       hlist_for_each(list, h_list) {
+               entry = hlist_entry(list, struct drm_hash_item, head);
+               if (entry->key == key)
+                       return -EINVAL;
+               if (entry->key > key)
+                       break;
+               parent = list;
+       }
+       if (parent) {
+               hlist_add_after(parent, &item->head);
+       } else {
+               hlist_add_head(&item->head, h_list);
+       }
+       return 0;
+}
+EXPORT_SYMBOL(drm_ht_insert_item);
+
+/*
+ * Just insert an item and return any "bits" bit key that hasn't been
+ * used before.
+ */
+int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
+                             unsigned long seed, int bits, int shift,
+                             unsigned long add)
+{
+       int ret;
+       unsigned long mask = (1 << bits) - 1;
+       unsigned long first, unshifted_key;
+
+       unshifted_key = hash_long(seed, bits);
+       first = unshifted_key;
+       do {
+               item->key = (unshifted_key << shift) + add;
+               ret = drm_ht_insert_item(ht, item);
+               if (ret)
+                       unshifted_key = (unshifted_key + 1) & mask;
+       } while(ret && (unshifted_key != first));
+
+       if (ret) {
+               DRM_ERROR("Available key bit space exhausted\n");
+               return -EINVAL;
+       }
+       return 0;
+}
+EXPORT_SYMBOL(drm_ht_just_insert_please);
+
+int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
+                    struct drm_hash_item **item)
+{
+       struct hlist_node *list;
+
+       list = drm_ht_find_key(ht, key);
+       if (!list)
+               return -EINVAL;
+
+       *item = hlist_entry(list, struct drm_hash_item, head);
+       return 0;
+}
+EXPORT_SYMBOL(drm_ht_find_item);
+
+int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
+{
+       struct hlist_node *list;
+
+       list = drm_ht_find_key(ht, key);
+       if (list) {
+               hlist_del_init(list);
+               ht->fill--;
+               return 0;
+       }
+       return -EINVAL;
+}
+
+int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
+{
+       hlist_del_init(&item->head);
+       ht->fill--;
+       return 0;
+}
+EXPORT_SYMBOL(drm_ht_remove_item);
+
+void drm_ht_remove(struct drm_open_hash *ht)
+{
+       if (ht->table) {
+               if (ht->use_vmalloc)
+                       vfree(ht->table);
+               else
+                       kfree(ht->table);
+               ht->table = NULL;
+       }
+}
+EXPORT_SYMBOL(drm_ht_remove);
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_info.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_info.c
new file mode 100644 (file)
index 0000000..f0f6c6b
--- /dev/null
@@ -0,0 +1,328 @@
+/**
+ * \file drm_info.c
+ * DRM info file implementations
+ *
+ * \author Ben Gamari <bgamari@gmail.com>
+ */
+
+/*
+ * Created: Sun Dec 21 13:09:50 2008 by bgamari@gmail.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright 2008 Ben Gamari <bgamari@gmail.com>
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/seq_file.h>
+#include "drmP.h"
+
+/**
+ * Called when "/proc/dri/.../name" is read.
+ *
+ * Prints the device name together with the bus id if available.
+ */
+int drm_name_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_minor *minor = node->minor;
+       struct drm_device *dev = minor->dev;
+       struct drm_master *master = minor->master;
+
+       if (!master)
+               return 0;
+
+       if (master->unique) {
+               seq_printf(m, "%s %s %s\n",
+                          dev->driver->pci_driver.name,
+                          pci_name(dev->pdev), master->unique);
+       } else {
+               seq_printf(m, "%s %s\n", dev->driver->pci_driver.name,
+                          pci_name(dev->pdev));
+       }
+
+       return 0;
+}
+
+/**
+ * Called when "/proc/dri/.../vm" is read.
+ *
+ * Prints information about all mappings in drm_device::maplist.
+ */
+int drm_vm_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct drm_local_map *map;
+       struct drm_map_list *r_list;
+
+       /* Hardcoded from _DRM_FRAME_BUFFER,
+          _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
+          _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
+       const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
+       const char *type;
+       int i;
+
+       mutex_lock(&dev->struct_mutex);
+       seq_printf(m, "slot      offset       size type flags    address mtrr\n\n");
+       i = 0;
+       list_for_each_entry(r_list, &dev->maplist, head) {
+               map = r_list->map;
+               if (!map)
+                       continue;
+               if (map->type < 0 || map->type > 5)
+                       type = "??";
+               else
+                       type = types[map->type];
+
+               seq_printf(m, "%4d 0x%016llx 0x%08lx %4.4s  0x%02x 0x%08lx ",
+                          i,
+                          (unsigned long long)map->offset,
+                          map->size, type, map->flags,
+                          (unsigned long) r_list->user_token);
+               if (map->mtrr < 0)
+                       seq_printf(m, "none\n");
+               else
+                       seq_printf(m, "%4d\n", map->mtrr);
+               i++;
+       }
+       mutex_unlock(&dev->struct_mutex);
+       return 0;
+}
+
+/**
+ * Called when "/proc/dri/.../queues" is read.
+ */
+int drm_queues_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       int i;
+       struct drm_queue *q;
+
+       mutex_lock(&dev->struct_mutex);
+       seq_printf(m, "  ctx/flags   use   fin"
+                  "   blk/rw/rwf  wait    flushed         queued"
+                  "      locks\n\n");
+       for (i = 0; i < dev->queue_count; i++) {
+               q = dev->queuelist[i];
+               atomic_inc(&q->use_count);
+               seq_printf(m,   "%5d/0x%03x %5d %5d"
+                          " %5d/%c%c/%c%c%c %5Zd\n",
+                          i,
+                          q->flags,
+                          atomic_read(&q->use_count),
+                          atomic_read(&q->finalization),
+                          atomic_read(&q->block_count),
+                          atomic_read(&q->block_read) ? 'r' : '-',
+                          atomic_read(&q->block_write) ? 'w' : '-',
+                          waitqueue_active(&q->read_queue) ? 'r' : '-',
+                          waitqueue_active(&q->write_queue) ? 'w' : '-',
+                          waitqueue_active(&q->flush_queue) ? 'f' : '-',
+                          DRM_BUFCOUNT(&q->waitlist));
+               atomic_dec(&q->use_count);
+       }
+       mutex_unlock(&dev->struct_mutex);
+       return 0;
+}
+
+/**
+ * Called when "/proc/dri/.../bufs" is read.
+ */
+int drm_bufs_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct drm_device_dma *dma;
+       int i, seg_pages;
+
+       mutex_lock(&dev->struct_mutex);
+       dma = dev->dma;
+       if (!dma) {
+               mutex_unlock(&dev->struct_mutex);
+               return 0;
+       }
+
+       seq_printf(m, " o     size count  free   segs pages    kB\n\n");
+       for (i = 0; i <= DRM_MAX_ORDER; i++) {
+               if (dma->bufs[i].buf_count) {
+                       seg_pages = dma->bufs[i].seg_count * (1 << dma->bufs[i].page_order);
+                       seq_printf(m, "%2d %8d %5d %5d %5d %5d %5ld\n",
+                                  i,
+                                  dma->bufs[i].buf_size,
+                                  dma->bufs[i].buf_count,
+                                  atomic_read(&dma->bufs[i].freelist.count),
+                                  dma->bufs[i].seg_count,
+                                  seg_pages,
+                                  seg_pages * PAGE_SIZE / 1024);
+               }
+       }
+       seq_printf(m, "\n");
+       for (i = 0; i < dma->buf_count; i++) {
+               if (i && !(i % 32))
+                       seq_printf(m, "\n");
+               seq_printf(m, " %d", dma->buflist[i]->list);
+       }
+       seq_printf(m, "\n");
+       mutex_unlock(&dev->struct_mutex);
+       return 0;
+}
+
+/**
+ * Called when "/proc/dri/.../vblank" is read.
+ */
+int drm_vblank_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       int crtc;
+
+       mutex_lock(&dev->struct_mutex);
+       for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
+               seq_printf(m, "CRTC %d enable:     %d\n",
+                          crtc, atomic_read(&dev->vblank_refcount[crtc]));
+               seq_printf(m, "CRTC %d counter:    %d\n",
+                          crtc, drm_vblank_count(dev, crtc));
+               seq_printf(m, "CRTC %d last wait:  %d\n",
+                          crtc, dev->last_vblank_wait[crtc]);
+               seq_printf(m, "CRTC %d in modeset: %d\n",
+                          crtc, dev->vblank_inmodeset[crtc]);
+       }
+       mutex_unlock(&dev->struct_mutex);
+       return 0;
+}
+
+/**
+ * Called when "/proc/dri/.../clients" is read.
+ *
+ */
+int drm_clients_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct drm_file *priv;
+
+       mutex_lock(&dev->struct_mutex);
+       seq_printf(m, "a dev    pid    uid      magic     ioctls\n\n");
+       list_for_each_entry(priv, &dev->filelist, lhead) {
+               seq_printf(m, "%c %3d %5d %5d %10u %10lu\n",
+                          priv->authenticated ? 'y' : 'n',
+                          priv->minor->index,
+                          priv->pid,
+                          priv->uid, priv->magic, priv->ioctl_count);
+       }
+       mutex_unlock(&dev->struct_mutex);
+       return 0;
+}
+
+
+int drm_gem_one_name_info(int id, void *ptr, void *data)
+{
+       struct drm_gem_object *obj = ptr;
+       struct seq_file *m = data;
+
+       seq_printf(m, "name %d size %zd\n", obj->name, obj->size);
+
+       seq_printf(m, "%6d %8zd %7d %8d\n",
+                  obj->name, obj->size,
+                  atomic_read(&obj->handlecount.refcount),
+                  atomic_read(&obj->refcount.refcount));
+       return 0;
+}
+
+int drm_gem_name_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+
+       seq_printf(m, "  name     size handles refcount\n");
+       idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, m);
+       return 0;
+}
+
+int drm_gem_object_info(struct seq_file *m, void* data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+
+       seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
+       seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
+       seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
+       seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
+       seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
+       seq_printf(m, "%d gtt total\n", dev->gtt_total);
+       return 0;
+}
+
+#if DRM_DEBUG_CODE
+
+int drm_vma_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct drm_vma_entry *pt;
+       struct vm_area_struct *vma;
+#if defined(__i386__)
+       unsigned int pgprot;
+#endif
+
+       mutex_lock(&dev->struct_mutex);
+       seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
+                  atomic_read(&dev->vma_count),
+                  high_memory, (u64)virt_to_phys(high_memory));
+
+       list_for_each_entry(pt, &dev->vmalist, head) {
+               vma = pt->vma;
+               if (!vma)
+                       continue;
+               seq_printf(m,
+                          "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
+                          pt->pid, vma->vm_start, vma->vm_end,
+                          vma->vm_flags & VM_READ ? 'r' : '-',
+                          vma->vm_flags & VM_WRITE ? 'w' : '-',
+                          vma->vm_flags & VM_EXEC ? 'x' : '-',
+                          vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
+                          vma->vm_flags & VM_LOCKED ? 'l' : '-',
+                          vma->vm_flags & VM_IO ? 'i' : '-',
+                          vma->vm_pgoff);
+
+#if defined(__i386__)
+               pgprot = pgprot_val(vma->vm_page_prot);
+               seq_printf(m, " %c%c%c%c%c%c%c%c%c",
+                          pgprot & _PAGE_PRESENT ? 'p' : '-',
+                          pgprot & _PAGE_RW ? 'w' : 'r',
+                          pgprot & _PAGE_USER ? 'u' : 's',
+                          pgprot & _PAGE_PWT ? 't' : 'b',
+                          pgprot & _PAGE_PCD ? 'u' : 'c',
+                          pgprot & _PAGE_ACCESSED ? 'a' : '-',
+                          pgprot & _PAGE_DIRTY ? 'd' : '-',
+                          pgprot & _PAGE_PSE ? 'm' : 'k',
+                          pgprot & _PAGE_GLOBAL ? 'g' : 'l');
+#endif
+               seq_printf(m, "\n");
+       }
+       mutex_unlock(&dev->struct_mutex);
+       return 0;
+}
+
+#endif
+
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_ioctl.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_ioctl.c
new file mode 100644 (file)
index 0000000..9b9ff46
--- /dev/null
@@ -0,0 +1,357 @@
+/**
+ * \file drm_ioctl.c
+ * IOCTL processing for DRM
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Fri Jan  8 09:01:26 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm_core.h"
+
+#include "linux/pci.h"
+
+/**
+ * Get the bus id.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_unique structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Copies the bus id from drm_device::unique into user space.
+ */
+int drm_getunique(struct drm_device *dev, void *data,
+                 struct drm_file *file_priv)
+{
+       struct drm_unique *u = data;
+       struct drm_master *master = file_priv->master;
+
+       if (u->unique_len >= master->unique_len) {
+               if (copy_to_user(u->unique, master->unique, master->unique_len))
+                       return -EFAULT;
+       }
+       u->unique_len = master->unique_len;
+
+       return 0;
+}
+
+/**
+ * Set the bus id.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_unique structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Copies the bus id from userspace into drm_device::unique, and verifies that
+ * it matches the device this DRM is attached to (EINVAL otherwise).  Deprecated
+ * in interface version 1.1 and will return EBUSY when setversion has requested
+ * version 1.1 or greater.
+ */
+int drm_setunique(struct drm_device *dev, void *data,
+                 struct drm_file *file_priv)
+{
+       struct drm_unique *u = data;
+       struct drm_master *master = file_priv->master;
+       int domain, bus, slot, func, ret;
+
+       if (master->unique_len || master->unique)
+               return -EBUSY;
+
+       if (!u->unique_len || u->unique_len > 1024)
+               return -EINVAL;
+
+       master->unique_len = u->unique_len;
+       master->unique_size = u->unique_len + 1;
+       master->unique = kmalloc(master->unique_size, GFP_KERNEL);
+       if (!master->unique)
+               return -ENOMEM;
+       if (copy_from_user(master->unique, u->unique, master->unique_len))
+               return -EFAULT;
+
+       master->unique[master->unique_len] = '\0';
+
+       dev->devname = kmalloc(strlen(dev->driver->pci_driver.name) +
+                              strlen(master->unique) + 2, GFP_KERNEL);
+       if (!dev->devname)
+               return -ENOMEM;
+
+       sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name,
+               master->unique);
+
+       /* Return error if the busid submitted doesn't match the device's actual
+        * busid.
+        */
+       ret = sscanf(master->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
+       if (ret != 3)
+               return -EINVAL;
+       domain = bus >> 8;
+       bus &= 0xff;
+
+       if ((domain != drm_get_pci_domain(dev)) ||
+           (bus != dev->pdev->bus->number) ||
+           (slot != PCI_SLOT(dev->pdev->devfn)) ||
+           (func != PCI_FUNC(dev->pdev->devfn)))
+               return -EINVAL;
+
+       return 0;
+}
+
+static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
+{
+       struct drm_master *master = file_priv->master;
+       int len;
+
+       if (master->unique != NULL)
+               return -EBUSY;
+
+       master->unique_len = 40;
+       master->unique_size = master->unique_len;
+       master->unique = kmalloc(master->unique_size, GFP_KERNEL);
+       if (master->unique == NULL)
+               return -ENOMEM;
+
+       len = snprintf(master->unique, master->unique_len, "pci:%04x:%02x:%02x.%d",
+                      drm_get_pci_domain(dev),
+                      dev->pdev->bus->number,
+                      PCI_SLOT(dev->pdev->devfn),
+                      PCI_FUNC(dev->pdev->devfn));
+       if (len >= master->unique_len)
+               DRM_ERROR("buffer overflow");
+       else
+               master->unique_len = len;
+
+       dev->devname = kmalloc(strlen(dev->driver->pci_driver.name) +
+                              master->unique_len + 2, GFP_KERNEL);
+       if (dev->devname == NULL)
+               return -ENOMEM;
+
+       sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name,
+               master->unique);
+
+       return 0;
+}
+
+/**
+ * Get a mapping information.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_map structure.
+ *
+ * \return zero on success or a negative number on failure.
+ *
+ * Searches for the mapping with the specified offset and copies its information
+ * into userspace
+ */
+int drm_getmap(struct drm_device *dev, void *data,
+              struct drm_file *file_priv)
+{
+       struct drm_map *map = data;
+       struct drm_map_list *r_list = NULL;
+       struct list_head *list;
+       int idx;
+       int i;
+
+       idx = map->offset;
+
+       mutex_lock(&dev->struct_mutex);
+       if (idx < 0) {
+               mutex_unlock(&dev->struct_mutex);
+               return -EINVAL;
+       }
+
+       i = 0;
+       list_for_each(list, &dev->maplist) {
+               if (i == idx) {
+                       r_list = list_entry(list, struct drm_map_list, head);
+                       break;
+               }
+               i++;
+       }
+       if (!r_list || !r_list->map) {
+               mutex_unlock(&dev->struct_mutex);
+               return -EINVAL;
+       }
+
+       map->offset = r_list->map->offset;
+       map->size = r_list->map->size;
+       map->type = r_list->map->type;
+       map->flags = r_list->map->flags;
+       map->handle = (void *)(unsigned long) r_list->user_token;
+       map->mtrr = r_list->map->mtrr;
+       mutex_unlock(&dev->struct_mutex);
+
+       return 0;
+}
+
+/**
+ * Get client information.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_client structure.
+ *
+ * \return zero on success or a negative number on failure.
+ *
+ * Searches for the client with the specified index and copies its information
+ * into userspace
+ */
+int drm_getclient(struct drm_device *dev, void *data,
+                 struct drm_file *file_priv)
+{
+       struct drm_client *client = data;
+       struct drm_file *pt;
+       int idx;
+       int i;
+
+       idx = client->idx;
+       mutex_lock(&dev->struct_mutex);
+
+       i = 0;
+       list_for_each_entry(pt, &dev->filelist, lhead) {
+               if (i++ >= idx) {
+                       client->auth = pt->authenticated;
+                       client->pid = pt->pid;
+                       client->uid = pt->uid;
+                       client->magic = pt->magic;
+                       client->iocs = pt->ioctl_count;
+                       mutex_unlock(&dev->struct_mutex);
+
+                       return 0;
+               }
+       }
+       mutex_unlock(&dev->struct_mutex);
+
+       return -EINVAL;
+}
+
+/**
+ * Get statistics information.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_stats structure.
+ *
+ * \return zero on success or a negative number on failure.
+ */
+int drm_getstats(struct drm_device *dev, void *data,
+                struct drm_file *file_priv)
+{
+       struct drm_stats *stats = data;
+       int i;
+
+       memset(stats, 0, sizeof(*stats));
+
+       mutex_lock(&dev->struct_mutex);
+
+       for (i = 0; i < dev->counters; i++) {
+               if (dev->types[i] == _DRM_STAT_LOCK)
+                       stats->data[i].value =
+                           (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
+               else
+                       stats->data[i].value = atomic_read(&dev->counts[i]);
+               stats->data[i].type = dev->types[i];
+       }
+
+       stats->count = dev->counters;
+
+       mutex_unlock(&dev->struct_mutex);
+
+       return 0;
+}
+
+/**
+ * Setversion ioctl.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_lock structure.
+ * \return zero on success or negative number on failure.
+ *
+ * Sets the requested interface version
+ */
+int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       struct drm_set_version *sv = data;
+       int if_version, retcode = 0;
+
+       if (sv->drm_di_major != -1) {
+               if (sv->drm_di_major != DRM_IF_MAJOR ||
+                   sv->drm_di_minor < 0 || sv->drm_di_minor > DRM_IF_MINOR) {
+                       retcode = -EINVAL;
+                       goto done;
+               }
+               if_version = DRM_IF_VERSION(sv->drm_di_major,
+                                           sv->drm_di_minor);
+               dev->if_version = max(if_version, dev->if_version);
+               if (sv->drm_di_minor >= 1) {
+                       /*
+                        * Version 1.1 includes tying of DRM to specific device
+                        */
+                       drm_set_busid(dev, file_priv);
+               }
+       }
+
+       if (sv->drm_dd_major != -1) {
+               if (sv->drm_dd_major != dev->driver->major ||
+                   sv->drm_dd_minor < 0 || sv->drm_dd_minor >
+                   dev->driver->minor) {
+                       retcode = -EINVAL;
+                       goto done;
+               }
+
+               if (dev->driver->set_version)
+                       dev->driver->set_version(dev, sv);
+       }
+
+done:
+       sv->drm_di_major = DRM_IF_MAJOR;
+       sv->drm_di_minor = DRM_IF_MINOR;
+       sv->drm_dd_major = dev->driver->major;
+       sv->drm_dd_minor = dev->driver->minor;
+
+       return retcode;
+}
+
+/** No-op ioctl. */
+int drm_noop(struct drm_device *dev, void *data,
+            struct drm_file *file_priv)
+{
+       DRM_DEBUG("\n");
+       return 0;
+}
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_irq.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_irq.c
new file mode 100644 (file)
index 0000000..7998ee6
--- /dev/null
@@ -0,0 +1,771 @@
+/**
+ * \file drm_irq.c
+ * IRQ support
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+
+#include <linux/interrupt.h>   /* For task queue support */
+
+#include <linux/vgaarb.h>
+/**
+ * Get interrupt from bus id.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_irq_busid structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Finds the PCI device with the specified bus id and gets its IRQ number.
+ * This IOCTL is deprecated, and will now return EINVAL for any busid not equal
+ * to that of the device that this DRM instance attached to.
+ */
+int drm_irq_by_busid(struct drm_device *dev, void *data,
+                    struct drm_file *file_priv)
+{
+       struct drm_irq_busid *p = data;
+
+       if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+               return -EINVAL;
+
+       if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
+           (p->busnum & 0xff) != dev->pdev->bus->number ||
+           p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
+               return -EINVAL;
+
+       p->irq = dev->pdev->irq;
+
+       DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
+                 p->irq);
+
+       return 0;
+}
+
+static void vblank_disable_fn(unsigned long arg)
+{
+       struct drm_device *dev = (struct drm_device *)arg;
+       unsigned long irqflags;
+       int i;
+
+       if (!dev->vblank_disable_allowed)
+               return;
+
+       for (i = 0; i < dev->num_crtcs; i++) {
+               spin_lock_irqsave(&dev->vbl_lock, irqflags);
+               if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
+                   dev->vblank_enabled[i]) {
+                       DRM_DEBUG("disabling vblank on crtc %d\n", i);
+                       dev->last_vblank[i] =
+                               dev->driver->get_vblank_counter(dev, i);
+                       dev->driver->disable_vblank(dev, i);
+                       dev->vblank_enabled[i] = 0;
+               }
+               spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+       }
+}
+
+void drm_vblank_cleanup(struct drm_device *dev)
+{
+       /* Bail if the driver didn't call drm_vblank_init() */
+       if (dev->num_crtcs == 0)
+               return;
+
+       del_timer(&dev->vblank_disable_timer);
+
+       vblank_disable_fn((unsigned long)dev);
+
+       kfree(dev->vbl_queue);
+       kfree(dev->_vblank_count);
+       kfree(dev->vblank_refcount);
+       kfree(dev->vblank_enabled);
+       kfree(dev->last_vblank);
+       kfree(dev->last_vblank_wait);
+       kfree(dev->vblank_inmodeset);
+
+       dev->num_crtcs = 0;
+}
+
+int drm_vblank_init(struct drm_device *dev, int num_crtcs)
+{
+       int i, ret = -ENOMEM;
+
+       setup_timer(&dev->vblank_disable_timer, vblank_disable_fn,
+                   (unsigned long)dev);
+       spin_lock_init(&dev->vbl_lock);
+       dev->num_crtcs = num_crtcs;
+
+       dev->vbl_queue = kmalloc(sizeof(wait_queue_head_t) * num_crtcs,
+                                GFP_KERNEL);
+       if (!dev->vbl_queue)
+               goto err;
+
+       dev->_vblank_count = kmalloc(sizeof(atomic_t) * num_crtcs, GFP_KERNEL);
+       if (!dev->_vblank_count)
+               goto err;
+
+       dev->vblank_refcount = kmalloc(sizeof(atomic_t) * num_crtcs,
+                                      GFP_KERNEL);
+       if (!dev->vblank_refcount)
+               goto err;
+
+       dev->vblank_enabled = kcalloc(num_crtcs, sizeof(int), GFP_KERNEL);
+       if (!dev->vblank_enabled)
+               goto err;
+
+       dev->last_vblank = kcalloc(num_crtcs, sizeof(u32), GFP_KERNEL);
+       if (!dev->last_vblank)
+               goto err;
+
+       dev->last_vblank_wait = kcalloc(num_crtcs, sizeof(u32), GFP_KERNEL);
+       if (!dev->last_vblank_wait)
+               goto err;
+
+       dev->vblank_inmodeset = kcalloc(num_crtcs, sizeof(int), GFP_KERNEL);
+       if (!dev->vblank_inmodeset)
+               goto err;
+
+       /* Zero per-crtc vblank stuff */
+       for (i = 0; i < num_crtcs; i++) {
+               init_waitqueue_head(&dev->vbl_queue[i]);
+               atomic_set(&dev->_vblank_count[i], 0);
+               atomic_set(&dev->vblank_refcount[i], 0);
+       }
+
+       dev->vblank_disable_allowed = 0;
+
+       return 0;
+
+err:
+       drm_vblank_cleanup(dev);
+       return ret;
+}
+EXPORT_SYMBOL(drm_vblank_init);
+
+static void drm_irq_vgaarb_nokms(void *cookie, bool state)
+{
+       struct drm_device *dev = cookie;
+
+       if (dev->driver->vgaarb_irq) {
+               dev->driver->vgaarb_irq(dev, state);
+               return;
+       }
+
+       if (!dev->irq_enabled)
+               return;
+
+       if (state)
+               dev->driver->irq_uninstall(dev);
+       else {
+               dev->driver->irq_preinstall(dev);
+               dev->driver->irq_postinstall(dev);
+       }
+}
+
+/**
+ * Install IRQ handler.
+ *
+ * \param dev DRM device.
+ * \return zero on success or a negative errno on failure.
+ *
+ * Initializes the IRQ related data. Installs the handler, calling the driver
+ * \c drm_driver_irq_preinstall() and \c drm_driver_irq_postinstall() functions
+ * before and after the installation, and undoes the installation (frees the
+ * IRQ, unregisters the VGA arbiter client) if \c irq_postinstall() fails.
+ */
+int drm_irq_install(struct drm_device *dev)
+{
+       int ret = 0;
+       unsigned long sh_flags = 0;
+       char *irqname;
+
+       if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+               return -EINVAL;
+
+       if (dev->pdev->irq == 0)
+               return -EINVAL;
+
+       mutex_lock(&dev->struct_mutex);
+
+       /* Driver must have been initialized */
+       if (!dev->dev_private) {
+               mutex_unlock(&dev->struct_mutex);
+               return -EINVAL;
+       }
+
+       if (dev->irq_enabled) {
+               mutex_unlock(&dev->struct_mutex);
+               return -EBUSY;
+       }
+       dev->irq_enabled = 1;
+       mutex_unlock(&dev->struct_mutex);
+
+       DRM_DEBUG("irq=%d\n", dev->pdev->irq);
+
+       /* Before installing handler */
+       dev->driver->irq_preinstall(dev);
+
+       /* Install handler */
+       if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
+               sh_flags = IRQF_SHARED;
+
+       if (dev->devname)
+               irqname = dev->devname;
+       else
+               irqname = dev->driver->name;
+
+       ret = request_irq(drm_dev_to_irq(dev), dev->driver->irq_handler,
+                         sh_flags, irqname, dev);
+
+       if (ret < 0) {
+               mutex_lock(&dev->struct_mutex);
+               dev->irq_enabled = 0;
+               mutex_unlock(&dev->struct_mutex);
+               return ret;
+       }
+
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               vga_client_register(dev->pdev, (void *)dev, drm_irq_vgaarb_nokms, NULL);
+
+       /* After installing handler */
+       ret = dev->driver->irq_postinstall(dev);
+       if (ret < 0) {
+               /* Tear everything back down; without this the requested IRQ
+                * and the VGA arbiter registration would be leaked. */
+               mutex_lock(&dev->struct_mutex);
+               dev->irq_enabled = 0;
+               mutex_unlock(&dev->struct_mutex);
+               if (!drm_core_check_feature(dev, DRIVER_MODESET))
+                       vga_client_register(dev->pdev, NULL, NULL, NULL);
+               free_irq(drm_dev_to_irq(dev), dev);
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL(drm_irq_install);
+
+/**
+ * Uninstall the IRQ handler.
+ *
+ * \param dev DRM device.
+ * \return zero on success, or -EINVAL if the driver lacks IRQ support or
+ * the IRQ was not actually installed.
+ *
+ * Calls the driver's \c drm_driver_irq_uninstall() function, and stops the irq.
+ */
+int drm_irq_uninstall(struct drm_device * dev)
+{
+       unsigned long irqflags;
+       int irq_enabled, i;
+
+       if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+               return -EINVAL;
+
+       /* Mark disabled under the mutex, remembering the previous state so
+        * we know whether there is a handler to tear down below. */
+       mutex_lock(&dev->struct_mutex);
+       irq_enabled = dev->irq_enabled;
+       dev->irq_enabled = 0;
+       mutex_unlock(&dev->struct_mutex);
+
+       /*
+        * Wake up any waiters so they don't hang.
+        */
+       spin_lock_irqsave(&dev->vbl_lock, irqflags);
+       for (i = 0; i < dev->num_crtcs; i++) {
+               DRM_WAKEUP(&dev->vbl_queue[i]);
+               dev->vblank_enabled[i] = 0;
+               /* Latch the hw counter so the cooked count stays monotonic
+                * if vblanks are re-enabled later. */
+               dev->last_vblank[i] = dev->driver->get_vblank_counter(dev, i);
+       }
+       spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+
+       /* Nothing was installed; waiters were still woken above. */
+       if (!irq_enabled)
+               return -EINVAL;
+
+       DRM_DEBUG("irq=%d\n", dev->pdev->irq);
+
+       /* Undo the VGA arbiter registration done by drm_irq_install(). */
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               vga_client_register(dev->pdev, NULL, NULL, NULL);
+
+       dev->driver->irq_uninstall(dev);
+
+       free_irq(dev->pdev->irq, dev);
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_irq_uninstall);
+
+/**
+ * IRQ control ioctl.
+ *
+ * \param dev DRM device.
+ * \param data user argument, pointing to a drm_control structure.
+ * \param file_priv DRM file private.
+ * \return zero on success or a negative number on failure.
+ *
+ * Dispatches to drm_irq_install() or drm_irq_uninstall() according to
+ * the requested function.
+ */
+int drm_control(struct drm_device *dev, void *data,
+               struct drm_file *file_priv)
+{
+       struct drm_control *ctl = data;
+
+       /* Only the two handler operations are recognised. */
+       if (ctl->func != DRM_INST_HANDLER && ctl->func != DRM_UNINST_HANDLER)
+               return -EINVAL;
+
+       /* Drivers without an IRQ, and modesetting drivers which manage
+        * their own interrupts, treat this as a successful no-op for
+        * compatibility (this used to live in drm_dma.h). */
+       if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ) ||
+           drm_core_check_feature(dev, DRIVER_MODESET))
+               return 0;
+
+       if (ctl->func == DRM_UNINST_HANDLER)
+               return drm_irq_uninstall(dev);
+
+       /* Pre-1.2 userspace passed the IRQ number in; verify it matches. */
+       if (dev->if_version < DRM_IF_VERSION(1, 2) &&
+           ctl->irq != dev->pdev->irq)
+               return -EINVAL;
+       return drm_irq_install(dev);
+}
+
+/**
+ * drm_vblank_count - retrieve "cooked" vblank counter value
+ * @dev: DRM device
+ * @crtc: which counter to retrieve
+ *
+ * Returns the software-maintained vblank count for @crtc: the number of
+ * vblank events since boot, including events lost while interrupts were
+ * off (accounted for by drm_update_vblank_count()).
+ */
+u32 drm_vblank_count(struct drm_device *dev, int crtc)
+{
+       u32 count = atomic_read(&dev->_vblank_count[crtc]);
+
+       return count;
+}
+EXPORT_SYMBOL(drm_vblank_count);
+
+/**
+ * drm_update_vblank_count - update the master vblank counter
+ * @dev: DRM device
+ * @crtc: counter to update
+ *
+ * Call back into the driver to update the appropriate vblank counter
+ * (specified by @crtc).  Deal with wraparound, if it occurred, and
+ * update the last read value so we can deal with wraparound on the next
+ * call if necessary.
+ *
+ * Only necessary when going from off->on, to account for frames we
+ * didn't get an interrupt for.
+ *
+ * Note: caller must hold dev->vbl_lock since this reads & writes
+ * device vblank fields.
+ */
+static void drm_update_vblank_count(struct drm_device *dev, int crtc)
+{
+       u32 cur_vblank, diff;
+
+       /*
+        * Interrupts were disabled prior to this call, so deal with counter
+        * wrap if needed.
+        * NOTE!  It's possible we lost a full dev->max_vblank_count events
+        * here if the register is small or we had vblank interrupts off for
+        * a long time.
+        */
+       cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
+       /* Unsigned subtraction; a single hw-counter wrap is compensated by
+        * adding the counter period (dev->max_vblank_count) back in. */
+       diff = cur_vblank - dev->last_vblank[crtc];
+       if (cur_vblank < dev->last_vblank[crtc]) {
+               diff += dev->max_vblank_count;
+
+               DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
+                         crtc, dev->last_vblank[crtc], cur_vblank, diff);
+       }
+
+       DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
+                 crtc, diff);
+
+       /* Fold the missed events into the "cooked" counter. */
+       atomic_add(diff, &dev->_vblank_count[crtc]);
+}
+
+/**
+ * drm_vblank_get - get a reference count on vblank events
+ * @dev: DRM device
+ * @crtc: which CRTC to own
+ *
+ * Acquire a reference count on vblank events to avoid having them disabled
+ * while in use.
+ *
+ * RETURNS
+ * Zero on success, nonzero on failure.
+ */
+int drm_vblank_get(struct drm_device *dev, int crtc)
+{
+       unsigned long irqflags;
+       int ret = 0;
+
+       spin_lock_irqsave(&dev->vbl_lock, irqflags);
+       /* Going from 0->1 means we have to enable interrupts again */
+       if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) {
+               if (!dev->vblank_enabled[crtc]) {
+                       ret = dev->driver->enable_vblank(dev, crtc);
+                       DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
+                       if (ret)
+                               /* Driver couldn't enable the interrupt;
+                                * drop the reference we just took. */
+                               atomic_dec(&dev->vblank_refcount[crtc]);
+                       else {
+                               dev->vblank_enabled[crtc] = 1;
+                               /* Account for events missed while off. */
+                               drm_update_vblank_count(dev, crtc);
+                       }
+               }
+       } else {
+               /* Refcount is nonzero but the interrupt is not enabled:
+                * the first holder's enable failed, so back out. */
+               if (!dev->vblank_enabled[crtc]) {
+                       atomic_dec(&dev->vblank_refcount[crtc]);
+                       ret = -EINVAL;
+               }
+       }
+       spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+
+       return ret;
+}
+EXPORT_SYMBOL(drm_vblank_get);
+
+/**
+ * drm_vblank_put - give up ownership of vblank events
+ * @dev: DRM device
+ * @crtc: which counter to give up
+ *
+ * Release ownership of a given vblank counter, turning off interrupts
+ * if possible.  Must be balanced against a prior drm_vblank_get().
+ */
+void drm_vblank_put(struct drm_device *dev, int crtc)
+{
+       /* Underflow here means a put without a matching get. */
+       BUG_ON (atomic_read (&dev->vblank_refcount[crtc]) == 0);
+
+       /* Last user schedules interrupt disable */
+       /* The disable is deferred (5 s timer) so back-to-back waiters
+        * don't keep toggling the interrupt on and off. */
+       if (atomic_dec_and_test(&dev->vblank_refcount[crtc]))
+               mod_timer(&dev->vblank_disable_timer, jiffies + 5*DRM_HZ);
+}
+EXPORT_SYMBOL(drm_vblank_put);
+
+/* Mark vblanks on @crtc as off: wake any waiters so they don't hang, and
+ * latch the hw counter so the cooked count stays consistent when vblanks
+ * are re-enabled.  Caller context: takes dev->vbl_lock itself. */
+void drm_vblank_off(struct drm_device *dev, int crtc)
+{
+       unsigned long irqflags;
+
+       spin_lock_irqsave(&dev->vbl_lock, irqflags);
+       DRM_WAKEUP(&dev->vbl_queue[crtc]);
+       dev->vblank_enabled[crtc] = 0;
+       dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
+       spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+}
+EXPORT_SYMBOL(drm_vblank_off);
+
+/**
+ * drm_vblank_pre_modeset - account for vblanks across mode sets
+ * @dev: DRM device
+ * @crtc: CRTC in question
+ *
+ * Account for vblank events across mode setting events, which will likely
+ * reset the hardware frame counter.
+ */
+void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
+{
+       /*
+        * To avoid all the problems that might happen if interrupts
+        * were enabled/disabled around or between these calls, we just
+        * have the kernel take a reference on the CRTC (just once though
+        * to avoid corrupting the count if multiple, mismatch calls occur),
+        * so that interrupts remain enabled in the interim.
+        */
+       /* vblank_inmodeset bit 0x1 = "modeset in progress"; bit 0x2 = "we
+        * hold a vblank reference that post_modeset must drop". */
+       if (!dev->vblank_inmodeset[crtc]) {
+               dev->vblank_inmodeset[crtc] = 0x1;
+               if (drm_vblank_get(dev, crtc) == 0)
+                       dev->vblank_inmodeset[crtc] |= 0x2;
+       }
+}
+EXPORT_SYMBOL(drm_vblank_pre_modeset);
+
+/**
+ * drm_vblank_post_modeset - undo drm_vblank_pre_modeset accounting
+ * @dev: DRM device
+ * @crtc: CRTC in question
+ *
+ * Drops the vblank reference taken by drm_vblank_pre_modeset() (if any)
+ * and re-allows the deferred vblank interrupt disable.
+ */
+void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
+{
+       unsigned long irqflags;
+
+       if (dev->vblank_inmodeset[crtc]) {
+               spin_lock_irqsave(&dev->vbl_lock, irqflags);
+               dev->vblank_disable_allowed = 1;
+               spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+
+               /* Bit 0x2 means pre_modeset took a vblank reference. */
+               if (dev->vblank_inmodeset[crtc] & 0x2)
+                       drm_vblank_put(dev, crtc);
+
+               dev->vblank_inmodeset[crtc] = 0;
+       }
+}
+EXPORT_SYMBOL(drm_vblank_post_modeset);
+
+/**
+ * drm_modeset_ctl - handle vblank event counter changes across mode switch
+ * @dev: DRM device
+ * @data: pointer to a drm_modeset_ctl structure
+ * @file_priv: DRM file private
+ *
+ * Applications should call the %_DRM_PRE_MODESET and %_DRM_POST_MODESET
+ * ioctls around modesetting so that any lost vblank events are accounted
+ * for, since the hardware counter generally resets across a mode set.
+ */
+int drm_modeset_ctl(struct drm_device *dev, void *data,
+                   struct drm_file *file_priv)
+{
+       struct drm_modeset_ctl *modeset = data;
+       int crtc;
+
+       /* If drm_vblank_init() hasn't been called yet, just no-op */
+       if (!dev->num_crtcs)
+               return 0;
+
+       crtc = modeset->crtc;
+       if (crtc >= dev->num_crtcs)
+               return -EINVAL;
+
+       if (modeset->cmd == _DRM_PRE_MODESET)
+               drm_vblank_pre_modeset(dev, crtc);
+       else if (modeset->cmd == _DRM_POST_MODESET)
+               drm_vblank_post_modeset(dev, crtc);
+       else
+               return -EINVAL;
+
+       return 0;
+}
+
+/*
+ * Queue an event to be delivered to @file_priv when the requested vblank
+ * sequence is reached on @pipe.
+ *
+ * The caller passes in one vblank reference (from drm_vblank_get()); this
+ * function always consumes it: immediately when the event fires right away
+ * or an error occurs, otherwise later from drm_handle_vblank_events().
+ */
+static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
+                                 union drm_wait_vblank *vblwait,
+                                 struct drm_file *file_priv)
+{
+       struct drm_pending_vblank_event *e;
+       struct timeval now;
+       unsigned long flags;
+       unsigned int seq;
+       int ret;
+
+       e = kzalloc(sizeof *e, GFP_KERNEL);
+       if (e == NULL) {
+               ret = -ENOMEM;
+               goto err_put;
+       }
+
+       e->pipe = pipe;
+       e->event.base.type = DRM_EVENT_VBLANK;
+       e->event.base.length = sizeof e->event;
+       e->event.user_data = vblwait->request.signal;
+       e->base.event = &e->event.base;
+       e->base.file_priv = file_priv;
+       e->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
+
+       do_gettimeofday(&now);
+       spin_lock_irqsave(&dev->event_lock, flags);
+
+       /* Each client has a bounded event budget; don't exceed it. */
+       if (file_priv->event_space < sizeof e->event) {
+               ret = -ENOMEM;
+               goto err_unlock;
+       }
+
+       file_priv->event_space -= sizeof e->event;
+       seq = drm_vblank_count(dev, pipe);
+       if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) &&
+           (seq - vblwait->request.sequence) <= (1 << 23)) {
+               vblwait->request.sequence = seq + 1;
+               vblwait->reply.sequence = vblwait->request.sequence;
+       }
+
+       DRM_DEBUG("event on vblank count %d, current %d, crtc %d\n",
+                 vblwait->request.sequence, seq, pipe);
+
+       e->event.sequence = vblwait->request.sequence;
+       if ((seq - vblwait->request.sequence) <= (1 << 23)) {
+               /* Requested sequence has already passed: deliver now. */
+               e->event.tv_sec = now.tv_sec;
+               e->event.tv_usec = now.tv_usec;
+               drm_vblank_put(dev, e->pipe);
+               list_add_tail(&e->base.link, &e->base.file_priv->event_list);
+               wake_up_interruptible(&e->base.file_priv->event_wait);
+       } else {
+               list_add_tail(&e->base.link, &dev->vblank_event_list);
+       }
+
+       spin_unlock_irqrestore(&dev->event_lock, flags);
+
+       return 0;
+
+err_unlock:
+       spin_unlock_irqrestore(&dev->event_lock, flags);
+       kfree(e);
+err_put:
+       /* Error: drop the vblank reference the caller handed us, or the
+        * vblank interrupt could stay enabled indefinitely. */
+       drm_vblank_put(dev, pipe);
+       return ret;
+}
+
+/**
+ * Wait for VBLANK.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param data user argument, pointing to a drm_wait_vblank structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * This function enables the vblank interrupt on the pipe requested, then
+ * sleeps waiting for the requested sequence number to occur, and drops
+ * the vblank interrupt refcount afterwards. (vblank irq disable follows that
+ * after a timeout with no further vblank waits scheduled).
+ */
+int drm_wait_vblank(struct drm_device *dev, void *data,
+                   struct drm_file *file_priv)
+{
+       union drm_wait_vblank *vblwait = data;
+       int ret = 0;
+       unsigned int flags, seq, crtc;
+
+       if ((!dev->pdev->irq) || (!dev->irq_enabled))
+               return -EINVAL;
+
+       /* Signal-on-vblank is not supported in this version. */
+       if (vblwait->request.type & _DRM_VBLANK_SIGNAL)
+               return -EINVAL;
+
+       if (vblwait->request.type &
+           ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) {
+               DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n",
+                         vblwait->request.type,
+                         (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK));
+               return -EINVAL;
+       }
+
+       flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
+       /* Only two CRTCs are addressable through this legacy interface. */
+       crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
+
+       if (crtc >= dev->num_crtcs)
+               return -EINVAL;
+
+       /* Keep the vblank interrupt alive while we wait. */
+       ret = drm_vblank_get(dev, crtc);
+       if (ret) {
+               DRM_DEBUG("failed to acquire vblank counter, %d\n", ret);
+               return ret;
+       }
+       seq = drm_vblank_count(dev, crtc);
+
+       switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
+       case _DRM_VBLANK_RELATIVE:
+               vblwait->request.sequence += seq;
+               vblwait->request.type &= ~_DRM_VBLANK_RELATIVE;
+               /* fallthrough: a relative wait becomes an absolute one */
+       case _DRM_VBLANK_ABSOLUTE:
+               break;
+       default:
+               ret = -EINVAL;
+               goto done;
+       }
+
+       /* Event-style wait: ownership of the vblank reference passes to
+        * drm_queue_vblank_event(); do not fall through to done. */
+       if (flags & _DRM_VBLANK_EVENT)
+               return drm_queue_vblank_event(dev, crtc, vblwait, file_priv);
+
+       /* "sequence already passed" test: unsigned distance <= 2^23. */
+       if ((flags & _DRM_VBLANK_NEXTONMISS) &&
+           (seq - vblwait->request.sequence) <= (1<<23)) {
+               vblwait->request.sequence = seq + 1;
+       }
+
+       DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
+                 vblwait->request.sequence, crtc);
+       dev->last_vblank_wait[crtc] = vblwait->request.sequence;
+       /* Sleep up to 3 s; bail early if the IRQ gets torn down. */
+       DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
+                   (((drm_vblank_count(dev, crtc) -
+                      vblwait->request.sequence) <= (1 << 23)) ||
+                    !dev->irq_enabled));
+
+       if (ret != -EINTR) {
+               struct timeval now;
+
+               do_gettimeofday(&now);
+
+               vblwait->reply.tval_sec = now.tv_sec;
+               vblwait->reply.tval_usec = now.tv_usec;
+               vblwait->reply.sequence = drm_vblank_count(dev, crtc);
+               DRM_DEBUG("returning %d to client\n",
+                         vblwait->reply.sequence);
+       } else {
+               DRM_DEBUG("vblank wait interrupted by signal\n");
+       }
+
+done:
+       drm_vblank_put(dev, crtc);
+       return ret;
+}
+
+/* Deliver any queued vblank events on @crtc whose requested sequence has
+ * been reached, stamping them with the current time and sequence. */
+void drm_handle_vblank_events(struct drm_device *dev, int crtc)
+{
+       struct drm_pending_vblank_event *e, *t;
+       struct timeval now;
+       unsigned long flags;
+       unsigned int seq;
+
+       do_gettimeofday(&now);
+       seq = drm_vblank_count(dev, crtc);
+
+       spin_lock_irqsave(&dev->event_lock, flags);
+
+       list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
+               if (e->pipe != crtc)
+                       continue;
+               /* Not due yet: unsigned distance > 2^23 means "future". */
+               if ((seq - e->event.sequence) > (1<<23))
+                       continue;
+
+               DRM_DEBUG("vblank event on %d, current %d\n",
+                         e->event.sequence, seq);
+
+               e->event.sequence = seq;
+               e->event.tv_sec = now.tv_sec;
+               e->event.tv_usec = now.tv_usec;
+               /* Drop the vblank reference held on behalf of this event. */
+               drm_vblank_put(dev, e->pipe);
+               list_move_tail(&e->base.link, &e->base.file_priv->event_list);
+               wake_up_interruptible(&e->base.file_priv->event_wait);
+       }
+
+       spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+/**
+ * drm_handle_vblank - handle a vblank event
+ * @dev: DRM device
+ * @crtc: where this event occurred
+ *
+ * Drivers should call this routine in their vblank interrupt handlers to
+ * update the vblank counter and send any signals that may be pending.
+ */
+void drm_handle_vblank(struct drm_device *dev, int crtc)
+{
+       /* No-op until drm_vblank_init() has set up per-CRTC state. */
+       if (!dev->num_crtcs)
+               return;
+
+       atomic_inc(&dev->_vblank_count[crtc]);
+       DRM_WAKEUP(&dev->vbl_queue[crtc]);
+       drm_handle_vblank_events(dev, crtc);
+}
+EXPORT_SYMBOL(drm_handle_vblank);
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_lock.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_lock.c
new file mode 100644 (file)
index 0000000..e2f70a5
--- /dev/null
@@ -0,0 +1,392 @@
+/**
+ * \file drm_lock.c
+ * IOCTLs for locking
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Tue Feb  2 08:37:54 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+
+static int drm_notifier(void *priv);
+
+/**
+ * Lock ioctl.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_lock structure.
+ * \return zero on success or negative number on failure.
+ *
+ * Add the current task to the lock wait queue, and attempt to take the lock.
+ */
+int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       DECLARE_WAITQUEUE(entry, current);
+       struct drm_lock *lock = data;
+       struct drm_master *master = file_priv->master;
+       int ret = 0;
+
+       ++file_priv->lock_count;
+
+       /* The kernel context is reserved; userspace may not lock it. */
+       if (lock->context == DRM_KERNEL_CONTEXT) {
+               DRM_ERROR("Process %d using kernel context %d\n",
+                         task_pid_nr(current), lock->context);
+               return -EINVAL;
+       }
+
+       DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
+                 lock->context, task_pid_nr(current),
+                 master->lock.hw_lock->lock, lock->flags);
+
+       if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE))
+               if (lock->context < 0)
+                       return -EINVAL;
+
+       /* Classic sleep loop: register on the wait queue, then retry the
+        * lock until acquired or a signal interrupts us. */
+       add_wait_queue(&master->lock.lock_queue, &entry);
+       spin_lock_bh(&master->lock.spinlock);
+       master->lock.user_waiters++;
+       spin_unlock_bh(&master->lock.spinlock);
+
+       for (;;) {
+               __set_current_state(TASK_INTERRUPTIBLE);
+               if (!master->lock.hw_lock) {
+                       /* Device has been unregistered */
+                       send_sig(SIGTERM, current, 0);
+                       ret = -EINTR;
+                       break;
+               }
+               if (drm_lock_take(&master->lock, lock->context)) {
+                       master->lock.file_priv = file_priv;
+                       master->lock.lock_time = jiffies;
+                       atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
+                       break;  /* Got lock */
+               }
+
+               /* Contention */
+               schedule();
+               if (signal_pending(current)) {
+                       ret = -EINTR;
+                       break;
+               }
+       }
+       spin_lock_bh(&master->lock.spinlock);
+       master->lock.user_waiters--;
+       spin_unlock_bh(&master->lock.spinlock);
+       __set_current_state(TASK_RUNNING);
+       remove_wait_queue(&master->lock.lock_queue, &entry);
+
+       DRM_DEBUG("%d %s\n", lock->context,
+                 ret ? "interrupted" : "has lock");
+       if (ret) return ret;
+
+       /* don't set the block all signals on the master process for now 
+        * really probably not the correct answer but lets us debug xkb
+        * xserver for now */
+       if (!file_priv->is_master) {
+               /* Block job-control signals while the lock is held; see
+                * drm_notifier() for how delivery is deferred. */
+               sigemptyset(&dev->sigmask);
+               sigaddset(&dev->sigmask, SIGSTOP);
+               sigaddset(&dev->sigmask, SIGTSTP);
+               sigaddset(&dev->sigmask, SIGTTIN);
+               sigaddset(&dev->sigmask, SIGTTOU);
+               dev->sigdata.context = lock->context;
+               dev->sigdata.lock = master->lock.hw_lock;
+               block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);
+       }
+
+       if (dev->driver->dma_ready && (lock->flags & _DRM_LOCK_READY))
+               dev->driver->dma_ready(dev);
+
+       if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT))
+       {
+               if (dev->driver->dma_quiescent(dev)) {
+                       DRM_DEBUG("%d waiting for DMA quiescent\n",
+                                 lock->context);
+                       return -EBUSY;
+               }
+       }
+
+       if (dev->driver->kernel_context_switch &&
+           dev->last_context != lock->context) {
+               dev->driver->kernel_context_switch(dev, dev->last_context,
+                                                  lock->context);
+       }
+
+       return 0;
+}
+
+/**
+ * Unlock ioctl.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_lock structure.
+ * \return zero on success or negative number on failure.
+ *
+ * Transfer and free the lock.
+ */
+int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       struct drm_lock *lock = data;
+       struct drm_master *master = file_priv->master;
+
+       /* The kernel context lock is never userspace's to release. */
+       if (lock->context == DRM_KERNEL_CONTEXT) {
+               DRM_ERROR("Process %d using kernel context %d\n",
+                         task_pid_nr(current), lock->context);
+               return -EINVAL;
+       }
+
+       atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
+
+       /* kernel_context_switch isn't used by any of the x86 drm
+        * modules but is required by the Sparc driver.
+        */
+       if (dev->driver->kernel_context_switch_unlock)
+               dev->driver->kernel_context_switch_unlock(dev);
+       else {
+               if (drm_lock_free(&master->lock, lock->context)) {
+                       /* FIXME: Should really bail out here. */
+               }
+       }
+
+       /* Undo the signal blocking installed by drm_lock(). */
+       unblock_all_signals();
+       return 0;
+}
+
+/**
+ * Take the heavyweight lock.
+ *
+ * \param lock_data lock pointer.
+ * \param context locking context.
+ * \return one if the lock is held, or zero otherwise.
+ *
+ * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
+ */
+int drm_lock_take(struct drm_lock_data *lock_data,
+                 unsigned int context)
+{
+       unsigned int old, new, prev;
+       volatile unsigned int *lock = &lock_data->hw_lock->lock;
+
+       spin_lock_bh(&lock_data->spinlock);
+       do {
+               old = *lock;
+               if (old & _DRM_LOCK_HELD)
+                       /* Already held by someone: just flag contention. */
+                       new = old | _DRM_LOCK_CONT;
+               else {
+                       /* Free: claim it, marking contention if anyone
+                        * else is queued behind us. */
+                       new = context | _DRM_LOCK_HELD |
+                               ((lock_data->user_waiters + lock_data->kernel_waiters > 1) ?
+                                _DRM_LOCK_CONT : 0);
+               }
+               prev = cmpxchg(lock, old, new);
+       } while (prev != old);
+       spin_unlock_bh(&lock_data->spinlock);
+
+       if (_DRM_LOCKING_CONTEXT(old) == context) {
+               if (old & _DRM_LOCK_HELD) {
+                       /* Recursive take by the same context is an error
+                        * for everything but the kernel context. */
+                       if (context != DRM_KERNEL_CONTEXT) {
+                               DRM_ERROR("%d holds heavyweight lock\n",
+                                         context);
+                       }
+                       return 0;
+               }
+       }
+
+       if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) {
+               /* Have lock */
+               return 1;
+       }
+       return 0;
+}
+EXPORT_SYMBOL(drm_lock_take);
+
+/**
+ * This takes a lock forcibly and hands it to context. Should ONLY be used
+ * inside *_unlock to give lock to kernel before calling *_dma_schedule.
+ *
+ * \param lock_data lock pointer.
+ * \param context locking context.
+ * \return always one.
+ *
+ * Resets the lock file pointer.
+ * Marks the lock as held by the given context, via the \p cmpxchg instruction.
+ */
+static int drm_lock_transfer(struct drm_lock_data *lock_data,
+                            unsigned int context)
+{
+       unsigned int old, new, prev;
+       volatile unsigned int *lock = &lock_data->hw_lock->lock;
+
+       lock_data->file_priv = NULL;
+       do {
+               old = *lock;
+               /* Unconditionally stamp the new owner; nothing of the
+                * previous state is preserved. */
+               new = context | _DRM_LOCK_HELD;
+               prev = cmpxchg(lock, old, new);
+       } while (prev != old);
+       return 1;
+}
+
+/**
+ * Free lock.
+ *
+ * \param lock_data lock data.
+ * \param context context.
+ * \return one on error (lock handed to the idlelock, or freed while held
+ * by a different context); zero when the lock was actually released.
+ *
+ * Resets the lock file pointer.
+ * Marks the lock as not held, via the \p cmpxchg instruction. Wakes any task
+ * waiting on the lock queue.
+ */
+int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context)
+{
+       unsigned int old, new, prev;
+       volatile unsigned int *lock = &lock_data->hw_lock->lock;
+
+       spin_lock_bh(&lock_data->spinlock);
+       /* If the kernel is waiting (idlelock), hand the lock straight to
+        * it instead of releasing it. */
+       if (lock_data->kernel_waiters != 0) {
+               drm_lock_transfer(lock_data, 0);
+               lock_data->idle_has_lock = 1;
+               spin_unlock_bh(&lock_data->spinlock);
+               return 1;
+       }
+       spin_unlock_bh(&lock_data->spinlock);
+
+       do {
+               old = *lock;
+               /* Keep only the context bits; drops HELD and CONT. */
+               new = _DRM_LOCKING_CONTEXT(old);
+               prev = cmpxchg(lock, old, new);
+       } while (prev != old);
+
+       if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
+               DRM_ERROR("%d freed heavyweight lock held by %d\n",
+                         context, _DRM_LOCKING_CONTEXT(old));
+               return 1;
+       }
+       wake_up_interruptible(&lock_data->lock_queue);
+       return 0;
+}
+EXPORT_SYMBOL(drm_lock_free);
+
+/**
+ * If we get here, it means that the process has called DRM_IOCTL_LOCK
+ * without calling DRM_IOCTL_UNLOCK.
+ *
+ * If the lock is not held, then let the signal proceed as usual.  If the lock
+ * is held, then set the contended flag and keep the signal blocked.
+ *
+ * \param priv pointer to a drm_sigdata structure.
+ * \return one if the signal should be delivered normally, or zero if the
+ * signal should be blocked.
+ */
+static int drm_notifier(void *priv)
+{
+       struct drm_sigdata *s = (struct drm_sigdata *) priv;
+       unsigned int old, new, prev;
+
+       /* Allow signal delivery if lock isn't held */
+       if (!s->lock || !_DRM_LOCK_IS_HELD(s->lock->lock)
+           || _DRM_LOCKING_CONTEXT(s->lock->lock) != s->context)
+               return 1;
+
+       /* Otherwise, set flag to force call to
+          drmUnlock */
+       /* Lock-free update, retried if the word changed underneath us. */
+       do {
+               old = s->lock->lock;
+               new = old | _DRM_LOCK_CONT;
+               prev = cmpxchg(&s->lock->lock, old, new);
+       } while (prev != old);
+       return 0;
+}
+
+/**
+ * This function returns immediately and takes the hw lock
+ * with the kernel context if it is free, otherwise it gets the highest priority when and if
+ * it is eventually released.
+ *
+ * This guarantees that the kernel will _eventually_ have the lock _unless_ it is held
+ * by a blocked process. (In the latter case an explicit wait for the hardware lock would cause
+ * a deadlock, which is why the "idlelock" was invented).
+ *
+ * This should be sufficient to wait for GPU idle without
+ * having to worry about starvation.
+ */
+
+void drm_idlelock_take(struct drm_lock_data *lock_data)
+{
+       int ret = 0;
+
+       spin_lock_bh(&lock_data->spinlock);
+       lock_data->kernel_waiters++;
+       if (!lock_data->idle_has_lock) {
+
+               /* drm_lock_take() acquires the spinlock itself, so drop it
+                * around the call.  kernel_waiters is already incremented,
+                * so drm_lock_free() hands us the lock if it is released
+                * inside this window. */
+               spin_unlock_bh(&lock_data->spinlock);
+               ret = drm_lock_take(lock_data, DRM_KERNEL_CONTEXT);
+               spin_lock_bh(&lock_data->spinlock);
+
+               if (ret == 1)
+                       lock_data->idle_has_lock = 1;
+       }
+       spin_unlock_bh(&lock_data->spinlock);
+}
+EXPORT_SYMBOL(drm_idlelock_take);
+
+/* Release the idlelock: when the last kernel waiter goes away, return the
+ * hw lock to the kernel context and wake any userspace waiters. */
+void drm_idlelock_release(struct drm_lock_data *lock_data)
+{
+       unsigned int old, prev;
+       volatile unsigned int *lock = &lock_data->hw_lock->lock;
+
+       spin_lock_bh(&lock_data->spinlock);
+       if (--lock_data->kernel_waiters == 0) {
+               if (lock_data->idle_has_lock) {
+                       do {
+                               old = *lock;
+                               prev = cmpxchg(lock, old, DRM_KERNEL_CONTEXT);
+                       } while (prev != old);
+                       wake_up_interruptible(&lock_data->lock_queue);
+                       lock_data->idle_has_lock = 0;
+               }
+       }
+       spin_unlock_bh(&lock_data->spinlock);
+}
+EXPORT_SYMBOL(drm_idlelock_release);
+
+
+/* Report whether @file_priv currently holds the heavyweight HW lock. */
+int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
+{
+       struct drm_master *master = file_priv->master;
+
+       /* Never locked anything, or no lock exists: definitely not held. */
+       if (!file_priv->lock_count || !master->lock.hw_lock)
+               return 0;
+       return _DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) &&
+               master->lock.file_priv == file_priv;
+}
+
+EXPORT_SYMBOL(drm_i_have_hw_lock);
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_memory.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_memory.c
new file mode 100644 (file)
index 0000000..e4865f9
--- /dev/null
@@ -0,0 +1,168 @@
+/**
+ * \file drm_memory.c
+ * Memory management wrappers for DRM
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Thu Feb  4 14:00:34 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/highmem.h>
+#include "drmP.h"
+
+/**
+ * Called when "/proc/dri/%dev%/mem" is read.
+ *
+ * \param buf output buffer.
+ * \param start start of output data.
+ * \param offset requested start offset.
+ * \param len requested number of bytes.
+ * \param eof whether there is no more data to return.
+ * \param data private data.
+ * \return number of written bytes.
+ *
+ * No-op.
+ */
+int drm_mem_info(char *buf, char **start, off_t offset,
+                int len, int *eof, void *data)
+{
+       return 0;
+}
+
+#if __OS_HAS_AGP
+static void *agp_remap(unsigned long offset, unsigned long size,
+                      struct drm_device * dev)
+{
+       unsigned long i, num_pages =
+           PAGE_ALIGN(size) / PAGE_SIZE;
+       struct drm_agp_mem *agpmem;
+       struct page **page_map;
+       struct page **phys_page_map;
+       void *addr;
+
+       size = PAGE_ALIGN(size);
+
+#ifdef __alpha__
+       offset -= dev->hose->mem_space->start;
+#endif
+
+       list_for_each_entry(agpmem, &dev->agp->memory, head)
+               if (agpmem->bound <= offset
+                   && (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >=
+                   (offset + size))
+                       break;
+       if (&agpmem->head == &dev->agp->memory) /* ran off list: no match */
+               return NULL;
+
+       /*
+        * OK, we're mapping AGP space on a chipset/platform on which memory accesses by
+        * the CPU do not get remapped by the GART.  We fix this by using the kernel's
+        * page-table instead (that's probably faster anyhow...).
+        */
+       /* note: use vmalloc() because num_pages could be large... */
+       page_map = vmalloc(num_pages * sizeof(struct page *));
+       if (!page_map)
+               return NULL;
+
+       phys_page_map = (agpmem->memory->pages + (offset - agpmem->bound) / PAGE_SIZE);
+       for (i = 0; i < num_pages; ++i)
+               page_map[i] = phys_page_map[i];
+       addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP);
+       vfree(page_map);
+
+       return addr;
+}
+
+/** Wrapper around agp_allocate_memory() */
+DRM_AGP_MEM *drm_alloc_agp(struct drm_device * dev, int pages, u32 type)
+{
+       return drm_agp_allocate_memory(dev->agp->bridge, pages, type);
+}
+
+/** Wrapper around agp_free_memory() */
+int drm_free_agp(DRM_AGP_MEM * handle, int pages)
+{
+       return drm_agp_free_memory(handle) ? 0 : -EINVAL;
+}
+EXPORT_SYMBOL(drm_free_agp);
+
+/** Wrapper around agp_bind_memory() */
+int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
+{
+       return drm_agp_bind_memory(handle, start);
+}
+
+/** Wrapper around agp_unbind_memory() */
+int drm_unbind_agp(DRM_AGP_MEM * handle)
+{
+       return drm_agp_unbind_memory(handle);
+}
+EXPORT_SYMBOL(drm_unbind_agp);
+
+#else  /*  __OS_HAS_AGP  */
+static inline void *agp_remap(unsigned long offset, unsigned long size,
+                             struct drm_device * dev)
+{
+       return NULL;
+}
+
+#endif                         /* agp */
+
+void drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev)
+{
+       if (drm_core_has_AGP(dev) &&
+           dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
+               map->handle = agp_remap(map->offset, map->size, dev);
+       else
+               map->handle = ioremap(map->offset, map->size);
+}
+EXPORT_SYMBOL(drm_core_ioremap);
+
+void drm_core_ioremap_wc(struct drm_local_map *map, struct drm_device *dev)
+{
+       if (drm_core_has_AGP(dev) &&
+           dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
+               map->handle = agp_remap(map->offset, map->size, dev);
+       else
+               map->handle = ioremap_wc(map->offset, map->size);
+}
+EXPORT_SYMBOL(drm_core_ioremap_wc);
+
+void drm_core_ioremapfree(struct drm_local_map *map, struct drm_device *dev)
+{
+       if (!map->handle || !map->size)
+               return;
+
+       if (drm_core_has_AGP(dev) &&
+           dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
+               vunmap(map->handle);
+       else
+               iounmap(map->handle);
+}
+EXPORT_SYMBOL(drm_core_ioremapfree);
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_mm.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_mm.c
new file mode 100644 (file)
index 0000000..d7d7eac
--- /dev/null
@@ -0,0 +1,510 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ **************************************************************************/
+
+/*
+ * Generic simple memory manager implementation. Intended to be used as a base
+ * class implementation for more advanced memory managers.
+ *
+ * Note that the algorithm used is quite simple and there might be substantial
+ * performance gains if a smarter free list is implemented. Currently it is just an
+ * unordered stack of free regions. This could easily be improved if an RB-tree
+ * is used instead. At least if we expect heavy fragmentation.
+ *
+ * Aligned allocations can also see improvement.
+ *
+ * Authors:
+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include "drmP.h"
+#include "drm_mm.h"
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+
+#define MM_UNUSED_TARGET 4
+
+unsigned long drm_mm_tail_space(struct drm_mm *mm)
+{
+       struct list_head *tail_node;
+       struct drm_mm_node *entry;
+
+       tail_node = mm->ml_entry.prev;
+       entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
+       if (!entry->free)
+               return 0;
+
+       return entry->size;
+}
+
+int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
+{
+       struct list_head *tail_node;
+       struct drm_mm_node *entry;
+
+       tail_node = mm->ml_entry.prev;
+       entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
+       if (!entry->free)
+               return -ENOMEM;
+
+       if (entry->size <= size)
+               return -ENOMEM;
+
+       entry->size -= size;
+       return 0;
+}
+
+static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
+{
+       struct drm_mm_node *child;
+
+       if (atomic)
+               child = kmalloc(sizeof(*child), GFP_ATOMIC);
+       else
+               child = kmalloc(sizeof(*child), GFP_KERNEL);
+
+       if (unlikely(child == NULL)) {
+               spin_lock(&mm->unused_lock);
+               if (list_empty(&mm->unused_nodes))
+                       child = NULL;
+               else {
+                       child =
+                           list_entry(mm->unused_nodes.next,
+                                      struct drm_mm_node, fl_entry);
+                       list_del(&child->fl_entry);
+                       --mm->num_unused;
+               }
+               spin_unlock(&mm->unused_lock);
+       }
+       return child;
+}
+
+/* drm_mm_pre_get() - pre allocate drm_mm_node structure
+ * drm_mm:     memory manager struct we are pre-allocating for
+ *
+ * Returns 0 on success or -ENOMEM if allocation fails.
+ */
+int drm_mm_pre_get(struct drm_mm *mm)
+{
+       struct drm_mm_node *node;
+
+       spin_lock(&mm->unused_lock);
+       while (mm->num_unused < MM_UNUSED_TARGET) {
+               spin_unlock(&mm->unused_lock);
+               node = kmalloc(sizeof(*node), GFP_KERNEL);
+               spin_lock(&mm->unused_lock);
+
+               if (unlikely(node == NULL)) {
+                       int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
+                       spin_unlock(&mm->unused_lock);
+                       return ret;
+               }
+               ++mm->num_unused;
+               list_add_tail(&node->fl_entry, &mm->unused_nodes);
+       }
+       spin_unlock(&mm->unused_lock);
+       return 0;
+}
+EXPORT_SYMBOL(drm_mm_pre_get);
+
+static int drm_mm_create_tail_node(struct drm_mm *mm,
+                                  unsigned long start,
+                                  unsigned long size, int atomic)
+{
+       struct drm_mm_node *child;
+
+       child = drm_mm_kmalloc(mm, atomic);
+       if (unlikely(child == NULL))
+               return -ENOMEM;
+
+       child->free = 1;
+       child->size = size;
+       child->start = start;
+       child->mm = mm;
+
+       list_add_tail(&child->ml_entry, &mm->ml_entry);
+       list_add_tail(&child->fl_entry, &mm->fl_entry);
+
+       return 0;
+}
+
+int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size, int atomic)
+{
+       struct list_head *tail_node;
+       struct drm_mm_node *entry;
+
+       tail_node = mm->ml_entry.prev;
+       entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
+       if (!entry->free) {
+               return drm_mm_create_tail_node(mm, entry->start + entry->size,
+                                              size, atomic);
+       }
+       entry->size += size;
+       return 0;
+}
+
+static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
+                                                unsigned long size,
+                                                int atomic)
+{
+       struct drm_mm_node *child;
+
+       child = drm_mm_kmalloc(parent->mm, atomic);
+       if (unlikely(child == NULL))
+               return NULL;
+
+       INIT_LIST_HEAD(&child->fl_entry);
+
+       child->free = 0;
+       child->size = size;
+       child->start = parent->start;
+       child->mm = parent->mm;
+
+       list_add_tail(&child->ml_entry, &parent->ml_entry);
+       INIT_LIST_HEAD(&child->fl_entry);
+
+       parent->size -= size;
+       parent->start += size;
+       return child;
+}
+
+
+struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
+                                            unsigned long size,
+                                            unsigned alignment,
+                                            int atomic)
+{
+
+       struct drm_mm_node *align_splitoff = NULL;
+       unsigned tmp = 0;
+
+       if (alignment)
+               tmp = node->start % alignment;
+
+       if (tmp) {
+               align_splitoff =
+                   drm_mm_split_at_start(node, alignment - tmp, atomic);
+               if (unlikely(align_splitoff == NULL))
+                       return NULL;
+       }
+
+       if (node->size == size) {
+               list_del_init(&node->fl_entry);
+               node->free = 0;
+       } else {
+               node = drm_mm_split_at_start(node, size, atomic);
+       }
+
+       if (align_splitoff)
+               drm_mm_put_block(align_splitoff);
+
+       return node;
+}
+EXPORT_SYMBOL(drm_mm_get_block_generic);
+
+struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node,
+                                               unsigned long size,
+                                               unsigned alignment,
+                                               unsigned long start,
+                                               unsigned long end,
+                                               int atomic)
+{
+       struct drm_mm_node *align_splitoff = NULL;
+       unsigned tmp = 0;
+       unsigned wasted = 0;
+
+       if (node->start < start)
+               wasted += start - node->start;
+       if (alignment)
+               tmp = ((node->start + wasted) % alignment);
+
+       if (tmp)
+               wasted += alignment - tmp;
+       if (wasted) {
+               align_splitoff = drm_mm_split_at_start(node, wasted, atomic);
+               if (unlikely(align_splitoff == NULL))
+                       return NULL;
+       }
+
+       if (node->size == size) {
+               list_del_init(&node->fl_entry);
+               node->free = 0;
+       } else {
+               node = drm_mm_split_at_start(node, size, atomic);
+       }
+
+       if (align_splitoff)
+               drm_mm_put_block(align_splitoff);
+
+       return node;
+}
+EXPORT_SYMBOL(drm_mm_get_block_range_generic);
+
+/*
+ * Put a block. Merge with the previous and / or next block if they are free.
+ * Otherwise add to the free stack.
+ */
+
+void drm_mm_put_block(struct drm_mm_node *cur)
+{
+
+       struct drm_mm *mm = cur->mm;
+       struct list_head *cur_head = &cur->ml_entry;
+       struct list_head *root_head = &mm->ml_entry;
+       struct drm_mm_node *prev_node = NULL;
+       struct drm_mm_node *next_node;
+
+       int merged = 0;
+
+       if (cur_head->prev != root_head) {
+               prev_node =
+                   list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
+               if (prev_node->free) {
+                       prev_node->size += cur->size;
+                       merged = 1;
+               }
+       }
+       if (cur_head->next != root_head) {
+               next_node =
+                   list_entry(cur_head->next, struct drm_mm_node, ml_entry);
+               if (next_node->free) {
+                       if (merged) {
+                               prev_node->size += next_node->size;
+                               list_del(&next_node->ml_entry);
+                               list_del(&next_node->fl_entry);
+                               spin_lock(&mm->unused_lock);
+                               if (mm->num_unused < MM_UNUSED_TARGET) {
+                                       list_add(&next_node->fl_entry,
+                                                &mm->unused_nodes);
+                                       ++mm->num_unused;
+                               } else
+                                       kfree(next_node);
+                               spin_unlock(&mm->unused_lock);
+                       } else {
+                               next_node->size += cur->size;
+                               next_node->start = cur->start;
+                               merged = 1;
+                       }
+               }
+       }
+       if (!merged) {
+               cur->free = 1;
+               list_add(&cur->fl_entry, &mm->fl_entry);
+       } else {
+               list_del(&cur->ml_entry);
+               spin_lock(&mm->unused_lock);
+               if (mm->num_unused < MM_UNUSED_TARGET) {
+                       list_add(&cur->fl_entry, &mm->unused_nodes);
+                       ++mm->num_unused;
+               } else
+                       kfree(cur);
+               spin_unlock(&mm->unused_lock);
+       }
+}
+
+EXPORT_SYMBOL(drm_mm_put_block);
+
+struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
+                                      unsigned long size,
+                                      unsigned alignment, int best_match)
+{
+       struct list_head *list;
+       const struct list_head *free_stack = &mm->fl_entry;
+       struct drm_mm_node *entry;
+       struct drm_mm_node *best;
+       unsigned long best_size;
+       unsigned wasted;
+
+       best = NULL;
+       best_size = ~0UL;
+
+       list_for_each(list, free_stack) {
+               entry = list_entry(list, struct drm_mm_node, fl_entry);
+               wasted = 0;
+
+               if (entry->size < size)
+                       continue;
+
+               if (alignment) {
+                       register unsigned tmp = entry->start % alignment;
+                       if (tmp)
+                               wasted += alignment - tmp;
+               }
+
+               if (entry->size >= size + wasted) {
+                       if (!best_match)
+                               return entry;
+                       if (entry->size < best_size) { /* best fit: smallest hole */
+                               best = entry;
+                               best_size = entry->size;
+                       }
+               }
+       }
+
+       return best;
+}
+EXPORT_SYMBOL(drm_mm_search_free);
+
+struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
+                                               unsigned long size,
+                                               unsigned alignment,
+                                               unsigned long start,
+                                               unsigned long end,
+                                               int best_match)
+{
+       struct list_head *list;
+       const struct list_head *free_stack = &mm->fl_entry;
+       struct drm_mm_node *entry;
+       struct drm_mm_node *best;
+       unsigned long best_size;
+       unsigned wasted;
+
+       best = NULL;
+       best_size = ~0UL;
+
+       list_for_each(list, free_stack) {
+               entry = list_entry(list, struct drm_mm_node, fl_entry);
+               wasted = 0;
+
+               if (entry->size < size)
+                       continue;
+
+               if (entry->start > end || (entry->start+entry->size) < start)
+                       continue;
+
+               if (entry->start < start)
+                       wasted += start - entry->start;
+
+               if (alignment) {
+                       register unsigned tmp = (entry->start + wasted) % alignment;
+                       if (tmp)
+                               wasted += alignment - tmp;
+               }
+
+               if (entry->size >= size + wasted) {
+                       if (!best_match)
+                               return entry;
+                       if (entry->size < best_size) { /* best fit: smallest hole */
+                               best = entry;
+                               best_size = entry->size;
+                       }
+               }
+       }
+
+       return best;
+}
+EXPORT_SYMBOL(drm_mm_search_free_in_range);
+
+int drm_mm_clean(struct drm_mm * mm)
+{
+       struct list_head *head = &mm->ml_entry;
+
+       return (head->next->next == head);
+}
+EXPORT_SYMBOL(drm_mm_clean);
+
+int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
+{
+       INIT_LIST_HEAD(&mm->ml_entry);
+       INIT_LIST_HEAD(&mm->fl_entry);
+       INIT_LIST_HEAD(&mm->unused_nodes);
+       mm->num_unused = 0;
+       spin_lock_init(&mm->unused_lock);
+
+       return drm_mm_create_tail_node(mm, start, size, 0);
+}
+EXPORT_SYMBOL(drm_mm_init);
+
+void drm_mm_takedown(struct drm_mm * mm)
+{
+       struct list_head *bnode = mm->fl_entry.next;
+       struct drm_mm_node *entry;
+       struct drm_mm_node *next;
+
+       entry = list_entry(bnode, struct drm_mm_node, fl_entry);
+
+       if (entry->ml_entry.next != &mm->ml_entry ||
+           entry->fl_entry.next != &mm->fl_entry) {
+               DRM_ERROR("Memory manager not clean. Delaying takedown\n");
+               return;
+       }
+
+       list_del(&entry->fl_entry);
+       list_del(&entry->ml_entry);
+       kfree(entry);
+
+       spin_lock(&mm->unused_lock);
+       list_for_each_entry_safe(entry, next, &mm->unused_nodes, fl_entry) {
+               list_del(&entry->fl_entry);
+               kfree(entry);
+               --mm->num_unused;
+       }
+       spin_unlock(&mm->unused_lock);
+
+       BUG_ON(mm->num_unused != 0);
+}
+EXPORT_SYMBOL(drm_mm_takedown);
+
+void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
+{
+       struct drm_mm_node *entry;
+       int total_used = 0, total_free = 0, total = 0;
+
+       list_for_each_entry(entry, &mm->ml_entry, ml_entry) {
+               printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8ld: %s\n",
+                       prefix, entry->start, entry->start + entry->size,
+                       entry->size, entry->free ? "free" : "used");
+               total += entry->size;
+               if (entry->free)
+                       total_free += entry->size;
+               else
+                       total_used += entry->size;
+       }
+       printk(KERN_DEBUG "%s total: %d, used %d free %d\n", prefix, total,
+               total_used, total_free);
+}
+EXPORT_SYMBOL(drm_mm_debug_table);
+
+#if defined(CONFIG_DEBUG_FS)
+int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
+{
+       struct drm_mm_node *entry;
+       int total_used = 0, total_free = 0, total = 0;
+
+       list_for_each_entry(entry, &mm->ml_entry, ml_entry) {
+               seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: %s\n", entry->start, entry->start + entry->size, entry->size, entry->free ? "free" : "used");
+               total += entry->size;
+               if (entry->free)
+                       total_free += entry->size;
+               else
+                       total_used += entry->size;
+       }
+       seq_printf(m, "total: %d, used %d free %d\n", total, total_used, total_free);
+       return 0;
+}
+EXPORT_SYMBOL(drm_mm_dump_table);
+#endif
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_modes.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_modes.c
new file mode 100644 (file)
index 0000000..6d81a02
--- /dev/null
@@ -0,0 +1,1022 @@
+/*
+ * The list_sort function is (presumably) licensed under the GPL (see the
+ * top level "COPYING" file for details).
+ *
+ * The remainder of this file is:
+ *
+ * Copyright Â© 1997-2003 by The XFree86 Project, Inc.
+ * Copyright Â© 2007 Dave Airlie
+ * Copyright Â© 2007-2008 Intel Corporation
+ *   Jesse Barnes <jesse.barnes@intel.com>
+ * Copyright 2005-2006 Luc Verhaegen
+ * Copyright (c) 2001, Andy Ritger  aritger@nvidia.com
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Except as contained in this notice, the name of the copyright holder(s)
+ * and author(s) shall not be used in advertising or otherwise to promote
+ * the sale, use or other dealings in this Software without prior written
+ * authorization from the copyright holder(s) and author(s).
+ */
+
+#include <linux/list.h>
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+
+/**
+ * drm_mode_debug_printmodeline - debug print a mode
+ * @dev: DRM device
+ * @mode: mode to print
+ *
+ * LOCKING:
+ * None.
+ *
+ * Describe @mode using DRM_DEBUG.
+ */
+void drm_mode_debug_printmodeline(struct drm_display_mode *mode)
+{
+       DRM_DEBUG_KMS("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d "
+                       "0x%x 0x%x\n",
+               mode->base.id, mode->name, mode->vrefresh, mode->clock,
+               mode->hdisplay, mode->hsync_start,
+               mode->hsync_end, mode->htotal,
+               mode->vdisplay, mode->vsync_start,
+               mode->vsync_end, mode->vtotal, mode->type, mode->flags);
+}
+EXPORT_SYMBOL(drm_mode_debug_printmodeline);
+
+/**
+ * drm_cvt_mode -create a modeline based on CVT algorithm
+ * @dev: DRM device
+ * @hdisplay: hdisplay size
+ * @vdisplay: vdisplay size
+ * @vrefresh  : vrefresh rate
+ * @reduced : Whether the GTF calculation is simplified
+ * @interlaced:Whether the interlace is supported
+ *
+ * LOCKING:
+ * none.
+ *
+ * return the modeline based on CVT algorithm
+ *
+ * This function is called to generate the modeline based on CVT algorithm
+ * according to the hdisplay, vdisplay, vrefresh.
+ * It is based from the VESA(TM) Coordinated Video Timing Generator by
+ * Graham Loveridge April 9, 2003 available at
+ * http://www.vesa.org/public/CVT/CVTd6r1.xls
+ *
+ * And it is copied from xf86CVTmode in xserver/hw/xfree86/modes/xf86cvt.c.
+ * What I have done is to translate it by using integer calculation.
+ */
+#define HV_FACTOR                      1000
+struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
+                                     int vdisplay, int vrefresh,
+                                     bool reduced, bool interlaced, bool margins)
+{
+       /* 1) top/bottom margin size (% of height) - default: 1.8, */
+#define        CVT_MARGIN_PERCENTAGE           18
+       /* 2) character cell horizontal granularity (pixels) - default 8 */
+#define        CVT_H_GRANULARITY               8
+       /* 3) Minimum vertical porch (lines) - default 3 */
+#define        CVT_MIN_V_PORCH                 3
+       /* 4) Minimum number of vertical back porch lines - default 6 */
+#define        CVT_MIN_V_BPORCH                6
+       /* Pixel Clock step (kHz) */
+#define CVT_CLOCK_STEP                 250
+       struct drm_display_mode *drm_mode;
+       unsigned int vfieldrate, hperiod;
+       int hdisplay_rnd, hmargin, vdisplay_rnd, vmargin, vsync;
+       int interlace;
+
+       /* allocate the drm_display_mode structure. If failure, we will
+        * return directly
+        */
+       drm_mode = drm_mode_create(dev);
+       if (!drm_mode)
+               return NULL;
+
+       /* the CVT default refresh rate is 60Hz */
+       if (!vrefresh)
+               vrefresh = 60;
+
+       /* the required field fresh rate */
+       if (interlaced)
+               vfieldrate = vrefresh * 2;
+       else
+               vfieldrate = vrefresh;
+
+       /* horizontal pixels */
+       hdisplay_rnd = hdisplay - (hdisplay % CVT_H_GRANULARITY);
+
+       /* determine the left&right borders */
+       hmargin = 0;
+       if (margins) {
+               hmargin = hdisplay_rnd * CVT_MARGIN_PERCENTAGE / 1000;
+               hmargin -= hmargin % CVT_H_GRANULARITY;
+       }
+       /* find the total active pixels */
+       drm_mode->hdisplay = hdisplay_rnd + 2 * hmargin;
+
+       /* find the number of lines per field */
+       if (interlaced)
+               vdisplay_rnd = vdisplay / 2;
+       else
+               vdisplay_rnd = vdisplay;
+
+       /* find the top & bottom borders */
+       vmargin = 0;
+       if (margins)
+               vmargin = vdisplay_rnd * CVT_MARGIN_PERCENTAGE / 1000;
+
+       drm_mode->vdisplay = vdisplay + 2 * vmargin;
+
+       /* Interlaced */
+       if (interlaced)
+               interlace = 1;
+       else
+               interlace = 0;
+
+       /* Determine VSync Width from aspect ratio */
+       if (!(vdisplay % 3) && ((vdisplay * 4 / 3) == hdisplay))
+               vsync = 4;
+       else if (!(vdisplay % 9) && ((vdisplay * 16 / 9) == hdisplay))
+               vsync = 5;
+       else if (!(vdisplay % 10) && ((vdisplay * 16 / 10) == hdisplay))
+               vsync = 6;
+       else if (!(vdisplay % 4) && ((vdisplay * 5 / 4) == hdisplay))
+               vsync = 7;
+       else if (!(vdisplay % 9) && ((vdisplay * 15 / 9) == hdisplay))
+               vsync = 7;
+       else /* custom */
+               vsync = 10;
+
+       if (!reduced) {
+               /* simplify the GTF calculation */
+               /* 4) Minimum time of vertical sync + back porch interval (µs)
+                * default 550.0
+                */
+               int tmp1, tmp2;
+#define CVT_MIN_VSYNC_BP       550
+               /* 3) Nominal HSync width (% of line period) - default 8 */
+#define CVT_HSYNC_PERCENTAGE   8
+               unsigned int hblank_percentage;
+               int vsyncandback_porch, vback_porch, hblank;
+
+               /* estimated the horizontal period */
+               tmp1 = HV_FACTOR * 1000000  -
+                               CVT_MIN_VSYNC_BP * HV_FACTOR * vfieldrate;
+               tmp2 = (vdisplay_rnd + 2 * vmargin + CVT_MIN_V_PORCH) * 2 +
+                               interlace;
+               hperiod = tmp1 * 2 / (tmp2 * vfieldrate);
+
+               tmp1 = CVT_MIN_VSYNC_BP * HV_FACTOR / hperiod + 1;
+               /* 9. Find number of lines in sync + backporch */
+               if (tmp1 < (vsync + CVT_MIN_V_PORCH))
+                       vsyncandback_porch = vsync + CVT_MIN_V_PORCH;
+               else
+                       vsyncandback_porch = tmp1;
+               /* 10. Find number of lines in back porch */
+               vback_porch = vsyncandback_porch - vsync;
+               drm_mode->vtotal = vdisplay_rnd + 2 * vmargin +
+                               vsyncandback_porch + CVT_MIN_V_PORCH;
+               /* 5) Definition of Horizontal blanking time limitation */
+               /* Gradient (%/kHz) - default 600 */
+#define CVT_M_FACTOR   600
+               /* Offset (%) - default 40 */
+#define CVT_C_FACTOR   40
+               /* Blanking time scaling factor - default 128 */
+#define CVT_K_FACTOR   128
+               /* Scaling factor weighting - default 20 */
+#define CVT_J_FACTOR   20
+#define CVT_M_PRIME    (CVT_M_FACTOR * CVT_K_FACTOR / 256)
+#define CVT_C_PRIME    ((CVT_C_FACTOR - CVT_J_FACTOR) * CVT_K_FACTOR / 256 + \
+                        CVT_J_FACTOR)
+               /* 12. Find ideal blanking duty cycle from formula */
+               hblank_percentage = CVT_C_PRIME * HV_FACTOR - CVT_M_PRIME *
+                                       hperiod / 1000;
+               /* 13. Blanking time */
+               if (hblank_percentage < 20 * HV_FACTOR)
+                       hblank_percentage = 20 * HV_FACTOR;
+               hblank = drm_mode->hdisplay * hblank_percentage /
+                        (100 * HV_FACTOR - hblank_percentage);
+               hblank -= hblank % (2 * CVT_H_GRANULARITY);
+               /* 14. find the total pixes per line */
+               drm_mode->htotal = drm_mode->hdisplay + hblank;
+               drm_mode->hsync_end = drm_mode->hdisplay + hblank / 2;
+               drm_mode->hsync_start = drm_mode->hsync_end -
+                       (drm_mode->htotal * CVT_HSYNC_PERCENTAGE) / 100;
+               drm_mode->hsync_start += CVT_H_GRANULARITY -
+                       drm_mode->hsync_start % CVT_H_GRANULARITY;
+               /* fill the Vsync values */
+               drm_mode->vsync_start = drm_mode->vdisplay + CVT_MIN_V_PORCH;
+               drm_mode->vsync_end = drm_mode->vsync_start + vsync;
+       } else {
+               /* Reduced blanking */
+               /* Minimum vertical blanking interval time (µs)- default 460 */
+#define CVT_RB_MIN_VBLANK      460
+               /* Fixed number of clocks for horizontal sync */
+#define CVT_RB_H_SYNC          32
+               /* Fixed number of clocks for horizontal blanking */
+#define CVT_RB_H_BLANK         160
+               /* Fixed number of lines for vertical front porch - default 3*/
+#define CVT_RB_VFPORCH         3
+               int vbilines;
+               int tmp1, tmp2;
+               /* 8. Estimate Horizontal period. */
+               tmp1 = HV_FACTOR * 1000000 -
+                       CVT_RB_MIN_VBLANK * HV_FACTOR * vfieldrate;
+               tmp2 = vdisplay_rnd + 2 * vmargin;
+               hperiod = tmp1 / (tmp2 * vfieldrate);
+               /* 9. Find number of lines in vertical blanking */
+               vbilines = CVT_RB_MIN_VBLANK * HV_FACTOR / hperiod + 1;
+               /* 10. Check if vertical blanking is sufficient */
+               if (vbilines < (CVT_RB_VFPORCH + vsync + CVT_MIN_V_BPORCH))
+                       vbilines = CVT_RB_VFPORCH + vsync + CVT_MIN_V_BPORCH;
+               /* 11. Find total number of lines in vertical field */
+               drm_mode->vtotal = vdisplay_rnd + 2 * vmargin + vbilines;
+               /* 12. Find total number of pixels in a line */
+               drm_mode->htotal = drm_mode->hdisplay + CVT_RB_H_BLANK;
+               /* Fill in HSync values */
+               drm_mode->hsync_end = drm_mode->hdisplay + CVT_RB_H_BLANK / 2;
+               drm_mode->hsync_start = drm_mode->hsync_end = CVT_RB_H_SYNC;
+       }
+       /* 15/13. Find pixel clock frequency (kHz for xf86) */
+       drm_mode->clock = drm_mode->htotal * HV_FACTOR * 1000 / hperiod;
+       drm_mode->clock -= drm_mode->clock % CVT_CLOCK_STEP;
+       /* 18/16. Find actual vertical frame frequency */
+       /* ignore - just set the mode flag for interlaced */
+       if (interlaced)
+               drm_mode->vtotal *= 2;
+       /* Fill the mode line name */
+       drm_mode_set_name(drm_mode);
+       if (reduced)
+               drm_mode->flags |= (DRM_MODE_FLAG_PHSYNC |
+                                       DRM_MODE_FLAG_NVSYNC);
+       else
+               drm_mode->flags |= (DRM_MODE_FLAG_PVSYNC |
+                                       DRM_MODE_FLAG_NHSYNC);
+       if (interlaced)
+               drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
+
+    return drm_mode;
+}
+EXPORT_SYMBOL(drm_cvt_mode);
+
/**
 * drm_gtf_mode - create a modeline based on the GTF algorithm
 * @dev: drm device
 * @hdisplay: hdisplay size
 * @vdisplay: vdisplay size
 * @vrefresh: vrefresh rate
 * @interlaced: whether the interlace is supported
 * @margins: whether the margin is supported
 *
 * LOCKING:
 * none.
 *
 * Returns the modeline based on the GTF algorithm.
 *
 * This function creates the modeline based on the VESA Generalized
 * Timing Formula, derived from:
 *	GTF Spreadsheet by Andy Morrish (1/5/97)
 *	available at http://www.vesa.org
 *
 * It is a translation of xserver/hw/xfree86/modes/xf86gtf.c into
 * integer-only arithmetic; see also fb_get_mode() in
 * drivers/video/fbmon.c for a related implementation.
 */
struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, int hdisplay,
				      int vdisplay, int vrefresh,
				      bool interlaced, int margins)
{
	/* 1) top/bottom margin size (% of height) - default: 1.8 (stored x10) */
#define	GTF_MARGIN_PERCENTAGE		18
	/* 2) character cell horizontal granularity (pixels) - default 8 */
#define	GTF_CELL_GRAN			8
	/* 3) Minimum vertical porch (lines) - default 3 */
#define	GTF_MIN_V_PORCH			1
	/* width of vsync in lines */
#define V_SYNC_RQD			3
	/* width of hsync as % of total line */
#define H_SYNC_PERCENT			8
	/* min time of vsync + back porch (microsec) */
#define MIN_VSYNC_PLUS_BP		550
	/* blanking formula gradient */
#define GTF_M				600
	/* blanking formula offset */
#define GTF_C				40
	/* blanking formula scaling factor */
#define GTF_K				128
	/* blanking formula scaling factor weighting */
#define GTF_J				20
	/* C' and M' are part of the Blanking Duty Cycle computation */
#define GTF_C_PRIME		(((GTF_C - GTF_J) * GTF_K / 256) + GTF_J)
#define GTF_M_PRIME		(GTF_K * GTF_M / 256)
	struct drm_display_mode *drm_mode;
	unsigned int hdisplay_rnd, vdisplay_rnd, vfieldrate_rqd;
	int top_margin, bottom_margin;
	int interlace;
	unsigned int hfreq_est;
	int vsync_plus_bp, vback_porch;
	unsigned int vtotal_lines, vfieldrate_est, hperiod;
	unsigned int vfield_rate, vframe_rate;
	int left_margin, right_margin;
	unsigned int total_active_pixels, ideal_duty_cycle;
	unsigned int hblank, total_pixels, pixel_freq;
	int hsync, hfront_porch, vodd_front_porch_lines;
	unsigned int tmp1, tmp2;

	drm_mode = drm_mode_create(dev);
	if (!drm_mode)
		return NULL;

	/* 1. In order to give correct results, the number of horizontal
	 * pixels requested is first processed to ensure that it is divisible
	 * by the character size, by rounding it to the nearest character
	 * cell boundary:
	 */
	hdisplay_rnd = (hdisplay + GTF_CELL_GRAN / 2) / GTF_CELL_GRAN;
	hdisplay_rnd = hdisplay_rnd * GTF_CELL_GRAN;

	/* 2. If interlace is requested, the number of vertical lines assumed
	 * by the calculation must be halved, as the computation calculates
	 * the number of vertical lines per field.
	 */
	if (interlaced)
		vdisplay_rnd = vdisplay / 2;
	else
		vdisplay_rnd = vdisplay;

	/* 3. Find the frame rate required (fields per second): */
	if (interlaced)
		vfieldrate_rqd = vrefresh * 2;
	else
		vfieldrate_rqd = vrefresh;

	/* 4. Find number of lines in Top margin: */
	top_margin = 0;
	if (margins)
		top_margin = (vdisplay_rnd * GTF_MARGIN_PERCENTAGE + 500) /
				1000;
	/* 5. Find number of lines in bottom margin: */
	bottom_margin = top_margin;

	/* 6. If interlace is required, then set variable interlace: */
	if (interlaced)
		interlace = 1;
	else
		interlace = 0;

	/* 7. Estimate the Horizontal frequency */
	{
		tmp1 = (1000000  - MIN_VSYNC_PLUS_BP * vfieldrate_rqd) / 500;
		tmp2 = (vdisplay_rnd + 2 * top_margin + GTF_MIN_V_PORCH) *
				2 + interlace;
		hfreq_est = (tmp2 * 1000 * vfieldrate_rqd) / tmp1;
	}

	/* 8. Find the number of lines in V sync + back porch */
	/* [V SYNC+BP] = RINT(([MIN VSYNC+BP] * hfreq_est / 1000000)) */
	vsync_plus_bp = MIN_VSYNC_PLUS_BP * hfreq_est / 1000;
	vsync_plus_bp = (vsync_plus_bp + 500) / 1000;
	/*  9. Find the number of lines in V back porch alone: */
	vback_porch = vsync_plus_bp - V_SYNC_RQD;
	/*  10. Find the total number of lines in Vertical field period: */
	vtotal_lines = vdisplay_rnd + top_margin + bottom_margin +
			vsync_plus_bp + GTF_MIN_V_PORCH;
	/*  11. Estimate the Vertical field frequency: */
	vfieldrate_est = hfreq_est / vtotal_lines;
	/*  12. Find the actual horizontal period (microseconds): */
	hperiod = 1000000 / (vfieldrate_rqd * vtotal_lines);

	/*  13. Find the actual Vertical field frequency: */
	vfield_rate = hfreq_est / vtotal_lines;
	/*  14. Find the Vertical frame frequency: */
	if (interlaced)
		vframe_rate = vfield_rate / 2;
	else
		vframe_rate = vfield_rate;
	/*  15. Find number of pixels in left margin: */
	if (margins)
		left_margin = (hdisplay_rnd * GTF_MARGIN_PERCENTAGE + 500) /
				1000;
	else
		left_margin = 0;

	/* 16.Find number of pixels in right margin: */
	right_margin = left_margin;
	/* 17.Find total number of active pixels in image and left and right
	 * margins: */
	total_active_pixels = hdisplay_rnd + left_margin + right_margin;
	/* 18.Find the ideal blanking duty cycle from blanking duty cycle
	 * equation (scaled x1000): */
	ideal_duty_cycle = GTF_C_PRIME * 1000 -
				(GTF_M_PRIME * 1000000 / hfreq_est);
	/* 19.Find the number of pixels in the blanking time to the nearest
	 * double character cell: */
	hblank = total_active_pixels * ideal_duty_cycle /
			(100000 - ideal_duty_cycle);
	hblank = (hblank + GTF_CELL_GRAN) / (2 * GTF_CELL_GRAN);
	hblank = hblank * 2 * GTF_CELL_GRAN;
	/* 20.Find total number of pixels: */
	total_pixels = total_active_pixels + hblank;
	/* 21.Find pixel clock frequency (kHz): */
	pixel_freq = total_pixels * hfreq_est / 1000;
	/* Stage 1 computations are now complete; I should really pass
	 * the results to another function and do the Stage 2 computations,
	 * but I only need a few more values so I'll just append the
	 * computations here for now */
	/* 17. Find the number of pixels in the horizontal sync period,
	 * rounded to the nearest character cell: */
	hsync = H_SYNC_PERCENT * total_pixels / 100;
	hsync = (hsync + GTF_CELL_GRAN / 2) / GTF_CELL_GRAN;
	hsync = hsync * GTF_CELL_GRAN;
	/* 18. Find the number of pixels in horizontal front porch period */
	hfront_porch = hblank / 2 - hsync;
	/*  36. Find the number of lines in the odd front porch period: */
	vodd_front_porch_lines = GTF_MIN_V_PORCH;

	/* finally, pack the results in the mode struct */
	drm_mode->hdisplay = hdisplay_rnd;
	drm_mode->hsync_start = hdisplay_rnd + hfront_porch;
	drm_mode->hsync_end = drm_mode->hsync_start + hsync;
	drm_mode->htotal = total_pixels;
	drm_mode->vdisplay = vdisplay_rnd;
	drm_mode->vsync_start = vdisplay_rnd + vodd_front_porch_lines;
	drm_mode->vsync_end = drm_mode->vsync_start + V_SYNC_RQD;
	drm_mode->vtotal = vtotal_lines;

	drm_mode->clock = pixel_freq;

	drm_mode_set_name(drm_mode);
	/* GTF timings are negative hsync, positive vsync */
	drm_mode->flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC;

	/* vtotal_lines was computed per field; restore the per-frame count */
	if (interlaced) {
		drm_mode->vtotal *= 2;
		drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
	}

	return drm_mode;
}
EXPORT_SYMBOL(drm_gtf_mode);
+/**
+ * drm_mode_set_name - set the name on a mode
+ * @mode: name will be set in this mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Set the name of @mode to a standard format.
+ */
+void drm_mode_set_name(struct drm_display_mode *mode)
+{
+       snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d", mode->hdisplay,
+                mode->vdisplay);
+}
+EXPORT_SYMBOL(drm_mode_set_name);
+
+/**
+ * drm_mode_list_concat - move modes from one list to another
+ * @head: source list
+ * @new: dst list
+ *
+ * LOCKING:
+ * Caller must ensure both lists are locked.
+ *
+ * Move all the modes from @head to @new.
+ */
+void drm_mode_list_concat(struct list_head *head, struct list_head *new)
+{
+
+       struct list_head *entry, *tmp;
+
+       list_for_each_safe(entry, tmp, head) {
+               list_move_tail(entry, new);
+       }
+}
+EXPORT_SYMBOL(drm_mode_list_concat);
+
+/**
+ * drm_mode_width - get the width of a mode
+ * @mode: mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Return @mode's width (hdisplay) value.
+ *
+ * FIXME: is this needed?
+ *
+ * RETURNS:
+ * @mode->hdisplay
+ */
+int drm_mode_width(struct drm_display_mode *mode)
+{
+       return mode->hdisplay;
+
+}
+EXPORT_SYMBOL(drm_mode_width);
+
+/**
+ * drm_mode_height - get the height of a mode
+ * @mode: mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Return @mode's height (vdisplay) value.
+ *
+ * FIXME: is this needed?
+ *
+ * RETURNS:
+ * @mode->vdisplay
+ */
+int drm_mode_height(struct drm_display_mode *mode)
+{
+       return mode->vdisplay;
+}
+EXPORT_SYMBOL(drm_mode_height);
+
+/** drm_mode_hsync - get the hsync of a mode
+ * @mode: mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Return @modes's hsync rate in kHz, rounded to the nearest int.
+ */
+int drm_mode_hsync(struct drm_display_mode *mode)
+{
+       unsigned int calc_val;
+
+       if (mode->hsync)
+               return mode->hsync;
+
+       if (mode->htotal < 0)
+               return 0;
+
+       calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */
+       calc_val += 500;                                /* round to 1000Hz */
+       calc_val /= 1000;                               /* truncate to kHz */
+
+       return calc_val;
+}
+EXPORT_SYMBOL(drm_mode_hsync);
+
+/**
+ * drm_mode_vrefresh - get the vrefresh of a mode
+ * @mode: mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Return @mode's vrefresh rate in Hz or calculate it if necessary.
+ *
+ * FIXME: why is this needed?  shouldn't vrefresh be set already?
+ *
+ * RETURNS:
+ * Vertical refresh rate. It will be the result of actual value plus 0.5.
+ * If it is 70.288, it will return 70Hz.
+ * If it is 59.6, it will return 60Hz.
+ */
+int drm_mode_vrefresh(struct drm_display_mode *mode)
+{
+       int refresh = 0;
+       unsigned int calc_val;
+
+       if (mode->vrefresh > 0)
+               refresh = mode->vrefresh;
+       else if (mode->htotal > 0 && mode->vtotal > 0) {
+               int vtotal;
+               vtotal = mode->vtotal;
+               /* work out vrefresh the value will be x1000 */
+               calc_val = (mode->clock * 1000);
+               calc_val /= mode->htotal;
+               refresh = (calc_val + vtotal / 2) / vtotal;
+
+               if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+                       refresh *= 2;
+               if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+                       refresh /= 2;
+               if (mode->vscan > 1)
+                       refresh /= mode->vscan;
+       }
+       return refresh;
+}
+EXPORT_SYMBOL(drm_mode_vrefresh);
+
/**
 * drm_mode_set_crtcinfo - set CRTC modesetting parameters
 * @p: mode
 * @adjust_flags: CRTC_* adjustment flags (e.g. CRTC_INTERLACE_HALVE_V)
 *
 * LOCKING:
 * None.
 *
 * Copy the user-visible timings of @p into the crtc_* fields, then
 * adjust them for interlace, doublescan and vscan so the CRTC can be
 * programmed directly from them.
 */
void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
{
	/* NOTE(review): this masks with CRTC_C but compares against
	 * BUILTIN, i.e. it skips modes whose type has BUILTIN set but
	 * neither of the other CRTC_C bits - matches upstream drm_modes.c,
	 * but worth confirming the intent. */
	if ((p == NULL) || ((p->type & DRM_MODE_TYPE_CRTC_C) == DRM_MODE_TYPE_BUILTIN))
		return;

	/* Start from the unadjusted timings. */
	p->crtc_hdisplay = p->hdisplay;
	p->crtc_hsync_start = p->hsync_start;
	p->crtc_hsync_end = p->hsync_end;
	p->crtc_htotal = p->htotal;
	p->crtc_hskew = p->hskew;
	p->crtc_vdisplay = p->vdisplay;
	p->crtc_vsync_start = p->vsync_start;
	p->crtc_vsync_end = p->vsync_end;
	p->crtc_vtotal = p->vtotal;

	if (p->flags & DRM_MODE_FLAG_INTERLACE) {
		/* Per-field timings: halve the vertical values on request. */
		if (adjust_flags & CRTC_INTERLACE_HALVE_V) {
			p->crtc_vdisplay /= 2;
			p->crtc_vsync_start /= 2;
			p->crtc_vsync_end /= 2;
			p->crtc_vtotal /= 2;
		}

		/* Interlaced modes need an odd vtotal. */
		p->crtc_vtotal |= 1;
	}

	/* Doublescan: every line is scanned twice. */
	if (p->flags & DRM_MODE_FLAG_DBLSCAN) {
		p->crtc_vdisplay *= 2;
		p->crtc_vsync_start *= 2;
		p->crtc_vsync_end *= 2;
		p->crtc_vtotal *= 2;
	}

	/* vscan: each line repeated vscan times. */
	if (p->vscan > 1) {
		p->crtc_vdisplay *= p->vscan;
		p->crtc_vsync_start *= p->vscan;
		p->crtc_vsync_end *= p->vscan;
		p->crtc_vtotal *= p->vscan;
	}

	/* Derive the blanking intervals from the adjusted timings. */
	p->crtc_vblank_start = min(p->crtc_vsync_start, p->crtc_vdisplay);
	p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal);
	p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay);
	p->crtc_hblank_end = max(p->crtc_hsync_end, p->crtc_htotal);

	p->crtc_hadjusted = false;
	p->crtc_vadjusted = false;
}
EXPORT_SYMBOL(drm_mode_set_crtcinfo);
+
+
+/**
+ * drm_mode_duplicate - allocate and duplicate an existing mode
+ * @m: mode to duplicate
+ *
+ * LOCKING:
+ * None.
+ *
+ * Just allocate a new mode, copy the existing mode into it, and return
+ * a pointer to it.  Used to create new instances of established modes.
+ */
+struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
+                                           struct drm_display_mode *mode)
+{
+       struct drm_display_mode *nmode;
+       int new_id;
+
+       nmode = drm_mode_create(dev);
+       if (!nmode)
+               return NULL;
+
+       new_id = nmode->base.id;
+       *nmode = *mode;
+       nmode->base.id = new_id;
+       INIT_LIST_HEAD(&nmode->head);
+       return nmode;
+}
+EXPORT_SYMBOL(drm_mode_duplicate);
+
+/**
+ * drm_mode_equal - test modes for equality
+ * @mode1: first mode
+ * @mode2: second mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Check to see if @mode1 and @mode2 are equivalent.
+ *
+ * RETURNS:
+ * True if the modes are equal, false otherwise.
+ */
+bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2)
+{
+       /* do clock check convert to PICOS so fb modes get matched
+        * the same */
+       if (mode1->clock && mode2->clock) {
+               if (KHZ2PICOS(mode1->clock) != KHZ2PICOS(mode2->clock))
+                       return false;
+       } else if (mode1->clock != mode2->clock)
+               return false;
+
+       if (mode1->hdisplay == mode2->hdisplay &&
+           mode1->hsync_start == mode2->hsync_start &&
+           mode1->hsync_end == mode2->hsync_end &&
+           mode1->htotal == mode2->htotal &&
+           mode1->hskew == mode2->hskew &&
+           mode1->vdisplay == mode2->vdisplay &&
+           mode1->vsync_start == mode2->vsync_start &&
+           mode1->vsync_end == mode2->vsync_end &&
+           mode1->vtotal == mode2->vtotal &&
+           mode1->vscan == mode2->vscan &&
+           mode1->flags == mode2->flags)
+               return true;
+
+       return false;
+}
+EXPORT_SYMBOL(drm_mode_equal);
+
+/**
+ * drm_mode_validate_size - make sure modes adhere to size constraints
+ * @dev: DRM device
+ * @mode_list: list of modes to check
+ * @maxX: maximum width
+ * @maxY: maximum height
+ * @maxPitch: max pitch
+ *
+ * LOCKING:
+ * Caller must hold a lock protecting @mode_list.
+ *
+ * The DRM device (@dev) has size and pitch limits.  Here we validate the
+ * modes we probed for @dev against those limits and set their status as
+ * necessary.
+ */
+void drm_mode_validate_size(struct drm_device *dev,
+                           struct list_head *mode_list,
+                           int maxX, int maxY, int maxPitch)
+{
+       struct drm_display_mode *mode;
+
+       list_for_each_entry(mode, mode_list, head) {
+               if (maxPitch > 0 && mode->hdisplay > maxPitch)
+                       mode->status = MODE_BAD_WIDTH;
+
+               if (maxX > 0 && mode->hdisplay > maxX)
+                       mode->status = MODE_VIRTUAL_X;
+
+               if (maxY > 0 && mode->vdisplay > maxY)
+                       mode->status = MODE_VIRTUAL_Y;
+       }
+}
+EXPORT_SYMBOL(drm_mode_validate_size);
+
+/**
+ * drm_mode_validate_clocks - validate modes against clock limits
+ * @dev: DRM device
+ * @mode_list: list of modes to check
+ * @min: minimum clock rate array
+ * @max: maximum clock rate array
+ * @n_ranges: number of clock ranges (size of arrays)
+ *
+ * LOCKING:
+ * Caller must hold a lock protecting @mode_list.
+ *
+ * Some code may need to check a mode list against the clock limits of the
+ * device in question.  This function walks the mode list, testing to make
+ * sure each mode falls within a given range (defined by @min and @max
+ * arrays) and sets @mode->status as needed.
+ */
+void drm_mode_validate_clocks(struct drm_device *dev,
+                             struct list_head *mode_list,
+                             int *min, int *max, int n_ranges)
+{
+       struct drm_display_mode *mode;
+       int i;
+
+       list_for_each_entry(mode, mode_list, head) {
+               bool good = false;
+               for (i = 0; i < n_ranges; i++) {
+                       if (mode->clock >= min[i] && mode->clock <= max[i]) {
+                               good = true;
+                               break;
+                       }
+               }
+               if (!good)
+                       mode->status = MODE_CLOCK_RANGE;
+       }
+}
+EXPORT_SYMBOL(drm_mode_validate_clocks);
+
+/**
+ * drm_mode_prune_invalid - remove invalid modes from mode list
+ * @dev: DRM device
+ * @mode_list: list of modes to check
+ * @verbose: be verbose about it
+ *
+ * LOCKING:
+ * Caller must hold a lock protecting @mode_list.
+ *
+ * Once mode list generation is complete, a caller can use this routine to
+ * remove invalid modes from a mode list.  If any of the modes have a
+ * status other than %MODE_OK, they are removed from @mode_list and freed.
+ */
+void drm_mode_prune_invalid(struct drm_device *dev,
+                           struct list_head *mode_list, bool verbose)
+{
+       struct drm_display_mode *mode, *t;
+
+       list_for_each_entry_safe(mode, t, mode_list, head) {
+               if (mode->status != MODE_OK) {
+                       list_del(&mode->head);
+                       if (verbose) {
+                               drm_mode_debug_printmodeline(mode);
+                               DRM_DEBUG_KMS("Not using %s mode %d\n",
+                                       mode->name, mode->status);
+                       }
+                       drm_mode_destroy(dev, mode);
+               }
+       }
+}
+EXPORT_SYMBOL(drm_mode_prune_invalid);
+
+/**
+ * drm_mode_compare - compare modes for favorability
+ * @lh_a: list_head for first mode
+ * @lh_b: list_head for second mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Compare two modes, given by @lh_a and @lh_b, returning a value indicating
+ * which is better.
+ *
+ * RETURNS:
+ * Negative if @lh_a is better than @lh_b, zero if they're equivalent, or
+ * positive if @lh_b is better than @lh_a.
+ */
+static int drm_mode_compare(struct list_head *lh_a, struct list_head *lh_b)
+{
+       struct drm_display_mode *a = list_entry(lh_a, struct drm_display_mode, head);
+       struct drm_display_mode *b = list_entry(lh_b, struct drm_display_mode, head);
+       int diff;
+
+       diff = ((b->type & DRM_MODE_TYPE_PREFERRED) != 0) -
+               ((a->type & DRM_MODE_TYPE_PREFERRED) != 0);
+       if (diff)
+               return diff;
+       diff = b->hdisplay * b->vdisplay - a->hdisplay * a->vdisplay;
+       if (diff)
+               return diff;
+       diff = b->clock - a->clock;
+       return diff;
+}
+
/* FIXME: what we don't have a list sort function? */
/* list sort from Mark J Roberts (mjr@znex.org) */
/*
 * Bottom-up, in-place merge sort of a circular doubly-linked list.
 * The head node is temporarily removed, runs of length 'insize' are
 * merged pairwise per pass (insize doubling each pass), and the head
 * is spliced back in front of the sorted chain at the end. The sort
 * is stable: cmp(p, q) <= 0 keeps p first.
 */
void list_sort(struct list_head *head,
	       int (*cmp)(struct list_head *a, struct list_head *b))
{
	struct list_head *p, *q, *e, *list, *tail, *oldhead;
	int insize, nmerges, psize, qsize, i;

	/* Detach the head; 'list' is the first real element and the
	 * chain stays circular, detected below via 'oldhead'. */
	list = head->next;
	list_del(head);
	insize = 1;
	for (;;) {
		/* One pass: merge adjacent runs of length 'insize'. */
		p = oldhead = list;
		list = tail = NULL;
		nmerges = 0;

		while (p) {
			nmerges++;
			/* Step q forward 'insize' nodes to the second run;
			 * NULL marks wrap-around past the old start. */
			q = p;
			psize = 0;
			for (i = 0; i < insize; i++) {
				psize++;
				q = q->next == oldhead ? NULL : q->next;
				if (!q)
					break;
			}

			/* Merge the run at p (psize nodes) with the run
			 * at q (up to insize nodes). */
			qsize = insize;
			while (psize > 0 || (qsize > 0 && q)) {
				if (!psize) {
					/* p exhausted: take from q. */
					e = q;
					q = q->next;
					qsize--;
					if (q == oldhead)
						q = NULL;
				} else if (!qsize || !q) {
					/* q exhausted: take from p. */
					e = p;
					p = p->next;
					psize--;
					if (p == oldhead)
						p = NULL;
				} else if (cmp(p, q) <= 0) {
					/* p first on ties: stable sort. */
					e = p;
					p = p->next;
					psize--;
					if (p == oldhead)
						p = NULL;
				} else {
					e = q;
					q = q->next;
					qsize--;
					if (q == oldhead)
						q = NULL;
				}
				/* Append e to the output chain. */
				if (tail)
					tail->next = e;
				else
					list = e;
				e->prev = tail;
				tail = e;
			}
			/* q now points at the start of the next pair. */
			p = q;
		}

		/* Re-close the circle for the next pass. */
		tail->next = list;
		list->prev = tail;

		/* A single merge means the list is fully sorted. */
		if (nmerges <= 1)
			break;

		insize *= 2;
	}

	/* Splice the head back in front of the sorted chain. */
	head->next = list;
	head->prev = list->prev;
	list->prev->next = head;
	list->prev = head;
}
+
+/**
+ * drm_mode_sort - sort mode list
+ * @mode_list: list to sort
+ *
+ * LOCKING:
+ * Caller must hold a lock protecting @mode_list.
+ *
+ * Sort @mode_list by favorability, putting good modes first.
+ */
+void drm_mode_sort(struct list_head *mode_list)
+{
+       list_sort(mode_list, drm_mode_compare);
+}
+EXPORT_SYMBOL(drm_mode_sort);
+
+/**
+ * drm_mode_connector_list_update - update the mode list for the connector
+ * @connector: the connector to update
+ *
+ * LOCKING:
+ * Caller must hold a lock protecting @mode_list.
+ *
+ * This moves the modes from the @connector probed_modes list
+ * to the actual mode list. It compares the probed mode against the current
+ * list and only adds different modes. All modes unverified after this point
+ * will be removed by the prune invalid modes.
+ */
+void drm_mode_connector_list_update(struct drm_connector *connector)
+{
+       struct drm_display_mode *mode;
+       struct drm_display_mode *pmode, *pt;
+       int found_it;
+
+       list_for_each_entry_safe(pmode, pt, &connector->probed_modes,
+                                head) {
+               found_it = 0;
+               /* go through current modes checking for the new probed mode */
+               list_for_each_entry(mode, &connector->modes, head) {
+                       if (drm_mode_equal(pmode, mode)) {
+                               found_it = 1;
+                               /* if equal delete the probed mode */
+                               mode->status = pmode->status;
+                               /* Merge type bits together */
+                               mode->type |= pmode->type;
+                               list_del(&pmode->head);
+                               drm_mode_destroy(connector->dev, pmode);
+                               break;
+                       }
+               }
+
+               if (!found_it) {
+                       list_move_tail(&pmode->head, &connector->modes);
+               }
+       }
+}
+EXPORT_SYMBOL(drm_mode_connector_list_update);
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_pci.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_pci.c
new file mode 100644 (file)
index 0000000..577094f
--- /dev/null
@@ -0,0 +1,132 @@
+/* drm_pci.h -- PCI DMA memory management wrappers for DRM -*- linux-c -*- */
+/**
+ * \file drm_pci.c
+ * \brief Functions and ioctls to manage PCI memory
+ *
+ * \warning These interfaces aren't stable yet.
+ *
+ * \todo Implement the remaining ioctl's for the PCI pools.
+ * \todo The wrappers here are so thin that they would be better off inlined..
+ *
+ * \author José Fonseca <jrfonseca@tungstengraphics.com>
+ * \author Leif Delgass <ldelgass@retinalburn.net>
+ */
+
+/*
+ * Copyright 2003 José Fonseca.
+ * Copyright 2003 Leif Delgass.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
+ * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include "drmP.h"
+
+/**********************************************************************/
+/** \name PCI memory */
+/*@{*/
+
+/**
+ * \brief Allocate a PCI consistent memory block, for DMA.
+ */
+drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align,
+                               dma_addr_t maxaddr)
+{
+       drm_dma_handle_t *dmah;
+#if 1
+       unsigned long addr;
+       size_t sz;
+#endif
+
+       /* pci_alloc_consistent only guarantees alignment to the smallest
+        * PAGE_SIZE order which is greater than or equal to the requested size.
+        * Return NULL here for now to make sure nobody tries for larger alignment
+        */
+       if (align > size)
+               return NULL;
+
+       if (pci_set_dma_mask(dev->pdev, maxaddr) != 0) {
+               DRM_ERROR("Setting pci dma mask failed\n");
+               return NULL;
+       }
+
+       dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
+       if (!dmah)
+               return NULL;
+
+       dmah->size = size;
+       /* __GFP_COMP: get a compound page so the block can be mmap'd as one unit */
+       dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, &dmah->busaddr, GFP_KERNEL | __GFP_COMP);
+
+       if (dmah->vaddr == NULL) {
+               kfree(dmah);
+               return NULL;
+       }
+
+       memset(dmah->vaddr, 0, size);
+
+       /* XXX - Is virt_to_page() legal for consistent mem? */
+       /* Reserve */
+       /* NOTE(review): if size is not a multiple of PAGE_SIZE, 'sz -= PAGE_SIZE'
+        * wraps the unsigned size_t and the loop runs past the allocation —
+        * confirm callers always pass page-aligned sizes. */
+       for (addr = (unsigned long)dmah->vaddr, sz = size;
+            sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
+               SetPageReserved(virt_to_page(addr));
+       }
+
+       return dmah;
+}
+
+EXPORT_SYMBOL(drm_pci_alloc);
+
+/**
+ * \brief Free a PCI consistent memory block without freeing its descriptor.
+ *
+ * This function is for internal use in the Linux-specific DRM core code.
+ */
+void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
+{
+#if 1
+       unsigned long addr;
+       size_t sz;
+#endif
+
+       /* a NULL vaddr means the handle was never (fully) allocated */
+       if (dmah->vaddr) {
+               /* XXX - Is virt_to_page() legal for consistent mem? */
+               /* Unreserve */
+               /* mirror of the SetPageReserved() loop in drm_pci_alloc() */
+               for (addr = (unsigned long)dmah->vaddr, sz = dmah->size;
+                    sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
+                       ClearPageReserved(virt_to_page(addr));
+               }
+               dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
+                                 dmah->busaddr);
+       }
+}
+
+/**
+ * \brief Free a PCI consistent memory block
+ */
+void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
+{
+       /* free the DMA memory, then the descriptor itself */
+       __drm_pci_free(dev, dmah);
+       kfree(dmah);
+}
+
+EXPORT_SYMBOL(drm_pci_free);
+
+/*@}*/
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_proc.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_proc.c
new file mode 100644 (file)
index 0000000..d379c4f
--- /dev/null
@@ -0,0 +1,234 @@
+/**
+ * \file drm_proc.c
+ * /proc support for DRM
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ *
+ * \par Acknowledgements:
+ *    Matthew J Sottek <matthew.j.sottek@intel.com> sent in a patch to fix
+ *    the problem with the proc files not outputting all their information.
+ */
+
+/*
+ * Created: Mon Jan 11 09:48:47 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/seq_file.h>
+#include "drmP.h"
+
+/***************************************************
+ * Initialization, etc.
+ **************************************************/
+
+/**
+ * Proc file list.
+ */
+static struct drm_info_list drm_proc_list[] = {
+       {"name", drm_name_info, 0},
+       {"vm", drm_vm_info, 0},
+       {"clients", drm_clients_info, 0},
+       {"queues", drm_queues_info, 0},
+       {"bufs", drm_bufs_info, 0},
+       /* GEM entries are created only for drivers advertising DRIVER_GEM */
+       {"gem_names", drm_gem_name_info, DRIVER_GEM},
+       {"gem_objects", drm_gem_object_info, DRIVER_GEM},
+#if DRM_DEBUG_CODE
+       {"vma", drm_vma_info, 0},
+#endif
+};
+#define DRM_PROC_ENTRIES ARRAY_SIZE(drm_proc_list)
+
+/* open handler shared by all entries: the drm_info_node stashed in the proc
+ * entry's data supplies the per-entry show callback for seq_file */
+static int drm_proc_open(struct inode *inode, struct file *file)
+{
+       struct drm_info_node* node = PDE(inode)->data;
+
+       return single_open(file, node->info_ent->show, node);
+}
+
+static const struct file_operations drm_proc_fops = {
+       .owner = THIS_MODULE,
+       .open = drm_proc_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
+
+
+/**
+ * Initialize a given set of proc files for a device
+ *
+ * \param files The array of files to create
+ * \param count The number of files given
+ * \param root DRI proc dir entry.
+ * \param minor device minor number
+ * \return Zero on success, non-zero on failure
+ *
+ * Create a given set of proc files represented by an array of
+ * drm_info_list entries in the given root directory.
+ */
+int drm_proc_create_files(struct drm_info_list *files, int count,
+                         struct proc_dir_entry *root, struct drm_minor *minor)
+{
+       struct drm_device *dev = minor->dev;
+       struct proc_dir_entry *ent;
+       struct drm_info_node *tmp;
+       char name[64];
+       int i, ret;
+
+       for (i = 0; i < count; i++) {
+               u32 features = files[i].driver_features;
+
+               /* skip entries whose required driver features are not present */
+               if (features != 0 &&
+                   (dev->driver->driver_features & features) != features)
+                       continue;
+
+               tmp = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
+               if (tmp == NULL) {
+                       ret = -1;
+                       goto fail;
+               }
+               tmp->minor = minor;
+               tmp->info_ent = &files[i];
+               list_add(&tmp->list, &minor->proc_nodes.list);
+
+               ent = proc_create_data(files[i].name, S_IRUGO, root,
+                                      &drm_proc_fops, tmp);
+               if (!ent) {
+                       /* BUG(review): 'name' is read here without ever being
+                        * written — the message prints stack garbage. */
+                       DRM_ERROR("Cannot create /proc/dri/%s/%s\n",
+                                 name, files[i].name);
+                       list_del(&tmp->list);
+                       kfree(tmp);
+                       ret = -1;
+                       goto fail;
+               }
+
+       }
+       return 0;
+
+fail:
+       /* NOTE(review): cleanup walks the global drm_proc_list rather than the
+        * 'files' argument, and removes entries that were never created —
+        * harmless only when called with drm_proc_list itself; verify other
+        * callers (e.g. driver proc_init paths). */
+       for (i = 0; i < count; i++)
+               remove_proc_entry(drm_proc_list[i].name, minor->proc_root);
+       return ret;
+}
+
+/**
+ * Initialize the DRI proc filesystem for a device
+ *
+ * \param dev DRM device
+ * \param minor device minor number
+ * \param root DRI proc dir entry.
+ * \param dev_root resulting DRI device proc dir entry.
+ * \return zero on success, or a negative value on failure.
+ *
+ * Create the DRI proc root entry "/proc/dri", the device proc root entry
+ * "/proc/dri/%minor%/", and each entry in proc_list as
+ * "/proc/dri/%minor%/%name%".
+ */
+int drm_proc_init(struct drm_minor *minor, int minor_id,
+                 struct proc_dir_entry *root)
+{
+       struct drm_device *dev = minor->dev;
+       char name[64];
+       int ret;
+
+       /* create /proc/dri/<minor_id>/ and populate it with the core entries */
+       INIT_LIST_HEAD(&minor->proc_nodes.list);
+       sprintf(name, "%d", minor_id);
+       minor->proc_root = proc_mkdir(name, root);
+       if (!minor->proc_root) {
+               DRM_ERROR("Cannot create /proc/dri/%s\n", name);
+               return -1;
+       }
+
+       ret = drm_proc_create_files(drm_proc_list, DRM_PROC_ENTRIES,
+                                   minor->proc_root, minor);
+       if (ret) {
+               remove_proc_entry(name, root);
+               minor->proc_root = NULL;
+               DRM_ERROR("Failed to create core drm proc files\n");
+               return ret;
+       }
+
+       /* give the driver a chance to add its own entries under the minor dir;
+        * NOTE(review): on driver proc_init failure the core entries created
+        * above are not removed here — confirm callers clean up via
+        * drm_proc_cleanup(). */
+       if (dev->driver->proc_init) {
+               ret = dev->driver->proc_init(minor);
+               if (ret) {
+                       DRM_ERROR("DRM: Driver failed to initialize "
+                                 "/proc/dri.\n");
+                       return ret;
+               }
+       }
+       return 0;
+}
+
+/* Remove every proc entry in 'files' that was registered on this minor,
+ * freeing the matching drm_info_node bookkeeping. Always returns 0. */
+int drm_proc_remove_files(struct drm_info_list *files, int count,
+                         struct drm_minor *minor)
+{
+       struct list_head *pos, *q;
+       struct drm_info_node *tmp;
+       int i;
+
+       for (i = 0; i < count; i++) {
+               /* _safe walk: matching nodes are unlinked and freed in-loop */
+               list_for_each_safe(pos, q, &minor->proc_nodes.list) {
+                       tmp = list_entry(pos, struct drm_info_node, list);
+                       if (tmp->info_ent == &files[i]) {
+                               remove_proc_entry(files[i].name,
+                                                 minor->proc_root);
+                               list_del(pos);
+                               kfree(tmp);
+                       }
+               }
+       }
+       return 0;
+}
+
+/**
+ * Cleanup the proc filesystem resources.
+ *
+ * \param minor device minor number.
+ * \param root DRI proc dir entry.
+ * \param dev_root DRI device proc dir entry.
+ * \return always zero.
+ *
+ * Remove all proc entries created by proc_init().
+ */
+int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root)
+{
+       struct drm_device *dev = minor->dev;
+       char name[64];
+
+       /* nothing to do if proc was never (fully) initialized for this minor */
+       if (!root || !minor->proc_root)
+               return 0;
+
+       /* driver-specific entries first, then the core ones */
+       if (dev->driver->proc_cleanup)
+               dev->driver->proc_cleanup(minor);
+
+       drm_proc_remove_files(drm_proc_list, DRM_PROC_ENTRIES, minor);
+
+       /* finally remove the /proc/dri/<index> directory itself */
+       sprintf(name, "%d", minor->index);
+       remove_proc_entry(name, root);
+
+       return 0;
+}
+
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_scatter.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_scatter.c
new file mode 100644 (file)
index 0000000..c7823c8
--- /dev/null
@@ -0,0 +1,218 @@
+/**
+ * \file drm_scatter.c
+ * IOCTLs to manage scatter/gather memory
+ *
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Mon Dec 18 23:20:54 2000 by gareth@valinux.com
+ *
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/vmalloc.h>
+#include "drmP.h"
+
+#define DEBUG_SCATTER 0
+
+/* Allocate DMA-capable vmalloc memory: 32-bit addressable pages, and
+ * non-cached mappings on non-coherent PowerPC. */
+static inline void *drm_vmalloc_dma(unsigned long size)
+{
+#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
+       return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL | _PAGE_NO_CACHE);
+#else
+       return vmalloc_32(size);
+#endif
+}
+
+/* Tear down a scatter-gather allocation: unreserve each page, free the
+ * vmalloc region, then the bookkeeping arrays and the entry itself. */
+void drm_sg_cleanup(struct drm_sg_mem * entry)
+{
+       struct page *page;
+       int i;
+
+       for (i = 0; i < entry->pages; i++) {
+               page = entry->pagelist[i];
+               if (page)
+                       ClearPageReserved(page);
+       }
+
+       vfree(entry->virtual);
+
+       kfree(entry->busaddr);
+       kfree(entry->pagelist);
+       kfree(entry);
+}
+
+/* Fold a kernel virtual address into an unsigned int handle; on 64-bit the
+ * upper and lower halves are summed so the handle still fits in 32 bits. */
+#ifdef _LP64
+# define ScatterHandle(x) (unsigned int)((x >> 32) + (x & ((1L << 32) - 1)))
+#else
+# define ScatterHandle(x) (unsigned int)(x)
+#endif
+
+/* Allocate a scatter-gather region of request->size bytes, build the page
+ * and bus-address tables, and publish it as dev->sg. Only one region may
+ * exist per device at a time (-EINVAL if dev->sg is already set). */
+int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
+{
+       struct drm_sg_mem *entry;
+       unsigned long pages, i, j;
+
+       DRM_DEBUG("\n");
+
+       if (!drm_core_check_feature(dev, DRIVER_SG))
+               return -EINVAL;
+
+       if (dev->sg)
+               return -EINVAL;
+
+       entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+       if (!entry)
+               return -ENOMEM;
+
+       memset(entry, 0, sizeof(*entry));
+       /* round the requested byte count up to whole pages */
+       pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
+       DRM_DEBUG("size=%ld pages=%ld\n", request->size, pages);
+
+       entry->pages = pages;
+       entry->pagelist = kmalloc(pages * sizeof(*entry->pagelist), GFP_KERNEL);
+       if (!entry->pagelist) {
+               kfree(entry);
+               return -ENOMEM;
+       }
+
+       memset(entry->pagelist, 0, pages * sizeof(*entry->pagelist));
+
+       entry->busaddr = kmalloc(pages * sizeof(*entry->busaddr), GFP_KERNEL);
+       if (!entry->busaddr) {
+               kfree(entry->pagelist);
+               kfree(entry);
+               return -ENOMEM;
+       }
+       memset((void *)entry->busaddr, 0, pages * sizeof(*entry->busaddr));
+
+       entry->virtual = drm_vmalloc_dma(pages << PAGE_SHIFT);
+       if (!entry->virtual) {
+               kfree(entry->busaddr);
+               kfree(entry->pagelist);
+               kfree(entry);
+               return -ENOMEM;
+       }
+
+       /* This also forces the mapping of COW pages, so our page list
+        * will be valid.  Please don't remove it...
+        */
+       memset(entry->virtual, 0, pages << PAGE_SHIFT);
+
+       entry->handle = ScatterHandle((unsigned long)entry->virtual);
+
+       DRM_DEBUG("handle  = %08lx\n", entry->handle);
+       DRM_DEBUG("virtual = %p\n", entry->virtual);
+
+       /* reserve every page so it survives to be mmap'd by userspace */
+       for (i = (unsigned long)entry->virtual, j = 0; j < pages;
+            i += PAGE_SIZE, j++) {
+               entry->pagelist[j] = vmalloc_to_page((void *)i);
+               if (!entry->pagelist[j])
+                       goto failed;
+               SetPageReserved(entry->pagelist[j]);
+       }
+
+       request->handle = entry->handle;
+
+       dev->sg = entry;
+
+#if DEBUG_SCATTER
+       /* Verify that each page points to its virtual address, and vice
+        * versa.
+        */
+       {
+               int error = 0;
+
+               for (i = 0; i < pages; i++) {
+                       unsigned long *tmp;
+
+                       tmp = page_address(entry->pagelist[i]);
+                       for (j = 0;
+                            j < PAGE_SIZE / sizeof(unsigned long);
+                            j++, tmp++) {
+                               *tmp = 0xcafebabe;
+                       }
+                       tmp = (unsigned long *)((u8 *) entry->virtual +
+                                               (PAGE_SIZE * i));
+                       for (j = 0;
+                            j < PAGE_SIZE / sizeof(unsigned long);
+                            j++, tmp++) {
+                               if (*tmp != 0xcafebabe && error == 0) {
+                                       error = 1;
+                                       DRM_ERROR("Scatter allocation error, "
+                                                 "pagelist does not match "
+                                                 "virtual mapping\n");
+                               }
+                       }
+                       tmp = page_address(entry->pagelist[i]);
+                       for (j = 0;
+                            j < PAGE_SIZE / sizeof(unsigned long);
+                            j++, tmp++) {
+                               *tmp = 0;
+                       }
+               }
+               if (error == 0)
+                       DRM_ERROR("Scatter allocation matches pagelist\n");
+       }
+#endif
+
+       return 0;
+
+      failed:
+       drm_sg_cleanup(entry);
+       return -ENOMEM;
+}
+EXPORT_SYMBOL(drm_sg_alloc);
+
+
+/* ioctl wrapper: the ioctl payload is a struct drm_scatter_gather */
+int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv)
+{
+       struct drm_scatter_gather *request = data;
+
+       return drm_sg_alloc(dev, request);
+
+}
+
+/* Free the device's scatter-gather region, but only if the caller's handle
+ * matches the one that was handed out by drm_sg_alloc(). */
+int drm_sg_free(struct drm_device *dev, void *data,
+               struct drm_file *file_priv)
+{
+       struct drm_scatter_gather *request = data;
+       struct drm_sg_mem *entry;
+
+       if (!drm_core_check_feature(dev, DRIVER_SG))
+               return -EINVAL;
+
+       /* detach from the device before validating the handle */
+       entry = dev->sg;
+       dev->sg = NULL;
+
+       if (!entry || entry->handle != request->handle)
+               return -EINVAL;
+
+       DRM_DEBUG("virtual  = %p\n", entry->virtual);
+
+       drm_sg_cleanup(entry);
+
+       return 0;
+}
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_sman.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_sman.c
new file mode 100644 (file)
index 0000000..463aed9
--- /dev/null
@@ -0,0 +1,352 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck., ND., USA.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Simple memory manager interface that keeps track on allocate regions on a
+ * per "owner" basis. All regions associated with an "owner" can be released
+ * with a simple call. Typically if the "owner" exists. The owner is any
+ * "unsigned long" identifier. Can typically be a pointer to a file private
+ * struct or a context identifier.
+ *
+ * Authors:
+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include "drm_sman.h"
+
+/* Per-owner bookkeeping: hashed by owner id, linked on the sman's owner
+ * list, and holding the owner's allocated memory blocks. */
+struct drm_owner_item {
+       struct drm_hash_item owner_hash;
+       struct list_head sman_list;
+       struct list_head mem_blocks;
+};
+
+/* Release the manager's hash tables and the per-manager array; assumes all
+ * owners/allocations were already cleaned up (see drm_sman_cleanup()). */
+void drm_sman_takedown(struct drm_sman * sman)
+{
+       drm_ht_remove(&sman->user_hash_tab);
+       drm_ht_remove(&sman->owner_hash_tab);
+       kfree(sman->mm);
+}
+
+EXPORT_SYMBOL(drm_sman_takedown);
+
+/* Initialize a simple memory manager with num_managers sub-managers and
+ * hash tables of 1<<user_order / 1<<owner_order buckets.
+ * Returns 0 on success, negative errno on failure (all partial state undone). */
+int
+drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
+             unsigned int user_order, unsigned int owner_order)
+{
+       int ret = 0;
+
+       sman->mm = (struct drm_sman_mm *) kcalloc(num_managers,
+                                                 sizeof(*sman->mm),
+                                                 GFP_KERNEL);
+       if (!sman->mm) {
+               ret = -ENOMEM;
+               goto out;
+       }
+       sman->num_managers = num_managers;
+       INIT_LIST_HEAD(&sman->owner_items);
+       ret = drm_ht_create(&sman->owner_hash_tab, owner_order);
+       if (ret)
+               goto out1;
+       ret = drm_ht_create(&sman->user_hash_tab, user_order);
+       /* note the inverted test: success (ret == 0) jumps straight out;
+        * failure falls through into the unwind sequence below */
+       if (!ret)
+               goto out;
+
+       drm_ht_remove(&sman->owner_hash_tab);
+out1:
+       kfree(sman->mm);
+out:
+       return ret;
+}
+
+EXPORT_SYMBOL(drm_sman_init);
+
+/* Default allocator backend built on drm_mm: find a free hole of the right
+ * size/alignment and claim it. Returns the drm_mm_node, or NULL if full. */
+static void *drm_sman_mm_allocate(void *private, unsigned long size,
+                                 unsigned alignment)
+{
+       struct drm_mm *mm = (struct drm_mm *) private;
+       struct drm_mm_node *tmp;
+
+       tmp = drm_mm_search_free(mm, size, alignment, 1);
+       if (!tmp) {
+               return NULL;
+       }
+       tmp = drm_mm_get_block(tmp, size, alignment);
+       return tmp;
+}
+
+/* Default free backend: return the node's range to the drm_mm pool. */
+static void drm_sman_mm_free(void *private, void *ref)
+{
+       struct drm_mm_node *node = (struct drm_mm_node *) ref;
+
+       drm_mm_put_block(node);
+}
+
+/* Default destroy backend: tear down the drm_mm created in
+ * drm_sman_set_range() and free it. */
+static void drm_sman_mm_destroy(void *private)
+{
+       struct drm_mm *mm = (struct drm_mm *) private;
+       drm_mm_takedown(mm);
+       kfree(mm);
+}
+
+/* Default offset backend: a node's offset is its start address in the range. */
+static unsigned long drm_sman_mm_offset(void *private, void *ref)
+{
+       struct drm_mm_node *node = (struct drm_mm_node *) ref;
+       return node->start;
+}
+
+/* Back sub-manager 'manager' with the default drm_mm allocator over the
+ * address range [start, start+size). Returns 0 or negative errno. */
+int
+drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
+                  unsigned long start, unsigned long size)
+{
+       struct drm_sman_mm *sman_mm;
+       struct drm_mm *mm;
+       int ret;
+
+       BUG_ON(manager >= sman->num_managers);
+
+       sman_mm = &sman->mm[manager];
+       mm = kzalloc(sizeof(*mm), GFP_KERNEL);
+       if (!mm) {
+               return -ENOMEM;
+       }
+       sman_mm->private = mm;
+       ret = drm_mm_init(mm, start, size);
+
+       if (ret) {
+               /* NOTE(review): sman_mm->private is left pointing at the freed
+                * mm here — drm_sman_cleanup() would call destroy() on it.
+                * Confirm callers treat set_range failure as fatal. */
+               kfree(mm);
+               return ret;
+       }
+
+       /* wire up the default drm_mm-backed callbacks */
+       sman_mm->allocate = drm_sman_mm_allocate;
+       sman_mm->free = drm_sman_mm_free;
+       sman_mm->destroy = drm_sman_mm_destroy;
+       sman_mm->offset = drm_sman_mm_offset;
+
+       return 0;
+}
+
+EXPORT_SYMBOL(drm_sman_set_range);
+
+/* Install a caller-supplied allocator backend for sub-manager 'manager'
+ * (alternative to the default drm_mm backend set by drm_sman_set_range). */
+int
+drm_sman_set_manager(struct drm_sman * sman, unsigned int manager,
+                    struct drm_sman_mm * allocator)
+{
+       BUG_ON(manager >= sman->num_managers);
+       /* struct copy: the caller's allocator is captured by value */
+       sman->mm[manager] = *allocator;
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_sman_set_manager);
+
+/* Look up the drm_owner_item for 'owner', lazily creating and registering
+ * one on first use. Returns NULL on allocation or hash-insert failure. */
+static struct drm_owner_item *drm_sman_get_owner_item(struct drm_sman * sman,
+                                                unsigned long owner)
+{
+       int ret;
+       struct drm_hash_item *owner_hash_item;
+       struct drm_owner_item *owner_item;
+
+       ret = drm_ht_find_item(&sman->owner_hash_tab, owner, &owner_hash_item);
+       if (!ret) {
+               /* already known: hand back the existing item */
+               return drm_hash_entry(owner_hash_item, struct drm_owner_item,
+                                     owner_hash);
+       }
+
+       owner_item = kzalloc(sizeof(*owner_item), GFP_KERNEL);
+       if (!owner_item)
+               goto out;
+
+       INIT_LIST_HEAD(&owner_item->mem_blocks);
+       owner_item->owner_hash.key = owner;
+       if (drm_ht_insert_item(&sman->owner_hash_tab, &owner_item->owner_hash))
+               goto out1;
+
+       list_add_tail(&owner_item->sman_list, &sman->owner_items);
+       return owner_item;
+
+out1:
+       kfree(owner_item);
+out:
+       return NULL;
+}
+
+/* Allocate 'size' bytes from sub-manager 'manager' on behalf of 'owner'.
+ * The returned memblock is registered in the user hash (so userspace can
+ * reference it by key) and on the owner's block list. NULL on failure;
+ * all intermediate state is rolled back via the goto ladder. */
+struct drm_memblock_item *drm_sman_alloc(struct drm_sman *sman, unsigned int manager,
+                                   unsigned long size, unsigned alignment,
+                                   unsigned long owner)
+{
+       void *tmp;
+       struct drm_sman_mm *sman_mm;
+       struct drm_owner_item *owner_item;
+       struct drm_memblock_item *memblock;
+
+       BUG_ON(manager >= sman->num_managers);
+
+       sman_mm = &sman->mm[manager];
+       tmp = sman_mm->allocate(sman_mm->private, size, alignment);
+
+       if (!tmp) {
+               return NULL;
+       }
+
+       memblock = kzalloc(sizeof(*memblock), GFP_KERNEL);
+
+       if (!memblock)
+               goto out;
+
+       memblock->mm_info = tmp;
+       memblock->mm = sman_mm;
+       memblock->sman = sman;
+
+       /* pick a free 32-bit key and insert into the user hash */
+       if (drm_ht_just_insert_please
+           (&sman->user_hash_tab, &memblock->user_hash,
+            (unsigned long)memblock, 32, 0, 0))
+               goto out1;
+
+       owner_item = drm_sman_get_owner_item(sman, owner);
+       if (!owner_item)
+               goto out2;
+
+       list_add_tail(&memblock->owner_list, &owner_item->mem_blocks);
+
+       return memblock;
+
+out2:
+       drm_ht_remove_item(&sman->user_hash_tab, &memblock->user_hash);
+out1:
+       kfree(memblock);
+out:
+       /* return the raw allocation to the backend */
+       sman_mm->free(sman_mm->private, tmp);
+
+       return NULL;
+}
+
+EXPORT_SYMBOL(drm_sman_alloc);
+
+/* Fully release one memblock: unlink from its owner, drop the user-hash
+ * entry, return the backend allocation, free the bookkeeping struct. */
+static void drm_sman_free(struct drm_memblock_item *item)
+{
+       struct drm_sman *sman = item->sman;
+
+       list_del(&item->owner_list);
+       drm_ht_remove_item(&sman->user_hash_tab, &item->user_hash);
+       item->mm->free(item->mm->private, item->mm_info);
+       kfree(item);
+}
+
+/* Free the memblock identified by a user-hash key (as handed out by
+ * drm_sman_alloc). Returns -EINVAL if the key is unknown. */
+int drm_sman_free_key(struct drm_sman *sman, unsigned int key)
+{
+       struct drm_hash_item *hash_item;
+       struct drm_memblock_item *memblock_item;
+
+       if (drm_ht_find_item(&sman->user_hash_tab, key, &hash_item))
+               return -EINVAL;
+
+       memblock_item = drm_hash_entry(hash_item, struct drm_memblock_item,
+                                      user_hash);
+       drm_sman_free(memblock_item);
+       return 0;
+}
+
+EXPORT_SYMBOL(drm_sman_free_key);
+
+/* Unregister and free an owner item; its mem_blocks list must already be
+ * empty (or about to be abandoned by the caller). */
+static void drm_sman_remove_owner(struct drm_sman *sman,
+                                 struct drm_owner_item *owner_item)
+{
+       list_del(&owner_item->sman_list);
+       drm_ht_remove_item(&sman->owner_hash_tab, &owner_item->owner_hash);
+       kfree(owner_item);
+}
+
+/* Returns -1 when 'owner' holds no allocations (either unknown, or known
+ * but empty — in which case the owner item is removed as a side effect),
+ * and 0 when the owner still has outstanding blocks.
+ * NOTE(review): callers cannot distinguish "unknown owner" from "owner was
+ * empty and has now been removed" — both return -1. */
+int drm_sman_owner_clean(struct drm_sman *sman, unsigned long owner)
+{
+
+       struct drm_hash_item *hash_item;
+       struct drm_owner_item *owner_item;
+
+       if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) {
+               return -1;
+       }
+
+       owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash);
+       /* open-coded list_empty() on the owner's block list */
+       if (owner_item->mem_blocks.next == &owner_item->mem_blocks) {
+               drm_sman_remove_owner(sman, owner_item);
+               return -1;
+       }
+
+       return 0;
+}
+
+EXPORT_SYMBOL(drm_sman_owner_clean);
+
+/* Free every memblock belonging to 'owner_item', then the owner itself. */
+static void drm_sman_do_owner_cleanup(struct drm_sman *sman,
+                                     struct drm_owner_item *owner_item)
+{
+       struct drm_memblock_item *entry, *next;
+
+       /* _safe walk: drm_sman_free() unlinks each entry from this list */
+       list_for_each_entry_safe(entry, next, &owner_item->mem_blocks,
+                                owner_list) {
+               drm_sman_free(entry);
+       }
+       drm_sman_remove_owner(sman, owner_item);
+}
+
+/* Public entry point: release everything held by 'owner'; a no-op when the
+ * owner is unknown. */
+void drm_sman_owner_cleanup(struct drm_sman *sman, unsigned long owner)
+{
+
+       struct drm_hash_item *hash_item;
+       struct drm_owner_item *owner_item;
+
+       if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) {
+
+               return;
+       }
+
+       owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash);
+       drm_sman_do_owner_cleanup(sman, owner_item);
+}
+
+EXPORT_SYMBOL(drm_sman_owner_cleanup);
+
+/* Global teardown: release every owner's allocations, then destroy each
+ * sub-manager backend that still has private state. Does NOT free
+ * sman->mm or the hash tables — that is drm_sman_takedown()'s job. */
+void drm_sman_cleanup(struct drm_sman *sman)
+{
+       struct drm_owner_item *entry, *next;
+       unsigned int i;
+       struct drm_sman_mm *sman_mm;
+
+       /* _safe walk: each owner is unlinked and freed during cleanup */
+       list_for_each_entry_safe(entry, next, &sman->owner_items, sman_list) {
+               drm_sman_do_owner_cleanup(sman, entry);
+       }
+       if (sman->mm) {
+               for (i = 0; i < sman->num_managers; ++i) {
+                       sman_mm = &sman->mm[i];
+                       if (sman_mm->private) {
+                               sman_mm->destroy(sman_mm->private);
+                               sman_mm->private = NULL;
+                       }
+               }
+       }
+}
+
+EXPORT_SYMBOL(drm_sman_cleanup);
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_stub.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_stub.c
new file mode 100644 (file)
index 0000000..ad73e14
--- /dev/null
@@ -0,0 +1,559 @@
+/**
+ * \file drm_stub.h
+ * Stub support
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ */
+
+/*
+ * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
+ *
+ * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include "drmP.h"
+#include "drm_core.h"
+
+unsigned int drm_debug = 0;    /* 1 to enable debug output */
+EXPORT_SYMBOL(drm_debug);
+
+MODULE_AUTHOR(CORE_AUTHOR);
+MODULE_DESCRIPTION(CORE_DESC);
+MODULE_LICENSE("GPL and additional rights");
+MODULE_PARM_DESC(debug, "Enable debug output");
+
+module_param_named(debug, drm_debug, int, 0600);
+
+struct idr drm_minors_idr;
+
+struct class *drm_class;
+struct proc_dir_entry *drm_proc_root;
+struct dentry *drm_debugfs_root;
+void drm_ut_debug_printk(unsigned int request_level,
+                        const char *prefix,
+                        const char *function_name,
+                        const char *format, ...)
+{
+       va_list args;
+
+       if (drm_debug & request_level) {
+               if (function_name)
+                       printk(KERN_DEBUG "[%s:%s], ", prefix, function_name);
+               va_start(args, format);
+               vprintk(format, args);
+               va_end(args);
+       }
+}
+EXPORT_SYMBOL(drm_ut_debug_printk);
+static int drm_minor_get_id(struct drm_device *dev, int type)
+{
+       int new_id;
+       int ret;
+       int base = 0, limit = 63;
+
+       if (type == DRM_MINOR_CONTROL) {
+                base += 64;
+                limit = base + 127;
+        } else if (type == DRM_MINOR_RENDER) {
+                base += 128;
+                limit = base + 255;
+        }
+
+again:
+       if (idr_pre_get(&drm_minors_idr, GFP_KERNEL) == 0) {
+               DRM_ERROR("Out of memory expanding drawable idr\n");
+               return -ENOMEM;
+       }
+       mutex_lock(&dev->struct_mutex);
+       ret = idr_get_new_above(&drm_minors_idr, NULL,
+                               base, &new_id);
+       mutex_unlock(&dev->struct_mutex);
+       if (ret == -EAGAIN) {
+               goto again;
+       } else if (ret) {
+               return ret;
+       }
+
+       if (new_id >= limit) {
+               idr_remove(&drm_minors_idr, new_id);
+               return -EINVAL;
+       }
+       return new_id;
+}
+
+struct drm_master *drm_master_create(struct drm_minor *minor)
+{
+       struct drm_master *master;
+
+       master = kzalloc(sizeof(*master), GFP_KERNEL);
+       if (!master)
+               return NULL;
+
+       kref_init(&master->refcount);
+       spin_lock_init(&master->lock.spinlock);
+       init_waitqueue_head(&master->lock.lock_queue);
+       drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER);
+       INIT_LIST_HEAD(&master->magicfree);
+       master->minor = minor;
+
+       list_add_tail(&master->head, &minor->master_list);
+
+       return master;
+}
+
+struct drm_master *drm_master_get(struct drm_master *master)
+{
+       kref_get(&master->refcount);
+       return master;
+}
+EXPORT_SYMBOL(drm_master_get);
+
+static void drm_master_destroy(struct kref *kref)
+{
+       struct drm_master *master = container_of(kref, struct drm_master, refcount);
+       struct drm_magic_entry *pt, *next;
+       struct drm_device *dev = master->minor->dev;
+       struct drm_map_list *r_list, *list_temp;
+
+       list_del(&master->head);
+
+       if (dev->driver->master_destroy)
+               dev->driver->master_destroy(dev, master);
+
+       list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
+               if (r_list->master == master) {
+                       drm_rmmap_locked(dev, r_list->map);
+                       r_list = NULL;
+               }
+       }
+
+       if (master->unique) {
+               kfree(master->unique);
+               master->unique = NULL;
+               master->unique_len = 0;
+       }
+
+       list_for_each_entry_safe(pt, next, &master->magicfree, head) {
+               list_del(&pt->head);
+               drm_ht_remove_item(&master->magiclist, &pt->hash_item);
+               kfree(pt);
+       }
+
+       drm_ht_remove(&master->magiclist);
+
+       kfree(master);
+}
+
+void drm_master_put(struct drm_master **master)
+{
+       kref_put(&(*master)->refcount, drm_master_destroy);
+       *master = NULL;
+}
+EXPORT_SYMBOL(drm_master_put);
+
+int drm_setmaster_ioctl(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv)
+{
+       int ret = 0;
+
+       if (file_priv->is_master)
+               return 0;
+
+       if (file_priv->minor->master && file_priv->minor->master != file_priv->master)
+               return -EINVAL;
+
+       if (!file_priv->master)
+               return -EINVAL;
+
+       if (!file_priv->minor->master &&
+           file_priv->minor->master != file_priv->master) {
+               mutex_lock(&dev->struct_mutex);
+               file_priv->minor->master = drm_master_get(file_priv->master);
+               file_priv->is_master = 1;
+               if (dev->driver->master_set) {
+                       ret = dev->driver->master_set(dev, file_priv, false);
+                       if (unlikely(ret != 0)) {
+                               file_priv->is_master = 0;
+                               drm_master_put(&file_priv->minor->master);
+                       }
+               }
+               mutex_unlock(&dev->struct_mutex);
+       }
+
+       return 0;
+}
+
+int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv)
+{
+       if (!file_priv->is_master)
+               return -EINVAL;
+
+       if (!file_priv->minor->master)
+               return -EINVAL;
+
+       mutex_lock(&dev->struct_mutex);
+       if (dev->driver->master_drop)
+               dev->driver->master_drop(dev, file_priv, false);
+       drm_master_put(&file_priv->minor->master);
+       file_priv->is_master = 0;
+       mutex_unlock(&dev->struct_mutex);
+       return 0;
+}
+
+static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
+                          const struct pci_device_id *ent,
+                          struct drm_driver *driver)
+{
+       int retcode;
+
+       INIT_LIST_HEAD(&dev->filelist);
+       INIT_LIST_HEAD(&dev->ctxlist);
+       INIT_LIST_HEAD(&dev->vmalist);
+       INIT_LIST_HEAD(&dev->maplist);
+       INIT_LIST_HEAD(&dev->vblank_event_list);
+
+       spin_lock_init(&dev->count_lock);
+       spin_lock_init(&dev->drw_lock);
+       spin_lock_init(&dev->event_lock);
+       init_timer(&dev->timer);
+       mutex_init(&dev->struct_mutex);
+       mutex_init(&dev->ctxlist_mutex);
+
+       idr_init(&dev->drw_idr);
+
+       dev->pdev = pdev;
+       dev->pci_device = pdev->device;
+       dev->pci_vendor = pdev->vendor;
+
+#ifdef __alpha__
+       dev->hose = pdev->sysdata;
+#endif
+
+       if (drm_ht_create(&dev->map_hash, 12)) {
+               return -ENOMEM;
+       }
+
+       /* the DRM has 6 basic counters */
+       dev->counters = 6;
+       dev->types[0] = _DRM_STAT_LOCK;
+       dev->types[1] = _DRM_STAT_OPENS;
+       dev->types[2] = _DRM_STAT_CLOSES;
+       dev->types[3] = _DRM_STAT_IOCTLS;
+       dev->types[4] = _DRM_STAT_LOCKS;
+       dev->types[5] = _DRM_STAT_UNLOCKS;
+
+       dev->driver = driver;
+
+       if (drm_core_has_AGP(dev)) {
+               if (drm_device_is_agp(dev))
+                       dev->agp = drm_agp_init(dev);
+               if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP)
+                   && (dev->agp == NULL)) {
+                       DRM_ERROR("Cannot initialize the agpgart module.\n");
+                       retcode = -EINVAL;
+                       goto error_out_unreg;
+               }
+               if (drm_core_has_MTRR(dev)) {
+                       if (dev->agp)
+                               dev->agp->agp_mtrr =
+                                   mtrr_add(dev->agp->agp_info.aper_base,
+                                            dev->agp->agp_info.aper_size *
+                                            1024 * 1024, MTRR_TYPE_WRCOMB, 1);
+               }
+       }
+
+
+       retcode = drm_ctxbitmap_init(dev);
+       if (retcode) {
+               DRM_ERROR("Cannot allocate memory for context bitmap.\n");
+               goto error_out_unreg;
+       }
+
+       if (driver->driver_features & DRIVER_GEM) {
+               retcode = drm_gem_init(dev);
+               if (retcode) {
+                       DRM_ERROR("Cannot initialize graphics execution "
+                                 "manager (GEM)\n");
+                       goto error_out_unreg;
+               }
+       }
+
+       return 0;
+
+      error_out_unreg:
+       drm_lastclose(dev);
+       return retcode;
+}
+
+
+/**
+ * Get a secondary minor number.
+ *
+ * \param dev device data structure
+ * \param sec-minor structure to hold the assigned minor
+ * \return negative number on failure.
+ *
+ * Search an empty entry and initialize it to the given parameters, and
+ * create the proc init entry via proc_init(). This routines assigns
+ * minor numbers to secondary heads of multi-headed cards
+ */
+static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
+{
+       struct drm_minor *new_minor;
+       int ret;
+       int minor_id;
+
+       DRM_DEBUG("\n");
+
+       minor_id = drm_minor_get_id(dev, type);
+       if (minor_id < 0)
+               return minor_id;
+
+       new_minor = kzalloc(sizeof(struct drm_minor), GFP_KERNEL);
+       if (!new_minor) {
+               ret = -ENOMEM;
+               goto err_idr;
+       }
+
+       new_minor->type = type;
+       new_minor->device = MKDEV(DRM_MAJOR, minor_id);
+       new_minor->dev = dev;
+       new_minor->index = minor_id;
+       INIT_LIST_HEAD(&new_minor->master_list);
+
+       idr_replace(&drm_minors_idr, new_minor, minor_id);
+
+       if (type == DRM_MINOR_LEGACY) {
+               ret = drm_proc_init(new_minor, minor_id, drm_proc_root);
+               if (ret) {
+                       DRM_ERROR("DRM: Failed to initialize /proc/dri.\n");
+                       goto err_mem;
+               }
+       } else
+               new_minor->proc_root = NULL;
+
+#if defined(CONFIG_DEBUG_FS)
+       ret = drm_debugfs_init(new_minor, minor_id, drm_debugfs_root);
+       if (ret) {
+               DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
+               goto err_g2;
+       }
+#endif
+
+       ret = drm_sysfs_device_add(new_minor);
+       if (ret) {
+               printk(KERN_ERR
+                      "DRM: Error sysfs_device_add.\n");
+               goto err_g2;
+       }
+       *minor = new_minor;
+
+       DRM_DEBUG("new minor assigned %d\n", minor_id);
+       return 0;
+
+
+err_g2:
+       if (new_minor->type == DRM_MINOR_LEGACY)
+               drm_proc_cleanup(new_minor, drm_proc_root);
+err_mem:
+       kfree(new_minor);
+err_idr:
+       idr_remove(&drm_minors_idr, minor_id);
+       *minor = NULL;
+       return ret;
+}
+
+/**
+ * Register.
+ *
+ * \param pdev - PCI device structure
+ * \param ent entry from the PCI ID table with device type flags
+ * \return zero on success or a negative number on failure.
+ *
+ * Attempt to gets inter module "drm" information. If we are first
+ * then register the character device and inter module information.
+ * Try and register, if we fail to register, backout previous work.
+ */
+int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
+               struct drm_driver *driver)
+{
+       struct drm_device *dev;
+       int ret;
+
+       DRM_DEBUG("\n");
+
+       dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+       if (!dev)
+               return -ENOMEM;
+
+       ret = pci_enable_device(pdev);
+       if (ret)
+               goto err_g1;
+
+       pci_set_master(pdev);
+       if ((ret = drm_fill_in_dev(dev, pdev, ent, driver))) {
+               printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
+               goto err_g2;
+       }
+
+       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+               pci_set_drvdata(pdev, dev);
+               ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
+               if (ret)
+                       goto err_g2;
+       }
+
+       if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY)))
+               goto err_g3;
+
+       if (dev->driver->load) {
+               ret = dev->driver->load(dev, ent->driver_data);
+               if (ret)
+                       goto err_g4;
+       }
+
+        /* setup the grouping for the legacy output */
+       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+               ret = drm_mode_group_init_legacy_group(dev, &dev->primary->mode_group);
+               if (ret)
+                       goto err_g4;
+       }
+
+       list_add_tail(&dev->driver_item, &driver->device_list);
+
+       DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
+                driver->name, driver->major, driver->minor, driver->patchlevel,
+                driver->date, pci_name(pdev), dev->primary->index);
+
+       return 0;
+
+err_g4:
+       drm_put_minor(&dev->primary);
+err_g3:
+       if (drm_core_check_feature(dev, DRIVER_MODESET))
+               drm_put_minor(&dev->control);
+err_g2:
+       pci_disable_device(pdev);
+err_g1:
+       kfree(dev);
+       return ret;
+}
+EXPORT_SYMBOL(drm_get_dev);
+
+/**
+ * Put a secondary minor number.
+ *
+ * \param sec_minor - structure to be released
+ * \return always zero
+ *
+ * Cleans up the proc resources. Not legal for this to be the
+ * last minor released.
+ *
+ */
+int drm_put_minor(struct drm_minor **minor_p)
+{
+       struct drm_minor *minor = *minor_p;
+
+       DRM_DEBUG("release secondary minor %d\n", minor->index);
+
+       if (minor->type == DRM_MINOR_LEGACY)
+               drm_proc_cleanup(minor, drm_proc_root);
+#if defined(CONFIG_DEBUG_FS)
+       drm_debugfs_cleanup(minor);
+#endif
+
+       drm_sysfs_device_remove(minor);
+
+       idr_remove(&drm_minors_idr, minor->index);
+
+       kfree(minor);
+       *minor_p = NULL;
+       return 0;
+}
+
+/**
+ * Called via drm_exit() at module unload time or when pci device is
+ * unplugged.
+ *
+ * Cleans up all DRM device, calling drm_lastclose().
+ *
+ * \sa drm_init
+ */
+void drm_put_dev(struct drm_device *dev)
+{
+       struct drm_driver *driver;
+       struct drm_map_list *r_list, *list_temp;
+
+       DRM_DEBUG("\n");
+
+       if (!dev) {
+               DRM_ERROR("cleanup called no dev\n");
+               return;
+       }
+       driver = dev->driver;
+
+       drm_vblank_cleanup(dev);
+
+       drm_lastclose(dev);
+
+       if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) &&
+           dev->agp && dev->agp->agp_mtrr >= 0) {
+               int retval;
+               retval = mtrr_del(dev->agp->agp_mtrr,
+                                 dev->agp->agp_info.aper_base,
+                                 dev->agp->agp_info.aper_size * 1024 * 1024);
+               DRM_DEBUG("mtrr_del=%d\n", retval);
+       }
+
+       if (dev->driver->unload)
+               dev->driver->unload(dev);
+
+       if (drm_core_has_AGP(dev) && dev->agp) {
+               kfree(dev->agp);
+               dev->agp = NULL;
+       }
+
+       list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
+               drm_rmmap(dev, r_list->map);
+       drm_ht_remove(&dev->map_hash);
+
+       drm_ctxbitmap_cleanup(dev);
+
+       if (drm_core_check_feature(dev, DRIVER_MODESET))
+               drm_put_minor(&dev->control);
+
+       if (driver->driver_features & DRIVER_GEM)
+               drm_gem_destroy(dev);
+
+       drm_put_minor(&dev->primary);
+
+       if (dev->devname) {
+               kfree(dev->devname);
+               dev->devname = NULL;
+       }
+       kfree(dev);
+}
+EXPORT_SYMBOL(drm_put_dev);
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_sysfs.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_sysfs.c
new file mode 100644 (file)
index 0000000..7e42b7e
--- /dev/null
@@ -0,0 +1,549 @@
+
+/*
+ * drm_sysfs.c - Modifications to drm_sysfs_class.c to support
+ *               extra sysfs attribute from DRM. Normal drm_sysfs_class
+ *               does not allow adding attributes.
+ *
+ * Copyright (c) 2004 Jon Smirl <jonsmirl@gmail.com>
+ * Copyright (c) 2003-2004 Greg Kroah-Hartman <greg@kroah.com>
+ * Copyright (c) 2003-2004 IBM Corp.
+ *
+ * This file is released under the GPLv2
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/kdev_t.h>
+#include <linux/err.h>
+
+#include "drm_sysfs.h"
+#include "drm_core.h"
+#include "drmP.h"
+
+#define to_drm_minor(d) container_of(d, struct drm_minor, kdev)
+#define to_drm_connector(d) container_of(d, struct drm_connector, kdev)
+
+static struct device_type drm_sysfs_device_minor = {
+       .name = "drm_minor"
+};
+
+/**
+ * drm_class_suspend - DRM class suspend hook
+ * @dev: Linux device to suspend
+ * @state: power state to enter
+ *
+ * Just figures out what the actual struct drm_device associated with
+ * @dev is and calls its suspend hook, if present.
+ */
+static int drm_class_suspend(struct device *dev, pm_message_t state)
+{
+       if (dev->type == &drm_sysfs_device_minor) {
+               struct drm_minor *drm_minor = to_drm_minor(dev);
+               struct drm_device *drm_dev = drm_minor->dev;
+
+               if (drm_minor->type == DRM_MINOR_LEGACY &&
+                   !drm_core_check_feature(drm_dev, DRIVER_MODESET) &&
+                   drm_dev->driver->suspend)
+                       return drm_dev->driver->suspend(drm_dev, state);
+       }
+       return 0;
+}
+
+/**
+ * drm_class_resume - DRM class resume hook
+ * @dev: Linux device to resume
+ *
+ * Just figures out what the actual struct drm_device associated with
+ * @dev is and calls its resume hook, if present.
+ */
+static int drm_class_resume(struct device *dev)
+{
+       if (dev->type == &drm_sysfs_device_minor) {
+               struct drm_minor *drm_minor = to_drm_minor(dev);
+               struct drm_device *drm_dev = drm_minor->dev;
+
+               if (drm_minor->type == DRM_MINOR_LEGACY &&
+                   !drm_core_check_feature(drm_dev, DRIVER_MODESET) &&
+                   drm_dev->driver->resume)
+                       return drm_dev->driver->resume(drm_dev);
+       }
+       return 0;
+}
+
+/* Display the version of drm_core. This doesn't work right in current design */
+static ssize_t version_show(struct class *dev, char *buf)
+{
+       return sprintf(buf, "%s %d.%d.%d %s\n", CORE_NAME, CORE_MAJOR,
+                      CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
+}
+
+static char *drm_devnode(struct device *dev, mode_t *mode)
+{
+       return kasprintf(GFP_KERNEL, "dri/%s", dev_name(dev));
+}
+
+static CLASS_ATTR(version, S_IRUGO, version_show, NULL);
+
+/**
+ * drm_sysfs_create - create a struct drm_sysfs_class structure
+ * @owner: pointer to the module that is to "own" this struct drm_sysfs_class
+ * @name: pointer to a string for the name of this class.
+ *
+ * This is used to create DRM class pointer that can then be used
+ * in calls to drm_sysfs_device_add().
+ *
+ * Note, the pointer created here is to be destroyed when finished by making a
+ * call to drm_sysfs_destroy().
+ */
+struct class *drm_sysfs_create(struct module *owner, char *name)
+{
+       struct class *class;
+       int err;
+
+       class = class_create(owner, name);
+       if (IS_ERR(class)) {
+               err = PTR_ERR(class);
+               goto err_out;
+       }
+
+       class->suspend = drm_class_suspend;
+       class->resume = drm_class_resume;
+
+       err = class_create_file(class, &class_attr_version);
+       if (err)
+               goto err_out_class;
+
+       class->devnode = drm_devnode;
+
+       return class;
+
+err_out_class:
+       class_destroy(class);
+err_out:
+       return ERR_PTR(err);
+}
+
+/**
+ * drm_sysfs_destroy - destroys DRM class
+ *
+ * Destroy the DRM device class.
+ */
+void drm_sysfs_destroy(void)
+{
+       if ((drm_class == NULL) || (IS_ERR(drm_class)))
+               return;
+       class_remove_file(drm_class, &class_attr_version);
+       class_destroy(drm_class);
+}
+
+/**
+ * drm_sysfs_device_release - do nothing
+ * @dev: Linux device
+ *
+ * Normally, this would free the DRM device associated with @dev, along
+ * with cleaning up any other stuff.  But we do that in the DRM core, so
+ * this function can just return and hope that the core does its job.
+ */
+static void drm_sysfs_device_release(struct device *dev)
+{
+       memset(dev, 0, sizeof(struct device));
+       return;
+}
+
+/*
+ * Connector properties
+ */
+static ssize_t status_show(struct device *device,
+                          struct device_attribute *attr,
+                          char *buf)
+{
+       struct drm_connector *connector = to_drm_connector(device);
+       enum drm_connector_status status;
+
+       status = connector->funcs->detect(connector);
+       return snprintf(buf, PAGE_SIZE, "%s\n",
+                       drm_get_connector_status_name(status));
+}
+
+static ssize_t dpms_show(struct device *device,
+                          struct device_attribute *attr,
+                          char *buf)
+{
+       struct drm_connector *connector = to_drm_connector(device);
+       struct drm_device *dev = connector->dev;
+       uint64_t dpms_status;
+       int ret;
+
+       ret = drm_connector_property_get_value(connector,
+                                           dev->mode_config.dpms_property,
+                                           &dpms_status);
+       if (ret)
+               return 0;
+
+       return snprintf(buf, PAGE_SIZE, "%s\n",
+                       drm_get_dpms_name((int)dpms_status));
+}
+
+static ssize_t enabled_show(struct device *device,
+                           struct device_attribute *attr,
+                          char *buf)
+{
+       struct drm_connector *connector = to_drm_connector(device);
+
+       return snprintf(buf, PAGE_SIZE, "%s\n", connector->encoder ? "enabled" :
+                       "disabled");
+}
+
+static ssize_t edid_show(struct kobject *kobj, struct bin_attribute *attr,
+                        char *buf, loff_t off, size_t count)
+{
+       struct device *connector_dev = container_of(kobj, struct device, kobj);
+       struct drm_connector *connector = to_drm_connector(connector_dev);
+       unsigned char *edid;
+       size_t size;
+
+       if (!connector->edid_blob_ptr)
+               return 0;
+
+       edid = connector->edid_blob_ptr->data;
+       size = connector->edid_blob_ptr->length;
+       if (!edid)
+               return 0;
+
+       if (off >= size)
+               return 0;
+
+       if (off + count > size)
+               count = size - off;
+       memcpy(buf, edid + off, count);
+
+       return count;
+}
+
+static ssize_t modes_show(struct device *device,
+                          struct device_attribute *attr,
+                          char *buf)
+{
+       struct drm_connector *connector = to_drm_connector(device);
+       struct drm_display_mode *mode;
+       int written = 0;
+
+       list_for_each_entry(mode, &connector->modes, head) {
+               written += snprintf(buf + written, PAGE_SIZE - written, "%s\n",
+                                   mode->name);
+       }
+
+       return written;
+}
+
+static ssize_t subconnector_show(struct device *device,
+                          struct device_attribute *attr,
+                          char *buf)
+{
+       struct drm_connector *connector = to_drm_connector(device);
+       struct drm_device *dev = connector->dev;
+       struct drm_property *prop = NULL;
+       uint64_t subconnector;
+       int is_tv = 0;
+       int ret;
+
+       switch (connector->connector_type) {
+               case DRM_MODE_CONNECTOR_DVII:
+                       prop = dev->mode_config.dvi_i_subconnector_property;
+                       break;
+               case DRM_MODE_CONNECTOR_Composite:
+               case DRM_MODE_CONNECTOR_SVIDEO:
+               case DRM_MODE_CONNECTOR_Component:
+               case DRM_MODE_CONNECTOR_TV:
+                       prop = dev->mode_config.tv_subconnector_property;
+                       is_tv = 1;
+                       break;
+               default:
+                       DRM_ERROR("Wrong connector type for this property\n");
+                       return 0;
+       }
+
+       if (!prop) {
+               DRM_ERROR("Unable to find subconnector property\n");
+               return 0;
+       }
+
+       ret = drm_connector_property_get_value(connector, prop, &subconnector);
+       if (ret)
+               return 0;
+
+       return snprintf(buf, PAGE_SIZE, "%s", is_tv ?
+                       drm_get_tv_subconnector_name((int)subconnector) :
+                       drm_get_dvi_i_subconnector_name((int)subconnector));
+}
+
+static ssize_t select_subconnector_show(struct device *device,
+                          struct device_attribute *attr,
+                          char *buf)
+{
+       struct drm_connector *connector = to_drm_connector(device);
+       struct drm_device *dev = connector->dev;
+       struct drm_property *prop = NULL;
+       uint64_t subconnector;
+       int is_tv = 0;
+       int ret;
+
+       switch (connector->connector_type) {
+               case DRM_MODE_CONNECTOR_DVII:
+                       prop = dev->mode_config.dvi_i_select_subconnector_property;
+                       break;
+               case DRM_MODE_CONNECTOR_Composite:
+               case DRM_MODE_CONNECTOR_SVIDEO:
+               case DRM_MODE_CONNECTOR_Component:
+               case DRM_MODE_CONNECTOR_TV:
+                       prop = dev->mode_config.tv_select_subconnector_property;
+                       is_tv = 1;
+                       break;
+               default:
+                       DRM_ERROR("Wrong connector type for this property\n");
+                       return 0;
+       }
+
+       if (!prop) {
+               DRM_ERROR("Unable to find select subconnector property\n");
+               return 0;
+       }
+
+       ret = drm_connector_property_get_value(connector, prop, &subconnector);
+       if (ret)
+               return 0;
+
+       return snprintf(buf, PAGE_SIZE, "%s", is_tv ?
+                       drm_get_tv_select_name((int)subconnector) :
+                       drm_get_dvi_i_select_name((int)subconnector));
+}
+
+static struct device_attribute connector_attrs[] = {
+       __ATTR_RO(status),
+       __ATTR_RO(enabled),
+       __ATTR_RO(dpms),
+       __ATTR_RO(modes),
+};
+
+/* These attributes are for both DVI-I connectors and all types of tv-out. */
+static struct device_attribute connector_attrs_opt1[] = {
+       __ATTR_RO(subconnector),
+       __ATTR_RO(select_subconnector),
+};
+
+static struct bin_attribute edid_attr = {
+       .attr.name = "edid",
+       .attr.mode = 0444,
+       .size = 128,
+       .read = edid_show,
+};
+
+/**
+ * drm_sysfs_connector_add - add an connector to sysfs
+ * @connector: connector to add
+ *
+ * Create an connector device in sysfs, along with its associated connector
+ * properties (so far, connection status, dpms, mode list & edid) and
+ * generate a hotplug event so userspace knows there's a new connector
+ * available.
+ *
+ * Note:
+ * This routine should only be called *once* for each DRM minor registered.
+ * A second call for an already registered device will trigger the BUG_ON
+ * below.
+ */
+int drm_sysfs_connector_add(struct drm_connector *connector)
+{
+       struct drm_device *dev = connector->dev;
+       int ret = 0, i, j;
+
+       /* We shouldn't get called more than once for the same connector */
+       BUG_ON(device_is_registered(&connector->kdev));
+
+       connector->kdev.parent = &dev->primary->kdev;
+       connector->kdev.class = drm_class;
+       connector->kdev.release = drm_sysfs_device_release;
+
+       DRM_DEBUG("adding \"%s\" to sysfs\n",
+                 drm_get_connector_name(connector));
+
+       dev_set_name(&connector->kdev, "card%d-%s",
+                    dev->primary->index, drm_get_connector_name(connector));
+       ret = device_register(&connector->kdev);
+
+       if (ret) {
+               DRM_ERROR("failed to register connector device: %d\n", ret);
+               goto out;
+       }
+
+       /* Standard attributes */
+
+       for (i = 0; i < ARRAY_SIZE(connector_attrs); i++) {
+               ret = device_create_file(&connector->kdev, &connector_attrs[i]);
+               if (ret)
+                       goto err_out_files;
+       }
+
+       /* Optional attributes */
+       /*
+        * In the long run it maybe a good idea to make one set of
+        * optionals per connector type.
+        */
+       switch (connector->connector_type) {
+               case DRM_MODE_CONNECTOR_DVII:
+               case DRM_MODE_CONNECTOR_Composite:
+               case DRM_MODE_CONNECTOR_SVIDEO:
+               case DRM_MODE_CONNECTOR_Component:
+               case DRM_MODE_CONNECTOR_TV:
+                       for (i = 0; i < ARRAY_SIZE(connector_attrs_opt1); i++) {
+                               ret = device_create_file(&connector->kdev, &connector_attrs_opt1[i]);
+                               if (ret)
+                                       goto err_out_files;
+                       }
+                       break;
+               default:
+                       break;
+       }
+
+       ret = sysfs_create_bin_file(&connector->kdev.kobj, &edid_attr);
+       if (ret)
+               goto err_out_files;
+
+       /* Let userspace know we have a new connector */
+       drm_sysfs_hotplug_event(dev);
+
+       return 0;
+
+err_out_files:
+       if (i > 0)
+               for (j = 0; j < i; j++)
+                       device_remove_file(&connector->kdev,
+                                          &connector_attrs[j]); /* was [i]: removed the wrong attr repeatedly */
+       device_unregister(&connector->kdev);
+
+out:
+       return ret;
+}
+EXPORT_SYMBOL(drm_sysfs_connector_add);
+
+/**
+ * drm_sysfs_connector_remove - remove an connector device from sysfs
+ * @connector: connector to remove
+ *
+ * Remove @connector and its associated attributes from sysfs.  Note that
+ * the device model core will take care of sending the "remove" uevent
+ * at this time, so we don't need to do it.
+ *
+ * Note:
+ * This routine should only be called if the connector was previously
+ * successfully registered.  If @connector hasn't been registered yet,
+ * you'll likely see a panic somewhere deep in sysfs code when called.
+ */
+void drm_sysfs_connector_remove(struct drm_connector *connector)
+{
+       int i;
+
+       DRM_DEBUG("removing \"%s\" from sysfs\n",
+                 drm_get_connector_name(connector));
+
+       for (i = 0; i < ARRAY_SIZE(connector_attrs); i++)
+               device_remove_file(&connector->kdev, &connector_attrs[i]);
+       sysfs_remove_bin_file(&connector->kdev.kobj, &edid_attr);
+       device_unregister(&connector->kdev);
+}
+EXPORT_SYMBOL(drm_sysfs_connector_remove);
+
+/**
+ * drm_sysfs_hotplug_event - generate a DRM uevent
+ * @dev: DRM device
+ *
+ * Send a uevent for the DRM device specified by @dev.  Currently we only
+ * set HOTPLUG=1 in the uevent environment, but this could be expanded to
+ * deal with other types of events.
+ */
+void drm_sysfs_hotplug_event(struct drm_device *dev)
+{
+       char *event_string = "HOTPLUG=1";
+       char *envp[] = { event_string, NULL };
+
+       DRM_DEBUG("generating hotplug event\n");
+
+       kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, envp);
+}
+EXPORT_SYMBOL(drm_sysfs_hotplug_event);
+
+/**
+ * drm_sysfs_device_add - adds a class device to sysfs for a character driver
+ * @dev: DRM device to be added
+ * @head: DRM head in question
+ *
+ * Add a DRM device to the DRM's device model class.  We use @dev's PCI device
+ * as the parent for the Linux device, and make sure it has a file containing
+ * the driver we're using (for userspace compatibility).
+ */
+int drm_sysfs_device_add(struct drm_minor *minor)
+{
+       int err;
+       char *minor_str;
+
+       minor->kdev.parent = &minor->dev->pdev->dev;
+       minor->kdev.class = drm_class;
+       minor->kdev.release = drm_sysfs_device_release;
+       minor->kdev.devt = minor->device;
+       minor->kdev.type = &drm_sysfs_device_minor;
+       if (minor->type == DRM_MINOR_CONTROL)
+               minor_str = "controlD%d";
+        else if (minor->type == DRM_MINOR_RENDER)
+                minor_str = "renderD%d";
+        else
+                minor_str = "card%d";
+
+       dev_set_name(&minor->kdev, minor_str, minor->index);
+
+       err = device_register(&minor->kdev);
+       if (err) {
+               DRM_ERROR("device add failed: %d\n", err);
+               goto err_out;
+       }
+
+       return 0;
+
+err_out:
+       return err;
+}
+
+/**
+ * drm_sysfs_device_remove - remove DRM device
+ * @dev: DRM device to remove
+ *
+ * This call unregisters and cleans up a class device that was created with a
+ * call to drm_sysfs_device_add()
+ */
+void drm_sysfs_device_remove(struct drm_minor *minor)
+{
+       device_unregister(&minor->kdev);
+}
+
+
+/**
+ * drm_class_device_register - Register a struct device in the drm class.
+ *
+ * @dev: pointer to struct device to register.
+ *
+ * @dev should have all relevant members pre-filled with the exception
+ * of the class member. In particular, the device_type member must
+ * be set.
+ */
+
+int drm_class_device_register(struct device *dev)
+{
+       dev->class = drm_class;
+       return device_register(dev);
+}
+EXPORT_SYMBOL_GPL(drm_class_device_register);
+
+void drm_class_device_unregister(struct device *dev)
+{
+       return device_unregister(dev);
+}
+EXPORT_SYMBOL_GPL(drm_class_device_unregister);
diff --git a/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_vm.c b/services4/3rdparty/linux_drm/kbuild/tmp_omap3430_linux_release_drm/drm_vm.c
new file mode 100644 (file)
index 0000000..4ac900f
--- /dev/null
@@ -0,0 +1,678 @@
+/**
+ * \file drm_vm.c
+ * Memory mapping for DRM
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#if defined(__ia64__)
+#include <linux/efi.h>
+#endif
+
+static void drm_vm_open(struct vm_area_struct *vma);
+static void drm_vm_close(struct vm_area_struct *vma);
+
+static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)     /* arch-specific page protection for register/framebuffer maps */
+{
+       pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
+
+#if defined(__i386__) || defined(__x86_64__)
+       if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
+               pgprot_val(tmp) |= _PAGE_PCD;   /* cache disable */
+               pgprot_val(tmp) &= ~_PAGE_PWT;
+       }
+#elif defined(__powerpc__)
+       pgprot_val(tmp) |= _PAGE_NO_CACHE;
+       if (map_type == _DRM_REGISTERS)
+               pgprot_val(tmp) |= _PAGE_GUARDED;       /* forbid speculative access to registers */
+#elif defined(__ia64__)
+       if (efi_range_is_wc(vma->vm_start, vma->vm_end -
+                                   vma->vm_start))
+               tmp = pgprot_writecombine(tmp);
+       else
+               tmp = pgprot_noncached(tmp);
+#elif defined(__sparc__)
+       tmp = pgprot_noncached(tmp);
+#endif
+       return tmp;
+}
+
+static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)    /* page protection for DMA/SG maps */
+{
+       pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
+
+#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
+       pgprot_val(tmp) |= _PAGE_NO_CACHE;      /* pgprot_t can be a struct (STRICT_MM_TYPECHECKS); use the accessor like drm_io_prot() does */
+#endif
+       return tmp;
+}
+
+/**
+ * \c fault method for AGP virtual memory.
+ *
+ * \param vma virtual memory area.
+ * \param vmf fault information.
+ * \return zero on success, or VM_FAULT_SIGBUS on failure.
+ *
+ * Find the right map and if it's AGP memory find the real physical page to
+ * map, get the page, increment the use count and return it.
+ */
+#if __OS_HAS_AGP
+static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       struct drm_file *priv = vma->vm_file->private_data;
+       struct drm_device *dev = priv->minor->dev;
+       struct drm_local_map *map = NULL;
+       struct drm_map_list *r_list;
+       struct drm_hash_item *hash;
+
+       /*
+        * Find the right map
+        */
+       if (!drm_core_has_AGP(dev))
+               goto vm_fault_error;
+
+       if (!dev->agp || !dev->agp->cant_use_aperture)  /* fault path only used when the CPU can't touch the aperture */
+               goto vm_fault_error;
+
+       if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
+               goto vm_fault_error;
+
+       r_list = drm_hash_entry(hash, struct drm_map_list, hash);
+       map = r_list->map;
+
+       if (map && map->type == _DRM_AGP) {
+               /*
+                * Using vm_pgoff as a selector forces us to use this unusual
+                * addressing scheme.
+                */
+               resource_size_t offset = (unsigned long)vmf->virtual_address -
+                       vma->vm_start;
+               resource_size_t baddr = map->offset + offset;
+               struct drm_agp_mem *agpmem;
+               struct page *page;
+
+#ifdef __alpha__
+               /*
+                * Adjust to a bus-relative address
+                */
+               baddr -= dev->hose->mem_space->start;
+#endif
+
+               /*
+                * It's AGP memory - find the real physical page to map
+                */
+               list_for_each_entry(agpmem, &dev->agp->memory, head) {
+                       if (agpmem->bound <= baddr &&
+                           agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
+                               break;
+               }
+
+               if (!agpmem)    /* list exhausted without a match */
+                       goto vm_fault_error;
+
+               /*
+                * Get the page, inc the use count, and return it
+                */
+               offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
+               page = agpmem->memory->pages[offset];
+               get_page(page);
+               vmf->page = page;
+
+               DRM_DEBUG
+                   ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
+                    (unsigned long long)baddr,
+                    agpmem->memory->pages[offset],
+                    (unsigned long long)offset,
+                    page_count(page));
+               return 0;
+       }
+vm_fault_error:
+       return VM_FAULT_SIGBUS; /* Disallow mremap */
+}
+#else                          /* __OS_HAS_AGP */
+static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       return VM_FAULT_SIGBUS; /* no AGP support compiled in */
+}
+#endif                         /* __OS_HAS_AGP */
+
+/**
+ * \c fault method for shared virtual memory.
+ *
+ * \param vma virtual memory area.
+ * \param vmf fault information.
+ * \return zero on success, or VM_FAULT_SIGBUS on failure.
+ *
+ * Get the mapping, find the real physical page to map, get the page, and
+ * return it.
+ */
+static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       struct drm_local_map *map = vma->vm_private_data;
+       unsigned long offset;
+       unsigned long i;
+       struct page *page;
+
+       if (!map)
+               return VM_FAULT_SIGBUS; /* Nothing allocated */
+
+       offset = (unsigned long)vmf->virtual_address - vma->vm_start;
+       i = (unsigned long)map->handle + offset;
+       page = vmalloc_to_page((void *)i);      /* SHM maps are vmalloc-backed */
+       if (!page)
+               return VM_FAULT_SIGBUS;
+       get_page(page);
+       vmf->page = page;
+
+       DRM_DEBUG("shm_fault 0x%lx\n", offset);
+       return 0;
+}
+
+/**
+ * \c close method for shared virtual memory.
+ *
+ * \param vma virtual memory area.
+ *
+ * Deletes map information if we are the last
+ * person to close a mapping and it's not in the global maplist.
+ */
+static void drm_vm_shm_close(struct vm_area_struct *vma)
+{
+       struct drm_file *priv = vma->vm_file->private_data;
+       struct drm_device *dev = priv->minor->dev;
+       struct drm_vma_entry *pt, *temp;
+       struct drm_local_map *map;
+       struct drm_map_list *r_list;
+       int found_maps = 0;
+
+       DRM_DEBUG("0x%08lx,0x%08lx\n",
+                 vma->vm_start, vma->vm_end - vma->vm_start);
+       atomic_dec(&dev->vma_count);
+
+       map = vma->vm_private_data;
+
+       mutex_lock(&dev->struct_mutex);
+       list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
+               if (pt->vma->vm_private_data == map)    /* count live VMAs still using this map */
+                       found_maps++;
+               if (pt->vma == vma) {
+                       list_del(&pt->head);
+                       kfree(pt);
+               }
+       }
+
+       /* We were the only map that was found */
+       if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
+               /* Check to see if we are in the maplist, if we are not, then
+                * we delete this mappings information.
+                */
+               found_maps = 0;
+               list_for_each_entry(r_list, &dev->maplist, head) {
+                       if (r_list->map == map)
+                               found_maps++;
+               }
+
+               if (!found_maps) {
+                       drm_dma_handle_t dmah;
+
+                       switch (map->type) {    /* release backing storage per map type */
+                       case _DRM_REGISTERS:
+                       case _DRM_FRAME_BUFFER:
+                               if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
+                                       int retcode;
+                                       retcode = mtrr_del(map->mtrr,
+                                                          map->offset,
+                                                          map->size);
+                                       DRM_DEBUG("mtrr_del = %d\n", retcode);
+                               }
+                               iounmap(map->handle);
+                               break;
+                       case _DRM_SHM:
+                               vfree(map->handle);
+                               break;
+                       case _DRM_AGP:
+                       case _DRM_SCATTER_GATHER:
+                               break;  /* backing memory owned elsewhere */
+                       case _DRM_CONSISTENT:
+                               dmah.vaddr = map->handle;
+                               dmah.busaddr = map->offset;
+                               dmah.size = map->size;
+                               __drm_pci_free(dev, &dmah);
+                               break;
+                       case _DRM_GEM:
+                               DRM_ERROR("tried to rmmap GEM object\n");
+                               break;
+                       }
+                       kfree(map);
+               }
+       }
+       mutex_unlock(&dev->struct_mutex);
+}
+
+/**
+ * \c fault method for DMA virtual memory.
+ *
+ * \param vma virtual memory area.
+ * \param vmf fault information.
+ * \return zero on success, or VM_FAULT_SIGBUS on failure.
+ *
+ * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
+ */
+static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       struct drm_file *priv = vma->vm_file->private_data;
+       struct drm_device *dev = priv->minor->dev;
+       struct drm_device_dma *dma = dev->dma;
+       unsigned long offset;
+       unsigned long page_nr;
+       struct page *page;
+
+       if (!dma)
+               return VM_FAULT_SIGBUS; /* Error */
+       if (!dma->pagelist)
+               return VM_FAULT_SIGBUS; /* Nothing allocated */
+
+       offset = (unsigned long)vmf->virtual_address - vma->vm_start;   /* vm_[pg]off[set] should be 0 */
+       page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
+       page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));
+
+       get_page(page);
+       vmf->page = page;
+
+       DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
+       return 0;
+}
+
+/**
+ * \c fault method for scatter-gather virtual memory.
+ *
+ * \param vma virtual memory area.
+ * \param vmf fault information.
+ * \return zero on success, or VM_FAULT_SIGBUS on failure.
+ *
+ * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
+ */
+static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       struct drm_local_map *map = vma->vm_private_data;
+       struct drm_file *priv = vma->vm_file->private_data;
+       struct drm_device *dev = priv->minor->dev;
+       struct drm_sg_mem *entry = dev->sg;
+       unsigned long offset;
+       unsigned long map_offset;
+       unsigned long page_offset;
+       struct page *page;
+
+       if (!entry)
+               return VM_FAULT_SIGBUS; /* Error */
+       if (!entry->pagelist)
+               return VM_FAULT_SIGBUS; /* Nothing allocated */
+
+       offset = (unsigned long)vmf->virtual_address - vma->vm_start;
+       map_offset = map->offset - (unsigned long)dev->sg->virtual;     /* map offset within the SG area */
+       page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
+       page = entry->pagelist[page_offset];
+       get_page(page);
+       vmf->page = page;
+
+       return 0;
+}
+
+static int drm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)      /* thin dispatch wrappers for the vm_ops tables below */
+{
+       return drm_do_vm_fault(vma, vmf);
+}
+
+static int drm_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       return drm_do_vm_shm_fault(vma, vmf);
+}
+
+static int drm_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       return drm_do_vm_dma_fault(vma, vmf);
+}
+
+static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       return drm_do_vm_sg_fault(vma, vmf);
+}
+
+/** AGP virtual memory operations */
+static const struct vm_operations_struct drm_vm_ops = {
+       .fault = drm_vm_fault,
+       .open = drm_vm_open,
+       .close = drm_vm_close,
+};
+
+/** Shared virtual memory operations */
+static const struct vm_operations_struct drm_vm_shm_ops = {
+       .fault = drm_vm_shm_fault,
+       .open = drm_vm_open,
+       .close = drm_vm_shm_close,      /* SHM is the only type with its own close */
+};
+
+/** DMA virtual memory operations */
+static const struct vm_operations_struct drm_vm_dma_ops = {
+       .fault = drm_vm_dma_fault,
+       .open = drm_vm_open,
+       .close = drm_vm_close,
+};
+
+/** Scatter-gather virtual memory operations */
+static const struct vm_operations_struct drm_vm_sg_ops = {
+       .fault = drm_vm_sg_fault,
+       .open = drm_vm_open,
+       .close = drm_vm_close,
+};
+
+/**
+ * \c open method for all virtual memory types (caller holds struct_mutex).
+ *
+ * \param vma virtual memory area.
+ *
+ * Create a new drm_vma_entry structure as the \p vma private data entry and
+ * add it to drm_device::vmalist.
+ */
+void drm_vm_open_locked(struct vm_area_struct *vma)
+{
+       struct drm_file *priv = vma->vm_file->private_data;
+       struct drm_device *dev = priv->minor->dev;
+       struct drm_vma_entry *vma_entry;
+
+       DRM_DEBUG("0x%08lx,0x%08lx\n",
+                 vma->vm_start, vma->vm_end - vma->vm_start);
+       atomic_inc(&dev->vma_count);
+
+       vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
+       if (vma_entry) {        /* allocation failure tolerated: entry just missing from vmalist */
+               vma_entry->vma = vma;
+               vma_entry->pid = current->pid;
+               list_add(&vma_entry->head, &dev->vmalist);
+       }
+}
+
+static void drm_vm_open(struct vm_area_struct *vma)    /* locking wrapper around drm_vm_open_locked() */
+{
+       struct drm_file *priv = vma->vm_file->private_data;
+       struct drm_device *dev = priv->minor->dev;
+
+       mutex_lock(&dev->struct_mutex);
+       drm_vm_open_locked(vma);
+       mutex_unlock(&dev->struct_mutex);
+}
+
+/**
+ * \c close method for all virtual memory types.
+ *
+ * \param vma virtual memory area.
+ *
+ * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
+ * free it.
+ */
+static void drm_vm_close(struct vm_area_struct *vma)
+{
+       struct drm_file *priv = vma->vm_file->private_data;
+       struct drm_device *dev = priv->minor->dev;
+       struct drm_vma_entry *pt, *temp;
+
+       DRM_DEBUG("0x%08lx,0x%08lx\n",
+                 vma->vm_start, vma->vm_end - vma->vm_start);
+       atomic_dec(&dev->vma_count);
+
+       mutex_lock(&dev->struct_mutex);
+       list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
+               if (pt->vma == vma) {   /* remove the tracking entry added in drm_vm_open_locked() */
+                       list_del(&pt->head);
+                       kfree(pt);
+                       break;
+               }
+       }
+       mutex_unlock(&dev->struct_mutex);
+}
+
+/**
+ * mmap DMA memory.
+ *
+ * \param filp DRM file pointer.
+ * \param vma virtual memory area.
+ * \return zero on success or a negative number on failure.
+ *
+ * Sets the virtual memory area operations structure to vm_dma_ops, the file
+ * pointer, and calls vm_open().
+ */
+static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
+{
+       struct drm_file *priv = filp->private_data;
+       struct drm_device *dev;
+       struct drm_device_dma *dma;
+       unsigned long length = vma->vm_end - vma->vm_start;
+
+       dev = priv->minor->dev;
+       dma = dev->dma;
+       DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
+                 vma->vm_start, vma->vm_end, vma->vm_pgoff);
+
+       /* Length must match exact page count */
+       if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
+               return -EINVAL;
+       }
+
+       if (!capable(CAP_SYS_ADMIN) &&
+           (dma->flags & _DRM_DMA_USE_PCI_RO)) {       /* enforce read-only for unprivileged mappers */
+               vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
+#if defined(__i386__) || defined(__x86_64__)
+               pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
+#else
+               /* Ye gads this is ugly.  With more thought
+                  we could move this up higher and use
+                  `protection_map' instead.  */
+               vma->vm_page_prot =
+                   __pgprot(pte_val
+                            (pte_wrprotect
+                             (__pte(pgprot_val(vma->vm_page_prot)))));
+#endif
+       }
+
+       vma->vm_ops = &drm_vm_dma_ops;
+
+       vma->vm_flags |= VM_RESERVED;   /* Don't swap */
+       vma->vm_flags |= VM_DONTEXPAND;
+
+       vma->vm_file = filp;    /* Needed for drm_vm_open() */
+       drm_vm_open_locked(vma);
+       return 0;
+}
+
+resource_size_t drm_core_get_map_ofs(struct drm_local_map * map)       /* default driver get_map_ofs(): map offset as-is */
+{
+       return map->offset;
+}
+
+EXPORT_SYMBOL(drm_core_get_map_ofs);
+
+resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)   /* bus-address adjustment; non-zero only on alpha */
+{
+#ifdef __alpha__
+       return dev->hose->dense_mem_base - dev->hose->mem_space->start;
+#else
+       return 0;
+#endif
+}
+
+EXPORT_SYMBOL(drm_core_get_reg_ofs);
+
+/**
+ * mmap DRM memory (all map types; caller holds struct_mutex).
+ *
+ * \param filp DRM file pointer.
+ * \param vma virtual memory area.
+ * \return zero on success or a negative number on failure.
+ *
+ * If the virtual memory area has no offset associated with it then it's a DMA
+ * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist,
+ * checks that the restricted flag is not set, sets the virtual memory operations
+ * according to the mapping type and remaps the pages. Finally sets the file
+ * pointer and calls vm_open().
+ */
+int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
+{
+       struct drm_file *priv = filp->private_data;
+       struct drm_device *dev = priv->minor->dev;
+       struct drm_local_map *map = NULL;
+       resource_size_t offset = 0;
+       struct drm_hash_item *hash;
+
+       DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
+                 vma->vm_start, vma->vm_end, vma->vm_pgoff);
+
+       if (!priv->authenticated)
+               return -EACCES;
+
+       /* We check for "dma". On Apple's UniNorth, it's valid to have
+        * the AGP mapped at physical address 0
+        * --BenH.
+        */
+       if (!vma->vm_pgoff
+#if __OS_HAS_AGP
+           && (!dev->agp
+               || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
+#endif
+           )
+               return drm_mmap_dma(filp, vma);
+
+       if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
+               DRM_ERROR("Could not find map\n");
+               return -EINVAL;
+       }
+
+       map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
+       if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
+               return -EPERM;
+
+       /* Check for valid size. */
+       if (map->size < vma->vm_end - vma->vm_start)
+               return -EINVAL;
+
+       if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
+               vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
+#if defined(__i386__) || defined(__x86_64__)
+               pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
+#else
+               /* Ye gads this is ugly.  With more thought
+                  we could move this up higher and use
+                  `protection_map' instead.  */
+               vma->vm_page_prot =
+                   __pgprot(pte_val
+                            (pte_wrprotect
+                             (__pte(pgprot_val(vma->vm_page_prot)))));
+#endif
+       }
+
+       switch (map->type) {
+       case _DRM_AGP:
+               if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
+                       /*
+                        * On some platforms we can't talk to bus dma address from the CPU, so for
+                        * memory of type DRM_AGP, we'll deal with sorting out the real physical
+                        * pages and mappings in fault()
+                        */
+#if defined(__powerpc__)
+                       pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
+#endif
+                       vma->vm_ops = &drm_vm_ops;
+                       break;
+               }
+               /* fall through to _DRM_FRAME_BUFFER... */
+       case _DRM_FRAME_BUFFER:
+       case _DRM_REGISTERS:
+               offset = dev->driver->get_reg_ofs(dev);
+               vma->vm_flags |= VM_IO; /* not in core dump */
+               vma->vm_page_prot = drm_io_prot(map->type, vma);
+               if (io_remap_pfn_range(vma, vma->vm_start,
+                                      (map->offset + offset) >> PAGE_SHIFT,
+                                      vma->vm_end - vma->vm_start,
+                                      vma->vm_page_prot))
+                       return -EAGAIN;
+               DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
+                         " offset = 0x%llx\n",
+                         map->type,
+                         vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset));
+               vma->vm_ops = &drm_vm_ops;
+               break;
+       case _DRM_CONSISTENT:
+               /* Consistent memory is really like shared memory. But
+                * it's allocated in a different way, so avoid fault */
+               if (remap_pfn_range(vma, vma->vm_start,
+                   page_to_pfn(virt_to_page(map->handle)),
+                   vma->vm_end - vma->vm_start, vma->vm_page_prot))
+                       return -EAGAIN;
+               vma->vm_page_prot = drm_dma_prot(map->type, vma);       /* NOTE(review): prot assigned after remap_pfn_range - confirm ordering */
+       /* fall through to _DRM_SHM */
+       case _DRM_SHM:
+               vma->vm_ops = &drm_vm_shm_ops;
+               vma->vm_private_data = (void *)map;
+               /* Don't let this area swap.  Change when
+                  DRM_KERNEL advisory is supported. */
+               vma->vm_flags |= VM_RESERVED;
+               break;
+       case _DRM_SCATTER_GATHER:
+               vma->vm_ops = &drm_vm_sg_ops;
+               vma->vm_private_data = (void *)map;
+               vma->vm_flags |= VM_RESERVED;
+               vma->vm_page_prot = drm_dma_prot(map->type, vma);
+               break;
+       default:
+               return -EINVAL; /* This should never happen. */
+       }
+       vma->vm_flags |= VM_RESERVED;   /* Don't swap */
+       vma->vm_flags |= VM_DONTEXPAND;
+
+       vma->vm_file = filp;    /* Needed for drm_vm_open() */
+       drm_vm_open_locked(vma);
+       return 0;
+}
+
+int drm_mmap(struct file *filp, struct vm_area_struct *vma)    /* public mmap entry: takes struct_mutex then defers to drm_mmap_locked() */
+{
+       struct drm_file *priv = filp->private_data;
+       struct drm_device *dev = priv->minor->dev;
+       int ret;
+
+       mutex_lock(&dev->struct_mutex);
+       ret = drm_mmap_locked(filp, vma);
+       mutex_unlock(&dev->struct_mutex);
+
+       return ret;
+}
+EXPORT_SYMBOL(drm_mmap);
similarity index 80%
rename from services4/srvkm/hwdefs/sgxcoretypes.h
rename to services4/3rdparty/linux_drm/pvr_drm_mod.h
index 6dc26ef..32d6bb1 100644 (file)
  *
  ******************************************************************************/
 
-#ifndef _SGXCORETYPES_KM_H_
-#define _SGXCORETYPES_KM_H_
+#ifndef        __PVR_DRM_MOD_H__
+#define        __PVR_DRM_MOD_H__
 
-typedef enum
-{
-       SGX_CORE_ID_INVALID = 0,
-       SGX_CORE_ID_530 = 2,
-       SGX_CORE_ID_535 = 3,
-} SGX_CORE_ID_TYPE;
+int drm_pvr_dev_add(void);
 
-typedef struct _SGX_CORE_INFO
-{
-       SGX_CORE_ID_TYPE        eID;
-       IMG_UINT32                      uiRev;
-} SGX_CORE_INFO, *PSGX_CORE_INFO;
-
-#endif 
+void drm_pvr_dev_remove(void);
 
+#endif 
diff --git a/services4/3rdparty/linux_drm/pvr_drm_stubs.c b/services4/3rdparty/linux_drm/pvr_drm_stubs.c
new file mode 100644 (file)
index 0000000..0688bf5
--- /dev/null
@@ -0,0 +1,193 @@
+/**********************************************************************
+ *
+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ * 
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ * 
+ * This program is distributed in the hope it will be useful but, except 
+ * as otherwise stated in writing, without any warranty; without even the 
+ * implied warranty of merchantability or fitness for a particular purpose. 
+ * See the GNU General Public License for more details.
+ * 
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * 
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <asm/system.h>
+
+#include "pvr_drm_mod.h"
+
+#define        DRV_MSG_PREFIX_STR "pvr drm: "
+
+#define        SGX_VENDOR_ID           1       /* arbitrary non-zero IDs for the fake SGX PCI identity */
+#define        SGX_DEVICE_ID           1
+#define        SGX_SUB_VENDOR_ID       1
+#define        SGX_SUB_DEVICE_ID       1
+
+#if defined(DEBUG)
+#define        DEBUG_PRINTK(format, args...) printk(format, ## args)
+#else
+#define        DEBUG_PRINTK(format, args...)   /* compiled out in release builds */
+#endif
+
+#define        CLEAR_STRUCT(x) memset(&(x), 0, sizeof(x))
+
+static struct pci_bus pvr_pci_bus;     /* fake bus backing the fake SGX device */
+static struct pci_dev pvr_pci_dev;     /* fake PCI device handed to the DRM core */
+
+static bool bDeviceIsRegistered;
+
+static void
+release_device(struct device *dev)     /* no-op release: pvr_pci_dev is static, nothing to free */
+{
+}
+
+int
+drm_pvr_dev_add(void)  /* register the fake SGX device; idempotent on success */
+{
+       int ret;
+
+       DEBUG_PRINTK(KERN_INFO DRV_MSG_PREFIX_STR "%s\n", __FUNCTION__);
+
+       if (bDeviceIsRegistered)
+       {
+               DEBUG_PRINTK(KERN_WARNING DRV_MSG_PREFIX_STR "%s: Device already registered\n", __FUNCTION__);
+               return 0;
+       }
+
+       /* Describe the fake device so pci_get_subsys() below can match it */
+       pvr_pci_dev.vendor = SGX_VENDOR_ID;
+       pvr_pci_dev.device = SGX_DEVICE_ID;
+       pvr_pci_dev.subsystem_vendor = SGX_SUB_VENDOR_ID;
+       pvr_pci_dev.subsystem_device = SGX_SUB_DEVICE_ID;
+
+       pvr_pci_dev.bus = &pvr_pci_bus;
+
+       dev_set_name(&pvr_pci_dev.dev, "%s", "SGX");
+       pvr_pci_dev.dev.release = release_device;
+
+       ret = device_register(&pvr_pci_dev.dev);
+       if (ret != 0)
+       {
+               printk(KERN_ERR DRV_MSG_PREFIX_STR "%s: device_register failed (%d)\n", __FUNCTION__, ret);
+               return ret;     /* leave bDeviceIsRegistered false so remove() won't unregister; NOTE(review): put_device() may be required here - confirm */
+       }
+
+       bDeviceIsRegistered = true;     /* mark registered only on success */
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_pvr_dev_add);
+
+void
+drm_pvr_dev_remove(void)       /* unregister the fake SGX device and reset the fake PCI state */
+{
+       DEBUG_PRINTK(KERN_INFO DRV_MSG_PREFIX_STR "%s\n", __FUNCTION__);
+
+       if (bDeviceIsRegistered)
+       {
+               DEBUG_PRINTK(KERN_INFO DRV_MSG_PREFIX_STR "%s: Unregistering device\n", __FUNCTION__);
+
+               device_unregister(&pvr_pci_dev.dev);
+               bDeviceIsRegistered = false;
+
+               /* wipe the static structs so a later drm_pvr_dev_add() starts clean */
+               CLEAR_STRUCT(pvr_pci_dev);
+               CLEAR_STRUCT(pvr_pci_bus);
+       }
+       else
+       {
+               DEBUG_PRINTK(KERN_WARNING DRV_MSG_PREFIX_STR "%s: Device not registered\n", __FUNCTION__);
+       }
+}
+EXPORT_SYMBOL(drm_pvr_dev_remove);
+
+void
+pci_disable_device(struct pci_dev *dev)        /* stub: no real PCI hardware behind the fake device */
+{
+}
+
+struct pci_dev *
+pci_dev_get(struct pci_dev *dev)       /* stub: no refcounting, hand the same struct back */
+{
+       return dev;
+}
+
+void
+pci_set_master(struct pci_dev *dev)    /* stub: bus mastering is meaningless here */
+{
+}
+
+#define        PCI_ID_COMP(field, value) (((value) == PCI_ANY_ID) || \
+                       ((field) == (value)))
+
+struct pci_dev *
+pci_get_subsys(unsigned int vendor, unsigned int device,
+       unsigned int ss_vendor, unsigned int ss_device, struct pci_dev *from)
+{
+       /* Only one fake device exists, so only a fresh search (from == NULL) can match. */
+       if (from == NULL &&
+               PCI_ID_COMP(pvr_pci_dev.vendor, vendor) &&
+               PCI_ID_COMP(pvr_pci_dev.device, device) &&
+               PCI_ID_COMP(pvr_pci_dev.subsystem_vendor, ss_vendor) &&
+               PCI_ID_COMP(pvr_pci_dev.subsystem_device, ss_device))
+       {
+                       DEBUG_PRINTK(KERN_INFO DRV_MSG_PREFIX_STR "%s: Found %x %x %x %x\n", __FUNCTION__, vendor, device, ss_vendor, ss_device);
+
+                       return &pvr_pci_dev;
+       }
+
+       if (from == NULL)
+       {
+               DEBUG_PRINTK(KERN_INFO DRV_MSG_PREFIX_STR "%s: Couldn't find %x %x %x %x\n", __FUNCTION__, vendor, device, ss_vendor, ss_device);
+       }
+
+       return NULL;
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34))
+int
+pci_set_dma_mask(struct pci_dev *dev, u64 mask)        /* stub: pretend any DMA mask is acceptable */
+{
+       return 0;
+}
+#endif
+
+void
+pci_unregister_driver(struct pci_driver *drv)  /* stub: driver registration is a no-op below */
+{
+}
+
+int
+__pci_register_driver(struct pci_driver *drv, struct module *owner,
+       const char *mod_name)   /* stub: always report success */
+{
+       return 0;
+}
+
+int
+pci_enable_device(struct pci_dev *dev) /* stub: always report success */
+{
+       return 0;
+}
+
+void
+__bad_cmpxchg(volatile void *ptr, int size)    /* linker hook for unsupported cmpxchg sizes; just log */
+{
+       printk(KERN_ERR DRV_MSG_PREFIX_STR "%s: ptr %p size %u\n",
+               __FUNCTION__, ptr, size);
+}
+
index eaef12f..2deaa7a 100644 (file)
@@ -94,6 +94,20 @@ typedef struct _SGX_BRIDGE_INIT_INFO_
 #if defined(FIX_HW_BRN_29823)
        IMG_HANDLE      hKernelDummyTermStreamMemInfo;
 #endif
+
+#if defined(FIX_HW_BRN_31542)
+       IMG_HANDLE hKernelClearClipWAVDMStreamMemInfo;
+       IMG_HANDLE hKernelClearClipWAIndexStreamMemInfo;
+       IMG_HANDLE hKernelClearClipWAPDSMemInfo;
+       IMG_HANDLE hKernelClearClipWAUSEMemInfo;
+       IMG_HANDLE hKernelClearClipWAParamMemInfo;
+       IMG_HANDLE hKernelClearClipWAPMPTMemInfo;
+       IMG_HANDLE hKernelClearClipWADPMLSSMemInfo;
+       IMG_HANDLE hKernelClearClipWATPCMemInfo;
+       IMG_HANDLE hKernelClearClipWAPSGRgnHdrMemInfo;
+       IMG_HANDLE hKernelClearClipWAPartiPIMMemInfo;
+#endif
+
 #if defined(PVRSRV_USSE_EDM_STATUS_DEBUG)
        IMG_HANDLE      hKernelEDMStatusBufferMemInfo;
 #endif
@@ -175,6 +189,7 @@ typedef struct _SGX_CCB_KICK_
 #if (defined(NO_HARDWARE) || defined(PDUMP))
        IMG_BOOL        bTerminateOrAbort;
 #endif
+       IMG_BOOL        bLastInScene;
 
        
        IMG_UINT32      ui32CCBOffset;
index 55e5647..b727ed9 100644 (file)
@@ -3096,7 +3096,7 @@ static PVRSRV_ERROR ModifyCompleteSyncOpsCallBack(IMG_PVOID               pvParam,
                                goto OpFlushedComplete;
                        }
                        PVR_DPF((PVR_DBG_WARNING, "ModifyCompleteSyncOpsCallBack: waiting for current Ops to flush"));
-                       OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+                       OSSleepms(1);
                } END_LOOP_UNTIL_TIMEOUT();
                
                PVR_DPF((PVR_DBG_ERROR, "ModifyCompleteSyncOpsCallBack: timeout whilst waiting for current Ops to flush."));
index e1b7320..7e5a63c 100644 (file)
@@ -961,6 +961,89 @@ SGXDevInitPart2BW(IMG_UINT32 ui32BridgeID,
        }
 #endif
 
+#if defined(FIX_HW_BRN_31542)
+       eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+                                                  &hDummy,
+                                                  psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAVDMStreamMemInfo,
+                                                  PVRSRV_HANDLE_TYPE_MEM_INFO);
+       if (eError != PVRSRV_OK)
+       {
+               bLookupFailed = IMG_TRUE;
+       }
+       eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+                                                  &hDummy,
+                                                  psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAIndexStreamMemInfo,
+                                                  PVRSRV_HANDLE_TYPE_MEM_INFO);
+       if (eError != PVRSRV_OK)
+       {
+               bLookupFailed = IMG_TRUE;
+       }
+       eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+                                                  &hDummy,
+                                                  psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAPDSMemInfo,
+                                                  PVRSRV_HANDLE_TYPE_MEM_INFO);
+       if (eError != PVRSRV_OK)
+       {
+               bLookupFailed = IMG_TRUE;
+       }
+       eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+                                                  &hDummy,
+                                                  psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAUSEMemInfo,
+                                                  PVRSRV_HANDLE_TYPE_MEM_INFO);
+       if (eError != PVRSRV_OK)
+       {
+               bLookupFailed = IMG_TRUE;
+       }
+       eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+                                                  &hDummy,
+                                                  psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAParamMemInfo,
+                                                  PVRSRV_HANDLE_TYPE_MEM_INFO);
+       if (eError != PVRSRV_OK)
+       {
+               bLookupFailed = IMG_TRUE;
+       }
+       eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+                                                  &hDummy,
+                                                  psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAPMPTMemInfo,
+                                                  PVRSRV_HANDLE_TYPE_MEM_INFO);
+       if (eError != PVRSRV_OK)
+       {
+               bLookupFailed = IMG_TRUE;
+       }
+       eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+                                                  &hDummy,
+                                                  psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWADPMLSSMemInfo,
+                                                  PVRSRV_HANDLE_TYPE_MEM_INFO);
+       if (eError != PVRSRV_OK)
+       {
+               bLookupFailed = IMG_TRUE;
+       }
+       eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+                                                  &hDummy,
+                                                  psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWATPCMemInfo,
+                                                  PVRSRV_HANDLE_TYPE_MEM_INFO);
+       if (eError != PVRSRV_OK)
+       {
+               bLookupFailed = IMG_TRUE;
+       }
+       eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+                                                  &hDummy,
+                                                  psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAPSGRgnHdrMemInfo,
+                                                  PVRSRV_HANDLE_TYPE_MEM_INFO);
+       if (eError != PVRSRV_OK)
+       {
+               bLookupFailed = IMG_TRUE;
+       }
+       eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+                                                  &hDummy,
+                                                  psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAPartiPIMMemInfo,
+                                                  PVRSRV_HANDLE_TYPE_MEM_INFO);
+       if (eError != PVRSRV_OK)
+       {
+               bLookupFailed = IMG_TRUE;
+       }
+#endif
+
 #if defined(PVRSRV_USSE_EDM_STATUS_DEBUG)
        eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
                                                   &hDummy,
@@ -1128,6 +1211,90 @@ SGXDevInitPart2BW(IMG_UINT32 ui32BridgeID,
        }
 #endif
 
+
+#if defined(FIX_HW_BRN_31542)
+       eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
+                                                  &psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAVDMStreamMemInfo,
+                                                  psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAVDMStreamMemInfo,
+                                                  PVRSRV_HANDLE_TYPE_MEM_INFO);
+       if (eError != PVRSRV_OK)
+       {
+               bReleaseFailed = IMG_TRUE;
+       }
+       eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
+                                                  &psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAIndexStreamMemInfo,
+                                                  psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAIndexStreamMemInfo,
+                                                  PVRSRV_HANDLE_TYPE_MEM_INFO);
+       if (eError != PVRSRV_OK)
+       {
+               bReleaseFailed = IMG_TRUE;
+       }
+       eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
+                                                  &psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAPDSMemInfo,
+                                                  psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAPDSMemInfo,
+                                                  PVRSRV_HANDLE_TYPE_MEM_INFO);
+       if (eError != PVRSRV_OK)
+       {
+               bReleaseFailed = IMG_TRUE;
+       }
+       eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
+                                                  &psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAUSEMemInfo,
+                                                  psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAUSEMemInfo,
+                                                  PVRSRV_HANDLE_TYPE_MEM_INFO);
+       if (eError != PVRSRV_OK)
+       {
+               bReleaseFailed = IMG_TRUE;
+       }
+       eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
+                                                  &psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAParamMemInfo,
+                                                  psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAParamMemInfo,
+                                                  PVRSRV_HANDLE_TYPE_MEM_INFO);
+       if (eError != PVRSRV_OK)
+       {
+               bReleaseFailed = IMG_TRUE;
+       }
+       eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
+                                                  &psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAPMPTMemInfo,
+                                                  psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAPMPTMemInfo,
+                                                  PVRSRV_HANDLE_TYPE_MEM_INFO);
+       if (eError != PVRSRV_OK)
+       {
+               bReleaseFailed = IMG_TRUE;
+       }
+       eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
+                                                  &psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWADPMLSSMemInfo,
+                                                  psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWADPMLSSMemInfo,
+                                                  PVRSRV_HANDLE_TYPE_MEM_INFO);
+       if (eError != PVRSRV_OK)
+       {
+               bReleaseFailed = IMG_TRUE;
+       }
+       eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
+                                                  &psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWATPCMemInfo,
+                                                  psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWATPCMemInfo,
+                                                  PVRSRV_HANDLE_TYPE_MEM_INFO);
+       if (eError != PVRSRV_OK)
+       {
+               bReleaseFailed = IMG_TRUE;
+       }
+       eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
+                                                  &psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAPSGRgnHdrMemInfo,
+                                                  psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAPSGRgnHdrMemInfo,
+                                                  PVRSRV_HANDLE_TYPE_MEM_INFO);
+       if (eError != PVRSRV_OK)
+       {
+               bReleaseFailed = IMG_TRUE;
+       }
+       eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
+                                                  &psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAPartiPIMMemInfo,
+                                                  psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAPartiPIMMemInfo,
+                                                  PVRSRV_HANDLE_TYPE_MEM_INFO);
+       if (eError != PVRSRV_OK)
+       {
+               bReleaseFailed = IMG_TRUE;
+       }
+#endif
+
 #if defined(PVRSRV_USSE_EDM_STATUS_DEBUG)
        eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
                                                   &psSGXDevInitPart2IN->sInitInfo.hKernelEDMStatusBufferMemInfo,
@@ -1251,6 +1418,29 @@ SGXDevInitPart2BW(IMG_UINT32 ui32BridgeID,
        bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
 #endif
 
+#if defined(FIX_HW_BRN_31542)
+       eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAVDMStreamMemInfo);
+       bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
+       eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAIndexStreamMemInfo);
+       bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
+       eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAPDSMemInfo);
+       bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
+       eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAUSEMemInfo);
+       bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
+       eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAParamMemInfo);
+       bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
+       eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAPMPTMemInfo);
+       bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
+       eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWADPMLSSMemInfo);
+       bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
+       eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWATPCMemInfo);
+       bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
+       eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAPSGRgnHdrMemInfo);
+       bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
+       eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAPartiPIMMemInfo);
+       bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
+#endif
+
 #if defined(PVRSRV_USSE_EDM_STATUS_DEBUG)
        eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelEDMStatusBufferMemInfo);
        bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
index ee5cabd..fcba425 100644 (file)
@@ -77,6 +77,8 @@ IMG_UINT32 PVRSRVTimeNow(IMG_VOID)
 
 static IMG_UINT32 PVRSRVGetCPUFreq(IMG_VOID)
 {
+
+
        IMG_UINT32 ui32Time1, ui32Time2;
 
        ui32Time1 = PVRSRVTimeNow();
@@ -93,6 +95,7 @@ static IMG_UINT32 PVRSRVGetCPUFreq(IMG_VOID)
 
 IMG_VOID PVRSRVSetupMetricTimers(IMG_VOID *pvDevInfo)
 {
+
        IMG_UINT32 ui32Loop;
 
        PVR_UNREFERENCED_PARAMETER(pvDevInfo);
index 6e19974..9194966 100644 (file)
@@ -613,32 +613,44 @@ PVRSRV_ERROR IMG_CALLCONV PVRSRVDeinitialiseDevice(IMG_UINT32 ui32DevIndex)
 
 
 IMG_EXPORT
-PVRSRV_ERROR IMG_CALLCONV PollForValueKM (volatile IMG_UINT32* pui32LinMemAddr,
-                                                                                 IMG_UINT32 ui32Value,
-                                                                                 IMG_UINT32 ui32Mask,
-                                                                                 IMG_UINT32 ui32Waitus,
-                                                                                 IMG_UINT32 ui32Tries)
+PVRSRV_ERROR IMG_CALLCONV PollForValueKM (volatile IMG_UINT32* pui32LinMemAddr,
+                                                                                 IMG_UINT32                    ui32Value,
+                                                                                 IMG_UINT32                    ui32Mask,
+                                                                                 IMG_UINT32                    ui32Timeoutus,
+                                                                                 IMG_UINT32                    ui32PollPeriodus,
+                                                                                 IMG_BOOL                              bAllowPreemption)
 {
        {
                IMG_UINT32      ui32ActualValue = 0xFFFFFFFFU; 
-               IMG_UINT32      uiMaxTime = ui32Tries * ui32Waitus;
+
+               if (bAllowPreemption)
+               {
+                       PVR_ASSERT(ui32PollPeriodus >= 1000);
+               }
 
                 
-               LOOP_UNTIL_TIMEOUT(uiMaxTime)
+               LOOP_UNTIL_TIMEOUT(ui32Timeoutus)
                {
                        ui32ActualValue = (*pui32LinMemAddr & ui32Mask);
                        if(ui32ActualValue == ui32Value)
                        {
                                return PVRSRV_OK;
                        }
-                       OSWaitus(ui32Waitus);
+                       
+                       if (bAllowPreemption)
+                       {
+                               OSSleepms(ui32PollPeriodus / 1000);
+                       }
+                       else
+                       {
+                               OSWaitus(ui32PollPeriodus);
+                       }
                } END_LOOP_UNTIL_TIMEOUT();
        
                PVR_DPF((PVR_DBG_ERROR,"PollForValueKM: Timeout. Expected 0x%x but found 0x%x (mask 0x%x).",
                                ui32Value, ui32ActualValue, ui32Mask));
        }
 
-
        return PVRSRV_ERROR_TIMEOUT;
 }
 
index afd30a8..b6c4cda 100644 (file)
@@ -342,7 +342,7 @@ PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyCommandQueueKM(PVRSRV_QUEUE_INFO *psQueue
                        bTimeout = IMG_FALSE;
                        break;
                }
-               OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+               OSSleepms(1);
        } END_LOOP_UNTIL_TIMEOUT();
 
        if (bTimeout)
@@ -460,7 +460,7 @@ PVRSRV_ERROR IMG_CALLCONV PVRSRVGetQueueSpaceKM(PVRSRV_QUEUE_INFO *psQueue,
                        bTimeout = IMG_FALSE;
                        break;
                }
-               OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+               OSSleepms(1);
        } END_LOOP_UNTIL_TIMEOUT();
 
        if (bTimeout == IMG_TRUE)
index a0ca3e9..f8e467d 100644 (file)
 #endif 
 
 #if SGX_FEATURE_ADDRESS_SPACE_SIZE == 28
-       #if defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP)
+
+#if defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP)
        #define SGX_GENERAL_MAPPING_HEAP_BASE            0x00001000
        #define SGX_GENERAL_MAPPING_HEAP_SIZE           (0x01800000-0x00001000-0x00001000)
-       #endif
-               
+
        #define SGX_GENERAL_HEAP_BASE                            0x01800000
        #define SGX_GENERAL_HEAP_SIZE                           (0x07000000-0x00001000)
 
+#else
+       #define SGX_GENERAL_HEAP_BASE                            0x00001000
+       #define SGX_GENERAL_HEAP_SIZE                           (0x08800000-0x00001000-0x00001000)
+#endif
+       
        #define SGX_3DPARAMETERS_HEAP_BASE                       0x08800000
        #define SGX_3DPARAMETERS_HEAP_SIZE                      (0x04000000-0x00001000)
 
index deb8a84..c898ab2 100644 (file)
@@ -294,7 +294,7 @@ static PVRSRV_ERROR SGXRunScript(PVRSRV_SGXDEV_INFO *psDevInfo, SGX_INIT_COMMAND
 //Function to configure PLL clocks. Only required for TI814x. For other devices its taken care in u-boot.
 void PLL_Clocks_Config(UWORD32 Base_Address,UWORD32 OSC_FREQ,UWORD32 N,UWORD32 M,UWORD32 M2,UWORD32 CLKCTRL_VAL)
 {
-        UWORD32 m2nval,mn2val,read_clkctrl,clk_out,ref_clk,clkout_dco = 0;
+        UWORD32 m2nval,mn2val,read_clkctrl;
         m2nval = (M2<<16) | N;
         mn2val =  M;
        WR_MEM_32((Base_Address+M2NDIV    ),m2nval);
@@ -402,13 +402,13 @@ PVRSRV_ERROR SGXInitialise(PVRSRV_SGXDEV_INFO     *psDevInfo,
             div_base = ioremap(SGX_TI81xx_CLK_DVDR_ADDR,0x100);
             WR_MEM_32((div_base),0x0);
             pll_base = ioremap(SGX_PLL_BASE,0x100);
-           PLL_Clocks_Config(pll_base,OSC_0,19,800,4,ADPLLJ_CLKCRTL_HS2);
+           PLL_Clocks_Config((UWORD32)pll_base,OSC_0,19,800,4,ADPLLJ_CLKCRTL_HS2);
             iounmap (div_base);
             iounmap (pll_base);
         }
 
 #else
-        if(cpu_is_omap3630())
+        if(cpu_is_omap3630() || cpu_is_omap44xx())
                 OSWriteHWReg(psDevInfo->pvRegsBaseKM, 0xFF08, 0x80000000);//OCP Bypass mode
 #endif
 
@@ -466,13 +466,20 @@ PVRSRV_ERROR SGXInitialise(PVRSRV_SGXDEV_INFO     *psDevInfo,
 #endif 
 
 #if !defined(NO_HARDWARE)
+
+       if (PollForValueKM(&psSGXHostCtl->ui32InitStatus,
+                                           PVRSRV_USSE_EDM_INIT_COMPLETE,
+                                           PVRSRV_USSE_EDM_INIT_COMPLETE,
+                                           MAX_HW_TIME_US,
+                                           MAX_HW_TIME_US/WAIT_TRY_COUNT,
+                                           IMG_FALSE) != PVRSRV_OK)
        
 
-       if (PollForValueKM(&psSGXHostCtl->ui32InitStatus,
-                                          PVRSRV_USSE_EDM_INIT_COMPLETE,
-                                          PVRSRV_USSE_EDM_INIT_COMPLETE,
-                                          MAX_HW_TIME_US/WAIT_TRY_COUNT,
-                                          WAIT_TRY_COUNT) != PVRSRV_OK)
+//     if (PollForValueKM(&psSGXHostCtl->ui32InitStatus,
+//                                        PVRSRV_USSE_EDM_INIT_COMPLETE,
+//                                        PVRSRV_USSE_EDM_INIT_COMPLETE,
+//                                        MAX_HW_TIME_US/WAIT_TRY_COUNT,
+//                                        WAIT_TRY_COUNT) != PVRSRV_OK)
        {
                PVR_DPF((PVR_DBG_ERROR, "SGXInitialise: Wait for uKernel initialisation failed"));
                #if !defined(FIX_HW_BRN_23281)
@@ -2067,11 +2074,19 @@ PVRSRV_ERROR SGXGetMiscInfoUkernel(PVRSRV_SGXDEV_INFO   *psDevInfo,
        sCommandData.ui32Data[1] = psMemInfo->sDevVAddr.uiAddr; 
 
        PDUMPCOMMENT("Microkernel kick for SGXGetMiscInfo");
-       eError = SGXScheduleCCBCommandKM(psDeviceNode,
+/*     eError = SGXScheduleCCBCommandKM(psDeviceNode,
                                                                         SGXMKIF_CMD_GETMISCINFO,
                                                                         &sCommandData,
                                                                         KERNEL_ID,
                                                                         0);
+*/
+
+      eError = SGXScheduleCCBCommandKM(psDeviceNode,
+                                                                         SGXMKIF_CMD_GETMISCINFO,
+                                                                         &sCommandData,
+                                                                         KERNEL_ID,
+                                                                         0,
+                                                                         IMG_FALSE);
 
        if (eError != PVRSRV_OK)
        {
@@ -2177,11 +2192,19 @@ PVRSRV_ERROR SGXGetMiscInfoKM(PVRSRV_SGXDEV_INFO        *psDevInfo,
                        psDevInfo->psSGXHostCtl->ui32BPSetClearSignal = 0;
 
                        PDUMPCOMMENT("Microkernel kick for setting a data breakpoint");
-                       eError = SGXScheduleCCBCommandKM(psDeviceNode,
+/*                     eError = SGXScheduleCCBCommandKM(psDeviceNode,
                                                                                         SGXMKIF_CMD_DATABREAKPOINT,
                                                                                         &sCommandData,
                                                                                         KERNEL_ID,
                                                                                         0);
+*/
+                        eError = SGXScheduleCCBCommandKM(psDeviceNode,
+                                                                                         SGXMKIF_CMD_DATABREAKPOINT,
+                                                                                         &sCommandData,
+                                                                                         KERNEL_ID,
+                                                                                         0,
+                                                                                         IMG_FALSE);
+
 
                        if (eError != PVRSRV_OK)
                        {
@@ -2558,11 +2581,19 @@ PVRSRV_ERROR SGXGetMiscInfoKM(PVRSRV_SGXDEV_INFO        *psDevInfo,
 
                        
                        sCommandData.ui32Data[0] = psSetHWPerfStatus->ui32NewHWPerfStatus;
-                       eError = SGXScheduleCCBCommandKM(psDeviceNode,
+/*                     eError = SGXScheduleCCBCommandKM(psDeviceNode,
                                                                                         SGXMKIF_CMD_SETHWPERFSTATUS,
                                                                                         &sCommandData,
                                                                                         KERNEL_ID,
                                                                                         0);
+*/
+                        eError = SGXScheduleCCBCommandKM(psDeviceNode,
+                                                                                         SGXMKIF_CMD_SETHWPERFSTATUS,
+                                                                                         &sCommandData,
+                                                                                         KERNEL_ID,
+                                                                                         0,
+                                                                                         IMG_FALSE);
+
                        return eError;
                }
 #endif 
index 8a229c9..581640b 100644 (file)
@@ -550,7 +550,7 @@ PVRSRV_ERROR SGXDoKickKM(IMG_HANDLE hDevHandle, SGX_CCB_KICK *psCCBKick)
        }
 #endif 
 
-       eError = SGXScheduleCCBCommandKM(hDevHandle, SGXMKIF_CMD_TA, &psCCBKick->sCommand, KERNEL_ID, 0);
+       eError = SGXScheduleCCBCommandKM(hDevHandle, SGXMKIF_CMD_TA, &psCCBKick->sCommand, KERNEL_ID, 0, psCCBKick->bLastInScene);
        if (eError == PVRSRV_ERROR_RETRY)
        {
                if (psCCBKick->bFirstKickOrResume && psCCBKick->ui32NumDstSyncObjects > 0)
index aeac6e3..427cb50 100644 (file)
@@ -197,8 +197,9 @@ static IMG_VOID SGXPollForClockGating (PVRSRV_SGXDEV_INFO   *psDevInfo,
        if (PollForValueKM((IMG_UINT32 *)psDevInfo->pvRegsBaseKM + (ui32Register >> 2),
                                                0,
                                                ui32RegisterValue,
+                                               MAX_HW_TIME_US,
                                                MAX_HW_TIME_US/WAIT_TRY_COUNT,
-                                               WAIT_TRY_COUNT) != PVRSRV_OK)
+                                               IMG_FALSE) != PVRSRV_OK)
        {
                PVR_DPF((PVR_DBG_ERROR,"SGXPollForClockGating: %s failed.", pszComment));
                PVR_DBG_BREAK;
@@ -251,7 +252,7 @@ PVRSRV_ERROR SGXPrePowerState (IMG_HANDLE                           hDevHandle,
 
                sCommand.ui32Data[1] = ui32PowerCmd;
 
-               eError = SGXScheduleCCBCommand(psDevInfo, SGXMKIF_CMD_POWER, &sCommand, KERNEL_ID, 0);
+               eError = SGXScheduleCCBCommand(psDevInfo, SGXMKIF_CMD_POWER, &sCommand, KERNEL_ID, 0, IMG_FALSE);
                if (eError != PVRSRV_OK)
                {
                        PVR_DPF((PVR_DBG_ERROR,"SGXPrePowerState: Failed to submit power down command"));
@@ -263,8 +264,9 @@ PVRSRV_ERROR SGXPrePowerState (IMG_HANDLE                           hDevHandle,
                if (PollForValueKM(&psDevInfo->psSGXHostCtl->ui32PowerStatus,
                                                        ui32CompleteStatus,
                                                        ui32CompleteStatus,
+                                                       MAX_HW_TIME_US,
                                                        MAX_HW_TIME_US/WAIT_TRY_COUNT,
-                                                       WAIT_TRY_COUNT) != PVRSRV_OK)
+                                                       IMG_FALSE) != PVRSRV_OK)
                {
                        PVR_DPF((PVR_DBG_ERROR,"SGXPrePowerState: Wait for SGX ukernel power transition failed."));
                        PVR_DBG_BREAK;
@@ -371,7 +373,7 @@ PVRSRV_ERROR SGXPostPowerState (IMG_HANDLE                          hDevHandle,
                        SGXMKIF_COMMAND         sCommand = {0};
 
                        sCommand.ui32Data[1] = PVRSRV_POWERCMD_RESUME;
-                       eError = SGXScheduleCCBCommand(psDevInfo, SGXMKIF_CMD_POWER, &sCommand, ISR_ID, 0);
+                       eError = SGXScheduleCCBCommand(psDevInfo, SGXMKIF_CMD_POWER, &sCommand, ISR_ID, 0, IMG_FALSE);
                        if (eError != PVRSRV_OK)
                        {
                                PVR_DPF((PVR_DBG_ERROR,"SGXPostPowerState failed to schedule CCB command: %u", eError));
index 68d0e79..847ca24 100644 (file)
@@ -199,8 +199,9 @@ static IMG_VOID SGXResetInvalDC(PVRSRV_SGXDEV_INFO  *psDevInfo,
                if (PollForValueKM((IMG_UINT32 *)((IMG_UINT8*)psDevInfo->pvRegsBaseKM + EUR_CR_BIF_MEM_REQ_STAT),
                                                        0,
                                                        EUR_CR_BIF_MEM_REQ_STAT_READS_MASK,
+                                                       MAX_HW_TIME_US,
                                                        MAX_HW_TIME_US/WAIT_TRY_COUNT,
-                                                       WAIT_TRY_COUNT) != PVRSRV_OK)
+                                                       IMG_FALSE) != PVRSRV_OK)
                {
                        PVR_DPF((PVR_DBG_ERROR,"Wait for DC invalidate failed."));
                        PVR_DBG_BREAK;
index 0e9fe81..1624eb2 100644 (file)
@@ -229,7 +229,7 @@ IMG_EXPORT PVRSRV_ERROR SGXSubmitTransferKM(IMG_HANDLE hDevHandle, PVRSRV_TRANSF
 
        sCommand.ui32Data[1] = psKick->sHWTransferContextDevVAddr.uiAddr;
        
-       eError = SGXScheduleCCBCommandKM(hDevHandle, SGXMKIF_CMD_TRANSFER, &sCommand, KERNEL_ID, psKick->ui32PDumpFlags);
+       eError = SGXScheduleCCBCommandKM(hDevHandle, SGXMKIF_CMD_TRANSFER, &sCommand, KERNEL_ID, psKick->ui32PDumpFlags, IMG_FALSE);
 
        if (eError == PVRSRV_ERROR_RETRY)
        {
@@ -491,7 +491,7 @@ IMG_EXPORT PVRSRV_ERROR SGXSubmit2DKM(IMG_HANDLE hDevHandle, PVRSRV_2D_SGX_KICK
 
        sCommand.ui32Data[1] = psKick->sHW2DContextDevVAddr.uiAddr;
        
-       eError = SGXScheduleCCBCommandKM(hDevHandle, SGXMKIF_CMD_2D, &sCommand, KERNEL_ID, psKick->ui32PDumpFlags);     
+       eError = SGXScheduleCCBCommandKM(hDevHandle, SGXMKIF_CMD_2D, &sCommand, KERNEL_ID, psKick->ui32PDumpFlags, IMG_FALSE);  
 
        if (eError == PVRSRV_ERROR_RETRY)
        {
index d8b6de7..6952571 100644 (file)
@@ -142,7 +142,7 @@ static INLINE SGXMKIF_COMMAND * SGXAcquireKernelCCBSlot(PVRSRV_SGX_CCB_INFO *psC
                        return &psCCB->psCommands[*psCCB->pui32WriteOffset];
                }
 
-               OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+               OSSleepms(1);
        } END_LOOP_UNTIL_TIMEOUT();
 
        
@@ -153,12 +153,12 @@ PVRSRV_ERROR SGXScheduleCCBCommand(PVRSRV_SGXDEV_INFO     *psDevInfo,
                                                                   SGXMKIF_CMD_TYPE             eCmdType,
                                                                   SGXMKIF_COMMAND              *psCommandData,
                                                                   IMG_UINT32                   ui32CallerID,
-                                                                  IMG_UINT32                   ui32PDumpFlags)
+                                                                  IMG_UINT32                   ui32PDumpFlags,
+                                                                  IMG_BOOL                     bLastInScene)
 {
        PVRSRV_SGX_CCB_INFO *psKernelCCB;
        PVRSRV_ERROR eError = PVRSRV_OK;
        SGXMKIF_COMMAND *psSGXCommand;
-       SYS_DATA *psSysData;
 #if defined(PDUMP)
        IMG_VOID *pvDumpCommand;
        IMG_BOOL bPDumpIsSuspended = PDumpIsSuspended();
@@ -187,7 +187,8 @@ PVRSRV_ERROR SGXScheduleCCBCommand(PVRSRV_SGXDEV_INFO       *psDevInfo,
                                                                           SGXMKIF_CMD_PROCESS_QUEUES,
                                                                           &sCacheCommand,
                                                                           ui32CallerID,
-                                                                          ui32PDumpFlags);
+                                                                          ui32PDumpFlags,
+                                                                          bLastInScene);
                if (eError != PVRSRV_OK)
                {
                        goto Exit;
@@ -198,8 +199,9 @@ PVRSRV_ERROR SGXScheduleCCBCommand(PVRSRV_SGXDEV_INFO       *psDevInfo,
                if(PollForValueKM(&psSGXHostCtl->ui32InvalStatus,
                                                  PVRSRV_USSE_EDM_BIF_INVAL_COMPLETE,
                                                  PVRSRV_USSE_EDM_BIF_INVAL_COMPLETE,
-                                                 2 * MAX_HW_TIME_US/WAIT_TRY_COUNT,
-                                                 WAIT_TRY_COUNT) != PVRSRV_OK)
+                                                 2 * MAX_HW_TIME_US,
+                                                 MAX_HW_TIME_US/WAIT_TRY_COUNT,
+                                                 IMG_FALSE) != PVRSRV_OK)
                {
                        PVR_DPF((PVR_DBG_ERROR,"SGXScheduleCCBCommand: Wait for uKernel to Invalidate BIF cache failed"));
                        PVR_DBG_BREAK;
@@ -265,20 +267,25 @@ PVRSRV_ERROR SGXScheduleCCBCommand(PVRSRV_SGXDEV_INFO     *psDevInfo,
                goto Exit;
        }
 
-       
-       SysAcquireData(&psSysData);
-
-       if(psSysData->ePendingCacheOpType == PVRSRV_MISC_INFO_CPUCACHEOP_FLUSH)
-       {
-               OSFlushCPUCacheKM();
-       }
-       else if(psSysData->ePendingCacheOpType == PVRSRV_MISC_INFO_CPUCACHEOP_CLEAN)
+       if((eCmdType == SGXMKIF_CMD_TA) && bLastInScene)
        {
-               OSCleanCPUCacheKM();
-       }
+               SYS_DATA *psSysData;
 
-       
-       psSysData->ePendingCacheOpType = PVRSRV_MISC_INFO_CPUCACHEOP_NONE;
+               
+               SysAcquireData(&psSysData);
+
+               if(psSysData->ePendingCacheOpType == PVRSRV_MISC_INFO_CPUCACHEOP_FLUSH) 
+               {
+                       OSFlushCPUCacheKM();
+               }
+               else if(psSysData->ePendingCacheOpType == PVRSRV_MISC_INFO_CPUCACHEOP_CLEAN)
+               {
+                       OSCleanCPUCacheKM();
+               }
+
+               
+               psSysData->ePendingCacheOpType = PVRSRV_MISC_INFO_CPUCACHEOP_NONE;
+       }
 
        PVR_ASSERT(eCmdType < SGXMKIF_CMD_MAX);
        psSGXCommand->ui32ServiceAddress = psDevInfo->aui32HostKickAddr[eCmdType];       
@@ -330,8 +337,9 @@ PVRSRV_ERROR SGXScheduleCCBCommand(PVRSRV_SGXDEV_INFO       *psDevInfo,
        eError = PollForValueKM (psKernelCCB->pui32ReadOffset,
                                                                *psKernelCCB->pui32WriteOffset,
                                                                0xFF,
+                                                               MAX_HW_TIME_US,
                                                                MAX_HW_TIME_US/WAIT_TRY_COUNT,
-                                                               WAIT_TRY_COUNT);
+                                                               IMG_FALSE);
        if (eError != PVRSRV_OK)
        {
                eError = PVRSRV_ERROR_TIMEOUT;
@@ -418,7 +426,8 @@ PVRSRV_ERROR SGXScheduleCCBCommandKM(PVRSRV_DEVICE_NODE             *psDeviceNode,
                                                                         SGXMKIF_CMD_TYPE               eCmdType,
                                                                         SGXMKIF_COMMAND                *psCommandData,
                                                                         IMG_UINT32                             ui32CallerID,
-                                                                        IMG_UINT32                             ui32PDumpFlags)
+                                                                        IMG_UINT32                             ui32PDumpFlags,
+                                                                        IMG_BOOL                               bLastInScene)
 {
        PVRSRV_ERROR            eError;
        PVRSRV_SGXDEV_INFO      *psDevInfo = psDeviceNode->pvDevice;
@@ -465,7 +474,7 @@ PVRSRV_ERROR SGXScheduleCCBCommandKM(PVRSRV_DEVICE_NODE             *psDeviceNode,
                return eError;
        }
 
-       eError = SGXScheduleCCBCommand(psDevInfo, eCmdType, psCommandData, ui32CallerID, ui32PDumpFlags);
+       eError = SGXScheduleCCBCommand(psDevInfo, eCmdType, psCommandData, ui32CallerID, ui32PDumpFlags, bLastInScene);
 
        PVRSRVPowerUnlock(ui32CallerID);
 
@@ -497,7 +506,7 @@ PVRSRV_ERROR SGXScheduleProcessQueuesKM(PVRSRV_DEVICE_NODE *psDeviceNode)
                return PVRSRV_OK;
        }
 
-       eError = SGXScheduleCCBCommandKM(psDeviceNode, SGXMKIF_CMD_PROCESS_QUEUES, &sCommand, ISR_ID, 0);
+       eError = SGXScheduleCCBCommandKM(psDeviceNode, SGXMKIF_CMD_PROCESS_QUEUES, &sCommand, ISR_ID, 0, IMG_FALSE);
        if (eError != PVRSRV_OK)
        {
                PVR_DPF((PVR_DBG_ERROR,"SGXScheduleProcessQueuesKM failed to schedule CCB command: %u", eError));
@@ -551,7 +560,7 @@ IMG_VOID SGXCleanupRequest(PVRSRV_DEVICE_NODE       *psDeviceNode,
                sCommand.ui32Data[0] = ui32CleanupType;
                sCommand.ui32Data[1] = (psHWDataDevVAddr == IMG_NULL) ? 0 : psHWDataDevVAddr->uiAddr;
 
-               eError = SGXScheduleCCBCommandKM(psDeviceNode, SGXMKIF_CMD_CLEANUP, &sCommand, KERNEL_ID, 0);
+               eError = SGXScheduleCCBCommandKM(psDeviceNode, SGXMKIF_CMD_CLEANUP, &sCommand, KERNEL_ID, 0, IMG_FALSE);
                if (eError != PVRSRV_OK)
                {
                        PVR_DPF((PVR_DBG_ERROR,"SGXCleanupRequest: Failed to submit clean-up command"));
@@ -563,8 +572,9 @@ IMG_VOID SGXCleanupRequest(PVRSRV_DEVICE_NODE       *psDeviceNode,
                if(PollForValueKM(&psSGXHostCtl->ui32CleanupStatus,
                                                  PVRSRV_USSE_EDM_CLEANUPCMD_COMPLETE,
                                                  PVRSRV_USSE_EDM_CLEANUPCMD_COMPLETE,
-                                                 2 * MAX_HW_TIME_US/WAIT_TRY_COUNT,
-                                                 WAIT_TRY_COUNT) != PVRSRV_OK)
+                                                 10 * MAX_HW_TIME_US,
+                                                 1000,
+                                                 IMG_TRUE) != PVRSRV_OK)
                {
                        PVR_DPF((PVR_DBG_ERROR,"SGXCleanupRequest: Wait for uKernel to clean up (%u) failed", ui32CleanupType));
                        PVR_DBG_BREAK;
@@ -943,7 +953,7 @@ PVRSRV_ERROR SGX2DQueryBlitsCompleteKM(PVRSRV_SGXDEV_INFO   *psDevInfo,
 
        LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
        {
-               OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+               OSSleepms(1);
 
                if(SGX2DQuerySyncOpsComplete(psSyncInfo, ui32ReadOpsPending, ui32WriteOpsPending))
                {
@@ -952,7 +962,7 @@ PVRSRV_ERROR SGX2DQueryBlitsCompleteKM(PVRSRV_SGXDEV_INFO   *psDevInfo,
                        return PVRSRV_OK;
                }
 
-               OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+               OSSleepms(1);
        } END_LOOP_UNTIL_TIMEOUT();
 
        
index aeee635..abf9dc2 100644 (file)
@@ -46,13 +46,15 @@ PVRSRV_ERROR SGXScheduleCCBCommand(PVRSRV_SGXDEV_INFO       *psDevInfo,
                                                                   SGXMKIF_CMD_TYPE             eCommandType,
                                                                   SGXMKIF_COMMAND              *psCommandData,
                                                                   IMG_UINT32                   ui32CallerID,
-                                                                  IMG_UINT32                   ui32PDumpFlags);
+                                                                  IMG_UINT32                   ui32PDumpFlags,
+                                                                  IMG_BOOL                     bLastInScene);
 IMG_IMPORT
 PVRSRV_ERROR SGXScheduleCCBCommandKM(PVRSRV_DEVICE_NODE                *psDeviceNode,
                                                                         SGXMKIF_CMD_TYPE               eCommandType,
                                                                         SGXMKIF_COMMAND                *psCommandData,
                                                                         IMG_UINT32                             ui32CallerID,
-                                                                        IMG_UINT32                             ui32PDumpFlags);
+                                                                        IMG_UINT32                             ui32PDumpFlags,
+                                                                        IMG_BOOL                               bLastInScene);
 
 IMG_IMPORT
 PVRSRV_ERROR SGXScheduleProcessQueuesKM(PVRSRV_DEVICE_NODE *psDeviceNode);
index 943c2bd..c42eadc 100644 (file)
@@ -46,7 +46,7 @@
        #if defined(__arm__) || defined(__sh__)
                #define PGPROT_WC(pv)   pgprot_writecombine(pv)
        #else
-               #if defined(__i386__)
+               #if defined(__i386__) || defined(__mips__)
                        #define PGPROT_WC(pv)   pgprot_noncached(pv)
                #else
                        #define PGPROT_WC(pv)   pgprot_noncached(pv)
index 81970c6..e50bae7 100644 (file)
@@ -132,6 +132,17 @@ PVRSRV_ERROR OSAllocMem_Impl(IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size, IMG_PVOI
     return PVRSRV_OK;
 }
 
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,24))
+
+static inline int is_vmalloc_addr(const void *pvCpuVAddr)
+{
+        unsigned long lAddr = (unsigned long)pvCpuVAddr;
+        return lAddr >= VMALLOC_START && lAddr < VMALLOC_END;
+}
+
+#endif
+
+
        
 #if !defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
 PVRSRV_ERROR OSFreeMem_Impl(IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size, IMG_PVOID pvCpuVAddr, IMG_HANDLE hBlockAlloc)
@@ -518,6 +529,11 @@ IMG_VOID OSWaitus(IMG_UINT32 ui32Timeus)
     udelay(ui32Timeus);
 }
 
+IMG_VOID OSSleepms(IMG_UINT32 ui32Timems)
+{
+    msleep(ui32Timems);
+}
+
 
 IMG_UINT32 OSGetCurrentProcessIDKM(IMG_VOID)
 {
@@ -2705,15 +2721,69 @@ IMG_BOOL CheckExecuteCacheOp(IMG_HANDLE hOSMemHandle,
        {
                case LINUX_MEM_AREA_VMALLOC:
                {
-                       pvMinVAddr = psLinuxMemArea->uData.sVmalloc.pvVmallocAddress + ui32AreaOffset;
+                if(is_vmalloc_addr(pvRangeAddrStart))
+                        {
+       
+                              pvMinVAddr = psLinuxMemArea->uData.sVmalloc.pvVmallocAddress + ui32AreaOffset;
+
+                                if(pvRangeAddrStart < pvMinVAddr)
+                                        goto err_blocked;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34))
+        if (map_unmap == 0 ) {
+
+           pfnInnerCacheOp(pvRangeAddrStart, pvRangeAddrStart + ui32Length);
+
+        } else if (map_unmap == 1) {
+
+           pfnInnerCacheOp_mapUnamp (pvRangeAddrStart, ui32Length, DMA_TO_DEVICE);
+
+        } else {
+
+           pfnInnerCacheOp_mapUnamp (pvRangeAddrStart, ui32Length, DMA_FROM_DEVICE);
+
+        }
+#else
+pfnInnerCacheOp(pvRangeAddrStart, pvRangeAddrStart + ui32Length);
+#endif
+
+//                                pfnInnerCacheOp(pvRangeAddrStart, pvRangeAddrStart + ui32Length);
+                        }
+
+               else
+                {
+
+                                pvMinVAddr = FindMMapBaseVAddr(psMMapOffsetStructList,pvRangeAddrStart, ui32Length);
+                                if(!pvMinVAddr)
+                                        goto err_blocked;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34))
+        if (map_unmap == 0 ) {
+
+           pfnInnerCacheOp(pvRangeAddrStart, pvRangeAddrStart + ui32Length);
+
+        } else if (map_unmap == 1) {
+
+           pfnInnerCacheOp_mapUnamp (pvRangeAddrStart, ui32Length, DMA_TO_DEVICE);
+
+        } else {
+
+           pfnInnerCacheOp_mapUnamp (pvRangeAddrStart, ui32Length, DMA_FROM_DEVICE);
+
+        }
+#else
+pfnInnerCacheOp(pvRangeAddrStart, pvRangeAddrStart + ui32Length);
+#endif
 
+
+//                                pfnInnerCacheOp(pvRangeAddrStart, pvRangeAddrStart + ui32Length);
                        
-                       if(pvRangeAddrStart < pvMinVAddr &&
-                          ui32AreaOffset + ui32Length > ui32AreaLength)
-                               goto err_blocked;
 
 #if defined(CONFIG_OUTER_CACHE)
+                               pvRangeAddrStart = psLinuxMemArea->uData.sVmalloc.pvVmallocAddress +(ui32AreaOffset & PAGE_MASK) + (pvRangeAddrStart - pvMinVAddr);
+                  }
+
                        pfnMemAreaToPhys = VMallocAreaToPhys;
+#else
+}
 #endif
                        break;
                }
@@ -2742,6 +2812,26 @@ IMG_BOOL CheckExecuteCacheOp(IMG_HANDLE hOSMemHandle,
                                                                                   pvRangeAddrStart, ui32Length);
                        if(!pvMinVAddr)
                                goto err_blocked;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34))
+        if (map_unmap == 0 ) {
+
+           pfnInnerCacheOp(pvRangeAddrStart, pvRangeAddrStart + ui32Length);
+
+        } else if (map_unmap == 1) {
+
+           pfnInnerCacheOp_mapUnamp (pvRangeAddrStart, ui32Length, DMA_TO_DEVICE);
+
+        } else {
+
+           pfnInnerCacheOp_mapUnamp (pvRangeAddrStart, ui32Length, DMA_FROM_DEVICE);
+
+        }
+#else
+pfnInnerCacheOp(pvRangeAddrStart, pvRangeAddrStart + ui32Length);
+#endif
+
+//                     pfnInnerCacheOp(pvRangeAddrStart, pvRangeAddrStart + ui32Length);
+
 
 #if defined(CONFIG_OUTER_CACHE)
                        ui32PageNumOffset = ((ui32AreaOffset & PAGE_MASK) + (pvRangeAddrStart - pvMinVAddr)) >> PAGE_SHIFT;
@@ -2756,6 +2846,25 @@ IMG_BOOL CheckExecuteCacheOp(IMG_HANDLE hOSMemHandle,
                                                                                   pvRangeAddrStart, ui32Length);
                        if(!pvMinVAddr)
                                goto err_blocked;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34))
+        if (map_unmap == 0 ) {
+
+           pfnInnerCacheOp(pvRangeAddrStart, pvRangeAddrStart + ui32Length);
+
+        } else if (map_unmap == 1) {
+
+           pfnInnerCacheOp_mapUnamp (pvRangeAddrStart, ui32Length, DMA_TO_DEVICE);
+
+        } else {
+
+           pfnInnerCacheOp_mapUnamp (pvRangeAddrStart, ui32Length, DMA_FROM_DEVICE);
+
+        }
+#else
+pfnInnerCacheOp(pvRangeAddrStart, pvRangeAddrStart + ui32Length);
+#endif
+
+               //      pfnInnerCacheOp(pvRangeAddrStart, pvRangeAddrStart + ui32Length);
 
 #if defined(CONFIG_OUTER_CACHE)
                        ui32PageNumOffset = ((ui32AreaOffset & PAGE_MASK) + (pvRangeAddrStart - pvMinVAddr)) >> PAGE_SHIFT;
@@ -2768,6 +2877,7 @@ IMG_BOOL CheckExecuteCacheOp(IMG_HANDLE hOSMemHandle,
                        PVR_DBG_BREAK;
        }
 
+#if 0
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34))
         if (map_unmap == 0 ) {
 
@@ -2785,10 +2895,13 @@ IMG_BOOL CheckExecuteCacheOp(IMG_HANDLE hOSMemHandle,
 #else
 pfnInnerCacheOp(pvRangeAddrStart, pvRangeAddrStart + ui32Length);
 #endif
+#endif
 
 #if defined(CONFIG_OUTER_CACHE)
        
-       if (pfnMemAreaToPhys != IMG_NULL)
+//     if (pfnMemAreaToPhys != IMG_NULL)
+      PVR_ASSERT(pfnMemAreaToPhys != IMG_NULL);
+
        {
                unsigned long ulStart, ulEnd, ulLength, ulStartOffset, ulEndOffset;
                IMG_UINT32 i, ui32NumPages;
@@ -2816,10 +2929,10 @@ pfnInnerCacheOp(pvRangeAddrStart, pvRangeAddrStart + ui32Length);
                        pfnOuterCacheOp(ulStart, ulEnd);
                }
        }
-       else
-       {
-               PVR_DBG_BREAK;
-       }
+//     else
+//     {
+//             PVR_DBG_BREAK;
+//     }
 #endif
 
        return IMG_TRUE;
index 3abf604..3a59ce0 100644 (file)
@@ -432,7 +432,6 @@ PVRSRV_BridgeDispatchKM(struct file *pFile, unsigned int unref__ ioctlCmd, unsig
                        }
                        break;
                }
-
                case PVRSRV_BRIDGE_MAP_DEV_MEMORY:
                {
                        PVRSRV_BRIDGE_IN_MAP_DEV_MEMORY *psMapDevMemIN =
index 94d65c2..1127915 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/version.h>
 #include <linux/fs.h>
 #include <linux/proc_fs.h>
+#include <linux/sched.h>
 #include <asm/ioctl.h>
 #include <drm/drmP.h>
 #include <drm/drm.h>
 #define PVR_DRM_NAME   PVRSRV_MODNAME
 #define PVR_DRM_DESC   "Imagination Technologies PVR DRM"
 
+DECLARE_WAIT_QUEUE_HEAD(sWaitForInit);
+
+IMG_BOOL bInitComplete;
+IMG_BOOL bInitFailed;
+
 #if !defined(PVR_DRI_DRM_NOT_PCI)
 struct pci_dev *gpsPVRLDMDev;
 #endif
@@ -98,7 +104,7 @@ static struct pci_device_id asPciIdList[] = {
 DRI_DRM_STATIC int
 PVRSRVDrmLoad(struct drm_device *dev, unsigned long flags)
 {
-       int iRes;
+       int iRes=0;
 
        PVR_TRACE(("PVRSRVDrmLoad"));
 
@@ -111,7 +117,7 @@ PVRSRVDrmLoad(struct drm_device *dev, unsigned long flags)
        iRes = dbgdrv_init();
        if (iRes != 0)
        {
-               return iRes;
+               goto exit;
        }
 #endif
        
@@ -128,8 +134,8 @@ PVRSRVDrmLoad(struct drm_device *dev, unsigned long flags)
                goto exit_pvrcore_cleanup;
        }
 #endif
-       return 0;
-
+//     return 0;
+        goto exit;
 #if defined(DISPLAY_CONTROLLER)
 exit_pvrcore_cleanup:
        PVRCore_Cleanup();
@@ -138,6 +144,16 @@ exit_dbgdrv_cleanup:
 #if defined(PDUMP)
        dbgdrv_cleanup();
 #endif
+exit:
+       if (iRes != 0)
+       {
+               bInitFailed = IMG_TRUE;
+       }
+       bInitComplete = IMG_TRUE;
+
+       wake_up_interruptible(&sWaitForInit);
+
+
        return iRes;
 }
 
@@ -162,6 +178,34 @@ PVRSRVDrmUnload(struct drm_device *dev)
 DRI_DRM_STATIC int
 PVRSRVDrmOpen(struct drm_device *dev, struct drm_file *file)
 {
+while (!bInitComplete)
+       {
+               DEFINE_WAIT(sWait);
+
+               prepare_to_wait(&sWaitForInit, &sWait, TASK_INTERRUPTIBLE);
+
+               if (!bInitComplete)
+               {
+                       PVR_TRACE(("%s: Waiting for module initialisation to complete", __FUNCTION__));
+
+                       schedule();
+               }
+
+               finish_wait(&sWaitForInit, &sWait);
+
+               if (signal_pending(current))
+               {
+                       return -ERESTARTSYS;
+               }
+       }
+
+       if (bInitFailed)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Module initialisation failed", __FUNCTION__));
+               return -EINVAL;
+       }
+
+
        return PVRSRVOpen(dev, file);
 }
 
@@ -263,6 +307,12 @@ PVRDRM_Display_ioctl(struct drm_device *dev, void *arg, struct drm_file *pFile)
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33))
 #define        PVR_DRM_FOPS_IOCTL      .unlocked_ioctl
 #define        PVR_DRM_UNLOCKED        DRM_UNLOCKED
+#define DRM_IOCTL_PVR_DRM_SRVKM_IOCTL PVR_DRM_SRVKM_IOCTL
+#define DRM_PVR_DRM_SRVKM_IOCTL PVR_DRM_SRVKM_IOCTL
+#define DRM_PVR_DRM_IS_MASTER_IOCTL PVR_DRM_IS_MASTER_IOCTL
+#define DRM_IOCTL_PVR_DRM_IS_MASTER_IOCTL PVR_DRM_IS_MASTER_IOCTL
+#define DRM_PVR_DRM_UNPRIV_IOCTL PVR_DRM_UNPRIV_IOCTL
+#define DRM_IOCTL_PVR_DRM_UNPRIV_IOCTL PVR_DRM_UNPRIV_IOCTL
 #else
 #define        PVR_DRM_FOPS_IOCTL      .ioctl
 #define        PVR_DRM_UNLOCKED        0
@@ -270,15 +320,29 @@ PVRDRM_Display_ioctl(struct drm_device *dev, void *arg, struct drm_file *pFile)
 
 #if !defined(SUPPORT_DRI_DRM_EXT)
 struct drm_ioctl_desc sPVRDrmIoctls[] = {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
+       DRM_IOCTL_DEF_DRV(PVR_DRM_SRVKM_IOCTL, PVRSRV_BridgeDispatchKM, PVR_DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(PVR_DRM_IS_MASTER_IOCTL, PVRDRMIsMaster, DRM_MASTER | PVR_DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(PVR_DRM_UNPRIV_IOCTL, PVRDRMUnprivCmd, PVR_DRM_UNLOCKED),
+#else
        DRM_IOCTL_DEF(PVR_DRM_SRVKM_IOCTL, PVRSRV_BridgeDispatchKM, PVR_DRM_UNLOCKED),
        DRM_IOCTL_DEF(PVR_DRM_IS_MASTER_IOCTL, PVRDRMIsMaster, DRM_MASTER | PVR_DRM_UNLOCKED),
        DRM_IOCTL_DEF(PVR_DRM_UNPRIV_IOCTL, PVRDRMUnprivCmd, PVR_DRM_UNLOCKED),
+#endif
 #if defined(PDUMP)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
+       DRM_IOCTL_DEF_DRV(PVR_DRM_DBGDRV_IOCTL, dbgdrv_ioctl, PVR_DRM_UNLOCKED),
+#else
        DRM_IOCTL_DEF(PVR_DRM_DBGDRV_IOCTL, dbgdrv_ioctl, PVR_DRM_UNLOCKED),
 #endif
+#endif
 #if defined(DISPLAY_CONTROLLER) && defined(PVR_DISPLAY_CONTROLLER_DRM_IOCTL)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
+       DRM_IOCTL_DEF_DRV(PVR_DRM_DISP_IOCTL, PVRDRM_Display_ioctl, DRM_MASTER | PVR_DRM_UNLOCKED)
+#else
        DRM_IOCTL_DEF(PVR_DRM_DISP_IOCTL, PVRDRM_Display_ioctl, DRM_MASTER | PVR_DRM_UNLOCKED)
 #endif
+#endif
 };
 
 static int pvr_max_ioctl = DRM_ARRAY_SIZE(sPVRDrmIoctls);
@@ -292,8 +356,10 @@ static struct drm_driver sPVRDrmDriver =
        .open = PVRSRVDrmOpen,
        .suspend = PVRSRVDriverSuspend,
        .resume = PVRSRVDriverResume,
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,32))
        .get_map_ofs = drm_core_get_map_ofs,
        .get_reg_ofs = drm_core_get_reg_ofs,
+#endif
        .ioctls = sPVRDrmIoctls,
        .fops = 
        {
diff --git a/services4/srvkm/hwdefs/sgx531defs.h b/services4/srvkm/hwdefs/sgx531defs.h
deleted file mode 100644 (file)
index 9879a76..0000000
+++ /dev/null
@@ -1,584 +0,0 @@
-/**********************************************************************
- *
- * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
- * 
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- * 
- * This program is distributed in the hope it will be useful but, except 
- * as otherwise stated in writing, without any warranty; without even the 
- * implied warranty of merchantability or fitness for a particular purpose. 
- * See the GNU General Public License for more details.
- * 
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- * 
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Imagination Technologies Ltd. <gpl-support@imgtec.com>
- * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
- *
- ******************************************************************************/
-
-#ifndef _SGX531DEFS_KM_H_
-#define _SGX531DEFS_KM_H_
-
-#define EUR_CR_CLKGATECTL                   0x0000
-#define EUR_CR_CLKGATECTL_ISP_CLKG_MASK     0x00000003
-#define EUR_CR_CLKGATECTL_ISP_CLKG_SHIFT    0
-#define EUR_CR_CLKGATECTL_ISP2_CLKG_MASK    0x0000000C
-#define EUR_CR_CLKGATECTL_ISP2_CLKG_SHIFT   2
-#define EUR_CR_CLKGATECTL_TSP_CLKG_MASK     0x00000030
-#define EUR_CR_CLKGATECTL_TSP_CLKG_SHIFT    4
-#define EUR_CR_CLKGATECTL_TE_CLKG_MASK      0x000000C0
-#define EUR_CR_CLKGATECTL_TE_CLKG_SHIFT     6
-#define EUR_CR_CLKGATECTL_MTE_CLKG_MASK     0x00000300
-#define EUR_CR_CLKGATECTL_MTE_CLKG_SHIFT    8
-#define EUR_CR_CLKGATECTL_DPM_CLKG_MASK     0x00000C00
-#define EUR_CR_CLKGATECTL_DPM_CLKG_SHIFT    10
-#define EUR_CR_CLKGATECTL_VDM_CLKG_MASK     0x00003000
-#define EUR_CR_CLKGATECTL_VDM_CLKG_SHIFT    12
-#define EUR_CR_CLKGATECTL_PDS_CLKG_MASK     0x0000C000
-#define EUR_CR_CLKGATECTL_PDS_CLKG_SHIFT    14
-#define EUR_CR_CLKGATECTL_IDXFIFO_CLKG_MASK 0x00030000
-#define EUR_CR_CLKGATECTL_IDXFIFO_CLKG_SHIFT 16
-#define EUR_CR_CLKGATECTL_TA_CLKG_MASK      0x000C0000
-#define EUR_CR_CLKGATECTL_TA_CLKG_SHIFT     18
-#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_MASK 0x01000000
-#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_SHIFT 24
-#define EUR_CR_CLKGATECTL_SYSTEM_CLKG_MASK  0x10000000
-#define EUR_CR_CLKGATECTL_SYSTEM_CLKG_SHIFT 28
-#define EUR_CR_CLKGATECTL2                  0x0004
-#define EUR_CR_CLKGATECTL2_PBE_CLKG_MASK    0x00000003
-#define EUR_CR_CLKGATECTL2_PBE_CLKG_SHIFT   0
-#define EUR_CR_CLKGATECTL2_CACHEL2_CLKG_MASK 0x0000000C
-#define EUR_CR_CLKGATECTL2_CACHEL2_CLKG_SHIFT 2
-#define EUR_CR_CLKGATECTL2_UCACHEL2_CLKG_MASK 0x00000030
-#define EUR_CR_CLKGATECTL2_UCACHEL2_CLKG_SHIFT 4
-#define EUR_CR_CLKGATECTL2_USE0_CLKG_MASK   0x000000C0
-#define EUR_CR_CLKGATECTL2_USE0_CLKG_SHIFT  6
-#define EUR_CR_CLKGATECTL2_ITR0_CLKG_MASK   0x00000300
-#define EUR_CR_CLKGATECTL2_ITR0_CLKG_SHIFT  8
-#define EUR_CR_CLKGATECTL2_TEX0_CLKG_MASK   0x00000C00
-#define EUR_CR_CLKGATECTL2_TEX0_CLKG_SHIFT  10
-#define EUR_CR_CLKGATECTL2_MADD0_CLKG_MASK  0x00003000
-#define EUR_CR_CLKGATECTL2_MADD0_CLKG_SHIFT 12
-#define EUR_CR_CLKGATECTL2_USE1_CLKG_MASK   0x0000C000
-#define EUR_CR_CLKGATECTL2_USE1_CLKG_SHIFT  14
-#define EUR_CR_CLKGATECTL2_ITR1_CLKG_MASK   0x00030000
-#define EUR_CR_CLKGATECTL2_ITR1_CLKG_SHIFT  16
-#define EUR_CR_CLKGATECTL2_TEX1_CLKG_MASK   0x000C0000
-#define EUR_CR_CLKGATECTL2_TEX1_CLKG_SHIFT  18
-#define EUR_CR_CLKGATECTL2_MADD1_CLKG_MASK  0x00300000
-#define EUR_CR_CLKGATECTL2_MADD1_CLKG_SHIFT 20
-#define EUR_CR_CLKGATESTATUS                0x0008
-#define EUR_CR_CLKGATESTATUS_ISP_CLKS_MASK  0x00000001
-#define EUR_CR_CLKGATESTATUS_ISP_CLKS_SHIFT 0
-#define EUR_CR_CLKGATESTATUS_ISP2_CLKS_MASK 0x00000002
-#define EUR_CR_CLKGATESTATUS_ISP2_CLKS_SHIFT 1
-#define EUR_CR_CLKGATESTATUS_TSP_CLKS_MASK  0x00000004
-#define EUR_CR_CLKGATESTATUS_TSP_CLKS_SHIFT 2
-#define EUR_CR_CLKGATESTATUS_TE_CLKS_MASK   0x00000008
-#define EUR_CR_CLKGATESTATUS_TE_CLKS_SHIFT  3
-#define EUR_CR_CLKGATESTATUS_MTE_CLKS_MASK  0x00000010
-#define EUR_CR_CLKGATESTATUS_MTE_CLKS_SHIFT 4
-#define EUR_CR_CLKGATESTATUS_DPM_CLKS_MASK  0x00000020
-#define EUR_CR_CLKGATESTATUS_DPM_CLKS_SHIFT 5
-#define EUR_CR_CLKGATESTATUS_VDM_CLKS_MASK  0x00000040
-#define EUR_CR_CLKGATESTATUS_VDM_CLKS_SHIFT 6
-#define EUR_CR_CLKGATESTATUS_PDS_CLKS_MASK  0x00000080
-#define EUR_CR_CLKGATESTATUS_PDS_CLKS_SHIFT 7
-#define EUR_CR_CLKGATESTATUS_PBE_CLKS_MASK  0x00000100
-#define EUR_CR_CLKGATESTATUS_PBE_CLKS_SHIFT 8
-#define EUR_CR_CLKGATESTATUS_CACHEL2_CLKS_MASK 0x00000200
-#define EUR_CR_CLKGATESTATUS_CACHEL2_CLKS_SHIFT 9
-#define EUR_CR_CLKGATESTATUS_UCACHEL2_CLKS_MASK 0x00000400
-#define EUR_CR_CLKGATESTATUS_UCACHEL2_CLKS_SHIFT 10
-#define EUR_CR_CLKGATESTATUS_USE0_CLKS_MASK 0x00000800
-#define EUR_CR_CLKGATESTATUS_USE0_CLKS_SHIFT 11
-#define EUR_CR_CLKGATESTATUS_ITR0_CLKS_MASK 0x00001000
-#define EUR_CR_CLKGATESTATUS_ITR0_CLKS_SHIFT 12
-#define EUR_CR_CLKGATESTATUS_TEX0_CLKS_MASK 0x00002000
-#define EUR_CR_CLKGATESTATUS_TEX0_CLKS_SHIFT 13
-#define EUR_CR_CLKGATESTATUS_MADD0_CLKS_MASK 0x00004000
-#define EUR_CR_CLKGATESTATUS_MADD0_CLKS_SHIFT 14
-#define EUR_CR_CLKGATESTATUS_USE1_CLKS_MASK 0x00008000
-#define EUR_CR_CLKGATESTATUS_USE1_CLKS_SHIFT 15
-#define EUR_CR_CLKGATESTATUS_ITR1_CLKS_MASK 0x00010000
-#define EUR_CR_CLKGATESTATUS_ITR1_CLKS_SHIFT 16
-#define EUR_CR_CLKGATESTATUS_TEX1_CLKS_MASK 0x00020000
-#define EUR_CR_CLKGATESTATUS_TEX1_CLKS_SHIFT 17
-#define EUR_CR_CLKGATESTATUS_MADD1_CLKS_MASK 0x00040000
-#define EUR_CR_CLKGATESTATUS_MADD1_CLKS_SHIFT 18
-#define EUR_CR_CLKGATESTATUS_IDXFIFO_CLKS_MASK 0x00080000
-#define EUR_CR_CLKGATESTATUS_IDXFIFO_CLKS_SHIFT 19
-#define EUR_CR_CLKGATESTATUS_TA_CLKS_MASK   0x00100000
-#define EUR_CR_CLKGATESTATUS_TA_CLKS_SHIFT  20
-#define EUR_CR_CLKGATECTLOVR                0x000C
-#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_MASK  0x00000003
-#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_SHIFT 0
-#define EUR_CR_CLKGATECTLOVR_ISP2_CLKO_MASK 0x0000000C
-#define EUR_CR_CLKGATECTLOVR_ISP2_CLKO_SHIFT 2
-#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_MASK  0x00000030
-#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_SHIFT 4
-#define EUR_CR_CLKGATECTLOVR_TE_CLKO_MASK   0x000000C0
-#define EUR_CR_CLKGATECTLOVR_TE_CLKO_SHIFT  6
-#define EUR_CR_CLKGATECTLOVR_MTE_CLKO_MASK  0x00000300
-#define EUR_CR_CLKGATECTLOVR_MTE_CLKO_SHIFT 8
-#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_MASK  0x00000C00
-#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_SHIFT 10
-#define EUR_CR_CLKGATECTLOVR_VDM_CLKO_MASK  0x00003000
-#define EUR_CR_CLKGATECTLOVR_VDM_CLKO_SHIFT 12
-#define EUR_CR_CLKGATECTLOVR_PDS_CLKO_MASK  0x0000C000
-#define EUR_CR_CLKGATECTLOVR_PDS_CLKO_SHIFT 14
-#define EUR_CR_CLKGATECTLOVR_IDXFIFO_CLKO_MASK 0x00030000
-#define EUR_CR_CLKGATECTLOVR_IDXFIFO_CLKO_SHIFT 16
-#define EUR_CR_CLKGATECTLOVR_TA_CLKO_MASK   0x000C0000
-#define EUR_CR_CLKGATECTLOVR_TA_CLKO_SHIFT  18
-#define EUR_CR_CORE_ID                      0x0020
-#define EUR_CR_CORE_ID_CONFIG_MASK          0x0000FFFF
-#define EUR_CR_CORE_ID_CONFIG_SHIFT         0
-#define EUR_CR_CORE_ID_ID_MASK              0xFFFF0000
-#define EUR_CR_CORE_ID_ID_SHIFT             16
-#define EUR_CR_CORE_REVISION                0x0024
-#define EUR_CR_CORE_REVISION_MAINTENANCE_MASK 0x000000FF
-#define EUR_CR_CORE_REVISION_MAINTENANCE_SHIFT 0
-#define EUR_CR_CORE_REVISION_MINOR_MASK     0x0000FF00
-#define EUR_CR_CORE_REVISION_MINOR_SHIFT    8
-#define EUR_CR_CORE_REVISION_MAJOR_MASK     0x00FF0000
-#define EUR_CR_CORE_REVISION_MAJOR_SHIFT    16
-#define EUR_CR_CORE_REVISION_DESIGNER_MASK  0xFF000000
-#define EUR_CR_CORE_REVISION_DESIGNER_SHIFT 24
-#define EUR_CR_DESIGNER_REV_FIELD1          0x0028
-#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_MASK 0xFFFFFFFF
-#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_SHIFT 0
-#define EUR_CR_DESIGNER_REV_FIELD2          0x002C
-#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_MASK 0xFFFFFFFF
-#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_SHIFT 0
-#define EUR_CR_SOFT_RESET                   0x0080
-#define EUR_CR_SOFT_RESET_BIF_RESET_MASK    0x00000001
-#define EUR_CR_SOFT_RESET_BIF_RESET_SHIFT   0
-#define EUR_CR_SOFT_RESET_VDM_RESET_MASK    0x00000002
-#define EUR_CR_SOFT_RESET_VDM_RESET_SHIFT   1
-#define EUR_CR_SOFT_RESET_DPM_RESET_MASK    0x00000004
-#define EUR_CR_SOFT_RESET_DPM_RESET_SHIFT   2
-#define EUR_CR_SOFT_RESET_TE_RESET_MASK     0x00000008
-#define EUR_CR_SOFT_RESET_TE_RESET_SHIFT    3
-#define EUR_CR_SOFT_RESET_MTE_RESET_MASK    0x00000010
-#define EUR_CR_SOFT_RESET_MTE_RESET_SHIFT   4
-#define EUR_CR_SOFT_RESET_ISP_RESET_MASK    0x00000020
-#define EUR_CR_SOFT_RESET_ISP_RESET_SHIFT   5
-#define EUR_CR_SOFT_RESET_ISP2_RESET_MASK   0x00000040
-#define EUR_CR_SOFT_RESET_ISP2_RESET_SHIFT  6
-#define EUR_CR_SOFT_RESET_TSP_RESET_MASK    0x00000080
-#define EUR_CR_SOFT_RESET_TSP_RESET_SHIFT   7
-#define EUR_CR_SOFT_RESET_PDS_RESET_MASK    0x00000100
-#define EUR_CR_SOFT_RESET_PDS_RESET_SHIFT   8
-#define EUR_CR_SOFT_RESET_PBE_RESET_MASK    0x00000200
-#define EUR_CR_SOFT_RESET_PBE_RESET_SHIFT   9
-#define EUR_CR_SOFT_RESET_CACHEL2_RESET_MASK 0x00000400
-#define EUR_CR_SOFT_RESET_CACHEL2_RESET_SHIFT 10
-#define EUR_CR_SOFT_RESET_UCACHEL2_RESET_MASK 0x00000800
-#define EUR_CR_SOFT_RESET_UCACHEL2_RESET_SHIFT 11
-#define EUR_CR_SOFT_RESET_MADD_RESET_MASK   0x00001000
-#define EUR_CR_SOFT_RESET_MADD_RESET_SHIFT  12
-#define EUR_CR_SOFT_RESET_ITR_RESET_MASK    0x00002000
-#define EUR_CR_SOFT_RESET_ITR_RESET_SHIFT   13
-#define EUR_CR_SOFT_RESET_TEX_RESET_MASK    0x00004000
-#define EUR_CR_SOFT_RESET_TEX_RESET_SHIFT   14
-#define EUR_CR_SOFT_RESET_USE_RESET_MASK    0x00008000
-#define EUR_CR_SOFT_RESET_USE_RESET_SHIFT   15
-#define EUR_CR_SOFT_RESET_IDXFIFO_RESET_MASK 0x00010000
-#define EUR_CR_SOFT_RESET_IDXFIFO_RESET_SHIFT 16
-#define EUR_CR_SOFT_RESET_TA_RESET_MASK     0x00020000
-#define EUR_CR_SOFT_RESET_TA_RESET_SHIFT    17
-#define EUR_CR_EVENT_HOST_ENABLE2           0x0110
-#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_TA_MASK 0x00000010
-#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_TA_SHIFT 4
-#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_3D_MASK 0x00000008
-#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_3D_SHIFT 3
-#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_DL_MASK 0x00000004
-#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_DL_SHIFT 2
-#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_MASK 0x00000002
-#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_SHIFT 1
-#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_MASK 0x00000001
-#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_SHIFT 0
-#define EUR_CR_EVENT_HOST_CLEAR2            0x0114
-#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_TA_MASK 0x00000010
-#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_TA_SHIFT 4
-#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_3D_MASK 0x00000008
-#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_3D_SHIFT 3
-#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_DL_MASK 0x00000004
-#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_DL_SHIFT 2
-#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_MASK 0x00000002
-#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_SHIFT 1
-#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_MASK 0x00000001
-#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_SHIFT 0
-#define EUR_CR_EVENT_STATUS2                0x0118
-#define EUR_CR_EVENT_STATUS2_TRIG_TA_MASK   0x00000010
-#define EUR_CR_EVENT_STATUS2_TRIG_TA_SHIFT  4
-#define EUR_CR_EVENT_STATUS2_TRIG_3D_MASK   0x00000008
-#define EUR_CR_EVENT_STATUS2_TRIG_3D_SHIFT  3
-#define EUR_CR_EVENT_STATUS2_TRIG_DL_MASK   0x00000004
-#define EUR_CR_EVENT_STATUS2_TRIG_DL_SHIFT  2
-#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_MASK 0x00000002
-#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_SHIFT 1
-#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_MASK 0x00000001
-#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_SHIFT 0
-#define EUR_CR_EVENT_STATUS                 0x012C
-#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_MASK 0x80000000
-#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_SHIFT 31
-#define EUR_CR_EVENT_STATUS_TIMER_MASK      0x20000000
-#define EUR_CR_EVENT_STATUS_TIMER_SHIFT     29
-#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_MASK 0x10000000
-#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_SHIFT 28
-#define EUR_CR_EVENT_STATUS_TWOD_COMPLETE_MASK 0x08000000
-#define EUR_CR_EVENT_STATUS_TWOD_COMPLETE_SHIFT 27
-#define EUR_CR_EVENT_STATUS_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000
-#define EUR_CR_EVENT_STATUS_MADD_CACHE_INVALCOMPLETE_SHIFT 26
-#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000
-#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25
-#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_MASK 0x01000000
-#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_SHIFT 24
-#define EUR_CR_EVENT_STATUS_ISP_END_TILE_MASK 0x00800000
-#define EUR_CR_EVENT_STATUS_ISP_END_TILE_SHIFT 23
-#define EUR_CR_EVENT_STATUS_DPM_INITEND_MASK 0x00400000
-#define EUR_CR_EVENT_STATUS_DPM_INITEND_SHIFT 22
-#define EUR_CR_EVENT_STATUS_OTPM_LOADED_MASK 0x00200000
-#define EUR_CR_EVENT_STATUS_OTPM_LOADED_SHIFT 21
-#define EUR_CR_EVENT_STATUS_OTPM_INV_MASK   0x00100000
-#define EUR_CR_EVENT_STATUS_OTPM_INV_SHIFT  20
-#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_MASK 0x00080000
-#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_SHIFT 19
-#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_MASK 0x00040000
-#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_SHIFT 18
-#define EUR_CR_EVENT_STATUS_ISP_HALT_MASK   0x00020000
-#define EUR_CR_EVENT_STATUS_ISP_HALT_SHIFT  17
-#define EUR_CR_EVENT_STATUS_ISP_VISIBILITY_FAIL_MASK 0x00010000
-#define EUR_CR_EVENT_STATUS_ISP_VISIBILITY_FAIL_SHIFT 16
-#define EUR_CR_EVENT_STATUS_BREAKPOINT_MASK 0x00008000
-#define EUR_CR_EVENT_STATUS_BREAKPOINT_SHIFT 15
-#define EUR_CR_EVENT_STATUS_SW_EVENT_MASK   0x00004000
-#define EUR_CR_EVENT_STATUS_SW_EVENT_SHIFT  14
-#define EUR_CR_EVENT_STATUS_TA_FINISHED_MASK 0x00002000
-#define EUR_CR_EVENT_STATUS_TA_FINISHED_SHIFT 13
-#define EUR_CR_EVENT_STATUS_TA_TERMINATE_MASK 0x00001000
-#define EUR_CR_EVENT_STATUS_TA_TERMINATE_SHIFT 12
-#define EUR_CR_EVENT_STATUS_TPC_CLEAR_MASK  0x00000800
-#define EUR_CR_EVENT_STATUS_TPC_CLEAR_SHIFT 11
-#define EUR_CR_EVENT_STATUS_TPC_FLUSH_MASK  0x00000400
-#define EUR_CR_EVENT_STATUS_TPC_FLUSH_SHIFT 10
-#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_MASK 0x00000200
-#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_SHIFT 9
-#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_MASK 0x00000100
-#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_SHIFT 8
-#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_MASK 0x00000080
-#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_SHIFT 7
-#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_MASK 0x00000040
-#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_SHIFT 6
-#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_MASK 0x00000020
-#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_SHIFT 5
-#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_MASK 0x00000010
-#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_SHIFT 4
-#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_MASK 0x00000008
-#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_SHIFT 3
-#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004
-#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_SHIFT 2
-#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002
-#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_SHIFT 1
-#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_MASK 0x00000001
-#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_SHIFT 0
-#define EUR_CR_EVENT_HOST_ENABLE            0x0130
-#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_MASK 0x80000000
-#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_SHIFT 31
-#define EUR_CR_EVENT_HOST_ENABLE_TIMER_MASK 0x20000000
-#define EUR_CR_EVENT_HOST_ENABLE_TIMER_SHIFT 29
-#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_MASK 0x10000000
-#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_SHIFT 28
-#define EUR_CR_EVENT_HOST_ENABLE_TWOD_COMPLETE_MASK 0x08000000
-#define EUR_CR_EVENT_HOST_ENABLE_TWOD_COMPLETE_SHIFT 27
-#define EUR_CR_EVENT_HOST_ENABLE_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000
-#define EUR_CR_EVENT_HOST_ENABLE_MADD_CACHE_INVALCOMPLETE_SHIFT 26
-#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000
-#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25
-#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_MASK 0x01000000
-#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_SHIFT 24
-#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_MASK 0x00800000
-#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_SHIFT 23
-#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_MASK 0x00400000
-#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_SHIFT 22
-#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_MASK 0x00200000
-#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_SHIFT 21
-#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_MASK 0x00100000
-#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_SHIFT 20
-#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_MASK 0x00080000
-#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_SHIFT 19
-#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_MASK 0x00040000
-#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_SHIFT 18
-#define EUR_CR_EVENT_HOST_ENABLE_ISP_HALT_MASK 0x00020000
-#define EUR_CR_EVENT_HOST_ENABLE_ISP_HALT_SHIFT 17
-#define EUR_CR_EVENT_HOST_ENABLE_ISP_VISIBILITY_FAIL_MASK 0x00010000
-#define EUR_CR_EVENT_HOST_ENABLE_ISP_VISIBILITY_FAIL_SHIFT 16
-#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_MASK 0x00008000
-#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_SHIFT 15
-#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_MASK 0x00004000
-#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_SHIFT 14
-#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_MASK 0x00002000
-#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_SHIFT 13
-#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_MASK 0x00001000
-#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_SHIFT 12
-#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_MASK 0x00000800
-#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_SHIFT 11
-#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_MASK 0x00000400
-#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_SHIFT 10
-#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_MASK 0x00000200
-#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_SHIFT 9
-#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_MASK 0x00000100
-#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_SHIFT 8
-#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_MASK 0x00000080
-#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_SHIFT 7
-#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_MASK 0x00000040
-#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_SHIFT 6
-#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_MASK 0x00000020
-#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_SHIFT 5
-#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_MASK 0x00000010
-#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_SHIFT 4
-#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_MASK 0x00000008
-#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_SHIFT 3
-#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004
-#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_SHIFT 2
-#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002
-#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_SHIFT 1
-#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_MASK 0x00000001
-#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_SHIFT 0
-#define EUR_CR_EVENT_HOST_CLEAR             0x0134
-#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_MASK 0x80000000
-#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_SHIFT 31
-#define EUR_CR_EVENT_HOST_CLEAR_TIMER_MASK  0x20000000
-#define EUR_CR_EVENT_HOST_CLEAR_TIMER_SHIFT 29
-#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_MASK 0x10000000
-#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_SHIFT 28
-#define EUR_CR_EVENT_HOST_CLEAR_TWOD_COMPLETE_MASK 0x08000000
-#define EUR_CR_EVENT_HOST_CLEAR_TWOD_COMPLETE_SHIFT 27
-#define EUR_CR_EVENT_HOST_CLEAR_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000
-#define EUR_CR_EVENT_HOST_CLEAR_MADD_CACHE_INVALCOMPLETE_SHIFT 26
-#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000
-#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25
-#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_MASK 0x01000000
-#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_SHIFT 24
-#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_MASK 0x00800000
-#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_SHIFT 23
-#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_MASK 0x00400000
-#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_SHIFT 22
-#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_MASK 0x00200000
-#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_SHIFT 21
-#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_MASK 0x00100000
-#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_SHIFT 20
-#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_MASK 0x00080000
-#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_SHIFT 19
-#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_MASK 0x00040000
-#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_SHIFT 18
-#define EUR_CR_EVENT_HOST_CLEAR_ISP_HALT_MASK 0x00020000
-#define EUR_CR_EVENT_HOST_CLEAR_ISP_HALT_SHIFT 17
-#define EUR_CR_EVENT_HOST_CLEAR_ISP_VISIBILITY_FAIL_MASK 0x00010000
-#define EUR_CR_EVENT_HOST_CLEAR_ISP_VISIBILITY_FAIL_SHIFT 16
-#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_MASK 0x00008000
-#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_SHIFT 15
-#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_MASK 0x00004000
-#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_SHIFT 14
-#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_MASK 0x00002000
-#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_SHIFT 13
-#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_MASK 0x00001000
-#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_SHIFT 12
-#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_MASK 0x00000800
-#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_SHIFT 11
-#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_MASK 0x00000400
-#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_SHIFT 10
-#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_MASK 0x00000200
-#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_SHIFT 9
-#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_MASK 0x00000100
-#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_SHIFT 8
-#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_MASK 0x00000080
-#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_SHIFT 7
-#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_MASK 0x00000040
-#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_SHIFT 6
-#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_MASK 0x00000020
-#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_SHIFT 5
-#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_MASK 0x00000010
-#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_SHIFT 4
-#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_MASK 0x00000008
-#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_SHIFT 3
-#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004
-#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_SHIFT 2
-#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002
-#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_SHIFT 1
-#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_MASK 0x00000001
-#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_SHIFT 0
-#define EUR_CR_EVENT_KICK1                  0x0AB0
-#define EUR_CR_EVENT_KICK1_NOW_MASK         0x000000FF
-#define EUR_CR_EVENT_KICK1_NOW_SHIFT        0
-#define EUR_CR_PDS_EXEC_BASE                0x0AB8
-#define EUR_CR_PDS_EXEC_BASE_ADDR_MASK      0x0FF00000
-#define EUR_CR_PDS_EXEC_BASE_ADDR_SHIFT     20
-#define EUR_CR_EVENT_KICK2                  0x0AC0
-#define EUR_CR_EVENT_KICK2_NOW_MASK         0x00000001
-#define EUR_CR_EVENT_KICK2_NOW_SHIFT        0
-#define EUR_CR_EVENT_KICKER                 0x0AC4
-#define EUR_CR_EVENT_KICKER_ADDRESS_MASK    0x0FFFFFF0
-#define EUR_CR_EVENT_KICKER_ADDRESS_SHIFT   4
-#define EUR_CR_EVENT_KICK                   0x0AC8
-#define EUR_CR_EVENT_KICK_NOW_MASK          0x00000001
-#define EUR_CR_EVENT_KICK_NOW_SHIFT         0
-#define EUR_CR_EVENT_TIMER                  0x0ACC
-#define EUR_CR_EVENT_TIMER_ENABLE_MASK      0x01000000
-#define EUR_CR_EVENT_TIMER_ENABLE_SHIFT     24
-#define EUR_CR_EVENT_TIMER_VALUE_MASK       0x00FFFFFF
-#define EUR_CR_EVENT_TIMER_VALUE_SHIFT      0
-#define EUR_CR_PDS_INV0                     0x0AD0
-#define EUR_CR_PDS_INV0_DSC_MASK            0x00000001
-#define EUR_CR_PDS_INV0_DSC_SHIFT           0
-#define EUR_CR_PDS_INV1                     0x0AD4
-#define EUR_CR_PDS_INV1_DSC_MASK            0x00000001
-#define EUR_CR_PDS_INV1_DSC_SHIFT           0
-#define EUR_CR_EVENT_KICK3                  0x0AD8
-#define EUR_CR_EVENT_KICK3_NOW_MASK         0x00000001
-#define EUR_CR_EVENT_KICK3_NOW_SHIFT        0
-#define EUR_CR_PDS_INV3                     0x0ADC
-#define EUR_CR_PDS_INV3_DSC_MASK            0x00000001
-#define EUR_CR_PDS_INV3_DSC_SHIFT           0
-#define EUR_CR_PDS_INV_CSC                  0x0AE0
-#define EUR_CR_PDS_INV_CSC_KICK_MASK        0x00000001
-#define EUR_CR_PDS_INV_CSC_KICK_SHIFT       0
-#define EUR_CR_PDS_PC_BASE                  0x0B2C
-#define EUR_CR_PDS_PC_BASE_ADDRESS_MASK     0x00FFFFFF
-#define EUR_CR_PDS_PC_BASE_ADDRESS_SHIFT    0
-#define EUR_CR_BIF_CTRL                     0x0C00
-#define EUR_CR_BIF_CTRL_NOREORDER_MASK      0x00000001
-#define EUR_CR_BIF_CTRL_NOREORDER_SHIFT     0
-#define EUR_CR_BIF_CTRL_PAUSE_MASK          0x00000002
-#define EUR_CR_BIF_CTRL_PAUSE_SHIFT         1
-#define EUR_CR_BIF_CTRL_FLUSH_MASK          0x00000004
-#define EUR_CR_BIF_CTRL_FLUSH_SHIFT         2
-#define EUR_CR_BIF_CTRL_INVALDC_MASK        0x00000008
-#define EUR_CR_BIF_CTRL_INVALDC_SHIFT       3
-#define EUR_CR_BIF_CTRL_CLEAR_FAULT_MASK    0x00000010
-#define EUR_CR_BIF_CTRL_CLEAR_FAULT_SHIFT   4
-#define EUR_CR_BIF_CTRL_MMU_BYPASS_CACHE_MASK 0x00000100
-#define EUR_CR_BIF_CTRL_MMU_BYPASS_CACHE_SHIFT 8
-#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_MASK 0x00000200
-#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_SHIFT 9
-#define EUR_CR_BIF_CTRL_MMU_BYPASS_TE_MASK  0x00000400
-#define EUR_CR_BIF_CTRL_MMU_BYPASS_TE_SHIFT 10
-#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_MASK 0x00001000
-#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_SHIFT 12
-#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_MASK 0x00002000
-#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_SHIFT 13
-#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_MASK 0x00004000
-#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_SHIFT 14
-#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_MASK 0x00008000
-#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_SHIFT 15
-#define EUR_CR_BIF_INT_STAT                 0x0C04
-#define EUR_CR_BIF_INT_STAT_FAULT_MASK      0x00003FFF
-#define EUR_CR_BIF_INT_STAT_FAULT_SHIFT     0
-#define EUR_CR_BIF_INT_STAT_PF_N_RW_MASK    0x00004000
-#define EUR_CR_BIF_INT_STAT_PF_N_RW_SHIFT   14
-#define EUR_CR_BIF_INT_STAT_FLUSH_COMPLETE_MASK 0x00008000
-#define EUR_CR_BIF_INT_STAT_FLUSH_COMPLETE_SHIFT 15
-#define EUR_CR_BIF_FAULT                    0x0C08
-#define EUR_CR_BIF_FAULT_SB_MASK            0x000001F0
-#define EUR_CR_BIF_FAULT_SB_SHIFT           4
-#define EUR_CR_BIF_FAULT_ADDR_MASK          0x0FFFF000
-#define EUR_CR_BIF_FAULT_ADDR_SHIFT         12
-#define EUR_CR_BIF_DIR_LIST_BASE0           0x0C84
-#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_MASK 0xFFFFF000
-#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_SHIFT 12
-#define EUR_CR_BIF_TA_REQ_BASE              0x0C90
-#define EUR_CR_BIF_TA_REQ_BASE_ADDR_MASK    0x0FF00000
-#define EUR_CR_BIF_TA_REQ_BASE_ADDR_SHIFT   20
-#define EUR_CR_BIF_MEM_REQ_STAT             0x0CA8
-#define EUR_CR_BIF_MEM_REQ_STAT_READS_MASK  0x000000FF
-#define EUR_CR_BIF_MEM_REQ_STAT_READS_SHIFT 0
-#define EUR_CR_BIF_3D_REQ_BASE              0x0CAC
-#define EUR_CR_BIF_3D_REQ_BASE_ADDR_MASK    0x0FF00000
-#define EUR_CR_BIF_3D_REQ_BASE_ADDR_SHIFT   20
-#define EUR_CR_BIF_ZLS_REQ_BASE             0x0CB0
-#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_MASK   0x0FF00000
-#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_SHIFT  20
-#define EUR_CR_2D_BLIT_STATUS               0x0E04
-#define EUR_CR_2D_BLIT_STATUS_COMPLETE_MASK 0x00FFFFFF
-#define EUR_CR_2D_BLIT_STATUS_COMPLETE_SHIFT 0
-#define EUR_CR_2D_BLIT_STATUS_BUSY_MASK     0x01000000
-#define EUR_CR_2D_BLIT_STATUS_BUSY_SHIFT    24
-#define EUR_CR_2D_VIRTUAL_FIFO_0            0x0E10
-#define EUR_CR_2D_VIRTUAL_FIFO_0_ENABLE_MASK 0x00000001
-#define EUR_CR_2D_VIRTUAL_FIFO_0_ENABLE_SHIFT 0
-#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MASK 0x0000000E
-#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_SHIFT 1
-#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_DIV_MASK 0x00000FF0
-#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_DIV_SHIFT 4
-#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MUL_MASK 0x0000F000
-#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MUL_SHIFT 12
-#define EUR_CR_2D_VIRTUAL_FIFO_1            0x0E14
-#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_ACC_MASK 0x00000FFF
-#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_ACC_SHIFT 0
-#define EUR_CR_2D_VIRTUAL_FIFO_1_MAX_ACC_MASK 0x00FFF000
-#define EUR_CR_2D_VIRTUAL_FIFO_1_MAX_ACC_SHIFT 12
-#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_METRIC_MASK 0xFF000000
-#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_METRIC_SHIFT 24
-#define EUR_CR_USE_CODE_BASE(X)     (0x0A0C + (4 * (X)))
-#define EUR_CR_USE_CODE_BASE_ADDR_MASK      0x00FFFFFF
-#define EUR_CR_USE_CODE_BASE_ADDR_SHIFT     0
-#define EUR_CR_USE_CODE_BASE_DM_MASK        0x03000000
-#define EUR_CR_USE_CODE_BASE_DM_SHIFT       24
-#define EUR_CR_USE_CODE_BASE_SIZE_UINT32 16
-#define EUR_CR_USE_CODE_BASE_NUM_ENTRIES 16
-#define EUR_CR_MNE_CR_CTRL                                             0x0D00
-#define EUR_CR_MNE_CR_CTRL_BYP_CC_N_MASK               0x00010000
-#define EUR_CR_MNE_CR_CTRL_BYP_CC_N_SHIFT              16
-#define EUR_CR_MNE_CR_CTRL_BYP_CC_MASK                 0x00008000
-#define EUR_CR_MNE_CR_CTRL_BYP_CC_SHIFT                        15
-#define EUR_CR_MNE_CR_CTRL_USE_INVAL_ADDR_MASK 0x00007800
-#define EUR_CR_MNE_CR_CTRL_USE_INVAL_ADDR_SHIFT        11
-#define EUR_CR_MNE_CR_CTRL_BYPASS_ALL_MASK             0x00000400
-#define EUR_CR_MNE_CR_CTRL_BYPASS_ALL_SHIFT            10
-#define EUR_CR_MNE_CR_CTRL_BYPASS_MASK                 0x000003E0
-#define EUR_CR_MNE_CR_CTRL_BYPASS_SHIFT                        5
-#define EUR_CR_MNE_CR_CTRL_PAUSE_MASK                  0x00000010
-#define EUR_CR_MNE_CR_CTRL_PAUSE_SHIFT                 4
-#define EUR_CR_MNE_CR_CTRL_INVAL_PREQ_MASK             0x0000000E
-#define EUR_CR_MNE_CR_CTRL_INVAL_PREQ_SHIFT            1
-#define EUR_CR_MNE_CR_CTRL_INVAL_PREQ_PDS_MASK (1<<EUR_CR_MNE_CR_CTRL_INVAL_PREQ_SHIFT+2)
-#define EUR_CR_MNE_CR_CTRL_INVAL_PREQ_USEC_MASK (1<<EUR_CR_MNE_CR_CTRL_INVAL_PREQ_SHIFT+1)
-#define EUR_CR_MNE_CR_CTRL_INVAL_PREQ_CACHE_MASK (1<<EUR_CR_MNE_CR_CTRL_INVAL_PREQ_SHIFT)
-#define EUR_CR_MNE_CR_CTRL_INVAL_ALL_MASK              0x00000001
-#define EUR_CR_MNE_CR_CTRL_INVAL_ALL_SHIFT             0
-#define EUR_CR_MNE_CR_USE_INVAL                                        0x0D04
-#define EUR_CR_MNE_CR_USE_INVAL_ADDR_MASK              0xFFFFFFFF
-#define EUR_CR_MNE_CR_USE_INVAL_ADDR_SHIFT             0
-#define EUR_CR_MNE_CR_STAT                                     0x0D08
-#define EUR_CR_MNE_CR_STAT_PAUSED_MASK         0x00000400
-#define EUR_CR_MNE_CR_STAT_PAUSED_SHIFT                10
-#define EUR_CR_MNE_CR_STAT_READS_MASK          0x000003FF
-#define EUR_CR_MNE_CR_STAT_READS_SHIFT         0
-#define EUR_CR_MNE_CR_STAT_STATS                       0x0D0C
-#define EUR_CR_MNE_CR_STAT_STATS_RST_MASK      0x000FFFF0
-#define EUR_CR_MNE_CR_STAT_STATS_RST_SHIFT     4
-#define EUR_CR_MNE_CR_STAT_STATS_SEL_MASK      0x0000000F
-#define EUR_CR_MNE_CR_STAT_STATS_SEL_SHIFT     0
-#define EUR_CR_MNE_CR_STAT_STATS_OUT                           0x0D10
-#define EUR_CR_MNE_CR_STAT_STATS_OUT_VALUE_MASK                0xFFFFFFFF
-#define EUR_CR_MNE_CR_STAT_STATS_OUT_VALUE_SHIFT       0
-#define EUR_CR_MNE_CR_EVENT_STATUS                             0x0D14
-#define EUR_CR_MNE_CR_EVENT_STATUS_INVAL_MASK  0x00000001
-#define EUR_CR_MNE_CR_EVENT_STATUS_INVAL_SHIFT 0
-#define EUR_CR_MNE_CR_EVENT_CLEAR                              0x0D18
-#define EUR_CR_MNE_CR_EVENT_CLEAR_INVAL_MASK   0x00000001
-#define EUR_CR_MNE_CR_EVENT_CLEAR_INVAL_SHIFT  0
-#define EUR_CR_MNE_CR_CTRL_INVAL                               0x0D20
-
-#endif 
-
index bed2351..f41bd9e 100644 (file)
@@ -288,11 +288,12 @@ PVRSRV_ERROR IMG_CALLCONV PVRSRVDeinitialiseDevice(IMG_UINT32 ui32DevIndex);
 
 #if !defined(USE_CODE)
 
-IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV PollForValueKM(volatile IMG_UINT32* pui32LinMemAddr,
-                                                                                                  IMG_UINT32 ui32Value,
-                                                                                                  IMG_UINT32 ui32Mask,
-                                                                                                  IMG_UINT32 ui32Waitus,
-                                                                                                  IMG_UINT32 ui32Tries);
+IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV PollForValueKM(volatile IMG_UINT32*       pui32LinMemAddr,
+                                                                                                       IMG_UINT32                              ui32Value,
+                                                                                                       IMG_UINT32                              ui32Mask,
+                                                                                                       IMG_UINT32                              ui32Timeoutus,
+                                                                                                       IMG_UINT32                              ui32PollPeriodus,
+                                                                                                       IMG_BOOL                                bAllowPreemption);
 
 #endif 
 
index 8ffbea6..ab86788 100644 (file)
@@ -445,6 +445,7 @@ PVRSRV_ERROR OSCreateResource(PVRSRV_RESOURCE *psResource);
 PVRSRV_ERROR OSDestroyResource(PVRSRV_RESOURCE *psResource);
 IMG_VOID OSBreakResourceLock(PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID);
 IMG_VOID OSWaitus(IMG_UINT32 ui32Timeus);
+IMG_VOID OSSleepms(IMG_UINT32 ui32Timems);
 IMG_VOID OSReleaseThreadQuanta(IMG_VOID);
 IMG_UINT32 OSPCIReadDword(IMG_UINT32 ui32Bus, IMG_UINT32 ui32Dev, IMG_UINT32 ui32Func, IMG_UINT32 ui32Reg);
 IMG_VOID OSPCIWriteDword(IMG_UINT32 ui32Bus, IMG_UINT32 ui32Dev, IMG_UINT32 ui32Func, IMG_UINT32 ui32Reg, IMG_UINT32 ui32Value);
similarity index 97%
rename from services4/system/ti8168/oemfuncs.h
rename to services4/system/omap4/oemfuncs.h
index 1131534..206f7fa 100644 (file)
@@ -1,56 +1,56 @@
-/**********************************************************************\r
- *\r
- * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.\r
- * \r
- * This program is free software; you can redistribute it and/or modify it\r
- * under the terms and conditions of the GNU General Public License,\r
- * version 2, as published by the Free Software Foundation.\r
- * \r
- * This program is distributed in the hope it will be useful but, except \r
- * as otherwise stated in writing, without any warranty; without even the \r
- * implied warranty of merchantability or fitness for a particular purpose. \r
- * See the GNU General Public License for more details.\r
- * \r
- * You should have received a copy of the GNU General Public License along with\r
- * this program; if not, write to the Free Software Foundation, Inc.,\r
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.\r
- * \r
- * The full GNU General Public License is included in this distribution in\r
- * the file called "COPYING".\r
- *\r
- * Contact Information:\r
- * Imagination Technologies Ltd. <gpl-support@imgtec.com>\r
- * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK \r
- *\r
- ******************************************************************************/\r
-\r
-#if !defined(__OEMFUNCS_H__)\r
-#define __OEMFUNCS_H__\r
-\r
-#if defined (__cplusplus)\r
-extern "C" {\r
-#endif\r
-\r
-typedef IMG_UINT32   (*PFN_SRV_BRIDGEDISPATCH)( IMG_UINT32  Ioctl,\r
-                                                                                               IMG_BYTE   *pInBuf,\r
-                                                                                               IMG_UINT32  InBufLen, \r
-                                                                                           IMG_BYTE   *pOutBuf,\r
-                                                                                               IMG_UINT32  OutBufLen,\r
-                                                                                               IMG_UINT32 *pdwBytesTransferred);\r
-typedef struct PVRSRV_DC_OEM_JTABLE_TAG\r
-{\r
-       PFN_SRV_BRIDGEDISPATCH                  pfnOEMBridgeDispatch;\r
-       IMG_PVOID                                               pvDummy1;\r
-       IMG_PVOID                                               pvDummy2;\r
-       IMG_PVOID                                               pvDummy3;\r
-\r
-} PVRSRV_DC_OEM_JTABLE;\r
-\r
-#define OEM_GET_EXT_FUNCS                      (1<<1)\r
-\r
-#if defined(__cplusplus)\r
-}\r
-#endif\r
-\r
-#endif \r
-\r
+/**********************************************************************
+ *
+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ * 
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ * 
+ * This program is distributed in the hope it will be useful but, except 
+ * as otherwise stated in writing, without any warranty; without even the 
+ * implied warranty of merchantability or fitness for a particular purpose. 
+ * See the GNU General Public License for more details.
+ * 
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * 
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
+ *
+ ******************************************************************************/
+
+#if !defined(__OEMFUNCS_H__)
+#define __OEMFUNCS_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+typedef IMG_UINT32   (*PFN_SRV_BRIDGEDISPATCH)( IMG_UINT32  Ioctl,
+                                                                                               IMG_BYTE   *pInBuf,
+                                                                                               IMG_UINT32  InBufLen, 
+                                                                                           IMG_BYTE   *pOutBuf,
+                                                                                               IMG_UINT32  OutBufLen,
+                                                                                               IMG_UINT32 *pdwBytesTransferred);
+typedef struct PVRSRV_DC_OEM_JTABLE_TAG
+{
+       PFN_SRV_BRIDGEDISPATCH                  pfnOEMBridgeDispatch;
+       IMG_PVOID                                               pvDummy1;
+       IMG_PVOID                                               pvDummy2;
+       IMG_PVOID                                               pvDummy3;
+
+} PVRSRV_DC_OEM_JTABLE;
+
+#define OEM_GET_EXT_FUNCS                      (1<<1)
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif 
+
similarity index 80%
rename from services4/system/ti8168/sysconfig.c
rename to services4/system/omap4/sysconfig.c
index 38f3e4b..929d108 100644 (file)
-/**********************************************************************\r
- *\r
- * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.\r
- * \r
- * This program is free software; you can redistribute it and/or modify it\r
- * under the terms and conditions of the GNU General Public License,\r
- * version 2, as published by the Free Software Foundation.\r
- * \r
- * This program is distributed in the hope it will be useful but, except \r
- * as otherwise stated in writing, without any warranty; without even the \r
- * implied warranty of merchantability or fitness for a particular purpose. \r
- * See the GNU General Public License for more details.\r
- * \r
- * You should have received a copy of the GNU General Public License along with\r
- * this program; if not, write to the Free Software Foundation, Inc.,\r
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.\r
- * \r
- * The full GNU General Public License is included in this distribution in\r
- * the file called "COPYING".\r
- *\r
- * Contact Information:\r
- * Imagination Technologies Ltd. <gpl-support@imgtec.com>\r
- * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK \r
- *\r
- ******************************************************************************/\r
-\r
-#include "services_headers.h"\r
-#include "kerneldisplay.h"\r
-#include "oemfuncs.h"\r
-#include "sgxinfo.h"\r
-#include "pdump_km.h"\r
-#include "sgxinfokm.h"\r
-#include "syslocal.h"\r
-#include "sysconfig.h"\r
-\r
-SYS_DATA* gpsSysData = (SYS_DATA*)IMG_NULL;\r
-SYS_DATA  gsSysData;\r
-\r
-static SYS_SPECIFIC_DATA gsSysSpecificData;\r
-SYS_SPECIFIC_DATA *gpsSysSpecificData;\r
-\r
-static IMG_UINT32      gui32SGXDeviceID;\r
-static SGX_DEVICE_MAP  gsSGXDeviceMap;\r
-static PVRSRV_DEVICE_NODE *gpsSGXDevNode;\r
-\r
-#define DEVICE_SGX_INTERRUPT (1 << 0)\r
-\r
-#if defined(NO_HARDWARE)\r
-static IMG_CPU_VIRTADDR gsSGXRegsCPUVAddr;\r
-#endif\r
-\r
-IMG_UINT32 PVRSRV_BridgeDispatchKM(IMG_UINT32  Ioctl,\r
-                                                                  IMG_BYTE             *pInBuf,\r
-                                                                  IMG_UINT32   InBufLen,\r
-                                                                  IMG_BYTE             *pOutBuf,\r
-                                                                  IMG_UINT32   OutBufLen,\r
-                                                                  IMG_UINT32   *pdwBytesTransferred);\r
-\r
-static PVRSRV_ERROR SysLocateDevices(SYS_DATA *psSysData)\r
-{\r
-#if defined(NO_HARDWARE)\r
-       PVRSRV_ERROR eError;\r
-       IMG_CPU_PHYADDR sCpuPAddr;\r
-#endif\r
-\r
-       PVR_UNREFERENCED_PARAMETER(psSysData);\r
-\r
-       \r
-       gsSGXDeviceMap.ui32Flags = 0x0;\r
-       \r
-#if defined(NO_HARDWARE)\r
-       \r
-       \r
-       eError = OSBaseAllocContigMemory(SYS_OMAP3430_SGX_REGS_SIZE, \r
-                                                                        &gsSGXRegsCPUVAddr,\r
-                                                                        &sCpuPAddr);\r
-       if(eError != PVRSRV_OK)\r
-       {\r
-               return eError;\r
-       }\r
-       gsSGXDeviceMap.sRegsCpuPBase = sCpuPAddr;\r
-       gsSGXDeviceMap.sRegsSysPBase = SysCpuPAddrToSysPAddr(gsSGXDeviceMap.sRegsCpuPBase);\r
-       gsSGXDeviceMap.ui32RegsSize = SYS_OMAP3430_SGX_REGS_SIZE;\r
-#if defined(__linux__)\r
-       \r
-       gsSGXDeviceMap.pvRegsCpuVBase = gsSGXRegsCPUVAddr;\r
-#else\r
-       \r
-       gsSGXDeviceMap.pvRegsCpuVBase = IMG_NULL;\r
-#endif\r
-\r
-       OSMemSet(gsSGXRegsCPUVAddr, 0, SYS_OMAP3430_SGX_REGS_SIZE);\r
-\r
-       \r
-\r
-\r
-       gsSGXDeviceMap.ui32IRQ = 0;\r
-\r
-#else \r
-\r
-       gsSGXDeviceMap.sRegsSysPBase.uiAddr = SYS_OMAP3430_SGX_REGS_SYS_PHYS_BASE;\r
-       gsSGXDeviceMap.sRegsCpuPBase = SysSysPAddrToCpuPAddr(gsSGXDeviceMap.sRegsSysPBase);\r
-       gsSGXDeviceMap.ui32RegsSize = SYS_OMAP3430_SGX_REGS_SIZE;\r
-\r
-       gsSGXDeviceMap.ui32IRQ = SYS_OMAP3430_SGX_IRQ;\r
-\r
-#endif \r
-\r
-\r
-       \r
-\r
-\r
-       return PVRSRV_OK;\r
-}\r
-\r
-\r
-IMG_CHAR *SysCreateVersionString(IMG_CPU_PHYADDR sRegRegion)\r
-{\r
-       static IMG_CHAR aszVersionString[100];\r
-       SYS_DATA        *psSysData;\r
-       IMG_UINT32      ui32SGXRevision;\r
-       IMG_INT32       i32Count;\r
-#if !defined(NO_HARDWARE)\r
-       IMG_VOID        *pvRegsLinAddr;\r
-\r
-       pvRegsLinAddr = OSMapPhysToLin(sRegRegion,\r
-                                                                  SYS_OMAP3430_SGX_REGS_SIZE,\r
-                                                                  PVRSRV_HAP_UNCACHED|PVRSRV_HAP_KERNEL_ONLY,\r
-                                                                  IMG_NULL);\r
-       if(!pvRegsLinAddr)\r
-       {\r
-               return IMG_NULL;\r
-       }\r
-\r
-       ui32SGXRevision = OSReadHWReg((IMG_PVOID)((IMG_PBYTE)pvRegsLinAddr),\r
-                                                                 EUR_CR_CORE_REVISION);\r
-#else\r
-       ui32SGXRevision = 0;\r
-#endif\r
-\r
-       if (SysAcquireData(&psSysData) != PVRSRV_OK)\r
-       {\r
-               return IMG_NULL;\r
-       }\r
-\r
-       i32Count = OSSNPrintf(aszVersionString, 100,\r
-                                                 "SGX revision = %u.%u.%u",\r
-                                                 (IMG_UINT)((ui32SGXRevision & EUR_CR_CORE_REVISION_MAJOR_MASK)\r
-                                                       >> EUR_CR_CORE_REVISION_MAJOR_SHIFT),\r
-                                                 (IMG_UINT)((ui32SGXRevision & EUR_CR_CORE_REVISION_MINOR_MASK)\r
-                                                       >> EUR_CR_CORE_REVISION_MINOR_SHIFT),\r
-                                                 (IMG_UINT)((ui32SGXRevision & EUR_CR_CORE_REVISION_MAINTENANCE_MASK)\r
-                                                       >> EUR_CR_CORE_REVISION_MAINTENANCE_SHIFT)\r
-                                                );\r
-\r
-#if !defined(NO_HARDWARE)\r
-       OSUnMapPhysToLin(pvRegsLinAddr,\r
-                                        SYS_OMAP3430_SGX_REGS_SIZE,\r
-                                        PVRSRV_HAP_UNCACHED|PVRSRV_HAP_KERNEL_ONLY,\r
-                                        IMG_NULL);\r
-#endif\r
-\r
-       if(i32Count == -1)\r
-       {\r
-               return IMG_NULL;\r
-       }\r
-\r
-       return aszVersionString;\r
-}\r
-\r
-\r
-PVRSRV_ERROR SysInitialise(IMG_VOID)\r
-{\r
-       IMG_UINT32                      i;\r
-       PVRSRV_ERROR            eError;\r
-       PVRSRV_DEVICE_NODE      *psDeviceNode;\r
-       IMG_CPU_PHYADDR         TimerRegPhysBase;\r
-\r
-#if defined(DEBUG)\r
-       PVR_DPF((PVR_DBG_WARNING,"SysInitialise: Entering..."));\r
-#endif\r
-\r
-#if !defined(SGX_DYNAMIC_TIMING_INFO)\r
-       SGX_TIMING_INFORMATION* psTimingInfo;\r
-#endif\r
-       gpsSysData = &gsSysData;\r
-       OSMemSet(gpsSysData, 0, sizeof(SYS_DATA));\r
-\r
-       gpsSysSpecificData =  &gsSysSpecificData;\r
-       OSMemSet(gpsSysSpecificData, 0, sizeof(SYS_SPECIFIC_DATA));\r
-\r
-       gpsSysData->pvSysSpecificData = gpsSysSpecificData;\r
-\r
-       eError = OSInitEnvData(&gpsSysData->pvEnvSpecificData);\r
-       if (eError != PVRSRV_OK)\r
-       {\r
-               PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to setup env structure"));\r
-               SysDeinitialise(gpsSysData);\r
-               gpsSysData = IMG_NULL;\r
-               return eError;\r
-       }\r
-       SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_ENVDATA);\r
-\r
-       gpsSysData->ui32NumDevices = SYS_DEVICE_COUNT;\r
-\r
-       \r
-       for(i=0; i<SYS_DEVICE_COUNT; i++)\r
-       {\r
-               gpsSysData->sDeviceID[i].uiID = i;\r
-               gpsSysData->sDeviceID[i].bInUse = IMG_FALSE;\r
-       }\r
-\r
-       gpsSysData->psDeviceNodeList = IMG_NULL;\r
-       gpsSysData->psQueueList = IMG_NULL;\r
-\r
-       eError = SysInitialiseCommon(gpsSysData);\r
-       if (eError != PVRSRV_OK)\r
-       {\r
-               PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed in SysInitialiseCommon"));\r
-               SysDeinitialise(gpsSysData);\r
-               gpsSysData = IMG_NULL;\r
-               return eError;\r
-       }\r
-\r
-       TimerRegPhysBase.uiAddr = SYS_OMAP3430_GP11TIMER_REGS_SYS_PHYS_BASE;\r
-       gpsSysData->pvSOCTimerRegisterKM = IMG_NULL;\r
-       gpsSysData->hSOCTimerRegisterOSMemHandle = 0;\r
-       OSReservePhys(TimerRegPhysBase,\r
-                                 4,\r
-                                 PVRSRV_HAP_MULTI_PROCESS|PVRSRV_HAP_UNCACHED,\r
-                                 (IMG_VOID **)&gpsSysData->pvSOCTimerRegisterKM,\r
-                                 &gpsSysData->hSOCTimerRegisterOSMemHandle);\r
-\r
-#if !defined(SGX_DYNAMIC_TIMING_INFO)\r
-       \r
-       psTimingInfo = &gsSGXDeviceMap.sTimingInfo;\r
-       psTimingInfo->ui32CoreClockSpeed = SYS_SGX_CLOCK_SPEED;\r
-       psTimingInfo->ui32HWRecoveryFreq = SYS_SGX_HWRECOVERY_TIMEOUT_FREQ; \r
-       psTimingInfo->ui32ActivePowManLatencyms = SYS_SGX_ACTIVE_POWER_LATENCY_MS; \r
-       psTimingInfo->ui32uKernelFreq = SYS_SGX_PDS_TIMER_FREQ; \r
-#endif\r
-\r
-       \r
-\r
-       gpsSysSpecificData->ui32SrcClockDiv = 3;\r
-\r
-       \r
-\r
-\r
-\r
-       eError = SysLocateDevices(gpsSysData);\r
-       if (eError != PVRSRV_OK)\r
-       {\r
-               PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to locate devices"));\r
-               SysDeinitialise(gpsSysData);\r
-               gpsSysData = IMG_NULL;\r
-               return eError;\r
-       }\r
-       SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LOCATEDEV);\r
-\r
-       \r
-\r
-\r
-       eError = PVRSRVRegisterDevice(gpsSysData, SGXRegisterDevice,\r
-                                                                 DEVICE_SGX_INTERRUPT, &gui32SGXDeviceID);\r
-       if (eError != PVRSRV_OK)\r
-       {\r
-               PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to register device!"));\r
-               SysDeinitialise(gpsSysData);\r
-               gpsSysData = IMG_NULL;\r
-               return eError;\r
-       }\r
-       SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_REGDEV);\r
-\r
-       \r
-\r
-\r
-       \r
-       psDeviceNode = gpsSysData->psDeviceNodeList;\r
-       while(psDeviceNode)\r
-       {\r
-               \r
-               switch(psDeviceNode->sDevId.eDeviceType)\r
-               {\r
-                       case PVRSRV_DEVICE_TYPE_SGX:\r
-                       {\r
-                               DEVICE_MEMORY_INFO *psDevMemoryInfo;\r
-                               DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;\r
-\r
-                               \r
-\r
-\r
-                               psDeviceNode->psLocalDevMemArena = IMG_NULL;\r
-\r
-                               \r
-                               psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;\r
-                               psDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap;\r
-\r
-                               \r
-                               for(i=0; i<psDevMemoryInfo->ui32HeapCount; i++)\r
-                               {\r
-                                       psDeviceMemoryHeap[i].ui32Attribs |= PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG;\r
-                               }\r
-\r
-                               gpsSGXDevNode = psDeviceNode;\r
-                               gsSysSpecificData.psSGXDevNode = psDeviceNode;\r
-\r
-                               break;\r
-                       }\r
-                       default:\r
-                               PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to find SGX device node!"));\r
-                               return PVRSRV_ERROR_INIT_FAILURE;\r
-               }\r
-\r
-               \r
-               psDeviceNode = psDeviceNode->psNext;\r
-       }\r
-\r
-       PDUMPINIT();\r
-       SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_PDUMPINIT);\r
-\r
-       eError = EnableSystemClocks(gpsSysData);\r
-       if (eError != PVRSRV_OK)\r
-       {\r
-               PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to Enable system clocks (%d)", eError));\r
-               SysDeinitialise(gpsSysData);\r
-               gpsSysData = IMG_NULL;\r
-               return eError;\r
-       }\r
-       SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS);\r
-\r
-#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)\r
-       eError = EnableSGXClocks(gpsSysData);\r
-       if (eError != PVRSRV_OK)\r
-       {\r
-               PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to Enable SGX clocks (%d)", eError));\r
-               SysDeinitialise(gpsSysData);\r
-               gpsSysData = IMG_NULL;\r
-               return eError;\r
-       }\r
-#endif \r
-\r
-       eError = PVRSRVInitialiseDevice(gui32SGXDeviceID);\r
-       if (eError != PVRSRV_OK)\r
-       {\r
-               PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to initialise device!"));\r
-               SysDeinitialise(gpsSysData);\r
-               gpsSysData = IMG_NULL;\r
-               return eError;\r
-       }\r
-       SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_INITDEV);\r
-\r
-#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)\r
-       \r
-       DisableSGXClocks(gpsSysData);\r
-#endif \r
-\r
-       return PVRSRV_OK;\r
-}\r
-\r
-\r
-PVRSRV_ERROR SysFinalise(IMG_VOID)\r
-{\r
-       PVRSRV_ERROR eError = PVRSRV_OK;\r
-       \r
-#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)\r
-       eError = EnableSGXClocks(gpsSysData);\r
-       if (eError != PVRSRV_OK)\r
-       {\r
-               PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to Enable SGX clocks (%d)", eError));\r
-               SysDeinitialise(gpsSysData);\r
-               gpsSysData = IMG_NULL;\r
-               return eError;\r
-       }\r
-#endif \r
-\r
-#if defined(SYS_USING_INTERRUPTS)\r
-\r
-       eError = OSInstallMISR(gpsSysData);\r
-       if (eError != PVRSRV_OK)\r
-       {\r
-               PVR_DPF((PVR_DBG_ERROR,"SysFinalise: Failed to install MISR"));\r
-               SysDeinitialise(gpsSysData);\r
-               gpsSysData = IMG_NULL;\r
-               return eError;\r
-       }\r
-       SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_MISR);\r
-\r
-       \r
-       eError = OSInstallDeviceLISR(gpsSysData, gsSGXDeviceMap.ui32IRQ, "SGX ISR", gpsSGXDevNode);\r
-       if (eError != PVRSRV_OK)\r
-       {\r
-               PVR_DPF((PVR_DBG_ERROR,"SysFinalise: Failed to install ISR"));\r
-               SysDeinitialise(gpsSysData);\r
-               gpsSysData = IMG_NULL;\r
-               return eError;\r
-       }\r
-       SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LISR);\r
-#endif \r
-\r
-       \r
-       gpsSysData->pszVersionString = SysCreateVersionString(gsSGXDeviceMap.sRegsCpuPBase);\r
-       if (!gpsSysData->pszVersionString)\r
-       {\r
-               PVR_DPF((PVR_DBG_ERROR,"SysFinalise: Failed to create a system version string"));\r
-       }\r
-       else\r
-       {\r
-               PVR_DPF((PVR_DBG_WARNING, "SysFinalise: Version string: %s", gpsSysData->pszVersionString));\r
-       }\r
-\r
-#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)\r
-       \r
-       DisableSGXClocks(gpsSysData);\r
-#endif \r
-\r
-       gpsSysSpecificData->bSGXInitComplete = IMG_TRUE;\r
-\r
-       return eError;\r
-}\r
-\r
-\r
-PVRSRV_ERROR SysDeinitialise (SYS_DATA *psSysData)\r
-{\r
-       PVRSRV_ERROR eError;\r
-       \r
-#if defined(SYS_USING_INTERRUPTS)\r
-       if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LISR))\r
-       {\r
-               eError = OSUninstallDeviceLISR(psSysData);\r
-               if (eError != PVRSRV_OK)\r
-               {\r
-                       PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: OSUninstallDeviceLISR failed"));\r
-                       return eError;\r
-               }\r
-       }\r
-\r
-       if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_MISR))\r
-       {\r
-               eError = OSUninstallMISR(psSysData);\r
-               if (eError != PVRSRV_OK)\r
-               {\r
-                       PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: OSUninstallMISR failed"));\r
-                       return eError;\r
-               }\r
-       }\r
-#else\r
-       PVR_UNREFERENCED_PARAMETER(psSysData);\r
-#endif \r
-\r
-       if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_INITDEV))\r
-       {\r
-#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)\r
-               PVR_ASSERT(SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS));\r
-               \r
-               eError = EnableSGXClocks(gpsSysData);\r
-               if (eError != PVRSRV_OK)\r
-               {\r
-                       PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: EnableSGXClocks failed"));\r
-                       return eError;\r
-               }\r
-#endif \r
-\r
-               \r
-               eError = PVRSRVDeinitialiseDevice (gui32SGXDeviceID);\r
-               if (eError != PVRSRV_OK)\r
-               {\r
-                       PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: failed to de-init the device"));\r
-                       return eError;\r
-               }\r
-       }\r
-       \r
-       \r
-\r
-       if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS))\r
-       {\r
-               DisableSystemClocks(gpsSysData);\r
-       }\r
-\r
-       if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_ENVDATA))\r
-       {       \r
-               eError = OSDeInitEnvData(gpsSysData->pvEnvSpecificData);\r
-               if (eError != PVRSRV_OK)\r
-               {\r
-                       PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: failed to de-init env structure"));\r
-                       return eError;\r
-               }\r
-       }\r
-\r
-       if(gpsSysData->pvSOCTimerRegisterKM)\r
-       {\r
-               OSUnReservePhys(gpsSysData->pvSOCTimerRegisterKM,\r
-                                               4,\r
-                                               PVRSRV_HAP_MULTI_PROCESS|PVRSRV_HAP_UNCACHED,\r
-                                               gpsSysData->hSOCTimerRegisterOSMemHandle);\r
-       }\r
-\r
-       SysDeinitialiseCommon(gpsSysData);\r
-\r
-#if defined(NO_HARDWARE)\r
-       if(SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LOCATEDEV))\r
-       {\r
-               \r
-               OSBaseFreeContigMemory(SYS_OMAP3430_SGX_REGS_SIZE, gsSGXRegsCPUVAddr, gsSGXDeviceMap.sRegsCpuPBase);\r
-       }\r
-#endif\r
-\r
-       \r
-       if(SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_PDUMPINIT))\r
-       {\r
-               PDUMPDEINIT();\r
-       }\r
-\r
-       gpsSysSpecificData->ui32SysSpecificData = 0;\r
-       gpsSysSpecificData->bSGXInitComplete = IMG_FALSE;\r
-\r
-       gpsSysData = IMG_NULL;\r
-\r
-       return PVRSRV_OK;\r
-}\r
-\r
-\r
-PVRSRV_ERROR SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE  eDeviceType,\r
-                                                                  IMG_VOID                             **ppvDeviceMap)\r
-{\r
-\r
-       switch(eDeviceType)\r
-       {\r
-               case PVRSRV_DEVICE_TYPE_SGX:\r
-               {\r
-                       \r
-                       *ppvDeviceMap = (IMG_VOID*)&gsSGXDeviceMap;\r
-\r
-                       break;\r
-               }\r
-               default:\r
-               {\r
-                       PVR_DPF((PVR_DBG_ERROR,"SysGetDeviceMemoryMap: unsupported device type"));\r
-               }\r
-       }\r
-       return PVRSRV_OK;\r
-}\r
-\r
-\r
-IMG_DEV_PHYADDR SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE       eDeviceType,\r
-                                                                         IMG_CPU_PHYADDR               CpuPAddr)\r
-{\r
-       IMG_DEV_PHYADDR DevPAddr;\r
-\r
-       PVR_UNREFERENCED_PARAMETER(eDeviceType);\r
-\r
-       \r
-       DevPAddr.uiAddr = CpuPAddr.uiAddr;\r
-       \r
-       return DevPAddr;\r
-}\r
-\r
-IMG_CPU_PHYADDR SysSysPAddrToCpuPAddr (IMG_SYS_PHYADDR sys_paddr)\r
-{\r
-       IMG_CPU_PHYADDR cpu_paddr;\r
-\r
-       \r
-       cpu_paddr.uiAddr = sys_paddr.uiAddr;\r
-       return cpu_paddr;\r
-}\r
-\r
-IMG_SYS_PHYADDR SysCpuPAddrToSysPAddr (IMG_CPU_PHYADDR cpu_paddr)\r
-{\r
-       IMG_SYS_PHYADDR sys_paddr;\r
-\r
-       \r
-       sys_paddr.uiAddr = cpu_paddr.uiAddr;\r
-       return sys_paddr;\r
-}\r
-\r
-\r
-IMG_DEV_PHYADDR SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE eDeviceType, IMG_SYS_PHYADDR SysPAddr)\r
-{\r
-       IMG_DEV_PHYADDR DevPAddr;\r
-\r
-       PVR_UNREFERENCED_PARAMETER(eDeviceType);\r
-\r
-       \r
-       DevPAddr.uiAddr = SysPAddr.uiAddr;\r
-\r
-       return DevPAddr;\r
-}\r
-\r
-\r
-IMG_SYS_PHYADDR SysDevPAddrToSysPAddr(PVRSRV_DEVICE_TYPE eDeviceType, IMG_DEV_PHYADDR DevPAddr)\r
-{\r
-       IMG_SYS_PHYADDR SysPAddr;\r
-\r
-       PVR_UNREFERENCED_PARAMETER(eDeviceType);\r
-\r
-       \r
-       SysPAddr.uiAddr = DevPAddr.uiAddr;\r
-\r
-       return SysPAddr;\r
-}\r
-\r
-\r
-IMG_VOID SysRegisterExternalDevice(PVRSRV_DEVICE_NODE *psDeviceNode)\r
-{\r
-       PVR_UNREFERENCED_PARAMETER(psDeviceNode);\r
-}\r
-\r
-\r
-IMG_VOID SysRemoveExternalDevice(PVRSRV_DEVICE_NODE *psDeviceNode)\r
-{\r
-       PVR_UNREFERENCED_PARAMETER(psDeviceNode);\r
-}\r
-\r
-\r
-IMG_UINT32 SysGetInterruptSource(SYS_DATA                      *psSysData,\r
-                                                                PVRSRV_DEVICE_NODE     *psDeviceNode)\r
-{\r
-       PVR_UNREFERENCED_PARAMETER(psSysData);\r
-#if defined(NO_HARDWARE)\r
-       \r
-       return 0xFFFFFFFF;\r
-#else\r
-       \r
-       return psDeviceNode->ui32SOCInterruptBit;\r
-#endif\r
-}\r
-\r
-\r
-IMG_VOID SysClearInterrupts(SYS_DATA* psSysData, IMG_UINT32 ui32ClearBits)\r
-{\r
-       PVR_UNREFERENCED_PARAMETER(psSysData);\r
-       PVR_UNREFERENCED_PARAMETER(ui32ClearBits);\r
-\r
-       \r
-       OSReadHWReg(((PVRSRV_SGXDEV_INFO *)gpsSGXDevNode->pvDevice)->pvRegsBaseKM,\r
-                                                                               EUR_CR_EVENT_HOST_CLEAR);\r
-}\r
-\r
-\r
-PVRSRV_ERROR SysSystemPrePowerState(PVR_POWER_STATE eNewPowerState)\r
-{\r
-       PVRSRV_ERROR eError = PVRSRV_OK;\r
-\r
-       if (eNewPowerState == PVRSRV_POWER_STATE_D3)\r
-       {\r
-               PVR_TRACE(("SysSystemPrePowerState: Entering state D3"));\r
-\r
-#if defined(SYS_USING_INTERRUPTS)\r
-               if (SYS_SPECIFIC_DATA_TEST(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LISR))\r
-               {\r
-#if defined(SYS_CUSTOM_POWERLOCK_WRAP)\r
-                       IMG_BOOL bWrapped = WrapSystemPowerChange(&gsSysSpecificData);\r
-#endif\r
-                       eError = OSUninstallDeviceLISR(gpsSysData);\r
-#if defined(SYS_CUSTOM_POWERLOCK_WRAP)\r
-                       if (bWrapped)\r
-                       {\r
-                               UnwrapSystemPowerChange(&gsSysSpecificData);\r
-                       }\r
-#endif\r
-                       if (eError != PVRSRV_OK)\r
-                       {\r
-                               PVR_DPF((PVR_DBG_ERROR,"SysSystemPrePowerState: OSUninstallDeviceLISR failed (%d)", eError));\r
-                               return eError;\r
-                       }\r
-                       SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_UNINSTALL_LISR);\r
-                       SYS_SPECIFIC_DATA_CLEAR(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LISR);\r
-               }\r
-#endif\r
-\r
-               if (SYS_SPECIFIC_DATA_TEST(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS))\r
-               {\r
-                       DisableSystemClocks(gpsSysData);\r
-\r
-                       SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_DISABLE_SYSCLOCKS);\r
-                       SYS_SPECIFIC_DATA_CLEAR(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS);\r
-               }\r
-       }\r
-\r
-       return eError;\r
-}\r
-\r
-\r
-PVRSRV_ERROR SysSystemPostPowerState(PVR_POWER_STATE eNewPowerState)\r
-{\r
-       PVRSRV_ERROR eError = PVRSRV_OK;\r
-\r
-       if (eNewPowerState == PVRSRV_POWER_STATE_D0)\r
-       {\r
-               PVR_TRACE(("SysSystemPostPowerState: Entering state D0"));\r
-\r
-               if (SYS_SPECIFIC_DATA_TEST(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_DISABLE_SYSCLOCKS))\r
-               {\r
-                       eError = EnableSystemClocks(gpsSysData);\r
-                       if (eError != PVRSRV_OK)\r
-                       {\r
-                               PVR_DPF((PVR_DBG_ERROR,"SysSystemPostPowerState: EnableSystemClocks failed (%d)", eError));\r
-                               return eError;\r
-                       }\r
-                       SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS);\r
-                       SYS_SPECIFIC_DATA_CLEAR(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_DISABLE_SYSCLOCKS);\r
-               }\r
-\r
-#if defined(SYS_USING_INTERRUPTS)\r
-               if (SYS_SPECIFIC_DATA_TEST(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_UNINSTALL_LISR))\r
-               {\r
-#if defined(SYS_CUSTOM_POWERLOCK_WRAP)\r
-                       IMG_BOOL bWrapped = WrapSystemPowerChange(&gsSysSpecificData);\r
-#endif\r
-\r
-                       eError = OSInstallDeviceLISR(gpsSysData, gsSGXDeviceMap.ui32IRQ, "SGX ISR", gpsSGXDevNode);\r
-#if defined(SYS_CUSTOM_POWERLOCK_WRAP)\r
-                       if (bWrapped)\r
-                       {\r
-                               UnwrapSystemPowerChange(&gsSysSpecificData);\r
-                       }\r
-#endif\r
-                       if (eError != PVRSRV_OK)\r
-                       {\r
-                               PVR_DPF((PVR_DBG_ERROR,"SysSystemPostPowerState: OSInstallDeviceLISR failed to install ISR (%d)", eError));\r
-                               return eError;\r
-                       }\r
-                       SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LISR);\r
-                       SYS_SPECIFIC_DATA_CLEAR(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_UNINSTALL_LISR);\r
-               }\r
-#endif\r
-       }\r
-       return eError;\r
-}\r
-\r
-\r
-PVRSRV_ERROR SysDevicePrePowerState(IMG_UINT32                 ui32DeviceIndex,\r
-                                                                       PVR_POWER_STATE         eNewPowerState,\r
-                                                                       PVR_POWER_STATE         eCurrentPowerState)\r
-{\r
-       PVR_UNREFERENCED_PARAMETER(eCurrentPowerState);\r
-\r
-       if (ui32DeviceIndex != gui32SGXDeviceID)\r
-       {\r
-               return PVRSRV_OK;\r
-       }\r
-\r
-#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)\r
-       if (eNewPowerState == PVRSRV_POWER_STATE_D3)\r
-       {\r
-               PVR_DPF((PVR_DBG_MESSAGE, "SysDevicePrePowerState: SGX Entering state D3"));\r
-               DisableSGXClocks(gpsSysData);\r
-               PVRSRVSetDCState(DC_STATE_SUSPEND_COMMANDS);\r
-       }\r
-#else  \r
-       PVR_UNREFERENCED_PARAMETER(eNewPowerState );\r
-#endif \r
-       return PVRSRV_OK;\r
-}\r
-\r
-\r
-PVRSRV_ERROR SysDevicePostPowerState(IMG_UINT32                        ui32DeviceIndex,\r
-                                                                        PVR_POWER_STATE        eNewPowerState,\r
-                                                                        PVR_POWER_STATE        eCurrentPowerState)\r
-{\r
-       PVRSRV_ERROR eError = PVRSRV_OK;\r
-\r
-       PVR_UNREFERENCED_PARAMETER(eNewPowerState);\r
-\r
-       if (ui32DeviceIndex != gui32SGXDeviceID)\r
-       {\r
-               return eError;\r
-       }\r
-\r
-#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)\r
-       if (eCurrentPowerState == PVRSRV_POWER_STATE_D3)\r
-       {\r
-               PVR_DPF((PVR_DBG_MESSAGE, "SysDevicePostPowerState: SGX Leaving state D3"));\r
-               PVRSRVSetDCState(DC_STATE_RESUME_COMMANDS);\r
-               eError = EnableSGXClocks(gpsSysData);\r
-       }\r
-#else  \r
-       PVR_UNREFERENCED_PARAMETER(eCurrentPowerState);\r
-#endif \r
-\r
-       return eError;\r
-}\r
-\r
-\r
-PVRSRV_ERROR SysOEMFunction (  IMG_UINT32      ui32ID,\r
-                                                               IMG_VOID        *pvIn,\r
-                                                               IMG_UINT32      ulInSize,\r
-                                                               IMG_VOID        *pvOut,\r
-                                                               IMG_UINT32      ulOutSize)\r
-{\r
-       PVR_UNREFERENCED_PARAMETER(ui32ID);\r
-       PVR_UNREFERENCED_PARAMETER(pvIn);\r
-       PVR_UNREFERENCED_PARAMETER(ulInSize);\r
-       PVR_UNREFERENCED_PARAMETER(pvOut);\r
-       PVR_UNREFERENCED_PARAMETER(ulOutSize);\r
-\r
-       if ((ui32ID == OEM_GET_EXT_FUNCS) &&\r
-               (ulOutSize == sizeof(PVRSRV_DC_OEM_JTABLE)))\r
-       {\r
-               \r
-               PVRSRV_DC_OEM_JTABLE *psOEMJTable = (PVRSRV_DC_OEM_JTABLE*) pvOut;\r
-               psOEMJTable->pfnOEMBridgeDispatch = &PVRSRV_BridgeDispatchKM;\r
-               return PVRSRV_OK;\r
-       }\r
-\r
-       return PVRSRV_ERROR_INVALID_PARAMS;\r
-}\r
+/**********************************************************************
+ *
+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ * 
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ * 
+ * This program is distributed in the hope it will be useful but, except 
+ * as otherwise stated in writing, without any warranty; without even the 
+ * implied warranty of merchantability or fitness for a particular purpose. 
+ * See the GNU General Public License for more details.
+ * 
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * 
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
+ *
+ ******************************************************************************/
+
+#include "services_headers.h"
+#include "kerneldisplay.h"
+#include "oemfuncs.h"
+#include "sgxinfo.h"
+#include "sgxinfokm.h"
+#include "syslocal.h"
+#include "sysconfig.h"
+
+#include "ocpdefs.h"
+
+#if !defined(NO_HARDWARE) && \
+     defined(SYS_USING_INTERRUPTS) && \
+     defined(SGX540) && (SGX_CORE_REV == 110)
+#define SGX_OCP_REGS_ENABLED
+#endif
+
+SYS_DATA* gpsSysData = (SYS_DATA*)IMG_NULL;
+SYS_DATA  gsSysData;
+
+static SYS_SPECIFIC_DATA gsSysSpecificData;
+SYS_SPECIFIC_DATA *gpsSysSpecificData;
+
+static IMG_UINT32      gui32SGXDeviceID;
+static SGX_DEVICE_MAP  gsSGXDeviceMap;
+static PVRSRV_DEVICE_NODE *gpsSGXDevNode;
+
+#define DEVICE_SGX_INTERRUPT (1 << 0)
+
+#if defined(NO_HARDWARE)
+static IMG_CPU_VIRTADDR gsSGXRegsCPUVAddr;
+#endif
+
+IMG_UINT32 PVRSRV_BridgeDispatchKM(IMG_UINT32  Ioctl,
+                                                                  IMG_BYTE             *pInBuf,
+                                                                  IMG_UINT32   InBufLen,
+                                                                  IMG_BYTE             *pOutBuf,
+                                                                  IMG_UINT32   OutBufLen,
+                                                                  IMG_UINT32   *pdwBytesTransferred);
+
+#if defined(SGX_OCP_REGS_ENABLED)
+
+#define SYS_OMAP4430_OCP_REGS_SYS_PHYS_BASE            (SYS_OMAP4430_SGX_REGS_SYS_PHYS_BASE + EUR_CR_OCP_REVISION)
+#define SYS_OMAP4430_OCP_REGS_SIZE                             0x110
+
+static IMG_CPU_VIRTADDR gpvOCPRegsLinAddr;
+
+static PVRSRV_ERROR EnableSGXClocksWrap(SYS_DATA *psSysData)
+{
+       PVRSRV_ERROR eError = EnableSGXClocks(psSysData);
+
+       if(eError == PVRSRV_OK)
+       {
+               OSWriteHWReg(gpvOCPRegsLinAddr,
+                                        EUR_CR_OCP_SYSCONFIG - EUR_CR_OCP_REVISION,
+                                        0x14); 
+               OSWriteHWReg(gpvOCPRegsLinAddr,
+                                        EUR_CR_OCP_DEBUG_CONFIG - EUR_CR_OCP_REVISION,
+                                        EUR_CR_OCP_DEBUG_CONFIG_THALIA_INT_BYPASS_MASK);
+       }
+
+       return eError;
+}
+
+#else 
+
+static INLINE PVRSRV_ERROR EnableSGXClocksWrap(SYS_DATA *psSysData)
+{
+       return EnableSGXClocks(psSysData);
+}
+
+#endif 
+
+static INLINE PVRSRV_ERROR EnableSystemClocksWrap(SYS_DATA *psSysData)
+{
+       PVRSRV_ERROR eError = EnableSystemClocks(psSysData);
+
+#if !defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
+       if(eError == PVRSRV_OK)
+       {
+               
+               EnableSGXClocksWrap(psSysData);
+       }
+#endif
+
+       return eError;
+}
+
+static PVRSRV_ERROR SysLocateDevices(SYS_DATA *psSysData)
+{
+#if defined(NO_HARDWARE)
+       PVRSRV_ERROR eError;
+       IMG_CPU_PHYADDR sCpuPAddr;
+#endif
+
+       PVR_UNREFERENCED_PARAMETER(psSysData);
+
+       
+       gsSGXDeviceMap.ui32Flags = 0x0;
+       
+#if defined(NO_HARDWARE)
+       
+       
+       eError = OSBaseAllocContigMemory(SYS_OMAP4430_SGX_REGS_SIZE, 
+                                                                        &gsSGXRegsCPUVAddr,
+                                                                        &sCpuPAddr);
+       if(eError != PVRSRV_OK)
+       {
+               return eError;
+       }
+       gsSGXDeviceMap.sRegsCpuPBase = sCpuPAddr;
+       gsSGXDeviceMap.sRegsSysPBase = SysCpuPAddrToSysPAddr(gsSGXDeviceMap.sRegsCpuPBase);
+       gsSGXDeviceMap.ui32RegsSize = SYS_OMAP4430_SGX_REGS_SIZE;
+#if defined(__linux__)
+       
+       gsSGXDeviceMap.pvRegsCpuVBase = gsSGXRegsCPUVAddr;
+#else
+       
+       gsSGXDeviceMap.pvRegsCpuVBase = IMG_NULL;
+#endif
+
+       OSMemSet(gsSGXRegsCPUVAddr, 0, SYS_OMAP4430_SGX_REGS_SIZE);
+
+       
+
+
+       gsSGXDeviceMap.ui32IRQ = 0;
+
+#else 
+
+       gsSGXDeviceMap.sRegsSysPBase.uiAddr = SYS_OMAP4430_SGX_REGS_SYS_PHYS_BASE;
+       gsSGXDeviceMap.sRegsCpuPBase = SysSysPAddrToCpuPAddr(gsSGXDeviceMap.sRegsSysPBase);
+       gsSGXDeviceMap.ui32RegsSize = SYS_OMAP4430_SGX_REGS_SIZE;
+
+       gsSGXDeviceMap.ui32IRQ = SYS_OMAP4430_SGX_IRQ;
+
+#endif 
+
+#if defined(PDUMP)
+       {
+               
+               static IMG_CHAR pszPDumpDevName[] = "SGXMEM";
+               gsSGXDeviceMap.pszPDumpDevName = pszPDumpDevName;
+       }
+#endif
+
+       
+
+
+       return PVRSRV_OK;
+}
+
+
+/*
+ * SysCreateVersionString
+ *
+ * Build a human-readable "SGX revision = M.m.p" string by temporarily
+ * mapping the SGX register bank at CPU-physical base sRegRegion and
+ * reading EUR_CR_CORE_REVISION (read as 0 when NO_HARDWARE is defined).
+ *
+ * Returns a pointer to a static buffer (NOT reentrant), or IMG_NULL if
+ * the register mapping or the snprintf fails.
+ */
+IMG_CHAR *SysCreateVersionString(IMG_CPU_PHYADDR sRegRegion)
+{
+       static IMG_CHAR aszVersionString[100];
+       SYS_DATA        *psSysData;
+       IMG_UINT32      ui32SGXRevision;
+       IMG_INT32       i32Count;
+#if !defined(NO_HARDWARE)
+       IMG_VOID        *pvRegsLinAddr;
+
+       /* Short-lived uncached kernel mapping, used only for one register read. */
+       pvRegsLinAddr = OSMapPhysToLin(sRegRegion,
+                                                                  SYS_OMAP4430_SGX_REGS_SIZE,
+                                                                  PVRSRV_HAP_UNCACHED|PVRSRV_HAP_KERNEL_ONLY,
+                                                                  IMG_NULL);
+       if(!pvRegsLinAddr)
+       {
+               return IMG_NULL;
+       }
+
+       ui32SGXRevision = OSReadHWReg((IMG_PVOID)((IMG_PBYTE)pvRegsLinAddr),
+                                                                 EUR_CR_CORE_REVISION);
+#else
+       ui32SGXRevision = 0;
+#endif
+
+       SysAcquireData(&psSysData);
+
+       /* Decode major/minor/maintenance fields from the revision register. */
+       i32Count = OSSNPrintf(aszVersionString, 100,
+                                                 "SGX revision = %u.%u.%u",
+                                                 (IMG_UINT)((ui32SGXRevision & EUR_CR_CORE_REVISION_MAJOR_MASK)
+                                                       >> EUR_CR_CORE_REVISION_MAJOR_SHIFT),
+                                                 (IMG_UINT)((ui32SGXRevision & EUR_CR_CORE_REVISION_MINOR_MASK)
+                                                       >> EUR_CR_CORE_REVISION_MINOR_SHIFT),
+                                                 (IMG_UINT)((ui32SGXRevision & EUR_CR_CORE_REVISION_MAINTENANCE_MASK)
+                                                       >> EUR_CR_CORE_REVISION_MAINTENANCE_SHIFT)
+                                                );
+
+#if !defined(NO_HARDWARE)
+       /* Drop the temporary register mapping before returning. */
+       OSUnMapPhysToLin(pvRegsLinAddr,
+                                        SYS_OMAP4430_SGX_REGS_SIZE,
+                                        PVRSRV_HAP_UNCACHED|PVRSRV_HAP_KERNEL_ONLY,
+                                        IMG_NULL);
+#endif
+
+       if(i32Count == -1)
+       {
+               return IMG_NULL;
+       }
+
+       return aszVersionString;
+}
+
+
+/*
+ * SysInitialise
+ *
+ * First-stage system bring-up: zeroes the global SYS_DATA / system-specific
+ * data, sets up environment data, locates the SGX device, registers and
+ * initialises it, and enables the system (and optionally SGX) clocks.
+ * Each completed step sets a SYS_SPECIFIC_DATA_ENABLE_* flag so that
+ * SysDeinitialise can unwind exactly what was done.
+ *
+ * Returns PVRSRV_OK on success; on failure, SysDeinitialise is invoked and
+ * gpsSysData is reset to IMG_NULL before the error is propagated.
+ */
+PVRSRV_ERROR SysInitialise(IMG_VOID)
+{
+       IMG_UINT32                      i;
+       PVRSRV_ERROR            eError;
+       PVRSRV_DEVICE_NODE      *psDeviceNode;
+       IMG_CPU_PHYADDR         TimerRegPhysBase;
+#if !defined(SGX_DYNAMIC_TIMING_INFO)
+       SGX_TIMING_INFORMATION* psTimingInfo;
+#endif
+       gpsSysData = &gsSysData;
+       OSMemSet(gpsSysData, 0, sizeof(SYS_DATA));
+
+       gpsSysSpecificData =  &gsSysSpecificData;
+       OSMemSet(gpsSysSpecificData, 0, sizeof(SYS_SPECIFIC_DATA));
+
+       gpsSysData->pvSysSpecificData = gpsSysSpecificData;
+
+       eError = OSInitEnvData(&gpsSysData->pvEnvSpecificData);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to setup env structure"));
+               (IMG_VOID)SysDeinitialise(gpsSysData);
+               gpsSysData = IMG_NULL;
+               return eError;
+       }
+       SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_ENVDATA);
+
+       gpsSysData->ui32NumDevices = SYS_DEVICE_COUNT;
+
+       /* Mark every device ID slot as free before registration. */
+       for(i=0; i<SYS_DEVICE_COUNT; i++)
+       {
+               gpsSysData->sDeviceID[i].uiID = i;
+               gpsSysData->sDeviceID[i].bInUse = IMG_FALSE;
+       }
+
+       gpsSysData->psDeviceNodeList = IMG_NULL;
+       gpsSysData->psQueueList = IMG_NULL;
+
+       eError = SysInitialiseCommon(gpsSysData);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed in SysInitialiseCommon"));
+               (IMG_VOID)SysDeinitialise(gpsSysData);
+               gpsSysData = IMG_NULL;
+               return eError;
+       }
+
+       /* Map 4 bytes of the GP11 timer counter register for SOC timing.
+        * NOTE(review): OSReservePhys return value is not checked here; a
+        * failure leaves pvSOCTimerRegisterKM NULL, which callers must
+        * tolerate — confirm this is intentional. */
+       TimerRegPhysBase.uiAddr = SYS_OMAP4430_GP11TIMER_REGS_SYS_PHYS_BASE;
+       gpsSysData->pvSOCTimerRegisterKM = IMG_NULL;
+       gpsSysData->hSOCTimerRegisterOSMemHandle = 0;
+       OSReservePhys(TimerRegPhysBase,
+                                 4,
+                                 PVRSRV_HAP_MULTI_PROCESS|PVRSRV_HAP_UNCACHED,
+                                 (IMG_VOID **)&gpsSysData->pvSOCTimerRegisterKM,
+                                 &gpsSysData->hSOCTimerRegisterOSMemHandle);
+
+#if !defined(SGX_DYNAMIC_TIMING_INFO)
+       /* Static timing info: clock speed, HW-recovery and uKernel frequencies. */
+       psTimingInfo = &gsSGXDeviceMap.sTimingInfo;
+       psTimingInfo->ui32CoreClockSpeed = SYS_SGX_CLOCK_SPEED;
+       psTimingInfo->ui32HWRecoveryFreq = SYS_SGX_HWRECOVERY_TIMEOUT_FREQ; 
+#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
+       psTimingInfo->bEnableActivePM = IMG_TRUE;
+#else  
+       psTimingInfo->bEnableActivePM = IMG_FALSE;
+#endif 
+       psTimingInfo->ui32ActivePowManLatencyms = SYS_SGX_ACTIVE_POWER_LATENCY_MS; 
+       psTimingInfo->ui32uKernelFreq = SYS_SGX_PDS_TIMER_FREQ; 
+#endif
+
+       
+
+       /* Source clock divider for the SGX functional clock. */
+       gpsSysSpecificData->ui32SrcClockDiv = 3;
+
+       
+
+
+
+       eError = SysLocateDevices(gpsSysData);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to locate devices"));
+               (IMG_VOID)SysDeinitialise(gpsSysData);
+               gpsSysData = IMG_NULL;
+               return eError;
+       }
+       SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LOCATEDEV);
+
+#if defined(SGX_OCP_REGS_ENABLED)
+       {
+               /* Map the OCP (interconnect) register block for the life of the driver. */
+               IMG_SYS_PHYADDR sOCPRegsSysPBase;
+               IMG_CPU_PHYADDR sOCPRegsCpuPBase;
+
+               sOCPRegsSysPBase.uiAddr = SYS_OMAP4430_OCP_REGS_SYS_PHYS_BASE;
+               sOCPRegsCpuPBase                = SysSysPAddrToCpuPAddr(sOCPRegsSysPBase);
+
+               gpvOCPRegsLinAddr               = OSMapPhysToLin(sOCPRegsCpuPBase,
+                                                                                                SYS_OMAP4430_OCP_REGS_SIZE,
+                                                                                                PVRSRV_HAP_UNCACHED|PVRSRV_HAP_KERNEL_ONLY,
+                                                                                                IMG_NULL);
+
+               if (gpvOCPRegsLinAddr == IMG_NULL)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to map OCP registers"));
+                       return PVRSRV_ERROR_BAD_MAPPING;
+               }
+               SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_OCPREGS);
+       }
+#endif
+
+       
+
+
+       eError = PVRSRVRegisterDevice(gpsSysData, SGXRegisterDevice,
+                                                                 DEVICE_SGX_INTERRUPT, &gui32SGXDeviceID);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to register device!"));
+               (IMG_VOID)SysDeinitialise(gpsSysData);
+               gpsSysData = IMG_NULL;
+               return eError;
+       }
+       SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_REGDEV);
+
+       
+
+
+       /* Walk the registered device list, configure the SGX node's heaps for
+        * non-contiguous system memory backing, and cache the node pointer. */
+       psDeviceNode = gpsSysData->psDeviceNodeList;
+       while(psDeviceNode)
+       {
+               
+               switch(psDeviceNode->sDevId.eDeviceType)
+               {
+                       case PVRSRV_DEVICE_TYPE_SGX:
+                       {
+                               DEVICE_MEMORY_INFO *psDevMemoryInfo;
+                               DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
+
+                               /* UMA system: no local (device-private) memory arena. */
+
+
+                               psDeviceNode->psLocalDevMemArena = IMG_NULL;
+
+                               
+                               psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
+                               psDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap;
+
+                               
+                               for(i=0; i<psDevMemoryInfo->ui32HeapCount; i++)
+                               {
+                                       psDeviceMemoryHeap[i].ui32Attribs |= PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG;
+                               }
+
+                               gpsSGXDevNode = psDeviceNode;
+                               gsSysSpecificData.psSGXDevNode = psDeviceNode;
+
+                               break;
+                       }
+                       default:
+                               /* NOTE(review): unlike the other failure paths, this one
+                                * returns without calling SysDeinitialise — confirm that
+                                * leaking the partially-initialised state is acceptable. */
+                               PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to find SGX device node!"));
+                               return PVRSRV_ERROR_INIT_FAILURE;
+               }
+
+               
+               psDeviceNode = psDeviceNode->psNext;
+       }
+
+       eError = EnableSystemClocksWrap(gpsSysData);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to Enable system clocks (%d)", eError));
+               (IMG_VOID)SysDeinitialise(gpsSysData);
+               gpsSysData = IMG_NULL;
+               return eError;
+       }
+       SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS);
+#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
+       /* With APM the SGX clocks must be up while the device is initialised. */
+       eError = EnableSGXClocksWrap(gpsSysData);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to Enable SGX clocks (%d)", eError));
+               (IMG_VOID)SysDeinitialise(gpsSysData);
+               gpsSysData = IMG_NULL;
+               return eError;
+       }
+#endif 
+
+       eError = PVRSRVInitialiseDevice(gui32SGXDeviceID);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to initialise device!"));
+               (IMG_VOID)SysDeinitialise(gpsSysData);
+               gpsSysData = IMG_NULL;
+               return eError;
+       }
+       SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_INITDEV);
+
+#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
+       /* Init done: let APM power SGX back down until it is needed. */
+       DisableSGXClocks(gpsSysData);
+#endif 
+
+       return PVRSRV_OK;
+}
+
+
+/*
+ * SysFinalise
+ *
+ * Second-stage bring-up, run after SysInitialise: installs the MISR and
+ * (when SYS_USING_INTERRUPTS is defined) the SGX device LISR, then builds
+ * the version string.  Under SUPPORT_ACTIVE_POWER_MANAGEMENT the SGX
+ * clocks are enabled for the duration and disabled again on exit.
+ *
+ * A failure to create the version string is logged but is NOT fatal.
+ * Returns PVRSRV_OK on success or the first fatal error encountered.
+ */
+PVRSRV_ERROR SysFinalise(IMG_VOID)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
+       eError = EnableSGXClocksWrap(gpsSysData);
+       if (eError != PVRSRV_OK)
+       {
+               /* Fixed: this message previously claimed to come from SysInitialise. */
+               PVR_DPF((PVR_DBG_ERROR,"SysFinalise: Failed to Enable SGX clocks (%d)", eError));
+               return eError;
+       }
+#endif 
+
+       eError = OSInstallMISR(gpsSysData);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,"SysFinalise: Failed to install MISR"));
+               return eError;
+       }
+       SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_MISR);
+
+#if defined(SYS_USING_INTERRUPTS)
+       /* Hook the SGX interrupt line to the device node. */
+       eError = OSInstallDeviceLISR(gpsSysData, gsSGXDeviceMap.ui32IRQ, "SGX ISR", gpsSGXDevNode);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,"SysFinalise: Failed to install ISR"));
+               return eError;
+       }
+       SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LISR);
+#endif 
+
+       /* Version string failure is non-fatal: log and continue. */
+       gpsSysData->pszVersionString = SysCreateVersionString(gsSGXDeviceMap.sRegsCpuPBase);
+       if (!gpsSysData->pszVersionString)
+       {
+               PVR_DPF((PVR_DBG_ERROR,"SysFinalise: Failed to create a system version string"));
+       }
+       else
+       {
+               PVR_DPF((PVR_DBG_WARNING, "SysFinalise: Version string: %s", gpsSysData->pszVersionString));
+       }
+
+#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
+       /* Let APM power SGX back down now that finalisation is complete. */
+       DisableSGXClocks(gpsSysData);
+#endif 
+
+       gpsSysSpecificData->bSGXInitComplete = IMG_TRUE;
+
+       return eError;
+}
+
+
+/*
+ * SysDeinitialise
+ *
+ * Unwind SysInitialise/SysFinalise in reverse order, guided by the
+ * SYS_SPECIFIC_DATA_ENABLE_* flags so that only completed steps are
+ * undone (this function is also used for error-path cleanup).
+ *
+ * NOTE(review): the psSysData parameter and the gpsSysData global are
+ * used interchangeably throughout; confirm callers always pass
+ * gpsSysData, otherwise the mix would be inconsistent.
+ */
+PVRSRV_ERROR SysDeinitialise (SYS_DATA *psSysData)
+{
+       PVRSRV_ERROR eError;
+
+#if defined(SYS_USING_INTERRUPTS)
+       if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LISR))
+       {
+               eError = OSUninstallDeviceLISR(psSysData);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: OSUninstallDeviceLISR failed"));
+                       return eError;
+               }
+       }
+#endif
+
+       if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_MISR))
+       {
+               eError = OSUninstallMISR(psSysData);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: OSUninstallMISR failed"));
+                       return eError;
+               }
+       }
+
+       if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_INITDEV))
+       {
+#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
+               PVR_ASSERT(SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS));
+               /* SGX clocks must be running for the device de-init to touch HW. */
+               eError = EnableSGXClocksWrap(gpsSysData);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: EnableSGXClocks failed"));
+                       return eError;
+               }
+#endif 
+
+               
+               eError = PVRSRVDeinitialiseDevice (gui32SGXDeviceID);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: failed to de-init the device"));
+                       return eError;
+               }
+       }
+       
+#if defined(SGX_OCP_REGS_ENABLED)
+       if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_OCPREGS))
+       {
+               OSUnMapPhysToLin(gpvOCPRegsLinAddr,
+                                                SYS_OMAP4430_OCP_REGS_SIZE,
+                                                PVRSRV_HAP_UNCACHED|PVRSRV_HAP_KERNEL_ONLY,
+                                                IMG_NULL);
+       }
+#endif
+
+       
+
+       if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS))
+       {
+               DisableSystemClocks(gpsSysData);
+       }
+
+       if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_ENVDATA))
+       {       
+               eError = OSDeInitEnvData(gpsSysData->pvEnvSpecificData);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: failed to de-init env structure"));
+                       return eError;
+               }
+       }
+
+       /* Release the GP11 timer register mapping taken in SysInitialise. */
+       if(gpsSysData->pvSOCTimerRegisterKM)
+       {
+               OSUnReservePhys(gpsSysData->pvSOCTimerRegisterKM,
+                                               4,
+                                               PVRSRV_HAP_MULTI_PROCESS|PVRSRV_HAP_UNCACHED,
+                                               gpsSysData->hSOCTimerRegisterOSMemHandle);
+       }
+
+       SysDeinitialiseCommon(gpsSysData);
+
+#if defined(NO_HARDWARE)
+       if(SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LOCATEDEV))
+       {
+               /* No-HW builds allocate a fake register block; free it here. */
+               OSBaseFreeContigMemory(SYS_OMAP4430_SGX_REGS_SIZE, gsSGXRegsCPUVAddr, gsSGXDeviceMap.sRegsCpuPBase);
+       }
+#endif
+
+       /* Reset all step flags so a subsequent SysInitialise starts clean. */
+       gpsSysSpecificData->ui32SysSpecificData = 0;
+       gpsSysSpecificData->bSGXInitComplete = IMG_FALSE;
+
+       gpsSysData = IMG_NULL;
+
+       return PVRSRV_OK;
+}
+
+
+/*
+ * SysGetDeviceMemoryMap
+ *
+ * Return (via ppvDeviceMap) the static device memory map for the
+ * requested device type.  Only PVRSRV_DEVICE_TYPE_SGX is supported;
+ * an unsupported type is logged but still returns PVRSRV_OK with
+ * *ppvDeviceMap left untouched.
+ */
+PVRSRV_ERROR SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE  eDeviceType,
+                                                                  IMG_VOID                             **ppvDeviceMap)
+{
+
+       switch(eDeviceType)
+       {
+               case PVRSRV_DEVICE_TYPE_SGX:
+               {
+                       
+                       *ppvDeviceMap = (IMG_VOID*)&gsSGXDeviceMap;
+
+                       break;
+               }
+               default:
+               {
+                       PVR_DPF((PVR_DBG_ERROR,"SysGetDeviceMemoryMap: unsupported device type"));
+               }
+       }
+       return PVRSRV_OK;
+}
+
+
+/*
+ * Address-space conversion helpers.
+ *
+ * On this UMA platform CPU-physical, system-physical and device-physical
+ * addresses are all the same value, so each conversion below is a plain
+ * identity copy of uiAddr between the differently-typed wrappers.
+ */
+IMG_DEV_PHYADDR SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE       eDeviceType,
+                                                                         IMG_CPU_PHYADDR               CpuPAddr)
+{
+       IMG_DEV_PHYADDR DevPAddr;
+
+       PVR_UNREFERENCED_PARAMETER(eDeviceType);
+
+       
+       DevPAddr.uiAddr = CpuPAddr.uiAddr;
+       
+       return DevPAddr;
+}
+
+IMG_CPU_PHYADDR SysSysPAddrToCpuPAddr (IMG_SYS_PHYADDR sys_paddr)
+{
+       IMG_CPU_PHYADDR cpu_paddr;
+
+       
+       cpu_paddr.uiAddr = sys_paddr.uiAddr;
+       return cpu_paddr;
+}
+
+IMG_SYS_PHYADDR SysCpuPAddrToSysPAddr (IMG_CPU_PHYADDR cpu_paddr)
+{
+       IMG_SYS_PHYADDR sys_paddr;
+
+       
+       sys_paddr.uiAddr = cpu_paddr.uiAddr;
+       return sys_paddr;
+}
+
+
+IMG_DEV_PHYADDR SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE eDeviceType, IMG_SYS_PHYADDR SysPAddr)
+{
+       IMG_DEV_PHYADDR DevPAddr;
+
+       PVR_UNREFERENCED_PARAMETER(eDeviceType);
+
+       
+       DevPAddr.uiAddr = SysPAddr.uiAddr;
+
+       return DevPAddr;
+}
+
+
+IMG_SYS_PHYADDR SysDevPAddrToSysPAddr(PVRSRV_DEVICE_TYPE eDeviceType, IMG_DEV_PHYADDR DevPAddr)
+{
+       IMG_SYS_PHYADDR SysPAddr;
+
+       PVR_UNREFERENCED_PARAMETER(eDeviceType);
+
+       
+       SysPAddr.uiAddr = DevPAddr.uiAddr;
+
+       return SysPAddr;
+}
+
+
+/* No external (third-party display etc.) devices need system-level
+ * registration on this platform; both hooks are intentional no-ops. */
+IMG_VOID SysRegisterExternalDevice(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+}
+
+
+IMG_VOID SysRemoveExternalDevice(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+}
+
+
+/*
+ * SysGetInterruptSource
+ *
+ * Report which interrupt source bit(s) the given device owns.  With
+ * NO_HARDWARE every source is claimed (0xFFFFFFFF); otherwise the bit
+ * assigned to the device node at registration time is returned.
+ */
+IMG_UINT32 SysGetInterruptSource(SYS_DATA                      *psSysData,
+                                                                PVRSRV_DEVICE_NODE     *psDeviceNode)
+{
+       PVR_UNREFERENCED_PARAMETER(psSysData);
+#if defined(NO_HARDWARE)
+       
+       return 0xFFFFFFFF;
+#else
+       
+       return psDeviceNode->ui32SOCInterruptBit;
+#endif
+}
+
+
+/*
+ * SysClearInterrupts
+ *
+ * Dummy-read EUR_CR_EVENT_HOST_CLEAR so the preceding interrupt-clear
+ * write is flushed to the device before the ISR returns.
+ */
+IMG_VOID SysClearInterrupts(SYS_DATA* psSysData, IMG_UINT32 ui32ClearBits)
+{
+       PVR_UNREFERENCED_PARAMETER(psSysData);
+       PVR_UNREFERENCED_PARAMETER(ui32ClearBits);
+
+       
+       OSReadHWReg(((PVRSRV_SGXDEV_INFO *)gpsSGXDevNode->pvDevice)->pvRegsBaseKM,
+                                                                               EUR_CR_EVENT_HOST_CLEAR);
+}
+
+
+/*
+ * SysSystemPrePowerState
+ *
+ * System-wide suspend hook.  On entry to D3 the SGX LISR is uninstalled
+ * and the system clocks disabled; each action flips its ENABLE_* flag to
+ * the matching PM_* flag so SysSystemPostPowerState can restore exactly
+ * what was torn down.  Other power states are ignored.
+ */
+PVRSRV_ERROR SysSystemPrePowerState(PVRSRV_SYS_POWER_STATE eNewPowerState)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       if (eNewPowerState == PVRSRV_SYS_POWER_STATE_D3)
+       {
+               PVR_TRACE(("SysSystemPrePowerState: Entering state D3"));
+
+#if defined(SYS_USING_INTERRUPTS)
+               if (SYS_SPECIFIC_DATA_TEST(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LISR))
+               {
+#if defined(SYS_CUSTOM_POWERLOCK_WRAP)
+                       IMG_BOOL bWrapped = WrapSystemPowerChange(&gsSysSpecificData);
+#endif
+                       eError = OSUninstallDeviceLISR(gpsSysData);
+#if defined(SYS_CUSTOM_POWERLOCK_WRAP)
+                       if (bWrapped)
+                       {
+                               UnwrapSystemPowerChange(&gsSysSpecificData);
+                       }
+#endif
+                       if (eError != PVRSRV_OK)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR,"SysSystemPrePowerState: OSUninstallDeviceLISR failed (%d)", eError));
+                               return eError;
+                       }
+                       SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_UNINSTALL_LISR);
+                       SYS_SPECIFIC_DATA_CLEAR(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LISR);
+               }
+#endif
+
+               if (SYS_SPECIFIC_DATA_TEST(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS))
+               {
+                       DisableSystemClocks(gpsSysData);
+
+                       SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_DISABLE_SYSCLOCKS);
+                       SYS_SPECIFIC_DATA_CLEAR(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS);
+               }
+       }
+
+       return eError;
+}
+
+
+/*
+ * SysSystemPostPowerState
+ *
+ * System-wide resume hook, the mirror of SysSystemPrePowerState.  On
+ * entry to D0 it re-enables the system clocks and reinstalls the SGX
+ * LISR for exactly the steps recorded in the PM_* flags, flipping each
+ * flag back to its ENABLE_* counterpart.  Other power states are ignored.
+ */
+PVRSRV_ERROR SysSystemPostPowerState(PVRSRV_SYS_POWER_STATE eNewPowerState)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       if (eNewPowerState == PVRSRV_SYS_POWER_STATE_D0)
+       {
+               PVR_TRACE(("SysSystemPostPowerState: Entering state D0"));
+
+               if (SYS_SPECIFIC_DATA_TEST(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_DISABLE_SYSCLOCKS))
+               {
+                       eError = EnableSystemClocksWrap(gpsSysData);
+                       if (eError != PVRSRV_OK)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR,"SysSystemPostPowerState: EnableSystemClocksWrap failed (%d)", eError));
+                               return eError;
+                       }
+                       SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS);
+                       SYS_SPECIFIC_DATA_CLEAR(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_DISABLE_SYSCLOCKS);
+               }
+
+#if defined(SYS_USING_INTERRUPTS)
+               if (SYS_SPECIFIC_DATA_TEST(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_UNINSTALL_LISR))
+               {
+#if defined(SYS_CUSTOM_POWERLOCK_WRAP)
+                       IMG_BOOL bWrapped = WrapSystemPowerChange(&gsSysSpecificData);
+#endif
+
+                       eError = OSInstallDeviceLISR(gpsSysData, gsSGXDeviceMap.ui32IRQ, "SGX ISR", gpsSGXDevNode);
+#if defined(SYS_CUSTOM_POWERLOCK_WRAP)
+                       if (bWrapped)
+                       {
+                               UnwrapSystemPowerChange(&gsSysSpecificData);
+                       }
+#endif
+                       if (eError != PVRSRV_OK)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR,"SysSystemPostPowerState: OSInstallDeviceLISR failed to install ISR (%d)", eError));
+                               return eError;
+                       }
+                       SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LISR);
+                       SYS_SPECIFIC_DATA_CLEAR(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_UNINSTALL_LISR);
+               }
+#endif
+       }
+       return eError;
+}
+
+
+/*
+ * SysDevicePrePowerState
+ *
+ * Per-device pre-transition hook.  Only acts for the SGX device: when
+ * active power management is enabled and SGX is being turned OFF, its
+ * clocks are gated here.  Always returns PVRSRV_OK.
+ */
+PVRSRV_ERROR SysDevicePrePowerState(IMG_UINT32                         ui32DeviceIndex,
+                                                                       PVRSRV_DEV_POWER_STATE  eNewPowerState,
+                                                                       PVRSRV_DEV_POWER_STATE  eCurrentPowerState)
+{
+       PVR_UNREFERENCED_PARAMETER(eCurrentPowerState);
+
+       if (ui32DeviceIndex != gui32SGXDeviceID)
+       {
+               return PVRSRV_OK;
+       }
+
+#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
+       if (eNewPowerState == PVRSRV_DEV_POWER_STATE_OFF)
+       {
+               PVR_DPF((PVR_DBG_MESSAGE, "SysDevicePrePowerState: SGX Entering state D3"));
+               DisableSGXClocks(gpsSysData);
+       }
+#else  
+       PVR_UNREFERENCED_PARAMETER(eNewPowerState );
+#endif 
+       return PVRSRV_OK;
+}
+
+
+/*
+ * SysDevicePostPowerState
+ *
+ * Per-device post-transition hook, mirror of SysDevicePrePowerState.
+ * Only acts for the SGX device: when active power management is enabled
+ * and SGX is coming back from OFF, its clocks are re-enabled here and
+ * the result of EnableSGXClocksWrap is returned.
+ */
+PVRSRV_ERROR SysDevicePostPowerState(IMG_UINT32                                ui32DeviceIndex,
+                                                                        PVRSRV_DEV_POWER_STATE eNewPowerState,
+                                                                        PVRSRV_DEV_POWER_STATE eCurrentPowerState)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       PVR_UNREFERENCED_PARAMETER(eNewPowerState);
+
+       if (ui32DeviceIndex != gui32SGXDeviceID)
+       {
+               return eError;
+       }
+
+#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
+       if (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_OFF)
+       {
+               PVR_DPF((PVR_DBG_MESSAGE, "SysDevicePostPowerState: SGX Leaving state D3"));
+               eError = EnableSGXClocksWrap(gpsSysData);
+       }
+#else  
+       PVR_UNREFERENCED_PARAMETER(eCurrentPowerState);
+#endif 
+
+       return eError;
+}
+
+
+/*
+ * SysOEMFunction
+ *
+ * Generic OEM escape entry point.  The only supported operation is
+ * OEM_GET_EXT_FUNCS, which fills the caller-supplied PVRSRV_DC_OEM_JTABLE
+ * (pvOut, size-checked against ulOutSize) with the bridge dispatch
+ * function pointer.  Anything else returns PVRSRV_ERROR_INVALID_PARAMS.
+ *
+ * NOTE(review): ui32ID, pvOut and ulOutSize are marked unreferenced yet
+ * used below — harmless since the macro just casts to void, but the
+ * redundant markers could be dropped.
+ */
+PVRSRV_ERROR SysOEMFunction (  IMG_UINT32      ui32ID,
+                                                               IMG_VOID        *pvIn,
+                                                               IMG_UINT32      ulInSize,
+                                                               IMG_VOID        *pvOut,
+                                                               IMG_UINT32      ulOutSize)
+{
+       PVR_UNREFERENCED_PARAMETER(ui32ID);
+       PVR_UNREFERENCED_PARAMETER(pvIn);
+       PVR_UNREFERENCED_PARAMETER(ulInSize);
+       PVR_UNREFERENCED_PARAMETER(pvOut);
+       PVR_UNREFERENCED_PARAMETER(ulOutSize);
+
+       if ((ui32ID == OEM_GET_EXT_FUNCS) &&
+               (ulOutSize == sizeof(PVRSRV_DC_OEM_JTABLE)))
+       {
+               
+               PVRSRV_DC_OEM_JTABLE *psOEMJTable = (PVRSRV_DC_OEM_JTABLE*) pvOut;
+               psOEMJTable->pfnOEMBridgeDispatch = &PVRSRV_BridgeDispatchKM;
+               return PVRSRV_OK;
+       }
+
+       return PVRSRV_ERROR_INVALID_PARAMS;
+}
similarity index 65%
rename from services4/system/ti8168/sysconfig.h
rename to services4/system/omap4/sysconfig.h
index 516155e..ffb02f2 100644 (file)
@@ -1,53 +1,65 @@
-/**********************************************************************\r
- *\r
- * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.\r
- * \r
- * This program is free software; you can redistribute it and/or modify it\r
- * under the terms and conditions of the GNU General Public License,\r
- * version 2, as published by the Free Software Foundation.\r
- * \r
- * This program is distributed in the hope it will be useful but, except \r
- * as otherwise stated in writing, without any warranty; without even the \r
- * implied warranty of merchantability or fitness for a particular purpose. \r
- * See the GNU General Public License for more details.\r
- * \r
- * You should have received a copy of the GNU General Public License along with\r
- * this program; if not, write to the Free Software Foundation, Inc.,\r
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.\r
- * \r
- * The full GNU General Public License is included in this distribution in\r
- * the file called "COPYING".\r
- *\r
- * Contact Information:\r
- * Imagination Technologies Ltd. <gpl-support@imgtec.com>\r
- * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK \r
- *\r
- ******************************************************************************/\r
-\r
-#if !defined(__SOCCONFIG_H__)\r
-#define __SOCCONFIG_H__\r
-\r
-#include "syscommon.h"\r
-\r
-#define VS_PRODUCT_NAME        "OMAP3630"\r
-\r
-#define SYS_SGX_CLOCK_SPEED    330000000\r
-#define SYS_SGX_HWRECOVERY_TIMEOUT_FREQ                (100)   \r
-#define SYS_SGX_PDS_TIMER_FREQ                 (1000)  \r
-#define SYS_SGX_ACTIVE_POWER_LATENCY_MS                (1)\r
-\r
-\r
-#define        SYS_OMAP3430_VDD2_OPP3_SGX_CLOCK_SPEED SYS_SGX_CLOCK_SPEED\r
-#define SYS_OMAP3430_VDD2_OPP2_SGX_CLOCK_SPEED (SYS_SGX_CLOCK_SPEED / 2)\r
-\r
-#define SYS_OMAP3430_SGX_REGS_SYS_PHYS_BASE  0x56000000\r
-#define SYS_OMAP3430_SGX_REGS_SIZE           0x10000\r
-\r
-#define SYS_OMAP3430_SGX_IRQ                           37 \r
-\r
-#define SYS_OMAP3430_GP11TIMER_ENABLE_SYS_PHYS_BASE      0x48048038\r
-#define SYS_OMAP3430_GP11TIMER_REGS_SYS_PHYS_BASE       0x4804803C\r
-#define SYS_OMAP3430_GP11TIMER_TSICR_SYS_PHYS_BASE      0x48048054\r
-\r
\r
-#endif \r
+/**********************************************************************
+ *
+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ * 
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ * 
+ * This program is distributed in the hope it will be useful but, except 
+ * as otherwise stated in writing, without any warranty; without even the 
+ * implied warranty of merchantability or fitness for a particular purpose. 
+ * See the GNU General Public License for more details.
+ * 
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * 
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
+ *
+ ******************************************************************************/
+
+#if !defined(__SOCCONFIG_H__)
+#define __SOCCONFIG_H__
+
+#include "syscommon.h"
+
+#define VS_PRODUCT_NAME        "OMAP4"
+
+/* SGX core clock: exactly one of the SGX_CLK_* build options selects the
+ * speed; if none is defined, SYS_SGX_CLOCK_SPEED is left undefined and
+ * sysconfig.c will fail to compile (deliberate configuration check). */
+#if defined(SGX_CLK_PER_192)
+#define SYS_SGX_CLOCK_SPEED     192000000
+#else
+ #if defined(SGX_CLK_CORE_DIV8)
+ #define SYS_SGX_CLOCK_SPEED    190464000
+ #else
+  #if defined(SGX_CLK_CORE_DIV5)
+  #define SYS_SGX_CLOCK_SPEED   304742400
+  #endif
+ #endif
+#endif
+
+#define SYS_SGX_HWRECOVERY_TIMEOUT_FREQ                (100)   
+#define SYS_SGX_PDS_TIMER_FREQ                         (1000)  
+
+#if !defined(SYS_SGX_ACTIVE_POWER_LATENCY_MS)
+#define SYS_SGX_ACTIVE_POWER_LATENCY_MS                (1)
+#endif
+
+
+#define SYS_OMAP4430_SGX_REGS_SYS_PHYS_BASE  0x56000000
+
+/* NOTE(review): 0xFFFF looks like an off-by-one — register windows are
+ * normally a power of two (the OMAP3 variant uses 0x10000); confirm. */
+#define SYS_OMAP4430_SGX_REGS_SIZE           0xFFFF
+
+#define SYS_OMAP4430_SGX_IRQ                            53 
+
+/* GP11 timer registers used by the services SOC timer (OMAP4 L4-PER). */
+#define SYS_OMAP4430_GP11TIMER_ENABLE_SYS_PHYS_BASE  0x48088038
+#define SYS_OMAP4430_GP11TIMER_REGS_SYS_PHYS_BASE       0x4808803C
+#define SYS_OMAP4430_GP11TIMER_TSICR_SYS_PHYS_BASE      0x48088054
+
+#endif 
diff --git a/services4/system/omap4/sysinfo.h b/services4/system/omap4/sysinfo.h
new file mode 100644 (file)
index 0000000..f9d7e68
--- /dev/null
@@ -0,0 +1,42 @@
+/**********************************************************************
+ *
+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ * 
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ * 
+ * This program is distributed in the hope it will be useful but, except 
+ * as otherwise stated in writing, without any warranty; without even the 
+ * implied warranty of merchantability or fitness for a particular purpose. 
+ * See the GNU General Public License for more details.
+ * 
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * 
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
+ *
+ ******************************************************************************/
+
+#if !defined(__SYSINFO_H__)
+#define __SYSINFO_H__
+
+#if defined(PVR_LINUX_USING_WORKQUEUES)
+#define MAX_HW_TIME_US                         (1000000)
+#define WAIT_TRY_COUNT                         (20000)
+#else
+#define MAX_HW_TIME_US                         (500000)
+#define WAIT_TRY_COUNT                         (10000)
+#endif
+
+
+//#define SYS_DEVICE_COUNT 15 
+#define SYS_DEVICE_COUNT 3
+
+#endif 
similarity index 94%
rename from services4/system/ti8168/syslocal.h
rename to services4/system/omap4/syslocal.h
index 74de28f..d3e5210 100644 (file)
-/**********************************************************************\r
- *\r
- * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.\r
- * \r
- * This program is free software; you can redistribute it and/or modify it\r
- * under the terms and conditions of the GNU General Public License,\r
- * version 2, as published by the Free Software Foundation.\r
- * \r
- * This program is distributed in the hope it will be useful but, except \r
- * as otherwise stated in writing, without any warranty; without even the \r
- * implied warranty of merchantability or fitness for a particular purpose. \r
- * See the GNU General Public License for more details.\r
- * \r
- * You should have received a copy of the GNU General Public License along with\r
- * this program; if not, write to the Free Software Foundation, Inc.,\r
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.\r
- * \r
- * The full GNU General Public License is included in this distribution in\r
- * the file called "COPYING".\r
- *\r
- * Contact Information:\r
- * Imagination Technologies Ltd. <gpl-support@imgtec.com>\r
- * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK \r
- *\r
- ******************************************************************************/\r
-\r
-#if !defined(__SYSLOCAL_H__)\r
-#define __SYSLOCAL_H__\r
-\r
-#if defined(__linux__)\r
-\r
-#include <linux/version.h>\r
-#include <linux/clk.h>\r
-#include <linux/spinlock.h>\r
-#include <asm/atomic.h>\r
-\r
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26))\r
-#include <linux/semaphore.h>\r
-#include <linux/resource.h>\r
-#else \r
-#include <asm/semaphore.h>\r
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22))\r
-#include <asm/arch/resource.h>\r
-#endif \r
-#endif \r
-\r
-#endif \r
-\r
-#if defined (__cplusplus)\r
-extern "C" {\r
-#endif\r
-\r
\r
\r
-IMG_CHAR *SysCreateVersionString(IMG_CPU_PHYADDR sRegRegion);\r
-\r
-IMG_VOID DisableSystemClocks(SYS_DATA *psSysData);\r
-PVRSRV_ERROR EnableSystemClocks(SYS_DATA *psSysData);\r
-\r
-IMG_VOID DisableSGXClocks(SYS_DATA *psSysData);\r
-PVRSRV_ERROR EnableSGXClocks(SYS_DATA *psSysData);\r
-\r
-#define SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS     0x00000001\r
-#define SYS_SPECIFIC_DATA_ENABLE_LISR          0x00000002\r
-#define SYS_SPECIFIC_DATA_ENABLE_MISR          0x00000004\r
-#define SYS_SPECIFIC_DATA_ENABLE_ENVDATA       0x00000008\r
-#define SYS_SPECIFIC_DATA_ENABLE_LOCDEV                0x00000010\r
-#define SYS_SPECIFIC_DATA_ENABLE_REGDEV                0x00000020\r
-#define SYS_SPECIFIC_DATA_ENABLE_PDUMPINIT     0x00000040\r
-#define SYS_SPECIFIC_DATA_ENABLE_INITDEV       0x00000080\r
-#define SYS_SPECIFIC_DATA_ENABLE_LOCATEDEV     0x00000100\r
-\r
-#define        SYS_SPECIFIC_DATA_PM_UNINSTALL_LISR     0x00000200\r
-#define        SYS_SPECIFIC_DATA_PM_DISABLE_SYSCLOCKS  0x00000400\r
-\r
-#define        SYS_SPECIFIC_DATA_SET(psSysSpecData, flag) ((IMG_VOID)((psSysSpecData)->ui32SysSpecificData |= (flag)))\r
-\r
-#define        SYS_SPECIFIC_DATA_CLEAR(psSysSpecData, flag) ((IMG_VOID)((psSysSpecData)->ui32SysSpecificData &= ~(flag)))\r
-\r
-#define        SYS_SPECIFIC_DATA_TEST(psSysSpecData, flag) (((psSysSpecData)->ui32SysSpecificData & (flag)) != 0)\r
\r
-typedef struct _SYS_SPECIFIC_DATA_TAG_\r
-{\r
-       IMG_UINT32      ui32SysSpecificData;\r
-       PVRSRV_DEVICE_NODE *psSGXDevNode;\r
-       IMG_BOOL        bSGXInitComplete;\r
-#if !defined(__linux__)\r
-       IMG_BOOL        bSGXClocksEnabled;\r
-#endif\r
-       IMG_UINT32      ui32SrcClockDiv;\r
-#if defined(__linux__)\r
-       IMG_BOOL        bSysClocksOneTimeInit;\r
-       IMG_BOOL        bConstraintNotificationsEnabled;\r
-       atomic_t        sSGXClocksEnabled;\r
-       spinlock_t      sPowerLock;\r
-       atomic_t        sPowerLockCPU;\r
-       spinlock_t      sNotifyLock;\r
-       atomic_t        sNotifyLockCPU;\r
-       IMG_BOOL        bCallVDD2PostFunc;\r
-\r
-       struct clk      *psCORE_CK;\r
-       struct clk      *psSGX_FCK;\r
-       struct clk      *psSGX_ICK;\r
-       struct clk      *psMPU_CK;\r
-#if defined(DEBUG) || defined(TIMING)\r
-       struct clk      *psGPT11_FCK;\r
-       struct clk      *psGPT11_ICK;\r
-#endif\r
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22))              \r
-       struct constraint_handle *pVdd2Handle;\r
-#endif \r
-#endif \r
-} SYS_SPECIFIC_DATA;\r
-\r
-extern SYS_SPECIFIC_DATA *gpsSysSpecificData;\r
-\r
-#if defined(SYS_CUSTOM_POWERLOCK_WRAP)\r
-IMG_BOOL WrapSystemPowerChange(SYS_SPECIFIC_DATA *psSysSpecData);\r
-IMG_VOID UnwrapSystemPowerChange(SYS_SPECIFIC_DATA *psSysSpecData);\r
-#endif\r
-\r
-#if defined(__cplusplus)\r
-}\r
-#endif\r
-\r
-#endif \r
-\r
-\r
+/**********************************************************************
+ *
+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ * 
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ * 
+ * This program is distributed in the hope it will be useful but, except 
+ * as otherwise stated in writing, without any warranty; without even the 
+ * implied warranty of merchantability or fitness for a particular purpose. 
+ * See the GNU General Public License for more details.
+ * 
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * 
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
+ *
+ ******************************************************************************/
+
+#if !defined(__SYSLOCAL_H__)
+#define __SYSLOCAL_H__
+
+#if defined(__linux__)
+
+#include <linux/version.h>
+#include <linux/clk.h>
+#if defined(PVR_LINUX_USING_WORKQUEUES)
+#include <linux/mutex.h>
+#else
+#include <linux/spinlock.h>
+#endif
+#include <asm/atomic.h>
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26))
+#include <linux/semaphore.h>
+#include <linux/resource.h>
+#else 
+#include <asm/semaphore.h>
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22))
+#include <asm/arch/resource.h>
+#endif 
+#endif 
+
+#endif 
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+IMG_CHAR *SysCreateVersionString(IMG_CPU_PHYADDR sRegRegion);
+
+IMG_VOID DisableSystemClocks(SYS_DATA *psSysData);
+PVRSRV_ERROR EnableSystemClocks(SYS_DATA *psSysData);
+
+IMG_VOID DisableSGXClocks(SYS_DATA *psSysData);
+PVRSRV_ERROR EnableSGXClocks(SYS_DATA *psSysData);
+
+#define SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS     0x00000001
+#define SYS_SPECIFIC_DATA_ENABLE_LISR          0x00000002
+#define SYS_SPECIFIC_DATA_ENABLE_MISR          0x00000004
+#define SYS_SPECIFIC_DATA_ENABLE_ENVDATA       0x00000008
+#define SYS_SPECIFIC_DATA_ENABLE_LOCDEV                0x00000010
+#define SYS_SPECIFIC_DATA_ENABLE_REGDEV                0x00000020
+#define SYS_SPECIFIC_DATA_ENABLE_PDUMPINIT     0x00000040
+#define SYS_SPECIFIC_DATA_ENABLE_INITDEV       0x00000080
+#define SYS_SPECIFIC_DATA_ENABLE_LOCATEDEV     0x00000100
+
+#define        SYS_SPECIFIC_DATA_PM_UNINSTALL_LISR     0x00000200
+#define        SYS_SPECIFIC_DATA_PM_DISABLE_SYSCLOCKS  0x00000400
+#define SYS_SPECIFIC_DATA_ENABLE_OCPREGS       0x00000800
+
+#define        SYS_SPECIFIC_DATA_SET(psSysSpecData, flag) ((IMG_VOID)((psSysSpecData)->ui32SysSpecificData |= (flag)))
+
+#define        SYS_SPECIFIC_DATA_CLEAR(psSysSpecData, flag) ((IMG_VOID)((psSysSpecData)->ui32SysSpecificData &= ~(flag)))
+
+#define        SYS_SPECIFIC_DATA_TEST(psSysSpecData, flag) (((psSysSpecData)->ui32SysSpecificData & (flag)) != 0)
+typedef struct _SYS_SPECIFIC_DATA_TAG_
+{
+       IMG_UINT32      ui32SysSpecificData;
+       PVRSRV_DEVICE_NODE *psSGXDevNode;
+       IMG_BOOL        bSGXInitComplete;
+#if !defined(__linux__)
+       IMG_BOOL        bSGXClocksEnabled;
+#endif
+       IMG_UINT32      ui32SrcClockDiv;
+#if defined(__linux__)
+       IMG_BOOL        bSysClocksOneTimeInit;
+       atomic_t        sSGXClocksEnabled;
+#if defined(PVR_LINUX_USING_WORKQUEUES)
+       struct mutex    sPowerLock;
+#else
+       IMG_BOOL        bConstraintNotificationsEnabled;
+       spinlock_t      sPowerLock;
+       atomic_t        sPowerLockCPU;
+       spinlock_t      sNotifyLock;
+       atomic_t        sNotifyLockCPU;
+       IMG_BOOL        bCallVDD2PostFunc;
+#endif
+       struct clk      *psCORE_CK;
+       struct clk      *psSGX_FCK;
+       struct clk      *psSGX_ICK;
+       struct clk      *psMPU_CK;
+#if defined(DEBUG) || defined(TIMING)
+       struct clk      *psGPT11_FCK;
+       struct clk      *psGPT11_ICK;
+#endif
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22))              
+       struct constraint_handle *pVdd2Handle;
+#endif 
+#endif 
+} SYS_SPECIFIC_DATA;
+
+extern SYS_SPECIFIC_DATA *gpsSysSpecificData;
+
+#if defined(SYS_CUSTOM_POWERLOCK_WRAP)
+IMG_BOOL WrapSystemPowerChange(SYS_SPECIFIC_DATA *psSysSpecData);
+IMG_VOID UnwrapSystemPowerChange(SYS_SPECIFIC_DATA *psSysSpecData);
+#endif
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif 
+
+
similarity index 97%
rename from services4/system/ti8168/sysutils.c
rename to services4/system/omap4/sysutils.c
index cb3cee7..02c9831 100644 (file)
@@ -1,30 +1,29 @@
-/**********************************************************************\r
- *\r
- * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.\r
- * \r
- * This program is free software; you can redistribute it and/or modify it\r
- * under the terms and conditions of the GNU General Public License,\r
- * version 2, as published by the Free Software Foundation.\r
- * \r
- * This program is distributed in the hope it will be useful but, except \r
- * as otherwise stated in writing, without any warranty; without even the \r
- * implied warranty of merchantability or fitness for a particular purpose. \r
- * See the GNU General Public License for more details.\r
- * \r
- * You should have received a copy of the GNU General Public License along with\r
- * this program; if not, write to the Free Software Foundation, Inc.,\r
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.\r
- * \r
- * The full GNU General Public License is included in this distribution in\r
- * the file called "COPYING".\r
- *\r
- * Contact Information:\r
- * Imagination Technologies Ltd. <gpl-support@imgtec.com>\r
- * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK \r
- *\r
- ******************************************************************************/\r
-\r
-#if defined(__linux__)\r
-#include "sysutils_linux.c"\r
-#endif\r
-\r
+/**********************************************************************
+ *
+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ * 
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ * 
+ * This program is distributed in the hope it will be useful but, except 
+ * as otherwise stated in writing, without any warranty; without even the 
+ * implied warranty of merchantability or fitness for a particular purpose. 
+ * See the GNU General Public License for more details.
+ * 
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * 
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
+ *
+ ******************************************************************************/
+
+#if defined(__linux__)
+#include "sysutils_linux.c"
+#endif
 #error "PVR_LINUX_USING_WORKQUEUES must be defined"
 #endif
 
+#if ((defined(DEBUG) || defined(TIMING)) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34)))
+//#define      PVR_OMAP4_TIMING_PRCM
+#endif
+
 #define        ONE_MHZ 1000000
 #define        HZ_TO_MHZ(m) ((m) / ONE_MHZ)
 
@@ -122,7 +126,11 @@ IMG_VOID SysGetSGXTimingInformation(SGX_TIMING_INFORMATION *psTimingInfo)
 #else
        PVR_ASSERT(atomic_read(&gpsSysSpecificData->sSGXClocksEnabled) != 0);
 
+#if defined(OMAP4_PRCM_ENABLE)
        rate = clk_get_rate(gpsSysSpecificData->psSGX_FCK);
+#else
+       rate = SYS_SGX_CLOCK_SPEED;
+#endif
        PVR_ASSERT(rate != 0);
 #endif
        psTimingInfo->ui32CoreClockSpeed = rate;
@@ -140,9 +148,11 @@ PVRSRV_ERROR EnableSGXClocks(SYS_DATA *psSysData)
 {
 #if !defined(NO_HARDWARE)
        SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
+#if defined(OMAP4_PRCM_ENABLE)
        long lNewRate;
        long lRate;
        IMG_INT res;
+#endif
 
        
        if (atomic_read(&psSysSpecData->sSGXClocksEnabled) != 0)
@@ -152,9 +162,10 @@ PVRSRV_ERROR EnableSGXClocks(SYS_DATA *psSysData)
 
        PVR_DPF((PVR_DBG_MESSAGE, "EnableSGXClocks: Enabling SGX Clocks"));
 
+#if defined(OMAP4_PRCM_ENABLE)
+
 #if defined(DEBUG)
        {
-
                IMG_UINT32 rate = clk_get_rate(psSysSpecData->psMPU_CK);
                PVR_DPF((PVR_DBG_MESSAGE, "EnableSGXClocks: CPU Clock is %dMhz", HZ_TO_MHZ(rate)));
        }
@@ -201,6 +212,8 @@ PVRSRV_ERROR EnableSGXClocks(SYS_DATA *psSysData)
        }
 #endif
 
+#endif 
+
        
        atomic_set(&psSysSpecData->sSGXClocksEnabled, 1);
 
@@ -224,6 +237,7 @@ IMG_VOID DisableSGXClocks(SYS_DATA *psSysData)
 
        PVR_DPF((PVR_DBG_MESSAGE, "DisableSGXClocks: Disabling SGX Clocks"));
 
+#if defined(OMAP4_PRCM_ENABLE)
        if (psSysSpecData->psSGX_ICK)
        {
                clk_disable(psSysSpecData->psSGX_ICK);
@@ -233,6 +247,7 @@ IMG_VOID DisableSGXClocks(SYS_DATA *psSysData)
        {
                clk_disable(psSysSpecData->psSGX_FCK);
        }
+#endif 
 
        
        atomic_set(&psSysSpecData->sSGXClocksEnabled, 0);
@@ -244,18 +259,23 @@ IMG_VOID DisableSGXClocks(SYS_DATA *psSysData)
 
 PVRSRV_ERROR EnableSystemClocks(SYS_DATA *psSysData)
 {
+
+       PVR_TRACE(("Enter Enabling System Clocks"));
+       PVRSRV_ERROR eError;
        SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
+#if (defined(OMAP4_PRCM_ENABLE) || defined(PVR_OMAP4_TIMING_PRCM))
        struct clk *psCLK;
        IMG_INT res;
-       PVRSRV_ERROR eError;
-
-#if defined(DEBUG) || defined(TIMING)
-       IMG_INT rate;
+#endif
+#if defined(PVR_OMAP4_TIMING_PRCM)
        struct clk *sys_ck;
+       IMG_INT rate;
+#endif
+
+#if (defined(DEBUG) || defined(TIMING))
        IMG_CPU_PHYADDR     TimerRegPhysBase;
        IMG_HANDLE hTimerEnable;
        IMG_UINT32 *pui32TimerEnable;
-
 #endif 
 
        PVR_TRACE(("EnableSystemClocks: Enabling System Clocks"));
@@ -266,6 +286,7 @@ PVRSRV_ERROR EnableSystemClocks(SYS_DATA *psSysData)
 
                atomic_set(&psSysSpecData->sSGXClocksEnabled, 0);
 
+#if defined(OMAP4_PRCM_ENABLE)
                psCLK = clk_get(NULL, SGX_PARENT_CLOCK);
                if (IS_ERR(psCLK))
                {
@@ -305,13 +326,15 @@ PVRSRV_ERROR EnableSystemClocks(SYS_DATA *psSysData)
                        PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't set SGX parent clock (%d)", res));
                        goto ExitError;
                }
+#endif 
 
                psSysSpecData->bSysClocksOneTimeInit = IMG_TRUE;
        }
 
-#if defined(DEBUG) || defined(TIMING)
-       
-       psCLK = clk_get(NULL, "gpt11_fck");
+#if (defined(DEBUG) || defined(TIMING))
+
+#if defined(PVR_OMAP4_TIMING_PRCM)
+psCLK = clk_get(NULL, "timer11_fck");
        if (IS_ERR(psCLK))
        {
                PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get GPTIMER11 functional clock"));
@@ -319,21 +342,23 @@ PVRSRV_ERROR EnableSystemClocks(SYS_DATA *psSysData)
        }
        psSysSpecData->psGPT11_FCK = psCLK;
 
-       psCLK = clk_get(NULL, "gpt11_ick");
+       PVR_TRACE(("EnableSystemClocks: After Timer 11 clk_get"));
+/*     psCLK = clk_get(NULL, "gpt11_ick");
        if (IS_ERR(psCLK))
        {
                PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get GPTIMER11 interface clock"));
                goto ExitUnRegisterConstraintNotifications;
        }
        psSysSpecData->psGPT11_ICK = psCLK;
-
-       sys_ck = clk_get(NULL, "sys_ck");
+*/
+       sys_ck = clk_get(NULL, "sys_clkin_ck");
        if (IS_ERR(sys_ck))
        {
                PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get System clock"));
                goto ExitUnRegisterConstraintNotifications;
        }
 
+       PVR_TRACE(("EnableSystemClocks: After system clk_get"));
        if(clk_get_parent(psSysSpecData->psGPT11_FCK) != sys_ck)
        {
                PVR_TRACE(("Setting GPTIMER11 parent to System Clock"));
@@ -345,6 +370,7 @@ PVRSRV_ERROR EnableSystemClocks(SYS_DATA *psSysData)
                }
        }
 
+       PVR_TRACE(("EnableSystemClocks: After set parent "));
        rate = clk_get_rate(psSysSpecData->psGPT11_FCK);
        PVR_TRACE(("GPTIMER11 clock is %dMHz", HZ_TO_MHZ(rate)));
 
@@ -355,42 +381,43 @@ PVRSRV_ERROR EnableSystemClocks(SYS_DATA *psSysData)
                goto ExitUnRegisterConstraintNotifications;
        }
 
-       res = clk_enable(psSysSpecData->psGPT11_ICK);
+       PVR_TRACE(("EnableSystemClocks: After clk enable "));
+/*     res = clk_enable(psSysSpecData->psGPT11_ICK);
        if (res < 0)
        {
                PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't enable GPTIMER11 interface clock (%d)", res));
                goto ExitDisableGPT11FCK;
        }
+*/
+#endif 
 
-       
-       TimerRegPhysBase.uiAddr = SYS_TI81xx_GP7TIMER_TSICR_SYS_PHYS_BASE;
+       TimerRegPhysBase.uiAddr = SYS_OMAP4430_GP11TIMER_TSICR_SYS_PHYS_BASE;
        pui32TimerEnable = OSMapPhysToLin(TimerRegPhysBase,
                   4,
                   PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
                   &hTimerEnable);
 
+       PVR_TRACE(("EnableSystemClocks: After OSMapPhystoLin "));
        if (pui32TimerEnable == IMG_NULL)
        {
                PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: OSMapPhysToLin failed"));
                goto ExitDisableGPT11ICK;
        }
 
-       rate = *pui32TimerEnable;
-       if(!(rate & 4))
+       if(!(*pui32TimerEnable & 4))
        {
                PVR_TRACE(("Setting GPTIMER11 mode to posted (currently is non-posted)"));
 
                
-               *pui32TimerEnable = rate | 4;
+               *pui32TimerEnable |= 4;
        }
 
        OSUnMapPhysToLin(pui32TimerEnable,
                    4,
                    PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
                    hTimerEnable);
-
        
-       TimerRegPhysBase.uiAddr = SYS_TI81xx_GP7TIMER_ENABLE_SYS_PHYS_BASE;
+       TimerRegPhysBase.uiAddr = SYS_OMAP4430_GP11TIMER_ENABLE_SYS_PHYS_BASE;
        pui32TimerEnable = OSMapPhysToLin(TimerRegPhysBase,
                   4,
                   PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
@@ -404,25 +431,27 @@ PVRSRV_ERROR EnableSystemClocks(SYS_DATA *psSysData)
 
        
        *pui32TimerEnable = 3;
-
        OSUnMapPhysToLin(pui32TimerEnable,
                    4,
                    PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
                    hTimerEnable);
-
-#endif 
-
+#endif
+       PVR_TRACE(("Exit Enabling System Clocks"));
        eError = PVRSRV_OK;
        goto Exit;
 
-#if defined(DEBUG) || defined(TIMING)
+#if (defined(DEBUG) || defined(TIMING))
 ExitDisableGPT11ICK:
-       clk_disable(psSysSpecData->psGPT11_ICK);
+#if defined(PVR_OMAP4_TIMING_PRCM)
+//     clk_disable(psSysSpecData->psGPT11_ICK);
 ExitDisableGPT11FCK:
        clk_disable(psSysSpecData->psGPT11_FCK);
 ExitUnRegisterConstraintNotifications:
 #endif 
+#endif 
+#if defined(OMAP4_PRCM_ENABLE)
 ExitError:
+#endif
        eError = PVRSRV_ERROR_DISABLE_CLOCK_FAILURE;
 Exit:
        return eError;
@@ -430,8 +459,10 @@ Exit:
 
 IMG_VOID DisableSystemClocks(SYS_DATA *psSysData)
 {
-#if defined(DEBUG) || defined(TIMING)
+#if (defined(DEBUG) || defined(TIMING))
+#if defined(PVR_OMAP4_TIMING_PRCM)
        SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
+#endif
        IMG_CPU_PHYADDR TimerRegPhysBase;
        IMG_HANDLE hTimerDisable;
        IMG_UINT32 *pui32TimerDisable;
@@ -442,14 +473,12 @@ IMG_VOID DisableSystemClocks(SYS_DATA *psSysData)
        
        DisableSGXClocks(psSysData);
 
-#if defined(DEBUG) || defined(TIMING)
-       
-       TimerRegPhysBase.uiAddr = SYS_TI81xx_GP7TIMER_ENABLE_SYS_PHYS_BASE;
+#if (defined(DEBUG) || defined(TIMING))
+       TimerRegPhysBase.uiAddr = SYS_OMAP4430_GP11TIMER_ENABLE_SYS_PHYS_BASE;
        pui32TimerDisable = OSMapPhysToLin(TimerRegPhysBase,
                                4,
                                PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
                                &hTimerDisable);
-
        if (pui32TimerDisable == IMG_NULL)
        {
                PVR_DPF((PVR_DBG_ERROR, "DisableSystemClocks: OSMapPhysToLin failed"));
@@ -463,10 +492,10 @@ IMG_VOID DisableSystemClocks(SYS_DATA *psSysData)
                                PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
                                hTimerDisable);
        }
-
-       clk_disable(psSysSpecData->psGPT11_ICK);
+#if defined(PVR_OMAP4_TIMING_PRCM)
+//     clk_disable(psSysSpecData->psGPT11_ICK);
 
        clk_disable(psSysSpecData->psGPT11_FCK);
-
+#endif 
 #endif 
 }
diff --git a/services4/system/ti8168/sysinfo.h b/services4/system/ti8168/sysinfo.h
deleted file mode 100644 (file)
index 88e3096..0000000
+++ /dev/null
@@ -1,98 +0,0 @@
-/**********************************************************************\r
- *\r
- * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.\r
- * \r
- * This program is free software; you can redistribute it and/or modify it\r
- * under the terms and conditions of the GNU General Public License,\r
- * version 2, as published by the Free Software Foundation.\r
- * \r
- * This program is distributed in the hope it will be useful but, except \r
- * as otherwise stated in writing, without any warranty; without even the \r
- * implied warranty of merchantability or fitness for a particular purpose. \r
- * See the GNU General Public License for more details.\r
- * \r
- * You should have received a copy of the GNU General Public License along with\r
- * this program; if not, write to the Free Software Foundation, Inc.,\r
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.\r
- * \r
- * The full GNU General Public License is included in this distribution in\r
- * the file called "COPYING".\r
- *\r
- * Contact Information:\r
- * Imagination Technologies Ltd. <gpl-support@imgtec.com>\r
- * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK \r
- *\r
- ******************************************************************************/\r
-\r
-#if !defined(__SYSINFO_H__)\r
-#define __SYSINFO_H__\r
-\r
-#define MAX_HW_TIME_US                         (500000)\r
-#define WAIT_TRY_COUNT                         (10000)\r
-\r
-typedef enum _SYS_DEVICE_TYPE_\r
-{\r
-       SYS_DEVICE_SGX                                          = 0,\r
-\r
-       SYS_DEVICE_FORCE_I16                            = 0x7fff\r
-\r
-} SYS_DEVICE_TYPE;\r
-\r
-#define SYS_DEVICE_COUNT 3 \r
-\r
-#define PRM_REG32(offset)        (offset)\r
-#define CM_REG32(offset)         (offset)\r
-\r
-#define CM_FCLKEN_SGX          CM_REG32(0xB00)\r
-#define                CM_FCLKEN_SGX_EN_3D                                     0x00000002\r
-\r
-#define CM_ICLKEN_SGX          CM_REG32(0xB10)\r
-#define                CM_ICLKEN_SGX_EN_SGX                            0x00000001\r
-\r
-#define CM_IDLEST_SGX          CM_REG32(0xB20)\r
-#define                CM_IDLEST_SGX_ST_SGX                            0x00000001\r
-\r
-#define CM_CLKSEL_SGX          CM_REG32(0xB40)\r
-#define                CM_CLKSEL_SGX_MASK                                      0x0000000f\r
-#define                CM_CLKSEL_SGX_L3DIV3                            0x00000000\r
-#define                CM_CLKSEL_SGX_L3DIV4                            0x00000001\r
-#define                CM_CLKSEL_SGX_L3DIV6                            0x00000002\r
-#define                CM_CLKSEL_SGX_96M                                       0x00000003\r
-\r
-#define CM_SLEEPDEP_SGX                CM_REG32(0xB44)\r
-#define CM_CLKSTCTRL_SGX       CM_REG32(0xB48)\r
-#define        CM_CLKSTCTRL_SGX_AUTOSTATE                      0x00008001\r
-\r
-#define CM_CLKSTST_SGX         CM_REG32(0xB4C)\r
-#define        CM_CLKSTST_SGX_STATUS_VALID                     0x00000001\r
-\r
-#define RM_RSTST_SGX           PRM_REG32(0xB58)\r
-#define        RM_RSTST_SGX_RST_MASK                           0x0000000F\r
-#define        RM_RSTST_SGX_COREDOMAINWKUP_RST         0x00000008\r
-#define        RM_RSTST_SGX_DOMAINWKUP_RST                     0x00000004\r
-#define        RM_RSTST_SGX_GLOBALWARM_RST                     0x00000002\r
-#define        RM_RSTST_SGX_GLOBALCOLD_RST                     0x00000001\r
-\r
-#define PM_WKDEP_SGX           PRM_REG32(0xBC8)\r
-#define        PM_WKDEP_SGX_EN_WAKEUP                          0x00000010\r
-#define        PM_WKDEP_SGX_EN_MPU                                     0x00000002\r
-#define        PM_WKDEP_SGX_EN_CORE                            0x00000001\r
-\r
-#define PM_PWSTCTRL_SGX                PRM_REG32(0xBE0)\r
-#define                PM_PWSTCTRL_SGX_POWERSTATE_MASK         0x00000003\r
-#define                        PM_PWSTCTRL_SGX_OFF                             0x00000000\r
-#define                        PM_PWSTCTRL_SGX_RETENTION               0x00000001\r
-#define                        PM_PWSTCTRL_SGX_ON                              0x00000003\r
-\r
-#define PM_PWSTST_SGX          PRM_REG32(0xBE4)\r
-#define                PM_PWSTST_SGX_INTRANSITION                      0x00100000\r
-#define                PM_PWSTST_SGX_CLKACTIVITY                       0x00080000\r
-#define                PM_PWSTST_SGX_POWERSTATE_MASK           0x00000003\r
-#define                        PM_PWSTST_SGX_OFF                               0x00000003\r
-#define                        PM_PWSTST_SGX_RETENTION                 0x00000001\r
-#define                        PM_PWSTST_SGX_ON                                0x00000000\r
-\r
-#define PM_PREPWSTST_SGX       PRM_REG32(0xBE8)\r
-\r
-\r
-#endif \r
diff --git a/services4/system/ti8168/sysutils_linux.c b/services4/system/ti8168/sysutils_linux.c
deleted file mode 100644 (file)
index 17be8f3..0000000
+++ /dev/null
@@ -1,738 +0,0 @@
-/**********************************************************************\r
- *\r
- * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.\r
- *\r
- * This program is free software; you can redistribute it and/or modify it\r
- * under the terms and conditions of the GNU General Public License,\r
- * version 2, as published by the Free Software Foundation.\r
- *\r
- * This program is distributed in the hope it will be useful but, except\r
- * as otherwise stated in writing, without any warranty; without even the\r
- * implied warranty of merchantability or fitness for a particular purpose.\r
- * See the GNU General Public License for more details.\r
- *\r
- * You should have received a copy of the GNU General Public License along with\r
- * this program; if not, write to the Free Software Foundation, Inc.,\r
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.\r
- *\r
- * The full GNU General Public License is included in this distribution in\r
- * the file called "COPYING".\r
- *\r
- * Contact Information:\r
- * Imagination Technologies Ltd. <gpl-support@imgtec.com>\r
- * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK\r
- *\r
- ******************************************************************************/\r
-\r
-#include <linux/version.h>\r
-#include <linux/clk.h>\r
-#include <linux/err.h>\r
-#include <linux/hardirq.h>\r
-#include <linux/spinlock.h>\r
-#include <asm/bug.h>\r
-#include <linux/platform_device.h>\r
-\r
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,31))\r
-#include <linux/semaphore.h>\r
-//#include <plat/resource.h>\r
-#include <plat/omap-pm.h>\r
-#else\r
-\r
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26))\r
-#include <linux/semaphore.h>\r
-#include <mach/resource.h>\r
-#include <mach/omap-pm.h>\r
-#else\r
-#include <asm/semaphore.h>\r
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22))\r
-#include <asm/arch/resource.h>\r
-#endif\r
-#endif\r
-#endif\r
-\r
-#if    (LINUX_VERSION_CODE >  KERNEL_VERSION(2,6,27)) && \\r
-       (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,29))\r
-#define CONSTRAINT_NOTIFICATIONS\r
-#endif\r
-#include "sgxdefs.h"\r
-#include "services_headers.h"\r
-#include "sysinfo.h"\r
-#include "sgxapi_km.h"\r
-#include "sysconfig.h"\r
-#include "sgxinfokm.h"\r
-#include "syslocal.h"\r
-void set_vdd2_constraint(void);\r
-void remove_vdd2_constraint(void);\r
-#define        ONE_MHZ 1000000\r
-#define        HZ_TO_MHZ(m) ((m) / ONE_MHZ)\r
-\r
-#if defined(SUPPORT_OMAP3630_SGXFCLK_96M)\r
-#define SGX_PARENT_CLOCK "cm_96m_fck"\r
-#elif defined(SUPPORT_OMAP3630_SGXFCLK_192M)\r
-#define SGX_PARENT_CLOCK "omap_192m_alwon_ck"\r
-#elif defined(SUPPORT_OMAP3630_SGXFCLK_corex2)\r
-#define SGX_PARENT_CLOCK "corex2_fck"\r
-#else\r
-#define SGX_PARENT_CLOCK "core_ck"\r
-#endif\r
-\r
-#if !defined(PDUMP) && !defined(NO_HARDWARE)\r
-struct sgx_platform_data {\r
-          void(*set_min_bus_tput)(struct device *dev, u8 agent_id, unsigned long r);\r
-};\r
-\r
-static struct sgx_platform_data pdata = {\r
-\r
-       //.set_min_bus_tput = &omap_pm_set_min_bus_tput,\r
-          .set_min_bus_tput = NULL,\r
-};\r
-\r
-static struct platform_device sgx_dev = {\r
-         .name = "sgx_dev",\r
-         .id = 1,\r
-         .dev.platform_data = &pdata,\r
-};\r
-void set_vdd2_constraint(void)\r
-{\r
-       if(pdata.set_min_bus_tput){\r
-               pdata.set_min_bus_tput(&(sgx_dev.dev), OCP_INITIATOR_AGENT,800000);\r
-       }\r
-}\r
-\r
-void remove_vdd2_constraint(void)\r
-{\r
-       if(pdata.set_min_bus_tput)\r
-               pdata.set_min_bus_tput(&(sgx_dev.dev), OCP_INITIATOR_AGENT, 0);\r
-\r
-}\r
-#endif\r
-#if !defined(PDUMP) && !defined(NO_HARDWARE)\r
-static IMG_BOOL PowerLockWrappedOnCPU(SYS_SPECIFIC_DATA *psSysSpecData)\r
-{\r
-       IMG_INT iCPU;\r
-       IMG_BOOL bLocked = IMG_FALSE;\r
-\r
-       if (!in_interrupt())\r
-       {\r
-               iCPU = get_cpu();\r
-               bLocked = (iCPU == atomic_read(&psSysSpecData->sPowerLockCPU));\r
-\r
-               put_cpu();\r
-       }\r
-\r
-       return bLocked;\r
-}\r
-\r
-static IMG_VOID PowerLockWrap(SYS_SPECIFIC_DATA *psSysSpecData)\r
-{\r
-       IMG_INT iCPU;\r
-\r
-       if (!in_interrupt())\r
-       {\r
-\r
-               iCPU = get_cpu();\r
-\r
-\r
-               PVR_ASSERT(iCPU != -1);\r
-\r
-               PVR_ASSERT(!PowerLockWrappedOnCPU(psSysSpecData));\r
-\r
-               spin_lock(&psSysSpecData->sPowerLock);\r
-\r
-               atomic_set(&psSysSpecData->sPowerLockCPU, iCPU);\r
-       }\r
-}\r
-\r
-static IMG_VOID PowerLockUnwrap(SYS_SPECIFIC_DATA *psSysSpecData)\r
-{\r
-       if (!in_interrupt())\r
-       {\r
-               PVR_ASSERT(PowerLockWrappedOnCPU(psSysSpecData));\r
-\r
-               atomic_set(&psSysSpecData->sPowerLockCPU, -1);\r
-\r
-               spin_unlock(&psSysSpecData->sPowerLock);\r
-\r
-               put_cpu();\r
-       }\r
-}\r
-\r
-PVRSRV_ERROR SysPowerLockWrap(SYS_DATA *psSysData)\r
-{\r
-       SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;\r
-\r
-       PowerLockWrap(psSysSpecData);\r
-\r
-       return PVRSRV_OK;\r
-}\r
-\r
-IMG_VOID SysPowerLockUnwrap(SYS_DATA *psSysData)\r
-{\r
-       SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;\r
-\r
-       PowerLockUnwrap(psSysSpecData);\r
-}\r
-#else\r
-static IMG_BOOL PowerLockWrappedOnCPU(SYS_SPECIFIC_DATA unref__ *psSysSpecData)\r
-{\r
-       return IMG_FALSE;\r
-}\r
-\r
-static IMG_VOID PowerLockWrap(SYS_SPECIFIC_DATA unref__ *psSysSpecData)\r
-{\r
-}\r
-\r
-static IMG_VOID PowerLockUnwrap(SYS_SPECIFIC_DATA unref__ *psSysSpecData)\r
-{\r
-}\r
-\r
-PVRSRV_ERROR SysPowerLockWrap(SYS_DATA unref__ *psSysData)\r
-{\r
-       return PVRSRV_OK;\r
-}\r
-\r
-IMG_VOID SysPowerLockUnwrap(SYS_DATA unref__ *psSysData)\r
-{\r
-}\r
-#endif\r
-\r
-IMG_BOOL WrapSystemPowerChange(SYS_SPECIFIC_DATA *psSysSpecData)\r
-{\r
-       IMG_BOOL bPowerLock = PowerLockWrappedOnCPU(psSysSpecData);\r
-\r
-       if (bPowerLock)\r
-       {\r
-               PowerLockUnwrap(psSysSpecData);\r
-       }\r
-\r
-       return bPowerLock;\r
-}\r
-\r
-IMG_VOID UnwrapSystemPowerChange(SYS_SPECIFIC_DATA *psSysSpecData)\r
-{\r
-       PowerLockWrap(psSysSpecData);\r
-}\r
-\r
-static inline IMG_UINT32 scale_by_rate(IMG_UINT32 val, IMG_UINT32 rate1, IMG_UINT32 rate2)\r
-{\r
-       if (rate1 >= rate2)\r
-       {\r
-               return val * (rate1 / rate2);\r
-       }\r
-\r
-       return val / (rate2 / rate1);\r
-}\r
-\r
-static inline IMG_UINT32 scale_prop_to_SGX_clock(IMG_UINT32 val, IMG_UINT32 rate)\r
-{\r
-       return scale_by_rate(val, rate, SYS_SGX_CLOCK_SPEED);\r
-}\r
-\r
-static inline IMG_UINT32 scale_inv_prop_to_SGX_clock(IMG_UINT32 val, IMG_UINT32 rate)\r
-{\r
-       return scale_by_rate(val, SYS_SGX_CLOCK_SPEED, rate);\r
-}\r
-\r
-IMG_VOID SysGetSGXTimingInformation(SGX_TIMING_INFORMATION *psTimingInfo)\r
-{\r
-       IMG_UINT32 rate;\r
-\r
-#if defined(NO_HARDWARE)\r
-       rate = SYS_SGX_CLOCK_SPEED;\r
-#else\r
-       PVR_ASSERT(atomic_read(&gpsSysSpecificData->sSGXClocksEnabled) != 0);\r
-\r
-       rate = clk_get_rate(gpsSysSpecificData->psSGX_FCK);\r
-       PVR_ASSERT(rate != 0);\r
-#endif\r
-       psTimingInfo->ui32CoreClockSpeed = rate;\r
-       psTimingInfo->ui32HWRecoveryFreq = scale_prop_to_SGX_clock(SYS_SGX_HWRECOVERY_TIMEOUT_FREQ, rate);\r
-       psTimingInfo->ui32uKernelFreq = scale_prop_to_SGX_clock(SYS_SGX_PDS_TIMER_FREQ, rate);\r
-       psTimingInfo->ui32ActivePowManLatencyms = SYS_SGX_ACTIVE_POWER_LATENCY_MS;\r
-}\r
-\r
-#if defined(CONSTRAINT_NOTIFICATIONS)\r
-#if !defined(SGX_DYNAMIC_TIMING_INFO)\r
-#error "SGX_DYNAMIC_TIMING_INFO must be defined for this platform"\r
-#endif\r
-\r
-#if !defined(PDUMP) && !defined(NO_HARDWARE)\r
-static inline IMG_BOOL ConstraintNotificationsEnabled(SYS_SPECIFIC_DATA *psSysSpecData)\r
-{\r
-       return (atomic_read(&psSysSpecData->sSGXClocksEnabled) != 0) && psSysSpecData->bSGXInitComplete && psSysSpecData->bConstraintNotificationsEnabled;\r
-\r
-}\r
-\r
-static IMG_BOOL NotifyLockedOnCPU(SYS_SPECIFIC_DATA *psSysSpecData)\r
-{\r
-       IMG_INT iCPU = get_cpu();\r
-       IMG_BOOL bLocked = (iCPU == atomic_read(&psSysSpecData->sNotifyLockCPU));\r
-\r
-       put_cpu();\r
-\r
-       return bLocked;\r
-}\r
-\r
-static IMG_VOID NotifyLock(SYS_SPECIFIC_DATA *psSysSpecData)\r
-{\r
-       IMG_INT iCPU;\r
-\r
-       BUG_ON(in_interrupt());\r
-\r
-\r
-       iCPU = get_cpu();\r
-\r
-\r
-       PVR_ASSERT(iCPU != -1);\r
-\r
-       PVR_ASSERT(!NotifyLockedOnCPU(psSysSpecData));\r
-\r
-       spin_lock(&psSysSpecData->sNotifyLock);\r
-\r
-       atomic_set(&psSysSpecData->sNotifyLockCPU, iCPU);\r
-\r
-}\r
-\r
-static IMG_VOID NotifyUnlock(SYS_SPECIFIC_DATA *psSysSpecData)\r
-{\r
-       PVR_ASSERT(NotifyLockedOnCPU(psSysSpecData));\r
-\r
-       atomic_set(&psSysSpecData->sNotifyLockCPU, -1);\r
-\r
-       spin_unlock(&psSysSpecData->sNotifyLock);\r
-\r
-       put_cpu();\r
-}\r
-\r
-static IMG_INT VDD2PostFunc(struct notifier_block *n, IMG_UINT32 event, IMG_VOID *ptr)\r
-{\r
-       PVR_UNREFERENCED_PARAMETER(n);\r
-       PVR_UNREFERENCED_PARAMETER(event);\r
-       PVR_UNREFERENCED_PARAMETER(ptr);\r
-\r
-       if (in_interrupt())\r
-       {\r
-               PVR_DPF((PVR_DBG_ERROR, "%s Called in interrupt context.  Ignoring.", __FUNCTION__));\r
-               return 0;\r
-       }\r
-\r
-\r
-       if (!NotifyLockedOnCPU(gpsSysSpecificData))\r
-       {\r
-               return 0;\r
-       }\r
-\r
-#if defined(DEBUG)\r
-       if (ConstraintNotificationsEnabled(gpsSysSpecificData))\r
-       {\r
-               IMG_UINT32 rate;\r
-\r
-               rate = clk_get_rate(gpsSysSpecificData->psSGX_FCK);\r
-\r
-               PVR_ASSERT(rate != 0);\r
-\r
-               PVR_DPF((PVR_DBG_MESSAGE, "%s: SGX clock rate: %dMHz", __FUNCTION__, HZ_TO_MHZ(rate)));\r
-       }\r
-#endif\r
-       if (gpsSysSpecificData->bCallVDD2PostFunc)\r
-       {\r
-               PVRSRVDevicePostClockSpeedChange(gpsSysSpecificData->psSGXDevNode->sDevId.ui32DeviceIndex, IMG_TRUE, IMG_NULL);\r
-\r
-               gpsSysSpecificData->bCallVDD2PostFunc = IMG_FALSE;\r
-       }\r
-       else\r
-       {\r
-               if (ConstraintNotificationsEnabled(gpsSysSpecificData))\r
-               {\r
-                       PVR_TRACE(("%s: Not calling PVR clock speed notification functions", __FUNCTION__));\r
-               }\r
-       }\r
-\r
-       NotifyUnlock(gpsSysSpecificData);\r
-\r
-       return 0;\r
-}\r
-\r
-static IMG_INT VDD2PreFunc(struct notifier_block *n, IMG_UINT32 event, IMG_VOID *ptr)\r
-{\r
-       PVR_UNREFERENCED_PARAMETER(n);\r
-       PVR_UNREFERENCED_PARAMETER(event);\r
-       PVR_UNREFERENCED_PARAMETER(ptr);\r
-\r
-       if (in_interrupt())\r
-       {\r
-               PVR_DPF((PVR_DBG_WARNING, "%s Called in interrupt context.  Ignoring.", __FUNCTION__));\r
-               return 0;\r
-       }\r
-\r
-       if (PowerLockWrappedOnCPU(gpsSysSpecificData))\r
-       {\r
-               PVR_DPF((PVR_DBG_WARNING, "%s Called from within a power transition.  Ignoring.", __FUNCTION__));\r
-               return 0;\r
-       }\r
-\r
-       NotifyLock(gpsSysSpecificData);\r
-\r
-       PVR_ASSERT(!gpsSysSpecificData->bCallVDD2PostFunc);\r
-\r
-       if (ConstraintNotificationsEnabled(gpsSysSpecificData))\r
-       {\r
-               PVRSRV_ERROR eError;\r
-\r
-               eError = PVRSRVDevicePreClockSpeedChange(gpsSysSpecificData->psSGXDevNode->sDevId.ui32DeviceIndex, IMG_TRUE, IMG_NULL);\r
-\r
-               gpsSysSpecificData->bCallVDD2PostFunc = (eError == PVRSRV_OK);\r
-\r
-       }\r
-\r
-       return 0;\r
-}\r
-static IMG_VOID RegisterConstraintNotifications(IMG_VOID)\r
-{\r
-       PVR_TRACE(("Registering constraint notifications"));\r
-\r
-       PVR_ASSERT(!gpsSysSpecificData->bConstraintNotificationsEnabled);\r
-\r
-\r
-       NotifyLock(gpsSysSpecificData);\r
-       gpsSysSpecificData->bConstraintNotificationsEnabled = IMG_TRUE;\r
-       NotifyUnlock(gpsSysSpecificData);\r
-\r
-       PVR_TRACE(("VDD2 constraint notifications registered"));\r
-}\r
-\r
-static IMG_VOID UnRegisterConstraintNotifications(IMG_VOID)\r
-{\r
-       PVR_TRACE(("Unregistering constraint notifications"));\r
-\r
-\r
-       NotifyLock(gpsSysSpecificData);\r
-       gpsSysSpecificData->bConstraintNotificationsEnabled = IMG_FALSE;\r
-       NotifyUnlock(gpsSysSpecificData);\r
-\r
-}\r
-#else\r
-static IMG_VOID RegisterConstraintNotifications(IMG_VOID)\r
-{\r
-}\r
-\r
-static IMG_VOID UnRegisterConstraintNotifications(IMG_VOID)\r
-{\r
-}\r
-#endif\r
-#endif\r
-\r
-PVRSRV_ERROR EnableSGXClocks(SYS_DATA *psSysData)\r
-{\r
-#if !defined(NO_HARDWARE)\r
-       SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;\r
-       long lNewRate;\r
-       IMG_INT res;\r
-\r
-\r
-       if (atomic_read(&psSysSpecData->sSGXClocksEnabled) != 0)\r
-       {\r
-               return PVRSRV_OK;\r
-       }\r
-\r
-       PVR_DPF((PVR_DBG_MESSAGE, "EnableSGXClocks: Enabling SGX Clocks"));\r
-\r
-       res = clk_enable(psSysSpecData->psSGX_FCK);\r
-       if (res < 0)\r
-       {\r
-               PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: Couldn't enable SGX functional clock (%d)", res));\r
-               return PVRSRV_ERROR_GENERIC;\r
-       }\r
-\r
-#if 0 \r
-       lNewRate = clk_round_rate(psSysSpecData->psSGX_FCK, SYS_SGX_CLOCK_SPEED + ONE_MHZ);\r
-        PVR_DPF((PVR_DBG_MESSAGE, "New SGXClocks: CPU Clock is %dMhz", HZ_TO_MHZ(lNewRate)));\r
-       //PVR_DPF((PVR_DBG_WARNING, "EnableSGXClocks: New SGX Func Clk = (%d)", lNewRate));\r
-       if (lNewRate <= 0)\r
-       {\r
-               PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: Couldn't round SGX functional clock rate"));\r
-               return PVRSRV_ERROR_GENERIC;\r
-       }\r
-\r
-       res = clk_set_rate(psSysSpecData->psSGX_FCK, lNewRate);\r
-       if (res < 0)\r
-       {\r
-               PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: Couldn't set SGX function clock rate (%d)", res));\r
-               return PVRSRV_ERROR_GENERIC;\r
-       }\r
-#endif\r
-#if defined(DEBUG)\r
-       {\r
-\r
-               IMG_UINT32 rate = clk_get_rate(psSysSpecData->psSGX_FCK);\r
-               PVR_DPF((PVR_DBG_MESSAGE, "EnableSGXClocks: SGX Functional Clock is %dMhz", HZ_TO_MHZ(rate)));\r
-       }\r
-#endif\r
-\r
-       set_vdd2_constraint();\r
-\r
-       lNewRate  = clk_get_rate(psSysSpecData->psSGX_FCK);\r
-       atomic_set(&psSysSpecData->sSGXClocksEnabled, 1);\r
-       PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: Final SGX Func Clk = (%d)", lNewRate));\r
-#else\r
-       PVR_UNREFERENCED_PARAMETER(psSysData);\r
-#endif\r
-       return PVRSRV_OK;\r
-}\r
-\r
-\r
-IMG_VOID DisableSGXClocks(SYS_DATA *psSysData)\r
-{\r
-#if !defined(NO_HARDWARE)\r
-       SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;\r
-\r
-       if (atomic_read(&psSysSpecData->sSGXClocksEnabled) == 0)\r
-       {\r
-               return;\r
-       }\r
-\r
-       PVR_DPF((PVR_DBG_MESSAGE, "DisableSGXClocks: Disabling SGX Clocks"));\r
-\r
-       if (psSysSpecData->psSGX_FCK)\r
-       {\r
-               clk_disable(psSysSpecData->psSGX_FCK);\r
-       }\r
-\r
-       remove_vdd2_constraint();\r
-\r
-       atomic_set(&psSysSpecData->sSGXClocksEnabled, 0);\r
-\r
-#else\r
-       PVR_UNREFERENCED_PARAMETER(psSysData);\r
-#endif\r
-}\r
-\r
-PVRSRV_ERROR EnableSystemClocks(SYS_DATA *psSysData)\r
-{\r
-       SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;\r
-       struct clk *psCLK;\r
-       IMG_INT res;\r
-       PVRSRV_ERROR eError;\r
-       IMG_BOOL bPowerLock;\r
-\r
-#if defined(DEBUG) || defined(TIMING)\r
-       IMG_INT rate;\r
-       struct clk *sys_ck;\r
-       IMG_CPU_PHYADDR     TimerRegPhysBase;\r
-       IMG_HANDLE hTimerEnable;\r
-       IMG_UINT32 *pui32TimerEnable;\r
-\r
-#endif\r
-\r
-       PVR_TRACE(("EnableSystemClocks: Enabling System Clocks"));\r
-\r
-       if (!psSysSpecData->bSysClocksOneTimeInit)\r
-       {\r
-               bPowerLock = IMG_FALSE;\r
-\r
-               spin_lock_init(&psSysSpecData->sPowerLock);\r
-               atomic_set(&psSysSpecData->sPowerLockCPU, -1);\r
-               spin_lock_init(&psSysSpecData->sNotifyLock);\r
-               atomic_set(&psSysSpecData->sNotifyLockCPU, -1);\r
-\r
-               atomic_set(&psSysSpecData->sSGXClocksEnabled, 0);\r
-\r
-                psCLK = clk_get(NULL, "sgx_ck");\r
-               if (IS_ERR(psCLK))\r
-               {\r
-                       PVR_DPF((PVR_DBG_ERROR, "EnableSsystemClocks: Couldn't get SGX Functional Clock"));\r
-                       goto ExitError;\r
-               }\r
-               psSysSpecData->psSGX_FCK = psCLK;\r
-\r
-               psSysSpecData->bSysClocksOneTimeInit = IMG_TRUE;\r
-       }\r
-       else\r
-       {\r
-\r
-               bPowerLock = PowerLockWrappedOnCPU(psSysSpecData);\r
-               if (bPowerLock)\r
-               {\r
-                       PowerLockUnwrap(psSysSpecData);\r
-               }\r
-       }\r
-\r
-#if defined(CONSTRAINT_NOTIFICATIONS)\r
-\r
-       RegisterConstraintNotifications();\r
-#endif\r
-\r
-#if defined(DEBUG) || defined(TIMING)\r
-\r
-       psCLK = clk_get(NULL, "gpt7_fck");\r
-       if (IS_ERR(psCLK))\r
-       {\r
-               PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get GPTIMER11 functional clock"));\r
-               goto ExitUnRegisterConstraintNotifications;\r
-       }\r
-       psSysSpecData->psGPT11_FCK = psCLK;\r
-\r
-       psCLK = clk_get(NULL, "gpt7_ick");\r
-       if (IS_ERR(psCLK))\r
-       {\r
-               PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get GPTIMER11 interface clock"));\r
-               goto ExitUnRegisterConstraintNotifications;\r
-       }\r
-       psSysSpecData->psGPT11_ICK = psCLK;\r
-\r
-       rate = clk_get_rate(psSysSpecData->psGPT11_FCK);\r
-       PVR_TRACE(("GPTIMER11 clock is %dMHz", HZ_TO_MHZ(rate)));\r
-\r
-       res = clk_enable(psSysSpecData->psGPT11_FCK);\r
-       if (res < 0)\r
-       {\r
-               PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't enable GPTIMER11 functional clock (%d)", res));\r
-               goto ExitUnRegisterConstraintNotifications;\r
-       }\r
-\r
-       res = clk_enable(psSysSpecData->psGPT11_ICK);\r
-       if (res < 0)\r
-       {\r
-               PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't enable GPTIMER11 interface clock (%d)", res));\r
-               goto ExitDisableGPT11FCK;\r
-       }\r
-\r
-\r
-       TimerRegPhysBase.uiAddr = SYS_OMAP3430_GP11TIMER_TSICR_SYS_PHYS_BASE;\r
-       pui32TimerEnable = OSMapPhysToLin(TimerRegPhysBase,\r
-                  4,\r
-                  PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,\r
-                  &hTimerEnable);\r
-\r
-       if (pui32TimerEnable == IMG_NULL)\r
-       {\r
-               PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: OSMapPhysToLin failed"));\r
-               goto ExitDisableGPT11ICK;\r
-       }\r
-\r
-       rate = *pui32TimerEnable;\r
-       if(!(rate & 4))\r
-       {\r
-               PVR_TRACE(("Setting GPTIMER11 mode to posted (currently is non-posted)"));\r
-\r
-\r
-               *pui32TimerEnable = rate | 4;\r
-       }\r
-\r
-       OSUnMapPhysToLin(pui32TimerEnable,\r
-                   4,\r
-                   PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,\r
-                   hTimerEnable);\r
-\r
-\r
-       TimerRegPhysBase.uiAddr = SYS_OMAP3430_GP11TIMER_ENABLE_SYS_PHYS_BASE;\r
-       pui32TimerEnable = OSMapPhysToLin(TimerRegPhysBase,\r
-                  4,\r
-                  PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,\r
-                  &hTimerEnable);\r
-\r
-       if (pui32TimerEnable == IMG_NULL)\r
-       {\r
-               PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: OSMapPhysToLin failed"));\r
-               goto ExitDisableGPT11ICK;\r
-       }\r
-\r
-\r
-       *pui32TimerEnable = 3;\r
-\r
-       OSUnMapPhysToLin(pui32TimerEnable,\r
-                   4,\r
-                   PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,\r
-                   hTimerEnable);\r
-\r
-#endif\r
-\r
-       eError = PVRSRV_OK;\r
-       goto Exit;\r
-\r
-#if defined(DEBUG) || defined(TIMING)\r
-ExitDisableGPT11ICK:\r
-       clk_disable(psSysSpecData->psGPT11_ICK);\r
-ExitDisableGPT11FCK:\r
-       clk_disable(psSysSpecData->psGPT11_FCK);\r
-ExitUnRegisterConstraintNotifications:\r
-#endif\r
-#if defined(CONSTRAINT_NOTIFICATIONS)\r
-       UnRegisterConstraintNotifications();\r
-\r
-#endif\r
-ExitError:\r
-       eError = PVRSRV_ERROR_GENERIC;\r
-Exit:\r
-       if (bPowerLock)\r
-       {\r
-               PowerLockWrap(psSysSpecData);\r
-       }\r
-\r
-#if !defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)\r
-       if (eError == PVRSRV_OK)\r
-       {\r
-\r
-               eError = EnableSGXClocks(psSysData);\r
-       }\r
-#endif\r
-       return eError;\r
-}\r
-\r
-IMG_VOID DisableSystemClocks(SYS_DATA *psSysData)\r
-{\r
-       SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;\r
-       IMG_BOOL bPowerLock;\r
-#if defined(DEBUG) || defined(TIMING)\r
-       IMG_CPU_PHYADDR TimerRegPhysBase;\r
-       IMG_HANDLE hTimerDisable;\r
-       IMG_UINT32 *pui32TimerDisable;\r
-#endif\r
-\r
-       PVR_TRACE(("DisableSystemClocks: Disabling System Clocks"));\r
-\r
-       DisableSGXClocks(psSysData);\r
-\r
-       bPowerLock = PowerLockWrappedOnCPU(psSysSpecData);\r
-       if (bPowerLock)\r
-       {\r
-\r
-               PowerLockUnwrap(psSysSpecData);\r
-       }\r
-\r
-#if defined(CONSTRAINT_NOTIFICATIONS)\r
-       UnRegisterConstraintNotifications();\r
-#endif\r
-\r
-#if defined(DEBUG) || defined(TIMING)\r
-\r
-       TimerRegPhysBase.uiAddr = SYS_OMAP3430_GP11TIMER_ENABLE_SYS_PHYS_BASE;\r
-       pui32TimerDisable = OSMapPhysToLin(TimerRegPhysBase,\r
-                               4,\r
-                               PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,\r
-                               &hTimerDisable);\r
-\r
-       if (pui32TimerDisable == IMG_NULL)\r
-       {\r
-               PVR_DPF((PVR_DBG_ERROR, "DisableSystemClocks: OSMapPhysToLin failed"));\r
-       }\r
-       else\r
-       {\r
-               *pui32TimerDisable = 0;\r
-\r
-               OSUnMapPhysToLin(pui32TimerDisable,\r
-                               4,\r
-                               PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,\r
-                               hTimerDisable);\r
-       }\r
-\r
-       clk_disable(psSysSpecData->psGPT11_ICK);\r
-\r
-       clk_disable(psSysSpecData->psGPT11_FCK);\r
-\r
-#endif\r
-       if (bPowerLock)\r
-       {\r
-               PowerLockWrap(psSysSpecData);\r
-       }\r
-}\r
diff --git a/services4/system/ti81xx/.sysconfig.h.swp b/services4/system/ti81xx/.sysconfig.h.swp
deleted file mode 100644 (file)
index ecd8128..0000000
Binary files a/services4/system/ti81xx/.sysconfig.h.swp and /dev/null differ
index 248da05..f6c3849 100644 (file)
@@ -163,8 +163,10 @@ PVRSRV_ERROR EnableSGXClocks(SYS_DATA *psSysData)
 {
 #if !defined(NO_HARDWARE)
        SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
+/*
        long lNewRate;
        long lRate;
+*/
        IMG_INT res;
 
        
@@ -269,11 +271,12 @@ PVRSRV_ERROR EnableSystemClocks(SYS_DATA *psSysData)
 {
        SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
        struct clk *psCLK;
-       IMG_INT res;
+//     IMG_INT res;
         IMG_BOOL bPowerLock;
        PVRSRV_ERROR eError;
 
 #if defined(DEBUG) || defined(TIMING)
+        IMG_INT res;
        IMG_INT rate;
        struct clk *sys_ck;
        IMG_CPU_PHYADDR     TimerRegPhysBase;
index ecd1160..1560067 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/module.h>
 #include <linux/fs.h>
 #include <linux/kernel.h>
+#include <linux/slab.h>
 #include <linux/mm.h>
 #include <linux/string.h>
 #include <asm/page.h>
@@ -54,7 +55,7 @@
 #include "hostfunc.h"
 #include "dbgdriv.h"
 
-#if defined(DEBUG) && !defined(SUPPORT_DRI_DRM)
+#if defined(MODULE) && defined(DEBUG) && !defined(SUPPORT_DRI_DRM)
 IMG_UINT32     gPVRDebugLevel = (DBGPRIV_FATAL | DBGPRIV_ERROR | DBGPRIV_WARNING);
 
 #define PVR_STRING_TERMINATOR          '\0'
index 965c352..d4fe778 100644 (file)
@@ -116,7 +116,7 @@ IMG_VOID DBGDrvGetServiceTable(IMG_VOID **fn_table)
 #if defined(SUPPORT_DRI_DRM)
 void dbgdrv_cleanup(void)
 #else
-void cleanup_module(void)
+static void __exit dbgdrv_cleanup(void)
 #endif
 {
 #if !defined(SUPPORT_DRI_DRM)
@@ -136,7 +136,7 @@ void cleanup_module(void)
 #if defined(SUPPORT_DRI_DRM)
 IMG_INT dbgdrv_init(void)
 #else
-int init_module(void)
+static int __init dbgdrv_init(void)
 #endif
 {
 #if (defined(LDM_PLATFORM) || defined(LDM_PCI)) && !defined(SUPPORT_DRI_DRM)
@@ -309,3 +309,8 @@ IMG_VOID DefineHotKey (IMG_UINT32 ui32ScanCode, IMG_UINT32 ui32ShiftState, PHOTK
 }
 
 EXPORT_SYMBOL(DBGDrvGetServiceTable);
+
+#if !defined(SUPPORT_DRI_DRM)
+subsys_initcall(dbgdrv_init);
+module_exit(dbgdrv_cleanup);
+#endif