-This software is Copyright (C) 2008 Imagination Technologies Ltd.
+This software is Copyright (C) Imagination Technologies Ltd.
All rights reserved.
You may use, distribute and copy this software under the terms of
SGX Embedded Systems DDK for the Linux kernel.
-Copyright (C) 2008 Imagination Technologies Ltd. All rights reserved.
+Copyright (C) Imagination Technologies Ltd. All rights reserved.
======================================================================
This file covers how to build and install the Imagination Technologies
SGX Embedded Systems DDK for the Linux kernel.
-Copyright (C) 2008 Imagination Technologies Ltd. All rights reserved.
+Copyright (C) Imagination Technologies Ltd. All rights reserved.
======================================================================
# Kernel C only
#
ALL_KBUILD_CFLAGS := $(COMMON_CFLAGS) -Wno-unused-parameter -Wno-sign-compare \
- $(call cc-option,-Wno-type-limits)
+ $(call cc-option,-Wno-type-limits) \
+ $(call cc-option,-Wno-unused-but-set-variable)
# This variable contains a list of all modules built by kbuild
ALL_KBUILD_MODULES :=
$(eval $(call TunableBothConfigC,SGX_CORE_REV,))
$(eval $(call TunableBothConfigC,USE_SGX_CORE_REV_HEAD,))
-$(eval $(call BothConfigC,TRANSFER_QUEUE,1))
-$(eval $(call BothConfigC,PVR_SECURE_HANDLES,1))
+$(eval $(call BothConfigC,TRANSFER_QUEUE,))
+$(eval $(call BothConfigC,PVR_SECURE_HANDLES,))
$(eval $(call KernelConfigC,DISPLAY_CONTROLLER,$(DISPLAY_CONTROLLER)))
$(eval $(call TunableBothConfigC,SUPPORT_SHARED_PB,))
$(eval $(call TunableBothConfigC,SUPPORT_HYBRID_PB,))
$(eval $(call TunableBothConfigC,SUPPORT_HW_RECOVERY,1))
-$(eval $(call TunableBothConfigC,SUPPORT_ACTIVE_POWER_MANAGEMENT,0))
+$(eval $(call TunableBothConfigC,SUPPORT_ACTIVE_POWER_MANAGEMENT,1))
$(eval $(call TunableBothConfigC,SUPPORT_SGX_HWPERF,1))
$(eval $(call TunableBothConfigC,SUPPORT_SGX_LOW_LATENCY_SCHEDULING,1))
$(eval $(call TunableBothConfigC,SUPPORT_MEMINFO_IDS,))
$(eval $(call TunableBothConfigC,SUPPORT_SGX_NEW_STATUS_VALS,1))
$(eval $(call TunableBothConfigC,SUPPORT_PDUMP_MULTI_PROCESS,))
-$(eval $(call TunableBothConfigC,SUPPORT_DBGDRV_EVENT_OBJECTS,))
+$(eval $(call TunableBothConfigC,SUPPORT_DBGDRV_EVENT_OBJECTS,1))
$(eval $(call TunableBothConfigC,PVR_DBG_BREAK_ASSERT_FAIL,))
$(eval $(call TunableBothConfigC,SGX_FEATURE_SYSTEM_CACHE,))
$(eval $(call TunableBothConfigC,SGX_BYPASS_SYSTEM_CACHE,))
$(eval $(call TunableKernelConfigC,PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE,))
$(eval $(call TunableKernelConfigC,PVR_LINUX_TIMERS_USING_WORKQUEUES,))
$(eval $(call TunableKernelConfigC,PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE,))
-$(eval $(call TunableKernelConfigC,LDM_PLATFORM,1))
+$(eval $(call TunableKernelConfigC,LDM_PLATFORM,))
$(eval $(call TunableKernelConfigC,PVR_LDM_PLATFORM_PRE_REGISTERED,))
$(eval $(call TunableKernelConfigC,LDM_PCI,))
$(eval $(call TunableKernelConfigC,PVRSRV_DUMP_MK_TRACE,))
+++ /dev/null
-#
-# Copyright (C) Imagination Technologies Ltd. All rights reserved.
-#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful but, except
-# as otherwise stated in writing, without any warranty; without even the
-# implied warranty of merchantability or fitness for a particular purpose.
-# See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# The full GNU General Public License is included in this distribution in
-# the file called "COPYING".
-#
-# Contact Information:
-# Imagination Technologies Ltd. <gpl-support@imgtec.com>
-# Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
-#
-#
-
-PVR_SYSTEM := ti335x
-
-KERNEL_COMPONENTS := srvkm bufferclass_example
-
-include ../kernel_version.mk
-
-OMAP_KERNEL_AT_LEAST_2_6_35 := $(shell test $(KERNEL_VERSION) -ge 2 -a \
- $(KERNEL_PATCHLEVEL) -ge 6 -a \
- $(KERNEL_SUBLEVEL) -ge 35 && echo 1 || echo 0)
-
-# Only enable active power management if passive power management is
-# enabled, as indicated by LDM_PLATFORM being set to 1. On OMAP,
-# the system can suspend in the case where active power management is
-# enabled in the SGX driver, but passive power management isn't. As
-# passive power management isn't enabled, the driver won't see the
-# system suspend/resume events, and so won't take appropriate action.
-LDM_PLATFORM ?= 1
-
-SUPPORT_LINUX_USING_WORKQUEUES := 1
-DISPLAY_CONTROLLER_COMPONENT += dc_omapfb3_linux
-DISPLAY_CONTROLLER := omaplfb
-
-OPTIM := -Os
-
-SYS_CFLAGS := -march=armv7-a
-TRANSFER_QUEUE ?= 1
-SUPPORT_SGX_EVENT_OBJECT ?= 1
-SUPPORT_SECURE_HANDLES = 1
-SUPPORT_SRVINIT = 1
-SUPPORT_PERCONTEXT_PB = 1
-DISABLE_SGX_PB_GROW_SHRINK ?= 1
-SUPPORT_LINUX_X86_PAT ?=1
-SUPPORT_LINUX_X86_WRITECOMBINE ?=1
-SUPPORT_SGX_LOW_LATENCY_SCHEDULING ?=1
-
-
-SUPPORT_OMAP4430_NEON ?= 1
-PVR_LDM_PLATFORM_PRE_REGISTERED := 0
-
-ifeq ($(SUPPORT_OMAP4430_NEON),1)
-SYS_CFLAGS += -ftree-vectorize -mfpu=neon -mfloat-abi=softfp
-endif
-
-PVR_NO_FULL_CACHE_OPS := 1
-
-LIBGCC := $(shell $(CROSS_COMPILE)gcc -print-libgcc-file-name)
-
-SGXCORE := 530
-SGX_CORE_REV := 125
-
-SGX_DYNAMIC_TIMING_INFO := 1
-SYS_CUSTOM_POWERLOCK_WRAP := 1
-
-ifeq ($(OMAP_NON_FLIP_DISPLAY),1)
-OPK_DEFAULT := libpvrPVR2D_BLITWSEGL.so
-else
-OPK_DEFAULT := libpvrPVR2D_FLIPWSEGL.so
-endif
-
-ifeq ($(OMAP_KERNEL_AT_LEAST_2_6_35),1)
-ifeq ($(LDM_PLATFORM),1)
-PVR_LDM_PLATFORM_PRE_REGISTERED := 0
-endif
-endif
-
-include ../common/xorg_test.mk
-ifeq ($(want_xorg),1)
-
-SUPPORT_DRI_DRM := 1
-
-ifeq ($(OMAP_KERNEL_AT_LEAST_2_6_35),1)
-PVR_DRI_DRM_PLATFORM_DEV := 1
-PVR_DRI_DRM_STATIC_BUS_ID := 1
-else
-PVR_DRI_DRM_NOT_PCI := 1
-endif
-
-XORG_TOOLCHAIN := tarballs-omap4-ubuntu-10.10-cross
-XORG_PVR_CONF := omap4
-XORG_PVR_VIDEO := omap4
-
-OPK_FALLBACK := libpvrPVR2D_DRIWSEGL.so
-
-ifneq ($(OMAP_NON_FLIP_DISPLAY),1)
-XORG_PVR_VIDEO ?= $(PVR_SYSTEM)
-PVR_DISPLAY_CONTROLLER_DRM_IOCTL := 1
-endif
-
-else # xorg isn't excluded
-
-OPK_FALLBACK := libpvrPVR2D_BLITWSEGL.so
-
-endif # xorg isn't excluded
-
-ifeq ($(SUPPORT_DRI_DRM),1)
-ifeq ($(PVR_DRI_DRM_NOT_PCI),1)
-KERNEL_COMPONENTS += linux_drm
-EXTRA_KBUILD_SOURCE := $(KERNELDIR)
-endif
-EXTRA_PVRSRVKM_COMPONENTS += $(DISPLAY_CONTROLLER_COMPONENT)
-else
-KERNEL_COMPONENTS += $(DISPLAY_CONTROLLER_COMPONENT)
-endif
-
-include ../config/core.mk
-include ../common/xorg.mk
-include ../common/dridrm.mk
-include ../common/opencl.mk
-include ../common/omap4.mk
-
-# We only need this for pvr_video's includes, which should
-# really be done differently, as DISPLAY_CONTROLLER_DIR is
-# now obsolete..
-#
-$(eval $(call UserConfigMake,DISPLAY_CONTROLLER_DIR,3rdparty/$(DISPLAY_CONTROLLER_COMPONENT)))
#define PVRVERSION_FAMILY "sgxddk"
#define PVRVERSION_BRANCHNAME "1.7"
-#define PVRVERSION_BUILD 783851
+#define PVRVERSION_BUILD 867897
#define PVRVERSION_BSCONTROL "CustomerTI_OMAP4430_Linux_GPL"
-#define PVRVERSION_STRING "sgxddk 17 1.7@" PVR_STR2(PVRVERSION_BUILD)
+#define PVRVERSION_STRING "CustomerTI_OMAP4430_Linux_GPL sgxddk 17 1.7@" PVR_STR2(PVRVERSION_BUILD)
#define PVRVERSION_STRING_SHORT "1.7@" PVR_STR2(PVRVERSION_BUILD)
#define COPYRIGHT_TXT "Copyright (c) Imagination Technologies Ltd. All Rights Reserved."
-#define PVRVERSION_BUILD_HI 78
-#define PVRVERSION_BUILD_LO 3851
+#define PVRVERSION_BUILD_HI 86
+#define PVRVERSION_BUILD_LO 7897
#define PVRVERSION_STRING_NUMERIC PVR_STR2(PVRVERSION_MAJ) "." PVR_STR2(PVRVERSION_MIN) "." PVR_STR2(PVRVERSION_BUILD_HI) "." PVR_STR2(PVRVERSION_BUILD_LO)
#endif
#
-# Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+# Copyright (C) Imagination Technologies Ltd. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
#
#
-#
-
-MODULE = bc_example
-
-INCLUDES = -I$(EURASIAROOT)/include4 \
- -I$(EURASIAROOT)/services4/include \
- -I$(EURASIAROOT)/services4/system/$(PVR_SYSTEM) \
- -I$(EURASIAROOT)/services4/system/include \
-
-SOURCES = ../bufferclass_example.c \
- ../bufferclass_example_linux.c \
- ../bufferclass_example_private.c
-SYM_VERS_DEPS = $(EURASIAROOT)/services4/srvkm/env/linux
+ccflags-y += -I$(TOP)/services4/3rdparty/bufferclass_example
-include $(EURASIAROOT)/eurasiacon/build/linux/kbuild/Makefile.kbuild_subdir_common
+bc_example-y += \
+ services4/3rdparty/bufferclass_example/bufferclass_example.o \
+ services4/3rdparty/bufferclass_example/bufferclass_example_linux.o \
+ services4/3rdparty/bufferclass_example/bufferclass_example_private.o
--- /dev/null
+#
+# Copyright (C) Imagination Technologies Ltd. All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful but, except
+# as otherwise stated in writing, without any warranty; without even the
+# implied warranty of merchantability or fitness for a particular purpose.
+# See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# The full GNU General Public License is included in this distribution in
+# the file called "COPYING".
+#
+# Contact Information:
+# Imagination Technologies Ltd. <gpl-support@imgtec.com>
+# Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+#
+#
+
+modules := bufferclass_example
+
+bufferclass_example_type := kernel_module
+bufferclass_example_target := bc_example.ko
+bufferclass_example_makefile := $(THIS_DIR)/Kbuild.mk
/**********************************************************************
*
- * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
static PVRSRV_ERROR CloseBCDevice(IMG_UINT32 ui32DeviceID, IMG_HANDLE hDevice)
{
- UNREFERENCED_PARAMETER(ui32DeviceID);
UNREFERENCED_PARAMETER(hDevice);
return (PVRSRV_OK);
BCE_ERROR BC_Example_Buffers_Create(void)
{
BC_EXAMPLE_DEVINFO *psDevInfo;
- unsigned long i;
+ unsigned long i;
#if !defined(BC_DISCONTIG_BUFFERS)
- IMG_CPU_PHYADDR sSystemBufferCPUPAddr;
+ IMG_CPU_PHYADDR sSystemBufferCPUPAddr;
#endif
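+ /* Keep the buffer geometry in statics so it survives across
+  * ReconfigureBuffer() calls; ui32MaxWidth (4 x 320) bounds the growth
+  * sequence applied at the end of this function. */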
+ PVRSRV_PIXEL_FORMAT pixelformat = BC_EXAMPLE_PIXELFORMAT;
+ static IMG_UINT32 ui32Width = BC_EXAMPLE_WIDTH;
+ static IMG_UINT32 ui32Height = BC_EXAMPLE_HEIGHT;
+ static IMG_UINT32 ui32ByteStride = BC_EXAMPLE_STRIDE;
+
+ IMG_UINT32 ui32MaxWidth = 320 * 4;
psDevInfo->sBufferInfo.pixelformat = BC_EXAMPLE_PIXELFORMAT;
- psDevInfo->sBufferInfo.ui32Width = BC_EXAMPLE_WIDTH;
- psDevInfo->sBufferInfo.ui32Height = BC_EXAMPLE_HEIGHT;
- psDevInfo->sBufferInfo.ui32ByteStride = BC_EXAMPLE_STRIDE;
+ psDevInfo->sBufferInfo.ui32Width = ui32Width;
+ psDevInfo->sBufferInfo.ui32Height = ui32Height;
+ psDevInfo->sBufferInfo.ui32ByteStride = ui32ByteStride;
psDevInfo->sBufferInfo.ui32BufferDeviceID = BC_EXAMPLE_DEVICEID;
psDevInfo->sBufferInfo.ui32Flags = PVRSRV_BC_FLAGS_YUVCSC_FULL_RANGE | PVRSRV_BC_FLAGS_YUVCSC_BT601;
for(i=psDevInfo->ulNumBuffers; i < BC_EXAMPLE_NUM_BUFFERS; i++)
{
- unsigned long ulSize = BC_EXAMPLE_HEIGHT * BC_EXAMPLE_STRIDE;
+ unsigned long ulSize = (unsigned long)(ui32Height * ui32ByteStride);
if(psDevInfo->sBufferInfo.pixelformat == PVRSRV_PIXEL_FORMAT_NV12)
{
- ulSize += ((BC_EXAMPLE_STRIDE >> 1) * (BC_EXAMPLE_HEIGHT >> 1) << 1);
+ ulSize += ((ui32ByteStride >> 1) * (ui32Height >> 1) << 1);
}
else if(psDevInfo->sBufferInfo.pixelformat == PVRSRV_PIXEL_FORMAT_I420)
{
- ulSize += (BC_EXAMPLE_STRIDE >> 1) * (BC_EXAMPLE_HEIGHT >> 1);
+ ulSize += (ui32ByteStride >> 1) * (ui32Height >> 1);
- ulSize += (BC_EXAMPLE_STRIDE >> 1) * (BC_EXAMPLE_HEIGHT >> 1);
+ ulSize += (ui32ByteStride >> 1) * (ui32Height >> 1);
}
#if defined(BC_DISCONTIG_BUFFERS)
if (BCAllocDiscontigMemory(ulSize,
- &psDevInfo->psSystemBuffer[i].hMemHandle,
- &psDevInfo->psSystemBuffer[i].sCPUVAddr,
- &psDevInfo->psSystemBuffer[i].psSysAddr) != BCE_OK)
+ &psDevInfo->psSystemBuffer[i].hMemHandle,
+ &psDevInfo->psSystemBuffer[i].sCPUVAddr,
+ &psDevInfo->psSystemBuffer[i].psSysAddr) != BCE_OK)
{
break;
}
psDevInfo->sBCJTable.pfnGetBCInfo = GetBCInfo;
psDevInfo->sBCJTable.pfnGetBufferAddr = GetBCBufferAddr;
+
+
+
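+ /* Each successful create grows the test dimensions by 320x160 (stride
+  * recomputed per pixel format) until ui32MaxWidth is reached, after which
+  * they reset to the compile-time defaults, so repeated reconfigure ioctls
+  * cycle through a range of buffer sizes. */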
+ if (ui32Width < ui32MaxWidth)
+ {
+ switch(pixelformat)
+ {
+ case PVRSRV_PIXEL_FORMAT_NV12:
+ case PVRSRV_PIXEL_FORMAT_I420:
+ {
+ ui32Width += 320;
+ ui32Height += 160;
+ ui32ByteStride = ui32Width;
+ break;
+ }
+ case PVRSRV_PIXEL_FORMAT_FOURCC_ORG_VYUY:
+ case PVRSRV_PIXEL_FORMAT_FOURCC_ORG_UYVY:
+ case PVRSRV_PIXEL_FORMAT_FOURCC_ORG_YUYV:
+ case PVRSRV_PIXEL_FORMAT_FOURCC_ORG_YVYU:
+ {
+ ui32Width += 320;
+ ui32Height += 160;
+ ui32ByteStride = ui32Width*2;
+ break;
+ }
+ case PVRSRV_PIXEL_FORMAT_RGB565:
+ {
+ ui32Width += 320;
+ ui32Height += 160;
+ ui32ByteStride = ui32Width*2;
+ break;
+ }
+ default:
+ {
+ return (BCE_ERROR_INVALID_PARAMS);
+ }
+ }
+ }
+ else
+ {
+ ui32Width = BC_EXAMPLE_WIDTH;
+ ui32Height = BC_EXAMPLE_HEIGHT;
+ ui32ByteStride = BC_EXAMPLE_STRIDE;
+ }
+
return (BCE_OK);
}
+
BCE_ERROR BC_Example_Buffers_Destroy(void)
{
BC_EXAMPLE_DEVINFO *psDevInfo;
/**********************************************************************
*
- * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
/**********************************************************************
*
- * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
#include <linux/dma-mapping.h>
#endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
+#include <linux/mutex.h>
+#endif
+
+#if defined(BC_DISCONTIG_BUFFERS)
+#include <linux/vmalloc.h>
+#endif
+
#include "bufferclass_example.h"
#include "bufferclass_example_linux.h"
#include "bufferclass_example_private.h"
MODULE_SUPPORTED_DEVICE(DEVNAME);
-int BC_Example_Bridge(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
+static long BC_Example_Bridge_Unlocked(struct file *file, unsigned int cmd, unsigned long arg);
+#else
+static int BC_Example_Bridge(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg);
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
+static DEFINE_MUTEX(sBCExampleBridgeMutex);
+#endif
#if defined(LDM_PLATFORM) || defined(LDM_PCI)
static struct class *psPvrClass;
static int AssignedMajorNumber;
static struct file_operations bufferclass_example_fops = {
- ioctl:BC_Example_Bridge,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
+ .unlocked_ioctl = BC_Example_Bridge_Unlocked
+#else
+ .ioctl = BC_Example_Bridge
+#endif
};
}
-int BC_Example_Bridge(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
+static int BC_Example_Bridge(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
{
int err = -EFAULT;
int command = _IOC_NR(cmd);
BC_Example_ioctl_package sBridge;
+ PVR_UNREFERENCED_PARAMETER(inode);
+
if (copy_from_user(&sBridge, (void *)arg, sizeof(sBridge)) != 0)
{
return err;
}
break;
}
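+ /* New ioctl: tear down and recreate the example buffers with the updated
+  * dimensions; outputparam reports success (1) or failure (0). */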
+ case _IOC_NR(BC_Example_ioctl_reconfigure_buffer):
+ {
+ if(ReconfigureBuffer(&sBridge.outputparam) == -1)
+ {
+ return err;
+ }
+ break;
+ }
default:
return err;
}
return 0;
}
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
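+/* Kernels 2.6.36 and later dropped the BKL-protected .ioctl hook, so
+ * serialise bridge calls with a local mutex and reuse the old handler. */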
+static long BC_Example_Bridge_Unlocked(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int res;
+
+ mutex_lock(&sBCExampleBridgeMutex);
+ res = BC_Example_Bridge(NULL, file, cmd, arg);
+ mutex_unlock(&sBCExampleBridgeMutex);
+
+ return res;
+}
+#endif
module_init(BC_Example_ModInit);
module_exit(BC_Example_ModCleanup);
/**********************************************************************
*
- * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
#define BC_Example_ioctl_fill_buffer BC_EXAMPLE_IOWR(0)
#define BC_Example_ioctl_get_buffer_count BC_EXAMPLE_IOWR(1)
+#define BC_Example_ioctl_reconfigure_buffer BC_EXAMPLE_IOWR(2)
#endif
/**********************************************************************
*
- * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
case PVRSRV_PIXEL_FORMAT_RGB565:
default:
{
- FillRGB565Image(psBuffer->sCPUVAddr, BC_EXAMPLE_WIDTH, BC_EXAMPLE_HEIGHT, BC_EXAMPLE_STRIDE);
+ FillRGB565Image(psBuffer->sCPUVAddr,
+ psBufferInfo->ui32Width,
+ psBufferInfo->ui32Height,
+ psBufferInfo->ui32ByteStride);
break;
}
case PVRSRV_PIXEL_FORMAT_FOURCC_ORG_VYUY:
case PVRSRV_PIXEL_FORMAT_FOURCC_ORG_YUYV:
case PVRSRV_PIXEL_FORMAT_FOURCC_ORG_YVYU:
{
- FillYUV422Image(psBuffer->sCPUVAddr, BC_EXAMPLE_WIDTH, BC_EXAMPLE_HEIGHT, BC_EXAMPLE_STRIDE, psBufferInfo->pixelformat);
+ FillYUV422Image(psBuffer->sCPUVAddr,
+ psBufferInfo->ui32Width,
+ psBufferInfo->ui32Height,
+ psBufferInfo->ui32ByteStride,
+ psBufferInfo->pixelformat);
break;
}
case PVRSRV_PIXEL_FORMAT_NV12:
{
- FillNV12Image(psBuffer->sCPUVAddr, BC_EXAMPLE_WIDTH, BC_EXAMPLE_HEIGHT, BC_EXAMPLE_STRIDE);
+ FillNV12Image(psBuffer->sCPUVAddr,
+ psBufferInfo->ui32Width,
+ psBufferInfo->ui32Height,
+ psBufferInfo->ui32ByteStride);
break;
}
case PVRSRV_PIXEL_FORMAT_I420:
{
- FillI420Image(psBuffer->sCPUVAddr, BC_EXAMPLE_WIDTH, BC_EXAMPLE_HEIGHT, BC_EXAMPLE_STRIDE);
+ FillI420Image(psBuffer->sCPUVAddr,
+ psBufferInfo->ui32Width,
+ psBufferInfo->ui32Height,
+ psBufferInfo->ui32ByteStride);
break;
}
}
return 0;
}
+
+
+int ReconfigureBuffer(unsigned int *uiSucceed)
+{
+ BCE_ERROR eError;
+
+
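+ /* Destroy the current buffers and recreate them with the (possibly grown)
+  * static dimensions; *uiSucceed reports the outcome. */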
+ eError = BC_Example_Buffers_Destroy();
+
+ if (eError != BCE_OK)
+ {
+ *uiSucceed = 0;
+ return -1;
+ }
+
+
+
+
+
+ eError = BC_Example_Buffers_Create();
+
+ if (eError != BCE_OK)
+ {
+ *uiSucceed = 0;
+ return -1;
+ }
+
+
+ *uiSucceed = 1;
+ return 0;
+}
/**********************************************************************
*
- * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
int FillBuffer(unsigned int uiBufferIndex);
int GetBufferCount(unsigned int *puiBufferCount);
+int ReconfigureBuffer(unsigned int *uiSucceed);
#endif
#include "bc_cat.h"
#include <linux/slab.h>
#include <linux/dma-mapping.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
+#include <linux/mutex.h>
+#endif
+
+#if defined(BC_DISCONTIG_BUFFERS)
+#include <linux/vmalloc.h>
+#endif
#define DEVNAME "bccat"
#define DRVNAME DEVNAME
#else
static long bc_ioctl(struct file *file,
unsigned int cmd, unsigned long arg);
+static long bc_ioctl_unlocked(struct file *file,
+ unsigned int cmd, unsigned long arg);
+#endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
+static DEFINE_MUTEX(sBCExampleBridgeMutex);
#endif
static int bc_mmap(struct file *filp, struct vm_area_struct *vma);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
.ioctl = bc_ioctl,
#else
- .unlocked_ioctl = bc_ioctl,
+ .unlocked_ioctl = bc_ioctl_unlocked,
#ifdef CONFIG_COMPAT
.compat_ioctl = bc_ioctl,
#endif
IMG_UINT32 i, stride;
unsigned long ulSize;
PVRSRV_PIXEL_FORMAT pixel_fmt;
-
+//IMG_UINT32 ui32MaxWidth = 320 * 4;
if (p->count <= 0)
return -EINVAL;
psDevInfo->sBCJTable.pfnGetBCBuffer = GetBCBuffer;
psDevInfo->sBCJTable.pfnGetBCInfo = GetBCInfo;
psDevInfo->sBCJTable.pfnGetBufferAddr = GetBCBufferAddr;
-
-
+/*
+if (psDevInfo->sBufferInfo.ui32Width < ui32MaxWidth)
+ {
+ switch(pixel_fmt)
+ {
+ case PVRSRV_PIXEL_FORMAT_NV12:
+ case PVRSRV_PIXEL_FORMAT_I420:
+ {
+ psDevInfo->sBufferInfo.ui32Width += 320;
+ psDevInfo->sBufferInfo.ui32Height += 160;
+ psDevInfo->sBufferInfo.ui32ByteStride = psDevInfo->sBufferInfo.ui32Width;
+ break;
+ }
+ case PVRSRV_PIXEL_FORMAT_FOURCC_ORG_VYUY:
+ case PVRSRV_PIXEL_FORMAT_FOURCC_ORG_UYVY:
+ case PVRSRV_PIXEL_FORMAT_FOURCC_ORG_YUYV:
+ case PVRSRV_PIXEL_FORMAT_FOURCC_ORG_YVYU:
+ {
+ psDevInfo->sBufferInfo.ui32Width += 320;
+ psDevInfo->sBufferInfo.ui32Height += 160;
+ psDevInfo->sBufferInfo.ui32ByteStride = ui32Width*2;
+ break;
+ }
+ case PVRSRV_PIXEL_FORMAT_RGB565:
+ {
+ psDevInfo->sBufferInfo.ui32Width += 320;
+ psDevInfo->sBufferInfo.ui32Height += 160;
+ psDevInfo->sBufferInfo.ui32ByteStride = ui32Width*2;
+ break;
+ }
+ default:
+ {
+ return (BCE_ERROR_INVALID_PARAMS);
+ }
+ }
+ }
+ else
+ {
+ psDevInfo->sBufferInfo.ui32Width = BC_EXAMPLE_WIDTH;
+ psDevInfo->sBufferInfo.ui32Height = BC_EXAMPLE_HEIGHT;
+ psDevInfo->sBufferInfo.ui32ByteStride = BC_EXAMPLE_STRIDE;
+ }
+*/
return (BCE_OK);
}
return 0;
}
+int ReconfigureBuffer(int id, bc_buf_params_t *p, unsigned int *uiSucceed)
+{
+ BCE_ERROR eError;
+
+
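+ /* Free the buffers attached to device 'id' and reallocate them with the
+  * parameters supplied in *p. */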
+ eError = BC_DestroyBuffers(id);
+
+ if (eError != BCE_OK)
+ {
+ *uiSucceed = 0;
+ return -1;
+ }
+
+
+
+
+
+ eError = BC_CreateBuffers(id,p);
+
+ if (eError != BCE_OK)
+ {
+ *uiSucceed = 0;
+ return -1;
+ }
+
+
+ *uiSucceed = 1;
+ return 0;
+}
+
+
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
static int bc_ioctl(struct inode *inode, struct file *file,
unsigned int cmd, unsigned long arg)
#endif
{
BC_CAT_DEVINFO *devinfo;
+//PVR_UNREFERENCED_PARAMETER(inode);
int id = file_to_id (file);
if ((devinfo = GetAnchorPtr(id)) == IMG_NULL)
0xFFFFF000;
break;
}
+ case _IOC_NR(BCIORECONFIGURE_BUFFERS):
+ {
+ unsigned int outputparam;
+ bc_buf_params_t *p = (bc_buf_params_t *) arg;
+ if(ReconfigureBuffer(id,p,&outputparam) == -1)
+ {
+ return -EFAULT;
+ }
+ break;
+ }
default:
return -EFAULT;
}
return 0;
}
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
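+/* .unlocked_ioctl entry point: take a driver-local mutex (the BKL no longer
+ * serialises ioctls on these kernels) before calling bc_ioctl(). */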
+static long bc_ioctl_unlocked(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int res;
+
+ mutex_lock(&sBCExampleBridgeMutex);
+ res = bc_ioctl(file, cmd, arg);
+ mutex_unlock(&sBCExampleBridgeMutex);
+
+ return res;
+}
+#endif
+
+
module_init(bc_cat_init);
module_exit(bc_cat_cleanup);
int size;
unsigned long pa;
} bc_buf_ptr_t;
+int ReconfigureBuffer(int id, bc_buf_params_t *p,unsigned int *uiSucceed);
#define BCIO_GID 'g'
#define BC_IOWR(INDEX) _IOWR(BCIO_GID, INDEX, BCIO_package)
#define BCIOREQ_BUFFERS BC_IOWR(3)
#define BCIOSET_BUFFERPHYADDR BC_IOWR(4)
+#define BCIORECONFIGURE_BUFFERS BC_IOWR(5)
#endif
/**********************************************************************
*
- * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
-I$(KERNELDIR)/arch/arm/plat-omap/include \
$(SYS_CFLAGS.1) \
+ifneq ($(FBDEV),no)
+EXTRA_CFLAGS += -DFBDEV_PRESENT
+endif
+
+
ifeq ($(SUPPORT_XORG),1)
EXTRA_CFLAGS += -DSUPPORT_DRI_DRM
EXTRA_CFLAGS += -DPVR_DISPLAY_CONTROLLER_DRM_IOCTL
/**********************************************************************
*
- * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
/**********************************************************************
*
- * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
{
psBuffer->hCmdComplete = (OMAPLFB_HANDLE)hCmdCookie;
psBuffer->ulSwapInterval = (unsigned long)psFlipCmd->ui32SwapInterval;
-
+#if defined(NO_HARDWARE)
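+ /* Without real display hardware there is no flip to perform, so complete
+  * the command immediately rather than queueing the buffer for swap. */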
+ psDevInfo->sPVRJTable.pfnPVRSRVCmdComplete((IMG_HANDLE)psBuffer->hCmdComplete, IMG_FALSE);
+#else
OMAPLFBQueueBufferForSwap(psSwapChain, psBuffer);
+#endif
}
OMAPLFBCreateSwapChainUnLock(psDevInfo);
goto ErrorFreeDevInfo;
}
-
+#ifdef FBDEV_PRESENT
if(OMAPLFBInitFBDev(psDevInfo) != OMAPLFB_OK)
{
psDevInfo->sSystemBuffer.psDevInfo = psDevInfo;
OMAPLFBInitBufferForSwap(&psDevInfo->sSystemBuffer);
-
-
+#else
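+ /* No fbdev available: fake an 800x600 display. The 3200-byte stride
+  * implies 32bpp (800 * 4); pixelformat 20 is assumed to map to ARGB8888
+  * in the PVRSRV pixel format enumeration. */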
+ psDevInfo->sSystemBuffer.sCPUVAddr = 0x100;
+// psDevInfo->sSystemBuffer.ulBufferSize = 600*3200;
+
+ psDevInfo->sDisplayFormat.pixelformat = 20;
+ psDevInfo->sFBInfo.ulWidth = 800;
+ psDevInfo->sFBInfo.ulHeight = 600;
+ psDevInfo->sFBInfo.ulByteStride = 3200;
+ psDevInfo->sFBInfo.ulFBSize = 8388608;
+ psDevInfo->sFBInfo.ulBufferSize = 600*3200;
+#endif
psDevInfo->sDCJTable.ui32TableSize = sizeof(PVRSRV_DC_SRV2DISP_KMJTABLE);
psDevInfo->sDCJTable.pfnOpenDCDevice = OpenDCDevice;
": %s: Device %u: PVR Device %u: Couldn't remove device from PVR Services\n", __FUNCTION__, psDevInfo->uiFBDevID, psDevInfo->uiPVRDevID);
return OMAPLFB_FALSE;
}
-
+#ifdef FBDEV_PRESENT
OMAPLFBDeInitFBDev(psDevInfo);
+#endif
OMAPLFBSetDevInfoPtr(psDevInfo->uiFBDevID, NULL);
/**********************************************************************
*
- * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
#include <linux/omapfb.h>
#include <linux/mutex.h>
-# include <plat/vrfb.h>
+//# include <plat/vrfb.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34))
#define PVR_OMAPFB3_NEEDS_PLAT_VRFB_H
#endif
OMAPLFB_UPDATE_MODE OMAPLFBGetUpdateMode(OMAPLFB_DEVINFO *psDevInfo)
{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0)
struct omap_dss_device *psDSSDev = fb2display(psDevInfo->psLINFBInfo);
enum omap_dss_update_mode eMode;
OMAP_DSS_DRIVER(psDSSDrv, psDSSDev);
if (psDSSDrv == NULL || psDSSDev == NULL)
{
DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX ": %s: Device %u: No DSS device\n", __FUNCTION__, psDevInfo->uiFBDevID));
+ return OMAPLFB_UPDATE_MODE_UNDEFINED;
}
if (psDSSDrv->get_update_mode == NULL)
}
return OMAPLFB_UPDATE_MODE_UNDEFINED;
-//return OMAPLFB_UPDATE_MODE_AUTO;
+#else
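+/* The omapdss update-mode API is gone on 3.1+ kernels; report auto
+ * (continuous) update mode unconditionally. */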
+return OMAPLFB_UPDATE_MODE_AUTO;
+#endif
}
OMAPLFB_BOOL OMAPLFBSetUpdateMode(OMAPLFB_DEVINFO *psDevInfo, OMAPLFB_UPDATE_MODE eMode)
{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0)
struct omap_dss_device *psDSSDev = fb2display(psDevInfo->psLINFBInfo);
OMAP_DSS_DRIVER(psDSSDrv, psDSSDev);
enum omap_dss_update_mode eDSSMode;
}
return (res == 0);
-//return 1;
+#else
+return 1;
+#endif
}
OMAPLFB_BOOL OMAPLFBWaitForVSync(OMAPLFB_DEVINFO *psDevInfo)
{
+#ifdef FBDEV_PRESENT
struct omap_dss_device *psDSSDev = fb2display(psDevInfo->psLINFBInfo);
OMAP_DSS_MANAGER(psDSSMan, psDSSDev);
return OMAPLFB_FALSE;
}
}
-
+#endif
return OMAPLFB_TRUE;
}
OMAPLFB_ERROR OMAPLFBUnblankDisplay(OMAPLFB_DEVINFO *psDevInfo)
{
+#ifdef FBDEV_PRESENT
int res;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38)
console_lock();
": %s: Device %u: fb_blank failed (%d)\n", __FUNCTION__, psDevInfo->uiFBDevID, res);
return (OMAPLFB_ERROR_GENERIC);
}
-
+#endif
return (OMAPLFB_OK);
}
{
psBuffer->hCmdComplete = (OMAPLFB_HANDLE)hCmdCookie;
psBuffer->ulSwapInterval = (unsigned long)psFlipCmd->ui32SwapInterval;
-
+#if defined(NO_HARDWARE)
+ psDevInfo->sPVRJTable.pfnPVRSRVCmdComplete((IMG_HANDLE)psBuffer->hCmdComplete, IMG_FALSE);
+#else
OMAPLFBQueueBufferForSwap(psSwapChain, psBuffer);
+#endif
}
OMAPLFBCreateSwapChainUnLock(psDevInfo);
--- /dev/null
+#
+# Copyright (C) Imagination Technologies Ltd. All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful but, except
+# as otherwise stated in writing, without any warranty; without even the
+# implied warranty of merchantability or fitness for a particular purpose.
+# See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# The full GNU General Public License is included in this distribution in
+# the file called "COPYING".
+#
+# Contact Information:
+# Imagination Technologies Ltd. <gpl-support@imgtec.com>
+# Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+#
+#
+
+$(call must-be-defined,$(SUPPORT_DRI_DRM))
+
+DRM_SOURCE_DIR := drivers/gpu/drm
+
+ccflags-y += \
+ -I$(KERNELDIR)/include/drm \
+ -I$(DRM_SOURCE_DIR)
+
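+# Compile the stock DRM core sources alongside the PVR stubs. The external/
+# prefix is assumed to resolve against the kernel tree (EXTRA_KBUILD_SOURCE)
+# rather than the DDK source tree.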
+drm-y += \
+ services4/3rdparty/linux_drm/pvr_drm_stubs.o \
+ external/$(DRM_SOURCE_DIR)/drm_auth.o \
+ external/$(DRM_SOURCE_DIR)/drm_bufs.o \
+ external/$(DRM_SOURCE_DIR)/drm_cache.o \
+ external/$(DRM_SOURCE_DIR)/drm_context.o \
+ external/$(DRM_SOURCE_DIR)/drm_dma.o \
+ external/$(DRM_SOURCE_DIR)/drm_drawable.o \
+ external/$(DRM_SOURCE_DIR)/drm_drv.o \
+ external/$(DRM_SOURCE_DIR)/drm_fops.o \
+ external/$(DRM_SOURCE_DIR)/drm_gem.o \
+ external/$(DRM_SOURCE_DIR)/drm_ioctl.o \
+ external/$(DRM_SOURCE_DIR)/drm_irq.o \
+ external/$(DRM_SOURCE_DIR)/drm_lock.o \
+ external/$(DRM_SOURCE_DIR)/drm_memory.o \
+ external/$(DRM_SOURCE_DIR)/drm_proc.o \
+ external/$(DRM_SOURCE_DIR)/drm_stub.o \
+ external/$(DRM_SOURCE_DIR)/drm_vm.o \
+ external/$(DRM_SOURCE_DIR)/drm_agpsupport.o \
+ external/$(DRM_SOURCE_DIR)/drm_scatter.o \
+ external/$(DRM_SOURCE_DIR)/ati_pcigart.o \
+ external/$(DRM_SOURCE_DIR)/drm_pci.o \
+ external/$(DRM_SOURCE_DIR)/drm_sysfs.o \
+ external/$(DRM_SOURCE_DIR)/drm_hashtab.o \
+ external/$(DRM_SOURCE_DIR)/drm_sman.o \
+ external/$(DRM_SOURCE_DIR)/drm_mm.o \
+ external/$(DRM_SOURCE_DIR)/drm_crtc.o \
+ external/$(DRM_SOURCE_DIR)/drm_modes.o \
+ external/$(DRM_SOURCE_DIR)/drm_edid.o \
+ external/$(DRM_SOURCE_DIR)/drm_info.o \
+ external/$(DRM_SOURCE_DIR)/drm_debugfs.o \
+ external/$(DRM_SOURCE_DIR)/drm_encoder_slave.o
+
+# Per-object compile flags (kbuild CFLAGS_<object>.o convention):
+# force-define CONFIG_PCI for these DRM core files.
+CFLAGS_pvr_drm_stubs.o := -DCONFIG_PCI
+CFLAGS_drm_drv.o := -DCONFIG_PCI
+CFLAGS_drm_stub.o := -DCONFIG_PCI
+CFLAGS_ati_pcigart.o := -DCONFIG_PCI
--- /dev/null
+#
+# Copyright (C) Imagination Technologies Ltd. All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful but, except
+# as otherwise stated in writing, without any warranty; without even the
+# implied warranty of merchantability or fitness for a particular purpose.
+# See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# The full GNU General Public License is included in this distribution in
+# the file called "COPYING".
+#
+# Contact Information:
+# Imagination Technologies Ltd. <gpl-support@imgtec.com>
+# Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+#
+
+modules := linux_drm
+
+linux_drm_type := kernel_module
+linux_drm_target := drm.ko
+linux_drm_makefile := $(THIS_DIR)/Kbuild.mk
+++ /dev/null
-/**
- * \file ati_pcigart.c
- * ATI PCI GART support
- *
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Wed Dec 13 21:52:19 2000 by gareth@valinux.com
- *
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include "drmP.h"
-
-# define ATI_PCIGART_PAGE_SIZE 4096 /**< PCI GART page size */
-
-static int drm_ati_alloc_pcigart_table(struct drm_device *dev,
- struct drm_ati_pcigart_info *gart_info)
-{
- gart_info->table_handle = drm_pci_alloc(dev, gart_info->table_size,
- PAGE_SIZE);
- if (gart_info->table_handle == NULL)
- return -ENOMEM;
-
- return 0;
-}
-
-static void drm_ati_free_pcigart_table(struct drm_device *dev,
- struct drm_ati_pcigart_info *gart_info)
-{
- drm_pci_free(dev, gart_info->table_handle);
- gart_info->table_handle = NULL;
-}
-
-int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
-{
- struct drm_sg_mem *entry = dev->sg;
- unsigned long pages;
- int i;
- int max_pages;
-
- /* we need to support large memory configurations */
- if (!entry) {
- DRM_ERROR("no scatter/gather memory!\n");
- return 0;
- }
-
- if (gart_info->bus_addr) {
-
- max_pages = (gart_info->table_size / sizeof(u32));
- pages = (entry->pages <= max_pages)
- ? entry->pages : max_pages;
-
- for (i = 0; i < pages; i++) {
- if (!entry->busaddr[i])
- break;
- pci_unmap_page(dev->pdev, entry->busaddr[i],
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- }
-
- if (gart_info->gart_table_location == DRM_ATI_GART_MAIN)
- gart_info->bus_addr = 0;
- }
-
- if (gart_info->gart_table_location == DRM_ATI_GART_MAIN &&
- gart_info->table_handle) {
- drm_ati_free_pcigart_table(dev, gart_info);
- }
-
- return 1;
-}
-EXPORT_SYMBOL(drm_ati_pcigart_cleanup);
-
-int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
-{
- struct drm_local_map *map = &gart_info->mapping;
- struct drm_sg_mem *entry = dev->sg;
- void *address = NULL;
- unsigned long pages;
- u32 *pci_gart = NULL, page_base, gart_idx;
- dma_addr_t bus_address = 0;
- int i, j, ret = 0;
- int max_ati_pages, max_real_pages;
-
- if (!entry) {
- DRM_ERROR("no scatter/gather memory!\n");
- goto done;
- }
-
- if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
- DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n");
-
- if (pci_set_dma_mask(dev->pdev, gart_info->table_mask)) {
- DRM_ERROR("fail to set dma mask to 0x%Lx\n",
- (unsigned long long)gart_info->table_mask);
- ret = 1;
- goto done;
- }
-
- ret = drm_ati_alloc_pcigart_table(dev, gart_info);
- if (ret) {
- DRM_ERROR("cannot allocate PCI GART page!\n");
- goto done;
- }
-
- pci_gart = gart_info->table_handle->vaddr;
- address = gart_info->table_handle->vaddr;
- bus_address = gart_info->table_handle->busaddr;
- } else {
- address = gart_info->addr;
- bus_address = gart_info->bus_addr;
- DRM_DEBUG("PCI: Gart Table: VRAM %08LX mapped at %08lX\n",
- (unsigned long long)bus_address,
- (unsigned long)address);
- }
-
-
- max_ati_pages = (gart_info->table_size / sizeof(u32));
- max_real_pages = max_ati_pages / (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE);
- pages = (entry->pages <= max_real_pages)
- ? entry->pages : max_real_pages;
-
- if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
- memset(pci_gart, 0, max_ati_pages * sizeof(u32));
- } else {
- memset_io((void __iomem *)map->handle, 0, max_ati_pages * sizeof(u32));
- }
-
- gart_idx = 0;
- for (i = 0; i < pages; i++) {
- /* we need to support large memory configurations */
- entry->busaddr[i] = pci_map_page(dev->pdev, entry->pagelist[i],
- 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(dev->pdev, entry->busaddr[i])) {
- DRM_ERROR("unable to map PCIGART pages!\n");
- drm_ati_pcigart_cleanup(dev, gart_info);
- address = NULL;
- bus_address = 0;
- goto done;
- }
- page_base = (u32) entry->busaddr[i];
-
- for (j = 0; j < (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); j++) {
- u32 val;
-
- switch(gart_info->gart_reg_if) {
- case DRM_ATI_GART_IGP:
- val = page_base | 0xc;
- break;
- case DRM_ATI_GART_PCIE:
- val = (page_base >> 8) | 0xc;
- break;
- default:
- case DRM_ATI_GART_PCI:
- val = page_base;
- break;
- }
- if (gart_info->gart_table_location ==
- DRM_ATI_GART_MAIN)
- pci_gart[gart_idx] = cpu_to_le32(val);
- else
- DRM_WRITE32(map, gart_idx * sizeof(u32), val);
- gart_idx++;
- page_base += ATI_PCIGART_PAGE_SIZE;
- }
- }
- ret = 1;
-
-#if defined(__i386__) || defined(__x86_64__)
- wbinvd();
-#else
- mb();
-#endif
-
- done:
- gart_info->addr = address;
- gart_info->bus_addr = bus_address;
- return ret;
-}
-EXPORT_SYMBOL(drm_ati_pcigart_init);
+++ /dev/null
-/**
- * \file drm_agpsupport.c
- * DRM support for AGP/GART backend
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "drmP.h"
-#include <linux/module.h>
-#include <linux/slab.h>
-
-#if __OS_HAS_AGP
-
-#include <asm/agp.h>
-
-/**
- * Get AGP information.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a (output) drm_agp_info structure.
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device has been initialized and acquired and fills in the
- * drm_agp_info structure with the information in drm_agp_head::agp_info.
- */
-int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info)
-{
- DRM_AGP_KERN *kern;
-
- if (!dev->agp || !dev->agp->acquired)
- return -EINVAL;
-
- kern = &dev->agp->agp_info;
- info->agp_version_major = kern->version.major;
- info->agp_version_minor = kern->version.minor;
- info->mode = kern->mode;
- info->aperture_base = kern->aper_base;
- info->aperture_size = kern->aper_size * 1024 * 1024;
- info->memory_allowed = kern->max_memory << PAGE_SHIFT;
- info->memory_used = kern->current_memory << PAGE_SHIFT;
- info->id_vendor = kern->device->vendor;
- info->id_device = kern->device->device;
-
- return 0;
-}
-
-EXPORT_SYMBOL(drm_agp_info);
-
-int drm_agp_info_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_agp_info *info = data;
- int err;
-
- err = drm_agp_info(dev, info);
- if (err)
- return err;
-
- return 0;
-}
-
-/**
- * Acquire the AGP device.
- *
- * \param dev DRM device that is to acquire AGP.
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device hasn't been acquired before and calls
- * \c agp_backend_acquire.
- */
-int drm_agp_acquire(struct drm_device * dev)
-{
- if (!dev->agp)
- return -ENODEV;
- if (dev->agp->acquired)
- return -EBUSY;
- if (!(dev->agp->bridge = agp_backend_acquire(dev->pdev)))
- return -ENODEV;
- dev->agp->acquired = 1;
- return 0;
-}
-
-EXPORT_SYMBOL(drm_agp_acquire);
-
-/**
- * Acquire the AGP device (ioctl).
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument.
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device hasn't been acquired before and calls
- * \c agp_backend_acquire.
- */
-int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- return drm_agp_acquire((struct drm_device *) file_priv->minor->dev);
-}
-
-/**
- * Release the AGP device.
- *
- * \param dev DRM device that is to release AGP.
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device has been acquired and calls \c agp_backend_release.
- */
-int drm_agp_release(struct drm_device * dev)
-{
- if (!dev->agp || !dev->agp->acquired)
- return -EINVAL;
- agp_backend_release(dev->agp->bridge);
- dev->agp->acquired = 0;
- return 0;
-}
-EXPORT_SYMBOL(drm_agp_release);
-
-int drm_agp_release_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- return drm_agp_release(dev);
-}
-
-/**
- * Enable the AGP bus.
- *
- * \param dev DRM device that has previously acquired AGP.
- * \param mode Requested AGP mode.
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device has been acquired but not enabled, and calls
- * \c agp_enable.
- */
-int drm_agp_enable(struct drm_device * dev, struct drm_agp_mode mode)
-{
- if (!dev->agp || !dev->agp->acquired)
- return -EINVAL;
-
- dev->agp->mode = mode.mode;
- agp_enable(dev->agp->bridge, mode.mode);
- dev->agp->enabled = 1;
- return 0;
-}
-
-EXPORT_SYMBOL(drm_agp_enable);
-
-int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_agp_mode *mode = data;
-
- return drm_agp_enable(dev, *mode);
-}
-
-/**
- * Allocate AGP memory.
- *
- * \param inode device inode.
- * \param file_priv file private pointer.
- * \param cmd command.
- * \param arg pointer to a drm_agp_buffer structure.
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device is present and has been acquired, allocates the
- * memory via agp_allocate_memory() and creates a drm_agp_mem entry for it.
- */
-int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
-{
- struct drm_agp_mem *entry;
- DRM_AGP_MEM *memory;
- unsigned long pages;
- u32 type;
-
- if (!dev->agp || !dev->agp->acquired)
- return -EINVAL;
- if (!(entry = kmalloc(sizeof(*entry), GFP_KERNEL)))
- return -ENOMEM;
-
- memset(entry, 0, sizeof(*entry));
-
- pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
- type = (u32) request->type;
- if (!(memory = agp_allocate_memory(dev->agp->bridge, pages, type))) {
- kfree(entry);
- return -ENOMEM;
- }
-
- entry->handle = (unsigned long)memory->key + 1;
- entry->memory = memory;
- entry->bound = 0;
- entry->pages = pages;
- list_add(&entry->head, &dev->agp->memory);
-
- request->handle = entry->handle;
- request->physical = memory->physical;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_agp_alloc);
-
-
-int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_agp_buffer *request = data;
-
- return drm_agp_alloc(dev, request);
-}
-
-/**
- * Search for the AGP memory entry associated with a handle.
- *
- * \param dev DRM device structure.
- * \param handle AGP memory handle.
- * \return pointer to the drm_agp_mem structure associated with \p handle.
- *
- * Walks through drm_agp_head::memory until finding a matching handle.
- */
-static struct drm_agp_mem *drm_agp_lookup_entry(struct drm_device * dev,
- unsigned long handle)
-{
- struct drm_agp_mem *entry;
-
- list_for_each_entry(entry, &dev->agp->memory, head) {
- if (entry->handle == handle)
- return entry;
- }
- return NULL;
-}
-
-/**
- * Unbind AGP memory from the GATT (ioctl).
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a drm_agp_binding structure.
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device is present and acquired, looks-up the AGP memory
- * entry and passes it to the unbind_agp() function.
- */
-int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request)
-{
- struct drm_agp_mem *entry;
- int ret;
-
- if (!dev->agp || !dev->agp->acquired)
- return -EINVAL;
- if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
- return -EINVAL;
- if (!entry->bound)
- return -EINVAL;
- ret = drm_unbind_agp(entry->memory);
- if (ret == 0)
- entry->bound = 0;
- return ret;
-}
-EXPORT_SYMBOL(drm_agp_unbind);
-
-
-int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_agp_binding *request = data;
-
- return drm_agp_unbind(dev, request);
-}
-
-/**
- * Bind AGP memory into the GATT (ioctl)
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a drm_agp_binding structure.
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device is present and has been acquired and that no memory
- * is currently bound into the GATT. Looks-up the AGP memory entry and passes
- * it to bind_agp() function.
- */
-int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request)
-{
- struct drm_agp_mem *entry;
- int retcode;
- int page;
-
- if (!dev->agp || !dev->agp->acquired)
- return -EINVAL;
- if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
- return -EINVAL;
- if (entry->bound)
- return -EINVAL;
- page = (request->offset + PAGE_SIZE - 1) / PAGE_SIZE;
- if ((retcode = drm_bind_agp(entry->memory, page)))
- return retcode;
- entry->bound = dev->agp->base + (page << PAGE_SHIFT);
- DRM_DEBUG("base = 0x%lx entry->bound = 0x%lx\n",
- dev->agp->base, entry->bound);
- return 0;
-}
-EXPORT_SYMBOL(drm_agp_bind);
-
-
-int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_agp_binding *request = data;
-
- return drm_agp_bind(dev, request);
-}
-
-/**
- * Free AGP memory (ioctl).
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a drm_agp_buffer structure.
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device is present and has been acquired and looks up the
- * AGP memory entry. If the memory it's currently bound, unbind it via
- * unbind_agp(). Frees it via free_agp() as well as the entry itself
- * and unlinks from the doubly linked list it's inserted in.
- */
-int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request)
-{
- struct drm_agp_mem *entry;
-
- if (!dev->agp || !dev->agp->acquired)
- return -EINVAL;
- if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
- return -EINVAL;
- if (entry->bound)
- drm_unbind_agp(entry->memory);
-
- list_del(&entry->head);
-
- drm_free_agp(entry->memory, entry->pages);
- kfree(entry);
- return 0;
-}
-EXPORT_SYMBOL(drm_agp_free);
-
-
-
-int drm_agp_free_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_agp_buffer *request = data;
-
- return drm_agp_free(dev, request);
-}
-
-/**
- * Initialize the AGP resources.
- *
- * \return pointer to a drm_agp_head structure.
- *
- * Gets the drm_agp_t structure which is made available by the agpgart module
- * via the inter_module_* functions. Creates and initializes a drm_agp_head
- * structure.
- */
-struct drm_agp_head *drm_agp_init(struct drm_device *dev)
-{
- struct drm_agp_head *head = NULL;
-
- if (!(head = kmalloc(sizeof(*head), GFP_KERNEL)))
- return NULL;
- memset((void *)head, 0, sizeof(*head));
- head->bridge = agp_find_bridge(dev->pdev);
- if (!head->bridge) {
- if (!(head->bridge = agp_backend_acquire(dev->pdev))) {
- kfree(head);
- return NULL;
- }
- agp_copy_info(head->bridge, &head->agp_info);
- agp_backend_release(head->bridge);
- } else {
- agp_copy_info(head->bridge, &head->agp_info);
- }
- if (head->agp_info.chipset == NOT_SUPPORTED) {
- kfree(head);
- return NULL;
- }
- INIT_LIST_HEAD(&head->memory);
- head->cant_use_aperture = head->agp_info.cant_use_aperture;
- head->page_mask = head->agp_info.page_mask;
- head->base = head->agp_info.aper_base;
- return head;
-}
-
-/**
- * Binds a collection of pages into AGP memory at the given offset, returning
- * the AGP memory structure containing them.
- *
- * No reference is held on the pages during this time -- it is up to the
- * caller to handle that.
- */
-DRM_AGP_MEM *
-drm_agp_bind_pages(struct drm_device *dev,
- struct page **pages,
- unsigned long num_pages,
- uint32_t gtt_offset,
- u32 type)
-{
- DRM_AGP_MEM *mem;
- int ret, i;
-
- DRM_DEBUG("\n");
-
- mem = agp_allocate_memory(dev->agp->bridge, num_pages,
- type);
- if (mem == NULL) {
- DRM_ERROR("Failed to allocate memory for %ld pages\n",
- num_pages);
- return NULL;
- }
-
- for (i = 0; i < num_pages; i++)
- mem->pages[i] = pages[i];
- mem->page_count = num_pages;
-
- mem->is_flushed = true;
- ret = agp_bind_memory(mem, gtt_offset / PAGE_SIZE);
- if (ret != 0) {
- DRM_ERROR("Failed to bind AGP memory: %d\n", ret);
- agp_free_memory(mem);
- return NULL;
- }
-
- return mem;
-}
-EXPORT_SYMBOL(drm_agp_bind_pages);
-
-void drm_agp_chipset_flush(struct drm_device *dev)
-{
- agp_flush_chipset(dev->agp->bridge);
-}
-EXPORT_SYMBOL(drm_agp_chipset_flush);
-
-#endif /* __OS_HAS_AGP */
+++ /dev/null
-/**
- * \file drm_auth.c
- * IOCTLs for authentication
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "drmP.h"
-
-/**
- * Find the file with the given magic number.
- *
- * \param dev DRM device.
- * \param magic magic number.
- *
- * Searches in drm_device::magiclist within all files with the same hash key
- * the one with matching magic number, while holding the drm_device::struct_mutex
- * lock.
- */
-static struct drm_file *drm_find_file(struct drm_master *master, drm_magic_t magic)
-{
- struct drm_file *retval = NULL;
- struct drm_magic_entry *pt;
- struct drm_hash_item *hash;
- struct drm_device *dev = master->minor->dev;
-
- mutex_lock(&dev->struct_mutex);
- if (!drm_ht_find_item(&master->magiclist, (unsigned long)magic, &hash)) {
- pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item);
- retval = pt->priv;
- }
- mutex_unlock(&dev->struct_mutex);
- return retval;
-}
-
-/**
- * Adds a magic number.
- *
- * \param dev DRM device.
- * \param priv file private data.
- * \param magic magic number.
- *
- * Creates a drm_magic_entry structure and appends to the linked list
- * associated the magic number hash key in drm_device::magiclist, while holding
- * the drm_device::struct_mutex lock.
- */
-static int drm_add_magic(struct drm_master *master, struct drm_file *priv,
- drm_magic_t magic)
-{
- struct drm_magic_entry *entry;
- struct drm_device *dev = master->minor->dev;
- DRM_DEBUG("%d\n", magic);
-
- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- return -ENOMEM;
- entry->priv = priv;
- entry->hash_item.key = (unsigned long)magic;
- mutex_lock(&dev->struct_mutex);
- drm_ht_insert_item(&master->magiclist, &entry->hash_item);
- list_add_tail(&entry->head, &master->magicfree);
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
-/**
- * Remove a magic number.
- *
- * \param dev DRM device.
- * \param magic magic number.
- *
- * Searches and unlinks the entry in drm_device::magiclist with the magic
- * number hash key, while holding the drm_device::struct_mutex lock.
- */
-static int drm_remove_magic(struct drm_master *master, drm_magic_t magic)
-{
- struct drm_magic_entry *pt;
- struct drm_hash_item *hash;
- struct drm_device *dev = master->minor->dev;
-
- DRM_DEBUG("%d\n", magic);
-
- mutex_lock(&dev->struct_mutex);
- if (drm_ht_find_item(&master->magiclist, (unsigned long)magic, &hash)) {
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
- }
- pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item);
- drm_ht_remove_item(&master->magiclist, hash);
- list_del(&pt->head);
- mutex_unlock(&dev->struct_mutex);
-
- kfree(pt);
-
- return 0;
-}
-
-/**
- * Get a unique magic number (ioctl).
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a resulting drm_auth structure.
- * \return zero on success, or a negative number on failure.
- *
- * If there is a magic number in drm_file::magic then use it, otherwise
- * searches an unique non-zero magic number and add it associating it with \p
- * file_priv.
- */
-int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- static drm_magic_t sequence = 0;
- static DEFINE_SPINLOCK(lock);
- struct drm_auth *auth = data;
-
- /* Find unique magic */
- if (file_priv->magic) {
- auth->magic = file_priv->magic;
- } else {
- do {
- spin_lock(&lock);
- if (!sequence)
- ++sequence; /* reserve 0 */
- auth->magic = sequence++;
- spin_unlock(&lock);
- } while (drm_find_file(file_priv->master, auth->magic));
- file_priv->magic = auth->magic;
- drm_add_magic(file_priv->master, file_priv, auth->magic);
- }
-
- DRM_DEBUG("%u\n", auth->magic);
-
- return 0;
-}
-
-/**
- * Authenticate with a magic.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a drm_auth structure.
- * \return zero if authentication successed, or a negative number otherwise.
- *
- * Checks if \p file_priv is associated with the magic number passed in \arg.
- */
-int drm_authmagic(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_auth *auth = data;
- struct drm_file *file;
-
- DRM_DEBUG("%u\n", auth->magic);
- if ((file = drm_find_file(file_priv->master, auth->magic))) {
- file->authenticated = 1;
- drm_remove_magic(file_priv->master, auth->magic);
- return 0;
- }
- return -EINVAL;
-}
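
For orientation, the two ioctls above implement the classic DRI authentication handshake between an unprivileged client and the DRM master. The user-space sketch below is only an illustration of that flow; it assumes the struct drm_auth, DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC definitions that normally ship in the kernel's drm.h header, and the error handling is deliberately minimal.

#include <sys/ioctl.h>
#include <drm/drm.h>	/* struct drm_auth, DRM_IOCTL_GET_MAGIC/AUTH_MAGIC */

/* Unprivileged client: ask drm_getmagic() for a token to pass on. */
static int client_get_magic(int client_fd, drm_magic_t *magic)
{
	struct drm_auth auth = { 0 };

	if (ioctl(client_fd, DRM_IOCTL_GET_MAGIC, &auth) < 0)
		return -1;
	*magic = auth.magic;
	return 0;
}

/* DRM master (e.g. the X server): drm_authmagic() marks the matching
 * drm_file as authenticated and drops the magic from the hash table. */
static int master_auth_client(int master_fd, drm_magic_t magic)
{
	struct drm_auth auth = { .magic = magic };

	return ioctl(master_fd, DRM_IOCTL_AUTH_MAGIC, &auth);
}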
+++ /dev/null
-/**************************************************************************
- *
- * Copyright 2010 Pauli Nieminen.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- *
- **************************************************************************/
-/*
- * Multipart buffer for copying data which is larger than the page size.
- *
- * Authors:
- * Pauli Nieminen <suokkos-at-gmail-dot-com>
- */
-
-#include "drm_buffer.h"
-
-/**
- * Allocate the drm buffer object.
- *
- * buf: Pointer to a pointer where the object is stored.
- * size: The number of bytes to allocate.
- */
-int drm_buffer_alloc(struct drm_buffer **buf, int size)
-{
- int nr_pages = size / PAGE_SIZE + 1;
- int idx;
-
- /* Allocating pointer table to end of structure makes drm_buffer
- * variable sized */
- *buf = kzalloc(sizeof(struct drm_buffer) + nr_pages*sizeof(char *),
- GFP_KERNEL);
-
- if (*buf == NULL) {
- DRM_ERROR("Failed to allocate drm buffer object to hold"
- " %d bytes in %d pages.\n",
- size, nr_pages);
- return -ENOMEM;
- }
-
- (*buf)->size = size;
-
- for (idx = 0; idx < nr_pages; ++idx) {
-
- (*buf)->data[idx] =
- kmalloc(min(PAGE_SIZE, size - idx * PAGE_SIZE),
- GFP_KERNEL);
-
-
- if ((*buf)->data[idx] == NULL) {
- DRM_ERROR("Failed to allocate %dth page for drm"
- " buffer with %d bytes and %d pages.\n",
- idx + 1, size, nr_pages);
- goto error_out;
- }
-
- }
-
- return 0;
-
-error_out:
-
- /* Only the last element can be a null pointer, so check for it first. */
- if ((*buf)->data[idx])
- kfree((*buf)->data[idx]);
-
- for (--idx; idx >= 0; --idx)
- kfree((*buf)->data[idx]);
-
- kfree(*buf);
- return -ENOMEM;
-}
-EXPORT_SYMBOL(drm_buffer_alloc);
-
-/**
- * Copy the user data to the beginning of the buffer and reset the processing
- * iterator.
- *
- * user_data: A pointer to the data that is copied to the buffer.
- * size: The number of bytes to copy.
- */
-int drm_buffer_copy_from_user(struct drm_buffer *buf,
- void __user *user_data, int size)
-{
- int nr_pages = size / PAGE_SIZE + 1;
- int idx;
-
- if (size > buf->size) {
- DRM_ERROR("Requesting to copy %d bytes to a drm buffer with"
- " %d bytes space\n",
- size, buf->size);
- return -EFAULT;
- }
-
- for (idx = 0; idx < nr_pages; ++idx) {
-
- if (DRM_COPY_FROM_USER(buf->data[idx],
- user_data + idx * PAGE_SIZE,
- min(PAGE_SIZE, size - idx * PAGE_SIZE))) {
- DRM_ERROR("Failed to copy user data (%p) to drm buffer"
- " (%p) %dth page.\n",
- user_data, buf, idx);
- return -EFAULT;
-
- }
- }
- buf->iterator = 0;
- return 0;
-}
-EXPORT_SYMBOL(drm_buffer_copy_from_user);
-
-/**
- * Free the drm buffer object
- */
-void drm_buffer_free(struct drm_buffer *buf)
-{
-
- if (buf != NULL) {
-
- int nr_pages = buf->size / PAGE_SIZE + 1;
- int idx;
- for (idx = 0; idx < nr_pages; ++idx)
- kfree(buf->data[idx]);
-
- kfree(buf);
- }
-}
-EXPORT_SYMBOL(drm_buffer_free);
-
-/**
- * Read an object from a buffer that may be split into multiple parts. If the
- * object is not split, the function just returns a pointer to the object in
- * the buffer. If the object is split, its data is copied into the stack
- * object supplied by the caller.
- *
- * The processing location of the buffer is also advanced to the next byte
- * after the object.
- *
- * objsize: The size of the object in bytes.
- * stack_obj: A pointer to a memory location where object can be copied.
- */
-void *drm_buffer_read_object(struct drm_buffer *buf,
- int objsize, void *stack_obj)
-{
- int idx = drm_buffer_index(buf);
- int page = drm_buffer_page(buf);
- void *obj = NULL;
-
- if (idx + objsize <= PAGE_SIZE) {
- obj = &buf->data[page][idx];
- } else {
- /* The object is split, which forces a copy to the temporary object. */
- int beginsz = PAGE_SIZE - idx;
- memcpy(stack_obj, &buf->data[page][idx], beginsz);
-
- memcpy(stack_obj + beginsz, &buf->data[page + 1][0],
- objsize - beginsz);
-
- obj = stack_obj;
- }
-
- drm_buffer_advance(buf, objsize);
- return obj;
-}
-EXPORT_SYMBOL(drm_buffer_read_object);
+++ /dev/null
-/**
- * \file drm_bufs.c
- * Generic buffer template
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
- *
- * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
-#include <linux/log2.h>
-#include <asm/shmparam.h>
-#include "drmP.h"
-
-static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
- struct drm_local_map *map)
-{
- struct drm_map_list *entry;
- list_for_each_entry(entry, &dev->maplist, head) {
- /*
- * Because the kernel-userspace ABI is fixed at a 32-bit offset
- * while PCI resources may live above that, we ignore the map
- * offset for maps of type _DRM_FRAMEBUFFER or _DRM_REGISTERS.
- * It is assumed that each driver will have only one resource of
- * each type.
- */
- if (!entry->map ||
- map->type != entry->map->type ||
- entry->master != dev->primary->master)
- continue;
- switch (map->type) {
- case _DRM_SHM:
- if (map->flags != _DRM_CONTAINS_LOCK)
- break;
- case _DRM_REGISTERS:
- case _DRM_FRAME_BUFFER:
- return entry;
- default: /* Make gcc happy */
- ;
- }
- if (entry->map->offset == map->offset)
- return entry;
- }
-
- return NULL;
-}
-
-static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
- unsigned long user_token, int hashed_handle, int shm)
-{
- int use_hashed_handle, shift;
- unsigned long add;
-
-#if (BITS_PER_LONG == 64)
- use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
-#elif (BITS_PER_LONG == 32)
- use_hashed_handle = hashed_handle;
-#else
-#error Unsupported long size. Neither 64 nor 32 bits.
-#endif
-
- if (!use_hashed_handle) {
- int ret;
- hash->key = user_token >> PAGE_SHIFT;
- ret = drm_ht_insert_item(&dev->map_hash, hash);
- if (ret != -EINVAL)
- return ret;
- }
-
- shift = 0;
- add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
- if (shm && (SHMLBA > PAGE_SIZE)) {
- int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;
-
- /* For shared memory, we have to preserve the SHMLBA
- * bits of the eventual vma->vm_pgoff value during
- * mmap(). Otherwise we run into cache aliasing problems
- * on some platforms. On these platforms, the pgoff of
- * a mmap() request is used to pick a suitable virtual
- * address for the mmap() region such that it will not
- * cause cache aliasing problems.
- *
- * Therefore, make sure the SHMLBA relevant bits of the
- * hash value we use are equal to those in the original
- * kernel virtual address.
- */
- shift = bits;
- add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
- }
-
- return drm_ht_just_insert_please(&dev->map_hash, hash,
- user_token, 32 - PAGE_SHIFT - 3,
- shift, add);
-}
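
To make the SHMLBA reasoning above concrete, here is a small worked example with purely illustrative values, assuming PAGE_SHIFT == 12 and SHMLBA == 16 KiB (as on some ARM and MIPS systems):

/* Illustrative numbers only, not taken from any particular platform:
 *   SHMLBA >> PAGE_SHIFT            = 4
 *   bits  = ilog2(4) + 1            = 3
 *   shift = 3
 *   add   = (DRM_MAP_HASH_OFFSET >> PAGE_SHIFT)
 *           | ((user_token >> PAGE_SHIFT) & 0x7)
 * so the low three page-number bits of the generated handle match those of
 * the original kernel virtual address, and the pgoff later seen by mmap()
 * lands in the same cache colour as the kernel mapping.
 */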
-
-/**
- * Core function to create a range of memory available for mapping by a
- * non-root process.
- *
- * Adjusts the memory offset to its absolute value according to the mapping
- * type. Adds the map to the map list drm_device::maplist. Adds MTRR's where
- * applicable and if supported by the kernel.
- */
-static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
- unsigned int size, enum drm_map_type type,
- enum drm_map_flags flags,
- struct drm_map_list ** maplist)
-{
- struct drm_local_map *map;
- struct drm_map_list *list;
- drm_dma_handle_t *dmah;
- unsigned long user_token;
- int ret;
-
- map = kmalloc(sizeof(*map), GFP_KERNEL);
- if (!map)
- return -ENOMEM;
-
- map->offset = offset;
- map->size = size;
- map->flags = flags;
- map->type = type;
-
- /* Only allow shared memory to be removable since we only keep enough
- * bookkeeping information about shared memory to allow for removal
- * when processes fork.
- */
- if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
- kfree(map);
- return -EINVAL;
- }
- DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
- (unsigned long long)map->offset, map->size, map->type);
-
- /* Page-align _DRM_SHM maps. They are allocated here, so no security hole is
- * created by that, and it works around various broken drivers that use a
- * non-aligned quantity to map the SAREA. --BenH
- */
- if (map->type == _DRM_SHM)
- map->size = PAGE_ALIGN(map->size);
-
- if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
- kfree(map);
- return -EINVAL;
- }
- map->mtrr = -1;
- map->handle = NULL;
-
- switch (map->type) {
- case _DRM_REGISTERS:
- case _DRM_FRAME_BUFFER:
-#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__)
- if (map->offset + (map->size-1) < map->offset ||
- map->offset < virt_to_phys(high_memory)) {
- kfree(map);
- return -EINVAL;
- }
-#endif
-#ifdef __alpha__
- map->offset += dev->hose->mem_space->start;
-#endif
- /* Some drivers preinitialize some maps, without the X Server
- * needing to be aware of it. Therefore, we just return success
- * when the server tries to create a duplicate map.
- */
- list = drm_find_matching_map(dev, map);
- if (list != NULL) {
- if (list->map->size != map->size) {
- DRM_DEBUG("Matching maps of type %d with "
- "mismatched sizes, (%ld vs %ld)\n",
- map->type, map->size,
- list->map->size);
- list->map->size = map->size;
- }
-
- kfree(map);
- *maplist = list;
- return 0;
- }
-
- if (drm_core_has_MTRR(dev)) {
- if (map->type == _DRM_FRAME_BUFFER ||
- (map->flags & _DRM_WRITE_COMBINING)) {
- map->mtrr = mtrr_add(map->offset, map->size,
- MTRR_TYPE_WRCOMB, 1);
- }
- }
- if (map->type == _DRM_REGISTERS) {
- map->handle = ioremap(map->offset, map->size);
- if (!map->handle) {
- kfree(map);
- return -ENOMEM;
- }
- }
-
- break;
- case _DRM_SHM:
- list = drm_find_matching_map(dev, map);
- if (list != NULL) {
- if(list->map->size != map->size) {
- DRM_DEBUG("Matching maps of type %d with "
- "mismatched sizes, (%ld vs %ld)\n",
- map->type, map->size, list->map->size);
- list->map->size = map->size;
- }
-
- kfree(map);
- *maplist = list;
- return 0;
- }
- map->handle = vmalloc_user(map->size);
- DRM_DEBUG("%lu %d %p\n",
- map->size, drm_order(map->size), map->handle);
- if (!map->handle) {
- kfree(map);
- return -ENOMEM;
- }
- map->offset = (unsigned long)map->handle;
- if (map->flags & _DRM_CONTAINS_LOCK) {
- /* Prevent a 2nd X Server from creating a 2nd lock */
- if (dev->primary->master->lock.hw_lock != NULL) {
- vfree(map->handle);
- kfree(map);
- return -EBUSY;
- }
- dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle; /* Pointer to lock */
- }
- break;
- case _DRM_AGP: {
- struct drm_agp_mem *entry;
- int valid = 0;
-
- if (!drm_core_has_AGP(dev)) {
- kfree(map);
- return -EINVAL;
- }
-#ifdef __alpha__
- map->offset += dev->hose->mem_space->start;
-#endif
- /* In some cases (i810 driver), user space may have already
- * added the AGP base itself, because dev->agp->base previously
- * only got set during AGP enable. So, only add the base
- * address if the map's offset isn't already within the
- * aperture.
- */
- if (map->offset < dev->agp->base ||
- map->offset > dev->agp->base +
- dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
- map->offset += dev->agp->base;
- }
- map->mtrr = dev->agp->agp_mtrr; /* for getmap */
-
- /* This assumes the DRM is in total control of AGP space.
- * It's not always the case as AGP can be in the control
- * of user space (i.e. the i810 driver). So this loop will get
- * skipped, and we double-check that dev->agp->memory is
- * actually set, as well as the offset being invalid, before returning -EPERM.
- */
- list_for_each_entry(entry, &dev->agp->memory, head) {
- if ((map->offset >= entry->bound) &&
- (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
- valid = 1;
- break;
- }
- }
- if (!list_empty(&dev->agp->memory) && !valid) {
- kfree(map);
- return -EPERM;
- }
- DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
- (unsigned long long)map->offset, map->size);
-
- break;
- }
- case _DRM_GEM:
- DRM_ERROR("tried to addmap GEM object\n");
- break;
- case _DRM_SCATTER_GATHER:
- if (!dev->sg) {
- kfree(map);
- return -EINVAL;
- }
- map->offset += (unsigned long)dev->sg->virtual;
- break;
- case _DRM_CONSISTENT:
- /* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G,
- * As we're limiting the address to 2^32-1 (or less),
- * casting it down to 32 bits is no problem, but we
- * need to point to a 64bit variable first. */
- dmah = drm_pci_alloc(dev, map->size, map->size);
- if (!dmah) {
- kfree(map);
- return -ENOMEM;
- }
- map->handle = dmah->vaddr;
- map->offset = (unsigned long)dmah->busaddr;
- kfree(dmah);
- break;
- default:
- kfree(map);
- return -EINVAL;
- }
-
- list = kzalloc(sizeof(*list), GFP_KERNEL);
- if (!list) {
- if (map->type == _DRM_REGISTERS)
- iounmap(map->handle);
- kfree(map);
- return -EINVAL;
- }
- list->map = map;
-
- mutex_lock(&dev->struct_mutex);
- list_add(&list->head, &dev->maplist);
-
- /* Assign a 32-bit handle */
- /* We do it here so that dev->struct_mutex protects the increment */
- user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
- map->offset;
- ret = drm_map_handle(dev, &list->hash, user_token, 0,
- (map->type == _DRM_SHM));
- if (ret) {
- if (map->type == _DRM_REGISTERS)
- iounmap(map->handle);
- kfree(map);
- kfree(list);
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
-
- list->user_token = list->hash.key << PAGE_SHIFT;
- mutex_unlock(&dev->struct_mutex);
-
- if (!(map->flags & _DRM_DRIVER))
- list->master = dev->primary->master;
- *maplist = list;
- return 0;
- }
-
-int drm_addmap(struct drm_device * dev, resource_size_t offset,
- unsigned int size, enum drm_map_type type,
- enum drm_map_flags flags, struct drm_local_map ** map_ptr)
-{
- struct drm_map_list *list;
- int rc;
-
- rc = drm_addmap_core(dev, offset, size, type, flags, &list);
- if (!rc)
- *map_ptr = list->map;
- return rc;
-}
-
-EXPORT_SYMBOL(drm_addmap);
-
-/**
- * Ioctl to specify a range of memory that is available for mapping by a
- * non-root process.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a drm_map structure.
- * \return zero on success or a negative value on error.
- *
- */
-int drm_addmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_map *map = data;
- struct drm_map_list *maplist;
- int err;
-
- if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
- return -EPERM;
-
- err = drm_addmap_core(dev, map->offset, map->size, map->type,
- map->flags, &maplist);
-
- if (err)
- return err;
-
- /* avoid a warning on 64-bit, this casting isn't very nice, but the API is set so too late */
- map->handle = (void *)(unsigned long)maplist->user_token;
- return 0;
-}
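
For completeness, a rough user-space sketch of driving this ioctl through the usual libdrm wrappers follows. The register offset and size are placeholders, and drmAddMap()/drmMap() are assumed to have their customary xf86drm.h prototypes; nothing here is specific to any particular driver.

#include <xf86drm.h>

static void *example_map_registers(int fd)
{
	drm_handle_t handle;
	drmAddress regs = NULL;

	/* Placeholder bus address and 64 KiB size, purely for illustration. */
	if (drmAddMap(fd, 0xfb000000, 0x10000, DRM_REGISTERS,
		      DRM_READ_ONLY, &handle) < 0)
		return NULL;

	/* 'handle' is the user_token assigned by drm_map_handle() above and
	 * is what drmMap()/mmap() use as the file offset. */
	if (drmMap(fd, handle, 0x10000, &regs) < 0)
		return NULL;

	return regs;
}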
-
-/**
- * Remove a map private from the list and deallocate resources if the mapping
- * isn't in use.
- *
- * Searches for the map on drm_device::maplist, removes it from the list,
- * checks whether it is being used, and frees any associated resources (such
- * as MTRRs) if it is not in use.
- *
- * \sa drm_addmap
- */
-int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
-{
- struct drm_map_list *r_list = NULL, *list_t;
- drm_dma_handle_t dmah;
- int found = 0;
- struct drm_master *master;
-
- /* Find the list entry for the map and remove it */
- list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
- if (r_list->map == map) {
- master = r_list->master;
- list_del(&r_list->head);
- drm_ht_remove_key(&dev->map_hash,
- r_list->user_token >> PAGE_SHIFT);
- kfree(r_list);
- found = 1;
- break;
- }
- }
-
- if (!found)
- return -EINVAL;
-
- switch (map->type) {
- case _DRM_REGISTERS:
- iounmap(map->handle);
- /* FALLTHROUGH */
- case _DRM_FRAME_BUFFER:
- if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
- int retcode;
- retcode = mtrr_del(map->mtrr, map->offset, map->size);
- DRM_DEBUG("mtrr_del=%d\n", retcode);
- }
- break;
- case _DRM_SHM:
- vfree(map->handle);
- if (master) {
- if (dev->sigdata.lock == master->lock.hw_lock)
- dev->sigdata.lock = NULL;
- master->lock.hw_lock = NULL; /* SHM removed */
- master->lock.file_priv = NULL;
- wake_up_interruptible_all(&master->lock.lock_queue);
- }
- break;
- case _DRM_AGP:
- case _DRM_SCATTER_GATHER:
- break;
- case _DRM_CONSISTENT:
- dmah.vaddr = map->handle;
- dmah.busaddr = map->offset;
- dmah.size = map->size;
- __drm_pci_free(dev, &dmah);
- break;
- case _DRM_GEM:
- DRM_ERROR("tried to rmmap GEM object\n");
- break;
- }
- kfree(map);
-
- return 0;
-}
-EXPORT_SYMBOL(drm_rmmap_locked);
-
-int drm_rmmap(struct drm_device *dev, struct drm_local_map *map)
-{
- int ret;
-
- mutex_lock(&dev->struct_mutex);
- ret = drm_rmmap_locked(dev, map);
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
-}
-EXPORT_SYMBOL(drm_rmmap);
-
-/* The rmmap ioctl appears to be unnecessary. All mappings are torn down on
- * the last close of the device, and this is necessary for cleanup when things
- * exit uncleanly. Therefore, having userland manually remove mappings seems
- * like a pointless exercise since they're going away anyway.
- *
- * One use case might be after addmap is allowed for normal users for SHM and
- * gets used by drivers that the server doesn't need to care about. This seems
- * unlikely.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a struct drm_map structure.
- * \return zero on success or a negative value on error.
- */
-int drm_rmmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_map *request = data;
- struct drm_local_map *map = NULL;
- struct drm_map_list *r_list;
- int ret;
-
- mutex_lock(&dev->struct_mutex);
- list_for_each_entry(r_list, &dev->maplist, head) {
- if (r_list->map &&
- r_list->user_token == (unsigned long)request->handle &&
- r_list->map->flags & _DRM_REMOVABLE) {
- map = r_list->map;
- break;
- }
- }
-
- /* List has wrapped around to the head pointer, or it's empty and we didn't
- * find anything.
- */
- if (list_empty(&dev->maplist) || !map) {
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
- }
-
- /* Register and framebuffer maps are permanent */
- if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
- mutex_unlock(&dev->struct_mutex);
- return 0;
- }
-
- ret = drm_rmmap_locked(dev, map);
-
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
-}
-
-/**
- * Cleanup after an error on one of the addbufs() functions.
- *
- * \param dev DRM device.
- * \param entry buffer entry where the error occurred.
- *
- * Frees any pages and buffers associated with the given entry.
- */
-static void drm_cleanup_buf_error(struct drm_device * dev,
- struct drm_buf_entry * entry)
-{
- int i;
-
- if (entry->seg_count) {
- for (i = 0; i < entry->seg_count; i++) {
- if (entry->seglist[i]) {
- drm_pci_free(dev, entry->seglist[i]);
- }
- }
- kfree(entry->seglist);
-
- entry->seg_count = 0;
- }
-
- if (entry->buf_count) {
- for (i = 0; i < entry->buf_count; i++) {
- kfree(entry->buflist[i].dev_private);
- }
- kfree(entry->buflist);
-
- entry->buf_count = 0;
- }
-}
-
-#if __OS_HAS_AGP
-/**
- * Add AGP buffers for DMA transfers.
- *
- * \param dev struct drm_device to which the buffers are to be added.
- * \param request pointer to a struct drm_buf_desc describing the request.
- * \return zero on success or a negative number on failure.
- *
- * After some sanity checks creates a drm_buf structure for each buffer and
- * reallocates the buffer list of the same size order to accommodate the new
- * buffers.
- */
-int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
-{
- struct drm_device_dma *dma = dev->dma;
- struct drm_buf_entry *entry;
- struct drm_agp_mem *agp_entry;
- struct drm_buf *buf;
- unsigned long offset;
- unsigned long agp_offset;
- int count;
- int order;
- int size;
- int alignment;
- int page_order;
- int total;
- int byte_count;
- int i, valid;
- struct drm_buf **temp_buflist;
-
- if (!dma)
- return -EINVAL;
-
- count = request->count;
- order = drm_order(request->size);
- size = 1 << order;
-
- alignment = (request->flags & _DRM_PAGE_ALIGN)
- ? PAGE_ALIGN(size) : size;
- page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
- total = PAGE_SIZE << page_order;
-
- byte_count = 0;
- agp_offset = dev->agp->base + request->agp_start;
-
- DRM_DEBUG("count: %d\n", count);
- DRM_DEBUG("order: %d\n", order);
- DRM_DEBUG("size: %d\n", size);
- DRM_DEBUG("agp_offset: %lx\n", agp_offset);
- DRM_DEBUG("alignment: %d\n", alignment);
- DRM_DEBUG("page_order: %d\n", page_order);
- DRM_DEBUG("total: %d\n", total);
-
- if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
- return -EINVAL;
- if (dev->queue_count)
- return -EBUSY; /* Not while in use */
-
- /* Make sure buffers are located in AGP memory that we own */
- valid = 0;
- list_for_each_entry(agp_entry, &dev->agp->memory, head) {
- if ((agp_offset >= agp_entry->bound) &&
- (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
- valid = 1;
- break;
- }
- }
- if (!list_empty(&dev->agp->memory) && !valid) {
- DRM_DEBUG("zone invalid\n");
- return -EINVAL;
- }
- spin_lock(&dev->count_lock);
- if (dev->buf_use) {
- spin_unlock(&dev->count_lock);
- return -EBUSY;
- }
- atomic_inc(&dev->buf_alloc);
- spin_unlock(&dev->count_lock);
-
- mutex_lock(&dev->struct_mutex);
- entry = &dma->bufs[order];
- if (entry->buf_count) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM; /* May only call once for each order */
- }
-
- if (count < 0 || count > 4096) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -EINVAL;
- }
-
- entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
- if (!entry->buflist) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
-
- entry->buf_size = size;
- entry->page_order = page_order;
-
- offset = 0;
-
- while (entry->buf_count < count) {
- buf = &entry->buflist[entry->buf_count];
- buf->idx = dma->buf_count + entry->buf_count;
- buf->total = alignment;
- buf->order = order;
- buf->used = 0;
-
- buf->offset = (dma->byte_count + offset);
- buf->bus_address = agp_offset + offset;
- buf->address = (void *)(agp_offset + offset);
- buf->next = NULL;
- buf->waiting = 0;
- buf->pending = 0;
- init_waitqueue_head(&buf->dma_wait);
- buf->file_priv = NULL;
-
- buf->dev_priv_size = dev->driver->dev_priv_size;
- buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
- if (!buf->dev_private) {
- /* Set count correctly so we free the proper amount. */
- entry->buf_count = count;
- drm_cleanup_buf_error(dev, entry);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
-
- DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
-
- offset += alignment;
- entry->buf_count++;
- byte_count += PAGE_SIZE << page_order;
- }
-
- DRM_DEBUG("byte_count: %d\n", byte_count);
-
- temp_buflist = krealloc(dma->buflist,
- (dma->buf_count + entry->buf_count) *
- sizeof(*dma->buflist), GFP_KERNEL);
- if (!temp_buflist) {
- /* Free the entry because it isn't valid */
- drm_cleanup_buf_error(dev, entry);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
- dma->buflist = temp_buflist;
-
- for (i = 0; i < entry->buf_count; i++) {
- dma->buflist[i + dma->buf_count] = &entry->buflist[i];
- }
-
- dma->buf_count += entry->buf_count;
- dma->seg_count += entry->seg_count;
- dma->page_count += byte_count >> PAGE_SHIFT;
- dma->byte_count += byte_count;
-
- DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
- DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
-
- mutex_unlock(&dev->struct_mutex);
-
- request->count = entry->buf_count;
- request->size = size;
-
- dma->flags = _DRM_DMA_USE_AGP;
-
- atomic_dec(&dev->buf_alloc);
- return 0;
-}
-EXPORT_SYMBOL(drm_addbufs_agp);
-#endif /* __OS_HAS_AGP */
-
-int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
-{
- struct drm_device_dma *dma = dev->dma;
- int count;
- int order;
- int size;
- int total;
- int page_order;
- struct drm_buf_entry *entry;
- drm_dma_handle_t *dmah;
- struct drm_buf *buf;
- int alignment;
- unsigned long offset;
- int i;
- int byte_count;
- int page_count;
- unsigned long *temp_pagelist;
- struct drm_buf **temp_buflist;
-
- if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
- return -EINVAL;
-
- if (!dma)
- return -EINVAL;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- count = request->count;
- order = drm_order(request->size);
- size = 1 << order;
-
- DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n",
- request->count, request->size, size, order, dev->queue_count);
-
- if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
- return -EINVAL;
- if (dev->queue_count)
- return -EBUSY; /* Not while in use */
-
- alignment = (request->flags & _DRM_PAGE_ALIGN)
- ? PAGE_ALIGN(size) : size;
- page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
- total = PAGE_SIZE << page_order;
-
- spin_lock(&dev->count_lock);
- if (dev->buf_use) {
- spin_unlock(&dev->count_lock);
- return -EBUSY;
- }
- atomic_inc(&dev->buf_alloc);
- spin_unlock(&dev->count_lock);
-
- mutex_lock(&dev->struct_mutex);
- entry = &dma->bufs[order];
- if (entry->buf_count) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM; /* May only call once for each order */
- }
-
- if (count < 0 || count > 4096) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -EINVAL;
- }
-
- entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
- if (!entry->buflist) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
-
- entry->seglist = kzalloc(count * sizeof(*entry->seglist), GFP_KERNEL);
- if (!entry->seglist) {
- kfree(entry->buflist);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
-
- /* Keep the original pagelist until we know all the allocations
- * have succeeded
- */
- temp_pagelist = kmalloc((dma->page_count + (count << page_order)) *
- sizeof(*dma->pagelist), GFP_KERNEL);
- if (!temp_pagelist) {
- kfree(entry->buflist);
- kfree(entry->seglist);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
- memcpy(temp_pagelist,
- dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
- DRM_DEBUG("pagelist: %d entries\n",
- dma->page_count + (count << page_order));
-
- entry->buf_size = size;
- entry->page_order = page_order;
- byte_count = 0;
- page_count = 0;
-
- while (entry->buf_count < count) {
-
- dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000);
-
- if (!dmah) {
- /* Set count correctly so we free the proper amount. */
- entry->buf_count = count;
- entry->seg_count = count;
- drm_cleanup_buf_error(dev, entry);
- kfree(temp_pagelist);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
- entry->seglist[entry->seg_count++] = dmah;
- for (i = 0; i < (1 << page_order); i++) {
- DRM_DEBUG("page %d @ 0x%08lx\n",
- dma->page_count + page_count,
- (unsigned long)dmah->vaddr + PAGE_SIZE * i);
- temp_pagelist[dma->page_count + page_count++]
- = (unsigned long)dmah->vaddr + PAGE_SIZE * i;
- }
- for (offset = 0;
- offset + size <= total && entry->buf_count < count;
- offset += alignment, ++entry->buf_count) {
- buf = &entry->buflist[entry->buf_count];
- buf->idx = dma->buf_count + entry->buf_count;
- buf->total = alignment;
- buf->order = order;
- buf->used = 0;
- buf->offset = (dma->byte_count + byte_count + offset);
- buf->address = (void *)(dmah->vaddr + offset);
- buf->bus_address = dmah->busaddr + offset;
- buf->next = NULL;
- buf->waiting = 0;
- buf->pending = 0;
- init_waitqueue_head(&buf->dma_wait);
- buf->file_priv = NULL;
-
- buf->dev_priv_size = dev->driver->dev_priv_size;
- buf->dev_private = kzalloc(buf->dev_priv_size,
- GFP_KERNEL);
- if (!buf->dev_private) {
- /* Set count correctly so we free the proper amount. */
- entry->buf_count = count;
- entry->seg_count = count;
- drm_cleanup_buf_error(dev, entry);
- kfree(temp_pagelist);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
-
- DRM_DEBUG("buffer %d @ %p\n",
- entry->buf_count, buf->address);
- }
- byte_count += PAGE_SIZE << page_order;
- }
-
- temp_buflist = krealloc(dma->buflist,
- (dma->buf_count + entry->buf_count) *
- sizeof(*dma->buflist), GFP_KERNEL);
- if (!temp_buflist) {
- /* Free the entry because it isn't valid */
- drm_cleanup_buf_error(dev, entry);
- kfree(temp_pagelist);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
- dma->buflist = temp_buflist;
-
- for (i = 0; i < entry->buf_count; i++) {
- dma->buflist[i + dma->buf_count] = &entry->buflist[i];
- }
-
- /* No allocations failed, so now we can replace the original pagelist
- * with the new one.
- */
- if (dma->page_count) {
- kfree(dma->pagelist);
- }
- dma->pagelist = temp_pagelist;
-
- dma->buf_count += entry->buf_count;
- dma->seg_count += entry->seg_count;
- dma->page_count += entry->seg_count << page_order;
- dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
-
- mutex_unlock(&dev->struct_mutex);
-
- request->count = entry->buf_count;
- request->size = size;
-
- if (request->flags & _DRM_PCI_BUFFER_RO)
- dma->flags = _DRM_DMA_USE_PCI_RO;
-
- atomic_dec(&dev->buf_alloc);
- return 0;
-
-}
-EXPORT_SYMBOL(drm_addbufs_pci);
-
-static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request)
-{
- struct drm_device_dma *dma = dev->dma;
- struct drm_buf_entry *entry;
- struct drm_buf *buf;
- unsigned long offset;
- unsigned long agp_offset;
- int count;
- int order;
- int size;
- int alignment;
- int page_order;
- int total;
- int byte_count;
- int i;
- struct drm_buf **temp_buflist;
-
- if (!drm_core_check_feature(dev, DRIVER_SG))
- return -EINVAL;
-
- if (!dma)
- return -EINVAL;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- count = request->count;
- order = drm_order(request->size);
- size = 1 << order;
-
- alignment = (request->flags & _DRM_PAGE_ALIGN)
- ? PAGE_ALIGN(size) : size;
- page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
- total = PAGE_SIZE << page_order;
-
- byte_count = 0;
- agp_offset = request->agp_start;
-
- DRM_DEBUG("count: %d\n", count);
- DRM_DEBUG("order: %d\n", order);
- DRM_DEBUG("size: %d\n", size);
- DRM_DEBUG("agp_offset: %lu\n", agp_offset);
- DRM_DEBUG("alignment: %d\n", alignment);
- DRM_DEBUG("page_order: %d\n", page_order);
- DRM_DEBUG("total: %d\n", total);
-
- if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
- return -EINVAL;
- if (dev->queue_count)
- return -EBUSY; /* Not while in use */
-
- spin_lock(&dev->count_lock);
- if (dev->buf_use) {
- spin_unlock(&dev->count_lock);
- return -EBUSY;
- }
- atomic_inc(&dev->buf_alloc);
- spin_unlock(&dev->count_lock);
-
- mutex_lock(&dev->struct_mutex);
- entry = &dma->bufs[order];
- if (entry->buf_count) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM; /* May only call once for each order */
- }
-
- if (count < 0 || count > 4096) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -EINVAL;
- }
-
- entry->buflist = kzalloc(count * sizeof(*entry->buflist),
- GFP_KERNEL);
- if (!entry->buflist) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
-
- entry->buf_size = size;
- entry->page_order = page_order;
-
- offset = 0;
-
- while (entry->buf_count < count) {
- buf = &entry->buflist[entry->buf_count];
- buf->idx = dma->buf_count + entry->buf_count;
- buf->total = alignment;
- buf->order = order;
- buf->used = 0;
-
- buf->offset = (dma->byte_count + offset);
- buf->bus_address = agp_offset + offset;
- buf->address = (void *)(agp_offset + offset
- + (unsigned long)dev->sg->virtual);
- buf->next = NULL;
- buf->waiting = 0;
- buf->pending = 0;
- init_waitqueue_head(&buf->dma_wait);
- buf->file_priv = NULL;
-
- buf->dev_priv_size = dev->driver->dev_priv_size;
- buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
- if (!buf->dev_private) {
- /* Set count correctly so we free the proper amount. */
- entry->buf_count = count;
- drm_cleanup_buf_error(dev, entry);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
-
- DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
-
- offset += alignment;
- entry->buf_count++;
- byte_count += PAGE_SIZE << page_order;
- }
-
- DRM_DEBUG("byte_count: %d\n", byte_count);
-
- temp_buflist = krealloc(dma->buflist,
- (dma->buf_count + entry->buf_count) *
- sizeof(*dma->buflist), GFP_KERNEL);
- if (!temp_buflist) {
- /* Free the entry because it isn't valid */
- drm_cleanup_buf_error(dev, entry);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
- dma->buflist = temp_buflist;
-
- for (i = 0; i < entry->buf_count; i++) {
- dma->buflist[i + dma->buf_count] = &entry->buflist[i];
- }
-
- dma->buf_count += entry->buf_count;
- dma->seg_count += entry->seg_count;
- dma->page_count += byte_count >> PAGE_SHIFT;
- dma->byte_count += byte_count;
-
- DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
- DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
-
- mutex_unlock(&dev->struct_mutex);
-
- request->count = entry->buf_count;
- request->size = size;
-
- dma->flags = _DRM_DMA_USE_SG;
-
- atomic_dec(&dev->buf_alloc);
- return 0;
-}
-
-static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request)
-{
- struct drm_device_dma *dma = dev->dma;
- struct drm_buf_entry *entry;
- struct drm_buf *buf;
- unsigned long offset;
- unsigned long agp_offset;
- int count;
- int order;
- int size;
- int alignment;
- int page_order;
- int total;
- int byte_count;
- int i;
- struct drm_buf **temp_buflist;
-
- if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
- return -EINVAL;
-
- if (!dma)
- return -EINVAL;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- count = request->count;
- order = drm_order(request->size);
- size = 1 << order;
-
- alignment = (request->flags & _DRM_PAGE_ALIGN)
- ? PAGE_ALIGN(size) : size;
- page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
- total = PAGE_SIZE << page_order;
-
- byte_count = 0;
- agp_offset = request->agp_start;
-
- DRM_DEBUG("count: %d\n", count);
- DRM_DEBUG("order: %d\n", order);
- DRM_DEBUG("size: %d\n", size);
- DRM_DEBUG("agp_offset: %lu\n", agp_offset);
- DRM_DEBUG("alignment: %d\n", alignment);
- DRM_DEBUG("page_order: %d\n", page_order);
- DRM_DEBUG("total: %d\n", total);
-
- if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
- return -EINVAL;
- if (dev->queue_count)
- return -EBUSY; /* Not while in use */
-
- spin_lock(&dev->count_lock);
- if (dev->buf_use) {
- spin_unlock(&dev->count_lock);
- return -EBUSY;
- }
- atomic_inc(&dev->buf_alloc);
- spin_unlock(&dev->count_lock);
-
- mutex_lock(&dev->struct_mutex);
- entry = &dma->bufs[order];
- if (entry->buf_count) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM; /* May only call once for each order */
- }
-
- if (count < 0 || count > 4096) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -EINVAL;
- }
-
- entry->buflist = kzalloc(count * sizeof(*entry->buflist),
- GFP_KERNEL);
- if (!entry->buflist) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
-
- entry->buf_size = size;
- entry->page_order = page_order;
-
- offset = 0;
-
- while (entry->buf_count < count) {
- buf = &entry->buflist[entry->buf_count];
- buf->idx = dma->buf_count + entry->buf_count;
- buf->total = alignment;
- buf->order = order;
- buf->used = 0;
-
- buf->offset = (dma->byte_count + offset);
- buf->bus_address = agp_offset + offset;
- buf->address = (void *)(agp_offset + offset);
- buf->next = NULL;
- buf->waiting = 0;
- buf->pending = 0;
- init_waitqueue_head(&buf->dma_wait);
- buf->file_priv = NULL;
-
- buf->dev_priv_size = dev->driver->dev_priv_size;
- buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
- if (!buf->dev_private) {
- /* Set count correctly so we free the proper amount. */
- entry->buf_count = count;
- drm_cleanup_buf_error(dev, entry);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
-
- DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
-
- offset += alignment;
- entry->buf_count++;
- byte_count += PAGE_SIZE << page_order;
- }
-
- DRM_DEBUG("byte_count: %d\n", byte_count);
-
- temp_buflist = krealloc(dma->buflist,
- (dma->buf_count + entry->buf_count) *
- sizeof(*dma->buflist), GFP_KERNEL);
- if (!temp_buflist) {
- /* Free the entry because it isn't valid */
- drm_cleanup_buf_error(dev, entry);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
- dma->buflist = temp_buflist;
-
- for (i = 0; i < entry->buf_count; i++) {
- dma->buflist[i + dma->buf_count] = &entry->buflist[i];
- }
-
- dma->buf_count += entry->buf_count;
- dma->seg_count += entry->seg_count;
- dma->page_count += byte_count >> PAGE_SHIFT;
- dma->byte_count += byte_count;
-
- DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
- DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
-
- mutex_unlock(&dev->struct_mutex);
-
- request->count = entry->buf_count;
- request->size = size;
-
- dma->flags = _DRM_DMA_USE_FB;
-
- atomic_dec(&dev->buf_alloc);
- return 0;
-}
-
-
-/**
- * Add buffers for DMA transfers (ioctl).
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a struct drm_buf_desc request.
- * \return zero on success or a negative number on failure.
- *
- * According to the memory type specified in drm_buf_desc::flags and the
- * build options, it dispatches the call either to addbufs_agp(),
- * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
- * PCI memory respectively.
- */
-int drm_addbufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_buf_desc *request = data;
- int ret;
-
- if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
- return -EINVAL;
-
-#if __OS_HAS_AGP
- if (request->flags & _DRM_AGP_BUFFER)
- ret = drm_addbufs_agp(dev, request);
- else
-#endif
- if (request->flags & _DRM_SG_BUFFER)
- ret = drm_addbufs_sg(dev, request);
- else if (request->flags & _DRM_FB_BUFFER)
- ret = drm_addbufs_fb(dev, request);
- else
- ret = drm_addbufs_pci(dev, request);
-
- return ret;
-}
-
-/**
- * Get information about the buffer mappings.
- *
- * This was originally meant for debugging purposes, or for use by a
- * sophisticated client library to determine how best to use the available
- * buffers (e.g., large buffers can be used for image transfer).
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a drm_buf_info structure.
- * \return zero on success or a negative number on failure.
- *
- * Increments drm_device::buf_use while holding the drm_device::count_lock
- * lock, preventing allocation of more buffers after this call. Information
- * about each requested buffer is then copied into user space.
- */
-int drm_infobufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_device_dma *dma = dev->dma;
- struct drm_buf_info *request = data;
- int i;
- int count;
-
- if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
- return -EINVAL;
-
- if (!dma)
- return -EINVAL;
-
- spin_lock(&dev->count_lock);
- if (atomic_read(&dev->buf_alloc)) {
- spin_unlock(&dev->count_lock);
- return -EBUSY;
- }
- ++dev->buf_use; /* Can't allocate more after this call */
- spin_unlock(&dev->count_lock);
-
- for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
- if (dma->bufs[i].buf_count)
- ++count;
- }
-
- DRM_DEBUG("count = %d\n", count);
-
- if (request->count >= count) {
- for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
- if (dma->bufs[i].buf_count) {
- struct drm_buf_desc __user *to =
- &request->list[count];
- struct drm_buf_entry *from = &dma->bufs[i];
- struct drm_freelist *list = &dma->bufs[i].freelist;
- if (copy_to_user(&to->count,
- &from->buf_count,
- sizeof(from->buf_count)) ||
- copy_to_user(&to->size,
- &from->buf_size,
- sizeof(from->buf_size)) ||
- copy_to_user(&to->low_mark,
- &list->low_mark,
- sizeof(list->low_mark)) ||
- copy_to_user(&to->high_mark,
- &list->high_mark,
- sizeof(list->high_mark)))
- return -EFAULT;
-
- DRM_DEBUG("%d %d %d %d %d\n",
- i,
- dma->bufs[i].buf_count,
- dma->bufs[i].buf_size,
- dma->bufs[i].freelist.low_mark,
- dma->bufs[i].freelist.high_mark);
- ++count;
- }
- }
- }
- request->count = count;
-
- return 0;
-}
-
-/**
- * Specifies a low and high water mark for buffer allocation
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg a pointer to a drm_buf_desc structure.
- * \return zero on success or a negative number on failure.
- *
- * Verifies that the size order is bounded between the admissible orders and
- * updates the respective drm_device_dma::bufs entry low and high water mark.
- *
- * \note This ioctl is deprecated and mostly never used.
- */
-int drm_markbufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_device_dma *dma = dev->dma;
- struct drm_buf_desc *request = data;
- int order;
- struct drm_buf_entry *entry;
-
- if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
- return -EINVAL;
-
- if (!dma)
- return -EINVAL;
-
- DRM_DEBUG("%d, %d, %d\n",
- request->size, request->low_mark, request->high_mark);
- order = drm_order(request->size);
- if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
- return -EINVAL;
- entry = &dma->bufs[order];
-
- if (request->low_mark < 0 || request->low_mark > entry->buf_count)
- return -EINVAL;
- if (request->high_mark < 0 || request->high_mark > entry->buf_count)
- return -EINVAL;
-
- entry->freelist.low_mark = request->low_mark;
- entry->freelist.high_mark = request->high_mark;
-
- return 0;
-}
-
-/**
- * Unreserve the buffers in list, previously reserved using drmDMA.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a drm_buf_free structure.
- * \return zero on success or a negative number on failure.
- *
- * Calls free_buffer() for each used buffer.
- * This function is primarily used for debugging.
- */
-int drm_freebufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_device_dma *dma = dev->dma;
- struct drm_buf_free *request = data;
- int i;
- int idx;
- struct drm_buf *buf;
-
- if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
- return -EINVAL;
-
- if (!dma)
- return -EINVAL;
-
- DRM_DEBUG("%d\n", request->count);
- for (i = 0; i < request->count; i++) {
- if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
- return -EFAULT;
- if (idx < 0 || idx >= dma->buf_count) {
- DRM_ERROR("Index %d (of %d max)\n",
- idx, dma->buf_count - 1);
- return -EINVAL;
- }
- buf = dma->buflist[idx];
- if (buf->file_priv != file_priv) {
- DRM_ERROR("Process %d freeing buffer not owned\n",
- task_pid_nr(current));
- return -EINVAL;
- }
- drm_free_buffer(dev, buf);
- }
-
- return 0;
-}
-
-/**
- * Maps all of the DMA buffers into client-virtual space (ioctl).
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a drm_buf_map structure.
- * \return zero on success or a negative number on failure.
- *
- * Maps the AGP, SG or PCI buffer region with do_mmap(), and copies information
- * about each buffer into user space. For PCI buffers, it calls do_mmap() with
- * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
- * drm_mmap_dma().
- */
-int drm_mapbufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_device_dma *dma = dev->dma;
- int retcode = 0;
- const int zero = 0;
- unsigned long virtual;
- unsigned long address;
- struct drm_buf_map *request = data;
- int i;
-
- if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
- return -EINVAL;
-
- if (!dma)
- return -EINVAL;
-
- spin_lock(&dev->count_lock);
- if (atomic_read(&dev->buf_alloc)) {
- spin_unlock(&dev->count_lock);
- return -EBUSY;
- }
- dev->buf_use++; /* Can't allocate more after this call */
- spin_unlock(&dev->count_lock);
-
- if (request->count >= dma->buf_count) {
- if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
- || (drm_core_check_feature(dev, DRIVER_SG)
- && (dma->flags & _DRM_DMA_USE_SG))
- || (drm_core_check_feature(dev, DRIVER_FB_DMA)
- && (dma->flags & _DRM_DMA_USE_FB))) {
- struct drm_local_map *map = dev->agp_buffer_map;
- unsigned long token = dev->agp_buffer_token;
-
- if (!map) {
- retcode = -EINVAL;
- goto done;
- }
- down_write(&current->mm->mmap_sem);
- virtual = do_mmap(file_priv->filp, 0, map->size,
- PROT_READ | PROT_WRITE,
- MAP_SHARED,
- token);
- up_write(&current->mm->mmap_sem);
- } else {
- down_write(&current->mm->mmap_sem);
- virtual = do_mmap(file_priv->filp, 0, dma->byte_count,
- PROT_READ | PROT_WRITE,
- MAP_SHARED, 0);
- up_write(&current->mm->mmap_sem);
- }
- if (virtual > -1024UL) {
- /* Real error */
- retcode = (signed long)virtual;
- goto done;
- }
- request->virtual = (void __user *)virtual;
-
- for (i = 0; i < dma->buf_count; i++) {
- if (copy_to_user(&request->list[i].idx,
- &dma->buflist[i]->idx,
- sizeof(request->list[0].idx))) {
- retcode = -EFAULT;
- goto done;
- }
- if (copy_to_user(&request->list[i].total,
- &dma->buflist[i]->total,
- sizeof(request->list[0].total))) {
- retcode = -EFAULT;
- goto done;
- }
- if (copy_to_user(&request->list[i].used,
- &zero, sizeof(zero))) {
- retcode = -EFAULT;
- goto done;
- }
- address = virtual + dma->buflist[i]->offset; /* *** */
- if (copy_to_user(&request->list[i].address,
- &address, sizeof(address))) {
- retcode = -EFAULT;
- goto done;
- }
- }
- }
- done:
- request->count = dma->buf_count;
- DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);
-
- return retcode;
-}
-
-/**
- * Compute size order. Returns the exponent of the smallest power of two which
- * is greater than or equal to the given number.
- *
- * \param size size.
- * \return order.
- *
- * \todo Can be made faster.
- */
-int drm_order(unsigned long size)
-{
- int order;
- unsigned long tmp;
-
- for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;
-
- if (size & (size - 1))
- ++order;
-
- return order;
-}
-EXPORT_SYMBOL(drm_order);
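
A few worked values may help; they follow directly from the loop above and assume nothing beyond ordinary integer arithmetic:

/* Worked examples for drm_order():
 *   drm_order(1)    == 0    (2^0  = 1    >= 1)
 *   drm_order(4095) == 12   (2^12 = 4096 >= 4095)
 *   drm_order(4096) == 12   (exact power of two, no round-up)
 *   drm_order(4097) == 13   (rounded up to the next power of two)
 * The addbufs paths above then use size = 1 << order as the buffer size.
 */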
+++ /dev/null
-/**************************************************************************
- *
- * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- */
-
-#include "drmP.h"
-
-#if defined(CONFIG_X86)
-static void
-drm_clflush_page(struct page *page)
-{
- uint8_t *page_virtual;
- unsigned int i;
-
- if (unlikely(page == NULL))
- return;
-
- page_virtual = kmap_atomic(page, KM_USER0);
- for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
- clflush(page_virtual + i);
- kunmap_atomic(page_virtual, KM_USER0);
-}
-
-static void drm_cache_flush_clflush(struct page *pages[],
- unsigned long num_pages)
-{
- unsigned long i;
-
- mb();
- for (i = 0; i < num_pages; i++)
- drm_clflush_page(*pages++);
- mb();
-}
-
-static void
-drm_clflush_ipi_handler(void *null)
-{
- wbinvd();
-}
-#endif
-
-void
-drm_clflush_pages(struct page *pages[], unsigned long num_pages)
-{
-
-#if defined(CONFIG_X86)
- if (cpu_has_clflush) {
- drm_cache_flush_clflush(pages, num_pages);
- return;
- }
-
- if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
- printk(KERN_ERR "Timed out waiting for cache flush.\n");
-
-#elif defined(__powerpc__)
- unsigned long i;
- for (i = 0; i < num_pages; i++) {
- struct page *page = pages[i];
- void *page_virtual;
-
- if (unlikely(page == NULL))
- continue;
-
- page_virtual = kmap_atomic(page, KM_USER0);
- flush_dcache_range((unsigned long)page_virtual,
- (unsigned long)page_virtual + PAGE_SIZE);
- kunmap_atomic(page_virtual, KM_USER0);
- }
-#else
- printk(KERN_ERR "Architecture has no drm_cache.c support\n");
- WARN_ON_ONCE(1);
-#endif
-}
-EXPORT_SYMBOL(drm_clflush_pages);
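
As a brief illustration of the intended call pattern, a hedged driver-side sketch follows; the wrapper function name is invented, and the page array is assumed to have been populated by the caller.

#include "drmP.h"

/* Hypothetical caller: make CPU writes visible before the device reads the
 * pages over a non-coherent path (e.g. a GTT binding). */
static void example_flush_before_gpu_read(struct page **pages,
					   unsigned long num_pages)
{
	/* On x86 this issues clflush per cache line (or falls back to a
	 * wbinvd IPI); on powerpc it flushes the dcache range per page. */
	drm_clflush_pages(pages, num_pages);
}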
+++ /dev/null
-/**
- * \file drm_context.c
- * IOCTLs for generic contexts
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Fri Nov 24 18:31:37 2000 by gareth@valinux.com
- *
- * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-/*
- * ChangeLog:
- * 2001-11-16 Torsten Duwe <duwe@caldera.de>
- * added context constructor/destructor hooks,
- * needed by SiS driver's memory management.
- */
-
-#include "drmP.h"
-
-/******************************************************************/
-/** \name Context bitmap support */
-/*@{*/
-
-/**
- * Free a handle from the context bitmap.
- *
- * \param dev DRM device.
- * \param ctx_handle context handle.
- *
- * Clears the bit specified by \p ctx_handle in drm_device::ctx_bitmap and the entry
- * in drm_device::ctx_idr, while holding the drm_device::struct_mutex
- * lock.
- */
-void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
-{
- mutex_lock(&dev->struct_mutex);
- idr_remove(&dev->ctx_idr, ctx_handle);
- mutex_unlock(&dev->struct_mutex);
-}
-
-/**
- * Context bitmap allocation.
- *
- * \param dev DRM device.
- * \return (non-negative) context handle on success or a negative number on failure.
- *
- * Allocate a new idr from drm_device::ctx_idr while holding the
- * drm_device::struct_mutex lock.
- */
-static int drm_ctxbitmap_next(struct drm_device * dev)
-{
- int new_id;
- int ret;
-
-again:
- if (idr_pre_get(&dev->ctx_idr, GFP_KERNEL) == 0) {
- DRM_ERROR("Out of memory expanding drawable idr\n");
- return -ENOMEM;
- }
- mutex_lock(&dev->struct_mutex);
- ret = idr_get_new_above(&dev->ctx_idr, NULL,
- DRM_RESERVED_CONTEXTS, &new_id);
- if (ret == -EAGAIN) {
- mutex_unlock(&dev->struct_mutex);
- goto again;
- }
- mutex_unlock(&dev->struct_mutex);
- return new_id;
-}
-
-/**
- * Context bitmap initialization.
- *
- * \param dev DRM device.
- *
- * Initialise the drm_device::ctx_idr
- */
-int drm_ctxbitmap_init(struct drm_device * dev)
-{
- idr_init(&dev->ctx_idr);
- return 0;
-}
-
-/**
- * Context bitmap cleanup.
- *
- * \param dev DRM device.
- *
- * Remove all entries from drm_device::ctx_idr while holding the
- * drm_device::struct_mutex lock.
- */
-void drm_ctxbitmap_cleanup(struct drm_device * dev)
-{
- mutex_lock(&dev->struct_mutex);
- idr_remove_all(&dev->ctx_idr);
- mutex_unlock(&dev->struct_mutex);
-}
-
-/*@}*/
-
-/******************************************************************/
-/** \name Per Context SAREA Support */
-/*@{*/
-
-/**
- * Get per-context SAREA.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument pointing to a drm_ctx_priv_map structure.
- * \return zero on success or a negative number on failure.
- *
- * Gets the map from drm_device::ctx_idr with the handle specified and
- * returns its handle.
- */
-int drm_getsareactx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_ctx_priv_map *request = data;
- struct drm_local_map *map;
- struct drm_map_list *_entry;
-
- mutex_lock(&dev->struct_mutex);
-
- map = idr_find(&dev->ctx_idr, request->ctx_id);
- if (!map) {
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
- }
-
- mutex_unlock(&dev->struct_mutex);
-
- request->handle = NULL;
- list_for_each_entry(_entry, &dev->maplist, head) {
- if (_entry->map == map) {
- request->handle =
- (void *)(unsigned long)_entry->user_token;
- break;
- }
- }
- if (request->handle == NULL)
- return -EINVAL;
-
- return 0;
-}
-
-/**
- * Set per-context SAREA.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument pointing to a drm_ctx_priv_map structure.
- * \return zero on success or a negative number on failure.
- *
- * Searches for the mapping specified in \p arg and updates the entry in
- * drm_device::ctx_idr with it.
- */
-int drm_setsareactx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_ctx_priv_map *request = data;
- struct drm_local_map *map = NULL;
- struct drm_map_list *r_list = NULL;
-
- mutex_lock(&dev->struct_mutex);
- list_for_each_entry(r_list, &dev->maplist, head) {
- if (r_list->map
- && r_list->user_token == (unsigned long) request->handle)
- goto found;
- }
- bad:
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
-
- found:
- map = r_list->map;
- if (!map)
- goto bad;
-
- if (IS_ERR(idr_replace(&dev->ctx_idr, map, request->ctx_id)))
- goto bad;
-
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
-/*@}*/
-
-/******************************************************************/
-/** \name The actual DRM context handling routines */
-/*@{*/
-
-/**
- * Switch context.
- *
- * \param dev DRM device.
- * \param old old context handle.
- * \param new new context handle.
- * \return zero on success or a negative number on failure.
- *
- * Attempt to set drm_device::context_flag.
- */
-static int drm_context_switch(struct drm_device * dev, int old, int new)
-{
- if (test_and_set_bit(0, &dev->context_flag)) {
- DRM_ERROR("Reentering -- FIXME\n");
- return -EBUSY;
- }
-
- DRM_DEBUG("Context switch from %d to %d\n", old, new);
-
- if (new == dev->last_context) {
- clear_bit(0, &dev->context_flag);
- return 0;
- }
-
- return 0;
-}
-
-/**
- * Complete context switch.
- *
- * \param dev DRM device.
- * \param new new context handle.
- * \return zero on success or a negative number on failure.
- *
- * Updates drm_device::last_context and drm_device::last_switch. Verifies the
- * hardware lock is held, clears the drm_device::context_flag and wakes up
- * drm_device::context_wait.
- */
-static int drm_context_switch_complete(struct drm_device *dev,
- struct drm_file *file_priv, int new)
-{
- dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
- dev->last_switch = jiffies;
-
- if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) {
- DRM_ERROR("Lock isn't held after context switch\n");
- }
-
- /* If a context switch is ever initiated
- when the kernel holds the lock, release
- that lock here. */
- clear_bit(0, &dev->context_flag);
- wake_up(&dev->context_wait);
-
- return 0;
-}
-
-/**
- * Reserve contexts.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument pointing to a drm_ctx_res structure.
- * \return zero on success or a negative number on failure.
- */
-int drm_resctx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_ctx_res *res = data;
- struct drm_ctx ctx;
- int i;
-
- if (res->count >= DRM_RESERVED_CONTEXTS) {
- memset(&ctx, 0, sizeof(ctx));
- for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
- ctx.handle = i;
- if (copy_to_user(&res->contexts[i], &ctx, sizeof(ctx)))
- return -EFAULT;
- }
- }
- res->count = DRM_RESERVED_CONTEXTS;
-
- return 0;
-}
-
-/**
- * Add context.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument pointing to a drm_ctx structure.
- * \return zero on success or a negative number on failure.
- *
- * Get a new handle for the context and copy to userspace.
- */
-int drm_addctx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_ctx_list *ctx_entry;
- struct drm_ctx *ctx = data;
-
- ctx->handle = drm_ctxbitmap_next(dev);
- if (ctx->handle == DRM_KERNEL_CONTEXT) {
- /* Skip kernel's context and get a new one. */
- ctx->handle = drm_ctxbitmap_next(dev);
- }
- DRM_DEBUG("%d\n", ctx->handle);
- if (ctx->handle == -1) {
- DRM_DEBUG("Not enough free contexts.\n");
- /* Should this return -EBUSY instead? */
- return -ENOMEM;
- }
-
- ctx_entry = kmalloc(sizeof(*ctx_entry), GFP_KERNEL);
- if (!ctx_entry) {
- DRM_DEBUG("out of memory\n");
- return -ENOMEM;
- }
-
- INIT_LIST_HEAD(&ctx_entry->head);
- ctx_entry->handle = ctx->handle;
- ctx_entry->tag = file_priv;
-
- mutex_lock(&dev->ctxlist_mutex);
- list_add(&ctx_entry->head, &dev->ctxlist);
- ++dev->ctx_count;
- mutex_unlock(&dev->ctxlist_mutex);
-
- return 0;
-}
-
-int drm_modctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- /* This does nothing */
- return 0;
-}
-
-/**
- * Get context.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument pointing to a drm_ctx structure.
- * \return zero on success or a negative number on failure.
- */
-int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- struct drm_ctx *ctx = data;
-
- /* This is 0, because we don't handle any context flags */
- ctx->flags = 0;
-
- return 0;
-}
-
-/**
- * Switch context.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument pointing to a drm_ctx structure.
- * \return zero on success or a negative number on failure.
- *
- * Calls context_switch().
- */
-int drm_switchctx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_ctx *ctx = data;
-
- DRM_DEBUG("%d\n", ctx->handle);
- return drm_context_switch(dev, dev->last_context, ctx->handle);
-}
-
-/**
- * New context.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument pointing to a drm_ctx structure.
- * \return zero on success or a negative number on failure.
- *
- * Calls context_switch_complete().
- */
-int drm_newctx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_ctx *ctx = data;
-
- DRM_DEBUG("%d\n", ctx->handle);
- drm_context_switch_complete(dev, file_priv, ctx->handle);
-
- return 0;
-}
-
-/**
- * Remove context.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument pointing to a drm_ctx structure.
- * \return zero on success or a negative number on failure.
- *
- * If not the special kernel context, calls ctxbitmap_free() to free the specified context.
- */
-int drm_rmctx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_ctx *ctx = data;
-
- DRM_DEBUG("%d\n", ctx->handle);
- if (ctx->handle != DRM_KERNEL_CONTEXT) {
- if (dev->driver->context_dtor)
- dev->driver->context_dtor(dev, ctx->handle);
- drm_ctxbitmap_free(dev, ctx->handle);
- }
-
- mutex_lock(&dev->ctxlist_mutex);
- if (!list_empty(&dev->ctxlist)) {
- struct drm_ctx_list *pos, *n;
-
- list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
- if (pos->handle == ctx->handle) {
- list_del(&pos->head);
- kfree(pos);
- --dev->ctx_count;
- }
- }
- }
- mutex_unlock(&dev->ctxlist_mutex);
-
- return 0;
-}
-
-/*@}*/
+++ /dev/null
-/*
- * Copyright (c) 2006-2008 Intel Corporation
- * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
- * Copyright (c) 2008 Red Hat Inc.
- *
- * DRM core CRTC related functions
- *
- * Permission to use, copy, modify, distribute, and sell this software and its
- * documentation for any purpose is hereby granted without fee, provided that
- * the above copyright notice appear in all copies and that both that copyright
- * notice and this permission notice appear in supporting documentation, and
- * that the name of the copyright holders not be used in advertising or
- * publicity pertaining to distribution of the software without specific,
- * written prior permission. The copyright holders make no representations
- * about the suitability of this software for any purpose. It is provided "as
- * is" without express or implied warranty.
- *
- * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
- * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
- * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
- * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
- * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
- * OF THIS SOFTWARE.
- *
- * Authors:
- * Keith Packard
- * Eric Anholt <eric@anholt.net>
- * Dave Airlie <airlied@linux.ie>
- * Jesse Barnes <jesse.barnes@intel.com>
- */
-#include <linux/list.h>
-#include <linux/slab.h>
-#include "drm.h"
-#include "drmP.h"
-#include "drm_crtc.h"
-#include "drm_edid.h"
-
-struct drm_prop_enum_list {
- int type;
- char *name;
-};
-
-/* Avoid boilerplate. I'm tired of typing. */
-#define DRM_ENUM_NAME_FN(fnname, list) \
- char *fnname(int val) \
- { \
- int i; \
- for (i = 0; i < ARRAY_SIZE(list); i++) { \
- if (list[i].type == val) \
- return list[i].name; \
- } \
- return "(unknown)"; \
- }
-
-/*
- * Global properties
- */
-static struct drm_prop_enum_list drm_dpms_enum_list[] =
-{ { DRM_MODE_DPMS_ON, "On" },
- { DRM_MODE_DPMS_STANDBY, "Standby" },
- { DRM_MODE_DPMS_SUSPEND, "Suspend" },
- { DRM_MODE_DPMS_OFF, "Off" }
-};
-
-DRM_ENUM_NAME_FN(drm_get_dpms_name, drm_dpms_enum_list)
-
-/*
- * Optional properties
- */
-static struct drm_prop_enum_list drm_scaling_mode_enum_list[] =
-{
- { DRM_MODE_SCALE_NONE, "None" },
- { DRM_MODE_SCALE_FULLSCREEN, "Full" },
- { DRM_MODE_SCALE_CENTER, "Center" },
- { DRM_MODE_SCALE_ASPECT, "Full aspect" },
-};
-
-static struct drm_prop_enum_list drm_dithering_mode_enum_list[] =
-{
- { DRM_MODE_DITHERING_OFF, "Off" },
- { DRM_MODE_DITHERING_ON, "On" },
- { DRM_MODE_DITHERING_AUTO, "Automatic" },
-};
-
-/*
- * Non-global properties, but "required" for certain connectors.
- */
-static struct drm_prop_enum_list drm_dvi_i_select_enum_list[] =
-{
- { DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
- { DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */
- { DRM_MODE_SUBCONNECTOR_DVIA, "DVI-A" }, /* DVI-I */
-};
-
-DRM_ENUM_NAME_FN(drm_get_dvi_i_select_name, drm_dvi_i_select_enum_list)
-
-static struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] =
-{
- { DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */
- { DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */
- { DRM_MODE_SUBCONNECTOR_DVIA, "DVI-A" }, /* DVI-I */
-};
-
-DRM_ENUM_NAME_FN(drm_get_dvi_i_subconnector_name,
- drm_dvi_i_subconnector_enum_list)
-
-static struct drm_prop_enum_list drm_tv_select_enum_list[] =
-{
- { DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
- { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
- { DRM_MODE_SUBCONNECTOR_SVIDEO, "SVIDEO" }, /* TV-out */
- { DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
- { DRM_MODE_SUBCONNECTOR_SCART, "SCART" }, /* TV-out */
-};
-
-DRM_ENUM_NAME_FN(drm_get_tv_select_name, drm_tv_select_enum_list)
-
-static struct drm_prop_enum_list drm_tv_subconnector_enum_list[] =
-{
- { DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */
- { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
- { DRM_MODE_SUBCONNECTOR_SVIDEO, "SVIDEO" }, /* TV-out */
- { DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
- { DRM_MODE_SUBCONNECTOR_SCART, "SCART" }, /* TV-out */
-};
-
-DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
- drm_tv_subconnector_enum_list)
-
-static struct drm_prop_enum_list drm_dirty_info_enum_list[] = {
- { DRM_MODE_DIRTY_OFF, "Off" },
- { DRM_MODE_DIRTY_ON, "On" },
- { DRM_MODE_DIRTY_ANNOTATE, "Annotate" },
-};
-
-DRM_ENUM_NAME_FN(drm_get_dirty_info_name,
- drm_dirty_info_enum_list)
-
-struct drm_conn_prop_enum_list {
- int type;
- char *name;
- int count;
-};
-
-/*
- * Connector and encoder types.
- */
-static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
-{ { DRM_MODE_CONNECTOR_Unknown, "Unknown", 0 },
- { DRM_MODE_CONNECTOR_VGA, "VGA", 0 },
- { DRM_MODE_CONNECTOR_DVII, "DVI-I", 0 },
- { DRM_MODE_CONNECTOR_DVID, "DVI-D", 0 },
- { DRM_MODE_CONNECTOR_DVIA, "DVI-A", 0 },
- { DRM_MODE_CONNECTOR_Composite, "Composite", 0 },
- { DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO", 0 },
- { DRM_MODE_CONNECTOR_LVDS, "LVDS", 0 },
- { DRM_MODE_CONNECTOR_Component, "Component", 0 },
- { DRM_MODE_CONNECTOR_9PinDIN, "DIN", 0 },
- { DRM_MODE_CONNECTOR_DisplayPort, "DP", 0 },
- { DRM_MODE_CONNECTOR_HDMIA, "HDMI-A", 0 },
- { DRM_MODE_CONNECTOR_HDMIB, "HDMI-B", 0 },
- { DRM_MODE_CONNECTOR_TV, "TV", 0 },
- { DRM_MODE_CONNECTOR_eDP, "eDP", 0 },
-};
-
-static struct drm_prop_enum_list drm_encoder_enum_list[] =
-{ { DRM_MODE_ENCODER_NONE, "None" },
- { DRM_MODE_ENCODER_DAC, "DAC" },
- { DRM_MODE_ENCODER_TMDS, "TMDS" },
- { DRM_MODE_ENCODER_LVDS, "LVDS" },
- { DRM_MODE_ENCODER_TVDAC, "TV" },
-};
-
-char *drm_get_encoder_name(struct drm_encoder *encoder)
-{
- static char buf[32];
-
- snprintf(buf, 32, "%s-%d",
- drm_encoder_enum_list[encoder->encoder_type].name,
- encoder->base.id);
- return buf;
-}
-EXPORT_SYMBOL(drm_get_encoder_name);
-
-char *drm_get_connector_name(struct drm_connector *connector)
-{
- static char buf[32];
-
- snprintf(buf, 32, "%s-%d",
- drm_connector_enum_list[connector->connector_type].name,
- connector->connector_type_id);
- return buf;
-}
-EXPORT_SYMBOL(drm_get_connector_name);
-
-char *drm_get_connector_status_name(enum drm_connector_status status)
-{
- if (status == connector_status_connected)
- return "connected";
- else if (status == connector_status_disconnected)
- return "disconnected";
- else
- return "unknown";
-}
-
-/**
- * drm_mode_object_get - allocate a new identifier
- * @dev: DRM device
- * @obj: object to register
- * @obj_type: object type
- *
- * LOCKING:
- * Takes the mode_config idr_mutex internally.
- *
- * Create a unique identifier based on @obj in @dev's identifier space. Used
- * for tracking modes, CRTCs and connectors.
- *
- * RETURNS:
- * New unique (relative to other objects in @dev) integer identifier for the
- * object.
- */
-static int drm_mode_object_get(struct drm_device *dev,
- struct drm_mode_object *obj, uint32_t obj_type)
-{
- int new_id = 0;
- int ret;
-
-again:
- if (idr_pre_get(&dev->mode_config.crtc_idr, GFP_KERNEL) == 0) {
- DRM_ERROR("Ran out memory getting a mode number\n");
- return -EINVAL;
- }
-
- mutex_lock(&dev->mode_config.idr_mutex);
- ret = idr_get_new_above(&dev->mode_config.crtc_idr, obj, 1, &new_id);
- mutex_unlock(&dev->mode_config.idr_mutex);
- if (ret == -EAGAIN)
- goto again;
-
- obj->id = new_id;
- obj->type = obj_type;
- return 0;
-}
-
-/**
- * drm_mode_object_put - free an identifier
- * @dev: DRM device
- * @id: ID to free
- *
- * LOCKING:
- * Caller must hold DRM mode_config lock.
- *
- * Free @id from @dev's unique identifier pool.
- */
-static void drm_mode_object_put(struct drm_device *dev,
- struct drm_mode_object *object)
-{
- mutex_lock(&dev->mode_config.idr_mutex);
- idr_remove(&dev->mode_config.crtc_idr, object->id);
- mutex_unlock(&dev->mode_config.idr_mutex);
-}
-
-struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
- uint32_t id, uint32_t type)
-{
- struct drm_mode_object *obj = NULL;
-
- mutex_lock(&dev->mode_config.idr_mutex);
- obj = idr_find(&dev->mode_config.crtc_idr, id);
- if (!obj || (obj->type != type) || (obj->id != id))
- obj = NULL;
- mutex_unlock(&dev->mode_config.idr_mutex);
-
- return obj;
-}
-EXPORT_SYMBOL(drm_mode_object_find);
-
-/**
- * drm_framebuffer_init - initialize a framebuffer
- * @dev: DRM device
- *
- * LOCKING:
- * Caller must hold mode config lock.
- *
- * Allocates an ID for the framebuffer's parent mode object, sets its mode
- * functions and owning device, and adds it to the mode config's fb list.
- *
- * RETURNS:
- * Zero on success, error code on failure.
- */
-int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
- const struct drm_framebuffer_funcs *funcs)
-{
- int ret;
-
- ret = drm_mode_object_get(dev, &fb->base, DRM_MODE_OBJECT_FB);
- if (ret) {
- return ret;
- }
-
- fb->dev = dev;
- fb->funcs = funcs;
- dev->mode_config.num_fb++;
- list_add(&fb->head, &dev->mode_config.fb_list);
-
- return 0;
-}
-EXPORT_SYMBOL(drm_framebuffer_init);
-
-/**
- * drm_framebuffer_cleanup - remove a framebuffer object
- * @fb: framebuffer to remove
- *
- * LOCKING:
- * Caller must hold mode config lock.
- *
- * Scans all the CRTCs in @dev's mode_config. If they're using @fb, removes
- * it, setting it to NULL.
- */
-void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
-{
- struct drm_device *dev = fb->dev;
- struct drm_crtc *crtc;
- struct drm_mode_set set;
- int ret;
-
- /* remove from any CRTC */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- if (crtc->fb == fb) {
- /* should turn off the crtc */
- memset(&set, 0, sizeof(struct drm_mode_set));
- set.crtc = crtc;
- set.fb = NULL;
- ret = crtc->funcs->set_config(&set);
- if (ret)
- DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc);
- }
- }
-
- drm_mode_object_put(dev, &fb->base);
- list_del(&fb->head);
- dev->mode_config.num_fb--;
-}
-EXPORT_SYMBOL(drm_framebuffer_cleanup);
-
-/**
- * drm_crtc_init - Initialise a new CRTC object
- * @dev: DRM device
- * @crtc: CRTC object to init
- * @funcs: callbacks for the new CRTC
- *
- * LOCKING:
- * Caller must hold mode config lock.
- *
- * Inits a new object created as the base part of a driver crtc object.
- */
-void drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
- const struct drm_crtc_funcs *funcs)
-{
- crtc->dev = dev;
- crtc->funcs = funcs;
-
- mutex_lock(&dev->mode_config.mutex);
- drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC);
-
- list_add_tail(&crtc->head, &dev->mode_config.crtc_list);
- dev->mode_config.num_crtc++;
- mutex_unlock(&dev->mode_config.mutex);
-}
-EXPORT_SYMBOL(drm_crtc_init);
-
-/**
- * drm_crtc_cleanup - Cleans up the core crtc usage.
- * @crtc: CRTC to cleanup
- *
- * LOCKING:
- * Caller must hold mode config lock.
- *
- * Cleans up @crtc, removing it from the DRM mode setting core. Does NOT
- * free the object itself; the caller does that.
- */
-void drm_crtc_cleanup(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
-
- if (crtc->gamma_store) {
- kfree(crtc->gamma_store);
- crtc->gamma_store = NULL;
- }
-
- drm_mode_object_put(dev, &crtc->base);
- list_del(&crtc->head);
- dev->mode_config.num_crtc--;
-}
-EXPORT_SYMBOL(drm_crtc_cleanup);
-
-/**
- * drm_mode_probed_add - add a mode to a connector's probed mode list
- * @connector: connector the new mode is to be added to
- * @mode: mode data
- *
- * LOCKING:
- * Caller must hold mode config lock.
- *
- * Add @mode to @connector's mode list for later use.
- */
-void drm_mode_probed_add(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- list_add(&mode->head, &connector->probed_modes);
-}
-EXPORT_SYMBOL(drm_mode_probed_add);
-
-/**
- * drm_mode_remove - remove and free a mode
- * @connector: connector list to modify
- * @mode: mode to remove
- *
- * LOCKING:
- * Caller must hold mode config lock.
- *
- * Remove @mode from @connector's mode list, then free it.
- */
-void drm_mode_remove(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- list_del(&mode->head);
- kfree(mode);
-}
-EXPORT_SYMBOL(drm_mode_remove);
-
-/**
- * drm_connector_init - Init a preallocated connector
- * @dev: DRM device
- * @connector: the connector to init
- * @funcs: callbacks for this connector
- * @connector_type: user visible type of the connector
- *
- * LOCKING:
- * Caller must hold @dev's mode_config lock.
- *
- * Initialises a preallocated connector. Connectors should be
- * subclassed as part of driver connector objects.
- */
-void drm_connector_init(struct drm_device *dev,
- struct drm_connector *connector,
- const struct drm_connector_funcs *funcs,
- int connector_type)
-{
- mutex_lock(&dev->mode_config.mutex);
-
- connector->dev = dev;
- connector->funcs = funcs;
- drm_mode_object_get(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR);
- connector->connector_type = connector_type;
- connector->connector_type_id =
- ++drm_connector_enum_list[connector_type].count; /* TODO */
- INIT_LIST_HEAD(&connector->user_modes);
- INIT_LIST_HEAD(&connector->probed_modes);
- INIT_LIST_HEAD(&connector->modes);
- connector->edid_blob_ptr = NULL;
-
- list_add_tail(&connector->head, &dev->mode_config.connector_list);
- dev->mode_config.num_connector++;
-
- drm_connector_attach_property(connector,
- dev->mode_config.edid_property, 0);
-
- drm_connector_attach_property(connector,
- dev->mode_config.dpms_property, 0);
-
- mutex_unlock(&dev->mode_config.mutex);
-}
-EXPORT_SYMBOL(drm_connector_init);
-
-/**
- * drm_connector_cleanup - cleans up an initialised connector
- * @connector: connector to cleanup
- *
- * LOCKING:
- * Caller must hold @dev's mode_config lock.
- *
- * Cleans up the connector but doesn't free the object.
- */
-void drm_connector_cleanup(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct drm_display_mode *mode, *t;
-
- list_for_each_entry_safe(mode, t, &connector->probed_modes, head)
- drm_mode_remove(connector, mode);
-
- list_for_each_entry_safe(mode, t, &connector->modes, head)
- drm_mode_remove(connector, mode);
-
- list_for_each_entry_safe(mode, t, &connector->user_modes, head)
- drm_mode_remove(connector, mode);
-
- mutex_lock(&dev->mode_config.mutex);
- drm_mode_object_put(dev, &connector->base);
- list_del(&connector->head);
- mutex_unlock(&dev->mode_config.mutex);
-}
-EXPORT_SYMBOL(drm_connector_cleanup);
-
-void drm_encoder_init(struct drm_device *dev,
- struct drm_encoder *encoder,
- const struct drm_encoder_funcs *funcs,
- int encoder_type)
-{
- mutex_lock(&dev->mode_config.mutex);
-
- encoder->dev = dev;
-
- drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER);
- encoder->encoder_type = encoder_type;
- encoder->funcs = funcs;
-
- list_add_tail(&encoder->head, &dev->mode_config.encoder_list);
- dev->mode_config.num_encoder++;
-
- mutex_unlock(&dev->mode_config.mutex);
-}
-EXPORT_SYMBOL(drm_encoder_init);
-
-void drm_encoder_cleanup(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- mutex_lock(&dev->mode_config.mutex);
- drm_mode_object_put(dev, &encoder->base);
- list_del(&encoder->head);
- mutex_unlock(&dev->mode_config.mutex);
-}
-EXPORT_SYMBOL(drm_encoder_cleanup);
-
-/**
- * drm_mode_create - create a new display mode
- * @dev: DRM device
- *
- * LOCKING:
- * Caller must hold DRM mode_config lock.
- *
- * Create a new drm_display_mode, give it an ID, and return it.
- *
- * RETURNS:
- * Pointer to new mode on success, NULL on error.
- */
-struct drm_display_mode *drm_mode_create(struct drm_device *dev)
-{
- struct drm_display_mode *nmode;
-
- nmode = kzalloc(sizeof(struct drm_display_mode), GFP_KERNEL);
- if (!nmode)
- return NULL;
-
- drm_mode_object_get(dev, &nmode->base, DRM_MODE_OBJECT_MODE);
- return nmode;
-}
-EXPORT_SYMBOL(drm_mode_create);
-
-/**
- * drm_mode_destroy - remove a mode
- * @dev: DRM device
- * @mode: mode to remove
- *
- * LOCKING:
- * Caller must hold mode config lock.
- *
- * Free @mode's unique identifier, then free it.
- */
-void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode)
-{
- drm_mode_object_put(dev, &mode->base);
-
- kfree(mode);
-}
-EXPORT_SYMBOL(drm_mode_destroy);
-
-static int drm_mode_create_standard_connector_properties(struct drm_device *dev)
-{
- struct drm_property *edid;
- struct drm_property *dpms;
- int i;
-
- /*
- * Standard properties (apply to all connectors)
- */
- edid = drm_property_create(dev, DRM_MODE_PROP_BLOB |
- DRM_MODE_PROP_IMMUTABLE,
- "EDID", 0);
- dev->mode_config.edid_property = edid;
-
- dpms = drm_property_create(dev, DRM_MODE_PROP_ENUM,
- "DPMS", ARRAY_SIZE(drm_dpms_enum_list));
- for (i = 0; i < ARRAY_SIZE(drm_dpms_enum_list); i++)
- drm_property_add_enum(dpms, i, drm_dpms_enum_list[i].type,
- drm_dpms_enum_list[i].name);
- dev->mode_config.dpms_property = dpms;
-
- return 0;
-}
-
-/**
- * drm_mode_create_dvi_i_properties - create DVI-I specific connector properties
- * @dev: DRM device
- *
- * Called by a driver the first time a DVI-I connector is made.
- */
-int drm_mode_create_dvi_i_properties(struct drm_device *dev)
-{
- struct drm_property *dvi_i_selector;
- struct drm_property *dvi_i_subconnector;
- int i;
-
- if (dev->mode_config.dvi_i_select_subconnector_property)
- return 0;
-
- dvi_i_selector =
- drm_property_create(dev, DRM_MODE_PROP_ENUM,
- "select subconnector",
- ARRAY_SIZE(drm_dvi_i_select_enum_list));
- for (i = 0; i < ARRAY_SIZE(drm_dvi_i_select_enum_list); i++)
- drm_property_add_enum(dvi_i_selector, i,
- drm_dvi_i_select_enum_list[i].type,
- drm_dvi_i_select_enum_list[i].name);
- dev->mode_config.dvi_i_select_subconnector_property = dvi_i_selector;
-
- dvi_i_subconnector =
- drm_property_create(dev, DRM_MODE_PROP_ENUM |
- DRM_MODE_PROP_IMMUTABLE,
- "subconnector",
- ARRAY_SIZE(drm_dvi_i_subconnector_enum_list));
- for (i = 0; i < ARRAY_SIZE(drm_dvi_i_subconnector_enum_list); i++)
- drm_property_add_enum(dvi_i_subconnector, i,
- drm_dvi_i_subconnector_enum_list[i].type,
- drm_dvi_i_subconnector_enum_list[i].name);
- dev->mode_config.dvi_i_subconnector_property = dvi_i_subconnector;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_mode_create_dvi_i_properties);
-
-/**
- * drm_create_tv_properties - create TV specific connector properties
- * @dev: DRM device
- * @num_modes: number of different TV formats (modes) supported
- * @modes: array of pointers to strings containing name of each format
- *
- * Called by a driver's TV initialization routine, this function creates
- * the TV specific connector properties for a given device. Caller is
- * responsible for allocating a list of format names and passing them to
- * this routine.
- */
-int drm_mode_create_tv_properties(struct drm_device *dev, int num_modes,
- char *modes[])
-{
- struct drm_property *tv_selector;
- struct drm_property *tv_subconnector;
- int i;
-
- if (dev->mode_config.tv_select_subconnector_property)
- return 0;
-
- /*
- * Basic connector properties
- */
- tv_selector = drm_property_create(dev, DRM_MODE_PROP_ENUM,
- "select subconnector",
- ARRAY_SIZE(drm_tv_select_enum_list));
- for (i = 0; i < ARRAY_SIZE(drm_tv_select_enum_list); i++)
- drm_property_add_enum(tv_selector, i,
- drm_tv_select_enum_list[i].type,
- drm_tv_select_enum_list[i].name);
- dev->mode_config.tv_select_subconnector_property = tv_selector;
-
- tv_subconnector =
- drm_property_create(dev, DRM_MODE_PROP_ENUM |
- DRM_MODE_PROP_IMMUTABLE, "subconnector",
- ARRAY_SIZE(drm_tv_subconnector_enum_list));
- for (i = 0; i < ARRAY_SIZE(drm_tv_subconnector_enum_list); i++)
- drm_property_add_enum(tv_subconnector, i,
- drm_tv_subconnector_enum_list[i].type,
- drm_tv_subconnector_enum_list[i].name);
- dev->mode_config.tv_subconnector_property = tv_subconnector;
-
- /*
- * Other, TV specific properties: margins & TV modes.
- */
- dev->mode_config.tv_left_margin_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "left margin", 2);
- dev->mode_config.tv_left_margin_property->values[0] = 0;
- dev->mode_config.tv_left_margin_property->values[1] = 100;
-
- dev->mode_config.tv_right_margin_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "right margin", 2);
- dev->mode_config.tv_right_margin_property->values[0] = 0;
- dev->mode_config.tv_right_margin_property->values[1] = 100;
-
- dev->mode_config.tv_top_margin_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "top margin", 2);
- dev->mode_config.tv_top_margin_property->values[0] = 0;
- dev->mode_config.tv_top_margin_property->values[1] = 100;
-
- dev->mode_config.tv_bottom_margin_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "bottom margin", 2);
- dev->mode_config.tv_bottom_margin_property->values[0] = 0;
- dev->mode_config.tv_bottom_margin_property->values[1] = 100;
-
- dev->mode_config.tv_mode_property =
- drm_property_create(dev, DRM_MODE_PROP_ENUM,
- "mode", num_modes);
- for (i = 0; i < num_modes; i++)
- drm_property_add_enum(dev->mode_config.tv_mode_property, i,
- i, modes[i]);
-
- dev->mode_config.tv_brightness_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "brightness", 2);
- dev->mode_config.tv_brightness_property->values[0] = 0;
- dev->mode_config.tv_brightness_property->values[1] = 100;
-
- dev->mode_config.tv_contrast_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "contrast", 2);
- dev->mode_config.tv_contrast_property->values[0] = 0;
- dev->mode_config.tv_contrast_property->values[1] = 100;
-
- dev->mode_config.tv_flicker_reduction_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "flicker reduction", 2);
- dev->mode_config.tv_flicker_reduction_property->values[0] = 0;
- dev->mode_config.tv_flicker_reduction_property->values[1] = 100;
-
- dev->mode_config.tv_overscan_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "overscan", 2);
- dev->mode_config.tv_overscan_property->values[0] = 0;
- dev->mode_config.tv_overscan_property->values[1] = 100;
-
- dev->mode_config.tv_saturation_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "saturation", 2);
- dev->mode_config.tv_saturation_property->values[0] = 0;
- dev->mode_config.tv_saturation_property->values[1] = 100;
-
- dev->mode_config.tv_hue_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "hue", 2);
- dev->mode_config.tv_hue_property->values[0] = 0;
- dev->mode_config.tv_hue_property->values[1] = 100;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_mode_create_tv_properties);
-
-/**
- * drm_mode_create_scaling_mode_property - create scaling mode property
- * @dev: DRM device
- *
- * Called by a driver the first time it's needed, must be attached to desired
- * connectors.
- */
-int drm_mode_create_scaling_mode_property(struct drm_device *dev)
-{
- struct drm_property *scaling_mode;
- int i;
-
- if (dev->mode_config.scaling_mode_property)
- return 0;
-
- scaling_mode =
- drm_property_create(dev, DRM_MODE_PROP_ENUM, "scaling mode",
- ARRAY_SIZE(drm_scaling_mode_enum_list));
- for (i = 0; i < ARRAY_SIZE(drm_scaling_mode_enum_list); i++)
- drm_property_add_enum(scaling_mode, i,
- drm_scaling_mode_enum_list[i].type,
- drm_scaling_mode_enum_list[i].name);
-
- dev->mode_config.scaling_mode_property = scaling_mode;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_mode_create_scaling_mode_property);
-
-/**
- * drm_mode_create_dithering_property - create dithering property
- * @dev: DRM device
- *
- * Called by a driver the first time it's needed, must be attached to desired
- * connectors.
- */
-int drm_mode_create_dithering_property(struct drm_device *dev)
-{
- struct drm_property *dithering_mode;
- int i;
-
- if (dev->mode_config.dithering_mode_property)
- return 0;
-
- dithering_mode =
- drm_property_create(dev, DRM_MODE_PROP_ENUM, "dithering",
- ARRAY_SIZE(drm_dithering_mode_enum_list));
- for (i = 0; i < ARRAY_SIZE(drm_dithering_mode_enum_list); i++)
- drm_property_add_enum(dithering_mode, i,
- drm_dithering_mode_enum_list[i].type,
- drm_dithering_mode_enum_list[i].name);
- dev->mode_config.dithering_mode_property = dithering_mode;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_mode_create_dithering_property);
-
-/**
- * drm_mode_create_dirty_info_property - create dirty info property
- * @dev: DRM device
- *
- * Called by a driver the first time it's needed, must be attached to desired
- * connectors.
- */
-int drm_mode_create_dirty_info_property(struct drm_device *dev)
-{
- struct drm_property *dirty_info;
- int i;
-
- if (dev->mode_config.dirty_info_property)
- return 0;
-
- dirty_info =
- drm_property_create(dev, DRM_MODE_PROP_ENUM |
- DRM_MODE_PROP_IMMUTABLE,
- "dirty",
- ARRAY_SIZE(drm_dirty_info_enum_list));
- for (i = 0; i < ARRAY_SIZE(drm_dirty_info_enum_list); i++)
- drm_property_add_enum(dirty_info, i,
- drm_dirty_info_enum_list[i].type,
- drm_dirty_info_enum_list[i].name);
- dev->mode_config.dirty_info_property = dirty_info;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_mode_create_dirty_info_property);
-
-/**
- * drm_mode_config_init - initialize DRM mode_configuration structure
- * @dev: DRM device
- *
- * LOCKING:
- * None, should happen single threaded at init time.
- *
- * Initialize @dev's mode_config structure, used for tracking the graphics
- * configuration of @dev.
- */
-void drm_mode_config_init(struct drm_device *dev)
-{
- mutex_init(&dev->mode_config.mutex);
- mutex_init(&dev->mode_config.idr_mutex);
- INIT_LIST_HEAD(&dev->mode_config.fb_list);
- INIT_LIST_HEAD(&dev->mode_config.crtc_list);
- INIT_LIST_HEAD(&dev->mode_config.connector_list);
- INIT_LIST_HEAD(&dev->mode_config.encoder_list);
- INIT_LIST_HEAD(&dev->mode_config.property_list);
- INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
- idr_init(&dev->mode_config.crtc_idr);
-
- mutex_lock(&dev->mode_config.mutex);
- drm_mode_create_standard_connector_properties(dev);
- mutex_unlock(&dev->mode_config.mutex);
-
- /* Just to be sure */
- dev->mode_config.num_fb = 0;
- dev->mode_config.num_connector = 0;
- dev->mode_config.num_crtc = 0;
- dev->mode_config.num_encoder = 0;
-}
-EXPORT_SYMBOL(drm_mode_config_init);
-
-int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group)
-{
- uint32_t total_objects = 0;
-
- total_objects += dev->mode_config.num_crtc;
- total_objects += dev->mode_config.num_connector;
- total_objects += dev->mode_config.num_encoder;
-
- if (total_objects == 0)
- return -EINVAL;
-
- group->id_list = kzalloc(total_objects * sizeof(uint32_t), GFP_KERNEL);
- if (!group->id_list)
- return -ENOMEM;
-
- group->num_crtcs = 0;
- group->num_connectors = 0;
- group->num_encoders = 0;
- return 0;
-}
-
-int drm_mode_group_init_legacy_group(struct drm_device *dev,
- struct drm_mode_group *group)
-{
- struct drm_crtc *crtc;
- struct drm_encoder *encoder;
- struct drm_connector *connector;
- int ret;
-
- if ((ret = drm_mode_group_init(dev, group)))
- return ret;
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
- group->id_list[group->num_crtcs++] = crtc->base.id;
-
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
- group->id_list[group->num_crtcs + group->num_encoders++] =
- encoder->base.id;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head)
- group->id_list[group->num_crtcs + group->num_encoders +
- group->num_connectors++] = connector->base.id;
-
- return 0;
-}
-
-/**
- * drm_mode_config_cleanup - free up DRM mode_config info
- * @dev: DRM device
- *
- * LOCKING:
- * Caller must hold mode config lock.
- *
- * Free up all the connectors and CRTCs associated with this DRM device, then
- * free up the framebuffers and associated buffer objects.
- *
- * FIXME: cleanup any dangling user buffer objects too
- */
-void drm_mode_config_cleanup(struct drm_device *dev)
-{
- struct drm_connector *connector, *ot;
- struct drm_crtc *crtc, *ct;
- struct drm_encoder *encoder, *enct;
- struct drm_framebuffer *fb, *fbt;
- struct drm_property *property, *pt;
-
- list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list,
- head) {
- encoder->funcs->destroy(encoder);
- }
-
- list_for_each_entry_safe(connector, ot,
- &dev->mode_config.connector_list, head) {
- connector->funcs->destroy(connector);
- }
-
- list_for_each_entry_safe(property, pt, &dev->mode_config.property_list,
- head) {
- drm_property_destroy(dev, property);
- }
-
- list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
- fb->funcs->destroy(fb);
- }
-
- list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
- crtc->funcs->destroy(crtc);
- }
-
-}
-EXPORT_SYMBOL(drm_mode_config_cleanup);
-
-/**
- * drm_crtc_convert_to_umode - convert a drm_display_mode into a modeinfo
- * @out: drm_mode_modeinfo struct to return to the user
- * @in: drm_display_mode to use
- *
- * LOCKING:
- * None.
- *
- * Convert a drm_display_mode into a drm_mode_modeinfo structure to return to
- * the user.
- */
-void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out,
- struct drm_display_mode *in)
-{
- out->clock = in->clock;
- out->hdisplay = in->hdisplay;
- out->hsync_start = in->hsync_start;
- out->hsync_end = in->hsync_end;
- out->htotal = in->htotal;
- out->hskew = in->hskew;
- out->vdisplay = in->vdisplay;
- out->vsync_start = in->vsync_start;
- out->vsync_end = in->vsync_end;
- out->vtotal = in->vtotal;
- out->vscan = in->vscan;
- out->vrefresh = in->vrefresh;
- out->flags = in->flags;
- out->type = in->type;
- strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
- out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
-}
-
-/**
- * drm_crtc_convert_umode - convert a modeinfo into a drm_display_mode
- * @out: drm_display_mode to return to the user
- * @in: drm_mode_modeinfo to use
- *
- * LOCKING:
- * None.
- *
- * Convert a drm_mode_modeinfo into a drm_display_mode structure to return to
- * the caller.
- */
-void drm_crtc_convert_umode(struct drm_display_mode *out,
- struct drm_mode_modeinfo *in)
-{
- out->clock = in->clock;
- out->hdisplay = in->hdisplay;
- out->hsync_start = in->hsync_start;
- out->hsync_end = in->hsync_end;
- out->htotal = in->htotal;
- out->hskew = in->hskew;
- out->vdisplay = in->vdisplay;
- out->vsync_start = in->vsync_start;
- out->vsync_end = in->vsync_end;
- out->vtotal = in->vtotal;
- out->vscan = in->vscan;
- out->vrefresh = in->vrefresh;
- out->flags = in->flags;
- out->type = in->type;
- strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
- out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
-}
-
-/**
- * drm_mode_getresources - get graphics configuration
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
- *
- * LOCKING:
- * Takes mode config lock.
- *
- * Construct a set of configuration description structures and return
- * them to the user, including CRTC, connector and framebuffer configuration.
- *
- * Called by the user via ioctl.
- *
- * RETURNS:
- * Zero on success, errno on failure.
- */
-int drm_mode_getresources(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_mode_card_res *card_res = data;
- struct list_head *lh;
- struct drm_framebuffer *fb;
- struct drm_connector *connector;
- struct drm_crtc *crtc;
- struct drm_encoder *encoder;
- int ret = 0;
- int connector_count = 0;
- int crtc_count = 0;
- int fb_count = 0;
- int encoder_count = 0;
- int copied = 0, i;
- uint32_t __user *fb_id;
- uint32_t __user *crtc_id;
- uint32_t __user *connector_id;
- uint32_t __user *encoder_id;
- struct drm_mode_group *mode_group;
-
- mutex_lock(&dev->mode_config.mutex);
-
- /*
- * For the non-control nodes we need to limit the list of resources
- * by IDs in the group list for this node
- */
- list_for_each(lh, &file_priv->fbs)
- fb_count++;
-
- mode_group = &file_priv->master->minor->mode_group;
- if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
-
- list_for_each(lh, &dev->mode_config.crtc_list)
- crtc_count++;
-
- list_for_each(lh, &dev->mode_config.connector_list)
- connector_count++;
-
- list_for_each(lh, &dev->mode_config.encoder_list)
- encoder_count++;
- } else {
-
- crtc_count = mode_group->num_crtcs;
- connector_count = mode_group->num_connectors;
- encoder_count = mode_group->num_encoders;
- }
-
- card_res->max_height = dev->mode_config.max_height;
- card_res->min_height = dev->mode_config.min_height;
- card_res->max_width = dev->mode_config.max_width;
- card_res->min_width = dev->mode_config.min_width;
-
- /* handle this in 4 parts */
- /* FBs */
- if (card_res->count_fbs >= fb_count) {
- copied = 0;
- fb_id = (uint32_t __user *)(unsigned long)card_res->fb_id_ptr;
- list_for_each_entry(fb, &file_priv->fbs, head) {
- if (put_user(fb->base.id, fb_id + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
- }
- card_res->count_fbs = fb_count;
-
- /* CRTCs */
- if (card_res->count_crtcs >= crtc_count) {
- copied = 0;
- crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr;
- if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
- list_for_each_entry(crtc, &dev->mode_config.crtc_list,
- head) {
- DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
- if (put_user(crtc->base.id, crtc_id + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
- } else {
- for (i = 0; i < mode_group->num_crtcs; i++) {
- if (put_user(mode_group->id_list[i],
- crtc_id + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
- }
- }
- card_res->count_crtcs = crtc_count;
-
- /* Encoders */
- if (card_res->count_encoders >= encoder_count) {
- copied = 0;
- encoder_id = (uint32_t __user *)(unsigned long)card_res->encoder_id_ptr;
- if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
- list_for_each_entry(encoder,
- &dev->mode_config.encoder_list,
- head) {
- DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", encoder->base.id,
- drm_get_encoder_name(encoder));
- if (put_user(encoder->base.id, encoder_id +
- copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
- } else {
- for (i = mode_group->num_crtcs; i < mode_group->num_crtcs + mode_group->num_encoders; i++) {
- if (put_user(mode_group->id_list[i],
- encoder_id + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
-
- }
- }
- card_res->count_encoders = encoder_count;
-
- /* Connectors */
- if (card_res->count_connectors >= connector_count) {
- copied = 0;
- connector_id = (uint32_t __user *)(unsigned long)card_res->connector_id_ptr;
- if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
- list_for_each_entry(connector,
- &dev->mode_config.connector_list,
- head) {
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
- connector->base.id,
- drm_get_connector_name(connector));
- if (put_user(connector->base.id,
- connector_id + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
- } else {
- int start = mode_group->num_crtcs +
- mode_group->num_encoders;
- for (i = start; i < start + mode_group->num_connectors; i++) {
- if (put_user(mode_group->id_list[i],
- connector_id + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
- }
- }
- card_res->count_connectors = connector_count;
-
- DRM_DEBUG_KMS("CRTC[%d] CONNECTORS[%d] ENCODERS[%d]\n", card_res->count_crtcs,
- card_res->count_connectors, card_res->count_encoders);
-
-out:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-}
-
-/**
- * drm_mode_getcrtc - get CRTC configuration
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
- *
- * LOCKING:
- * Caller? (FIXME)
- *
- * Construct a CRTC configuration structure to return to the user.
- *
- * Called by the user via ioctl.
- *
- * RETURNS:
- * Zero on success, errno on failure.
- */
-int drm_mode_getcrtc(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_crtc *crtc_resp = data;
- struct drm_crtc *crtc;
- struct drm_mode_object *obj;
- int ret = 0;
-
- mutex_lock(&dev->mode_config.mutex);
-
- obj = drm_mode_object_find(dev, crtc_resp->crtc_id,
- DRM_MODE_OBJECT_CRTC);
- if (!obj) {
- ret = -EINVAL;
- goto out;
- }
- crtc = obj_to_crtc(obj);
-
- crtc_resp->x = crtc->x;
- crtc_resp->y = crtc->y;
- crtc_resp->gamma_size = crtc->gamma_size;
- if (crtc->fb)
- crtc_resp->fb_id = crtc->fb->base.id;
- else
- crtc_resp->fb_id = 0;
-
- if (crtc->enabled) {
-
- drm_crtc_convert_to_umode(&crtc_resp->mode, &crtc->mode);
- crtc_resp->mode_valid = 1;
-
- } else {
- crtc_resp->mode_valid = 0;
- }
-
-out:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-}
-
-/**
- * drm_mode_getconnector - get connector configuration
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
- *
- * LOCKING:
- * Caller? (FIXME)
- *
- * Construct a connector configuration structure to return to the user.
- *
- * Called by the user via ioctl.
- *
- * RETURNS:
- * Zero on success, errno on failure.
- */
-int drm_mode_getconnector(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_mode_get_connector *out_resp = data;
- struct drm_mode_object *obj;
- struct drm_connector *connector;
- struct drm_display_mode *mode;
- int mode_count = 0;
- int props_count = 0;
- int encoders_count = 0;
- int ret = 0;
- int copied = 0;
- int i;
- struct drm_mode_modeinfo u_mode;
- struct drm_mode_modeinfo __user *mode_ptr;
- uint32_t __user *prop_ptr;
- uint64_t __user *prop_values;
- uint32_t __user *encoder_ptr;
-
- memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo));
-
- DRM_DEBUG_KMS("[CONNECTOR:%d:?]\n", out_resp->connector_id);
-
- mutex_lock(&dev->mode_config.mutex);
-
- obj = drm_mode_object_find(dev, out_resp->connector_id,
- DRM_MODE_OBJECT_CONNECTOR);
- if (!obj) {
- ret = -EINVAL;
- goto out;
- }
- connector = obj_to_connector(obj);
-
- for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
- if (connector->property_ids[i] != 0) {
- props_count++;
- }
- }
-
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] != 0) {
- encoders_count++;
- }
- }
-
- if (out_resp->count_modes == 0) {
- connector->funcs->fill_modes(connector,
- dev->mode_config.max_width,
- dev->mode_config.max_height);
- }
-
- /* delayed so we get modes regardless of pre-fill_modes state */
- list_for_each_entry(mode, &connector->modes, head)
- mode_count++;
-
- out_resp->connector_id = connector->base.id;
- out_resp->connector_type = connector->connector_type;
- out_resp->connector_type_id = connector->connector_type_id;
- out_resp->mm_width = connector->display_info.width_mm;
- out_resp->mm_height = connector->display_info.height_mm;
- out_resp->subpixel = connector->display_info.subpixel_order;
- out_resp->connection = connector->status;
- if (connector->encoder)
- out_resp->encoder_id = connector->encoder->base.id;
- else
- out_resp->encoder_id = 0;
-
- /*
- * This ioctl is called twice, once to determine how much space is
- * needed, and the 2nd time to fill it.
- */
- if ((out_resp->count_modes >= mode_count) && mode_count) {
- copied = 0;
- mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
- list_for_each_entry(mode, &connector->modes, head) {
- drm_crtc_convert_to_umode(&u_mode, mode);
- if (copy_to_user(mode_ptr + copied,
- &u_mode, sizeof(u_mode))) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
- }
- out_resp->count_modes = mode_count;
-
- if ((out_resp->count_props >= props_count) && props_count) {
- copied = 0;
- prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
- prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
- for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
- if (connector->property_ids[i] != 0) {
- if (put_user(connector->property_ids[i],
- prop_ptr + copied)) {
- ret = -EFAULT;
- goto out;
- }
-
- if (put_user(connector->property_values[i],
- prop_values + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
- }
- }
- out_resp->count_props = props_count;
-
- if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
- copied = 0;
- encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] != 0) {
- if (put_user(connector->encoder_ids[i],
- encoder_ptr + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
- }
- }
- out_resp->count_encoders = encoders_count;
-
-out:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-}
-
-int drm_mode_getencoder(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_mode_get_encoder *enc_resp = data;
- struct drm_mode_object *obj;
- struct drm_encoder *encoder;
- int ret = 0;
-
- mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, enc_resp->encoder_id,
- DRM_MODE_OBJECT_ENCODER);
- if (!obj) {
- ret = -EINVAL;
- goto out;
- }
- encoder = obj_to_encoder(obj);
-
- if (encoder->crtc)
- enc_resp->crtc_id = encoder->crtc->base.id;
- else
- enc_resp->crtc_id = 0;
- enc_resp->encoder_type = encoder->encoder_type;
- enc_resp->encoder_id = encoder->base.id;
- enc_resp->possible_crtcs = encoder->possible_crtcs;
- enc_resp->possible_clones = encoder->possible_clones;
-
-out:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-}
-
-/**
- * drm_mode_setcrtc - set CRTC configuration
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
- *
- * LOCKING:
- * Caller? (FIXME)
- *
- * Build a new CRTC configuration based on user request.
- *
- * Called by the user via ioctl.
- *
- * RETURNS:
- * Zero on success, errno on failure.
- */
-int drm_mode_setcrtc(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_mode_config *config = &dev->mode_config;
- struct drm_mode_crtc *crtc_req = data;
- struct drm_mode_object *obj;
- struct drm_crtc *crtc, *crtcfb;
- struct drm_connector **connector_set = NULL, *connector;
- struct drm_framebuffer *fb = NULL;
- struct drm_display_mode *mode = NULL;
- struct drm_mode_set set;
- uint32_t __user *set_connectors_ptr;
- int ret = 0;
- int i;
-
- mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, crtc_req->crtc_id,
- DRM_MODE_OBJECT_CRTC);
- if (!obj) {
- DRM_DEBUG_KMS("Unknown CRTC ID %d\n", crtc_req->crtc_id);
- ret = -EINVAL;
- goto out;
- }
- crtc = obj_to_crtc(obj);
- DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
-
- if (crtc_req->mode_valid) {
- /* If we have a mode we need a framebuffer. */
- /* If we pass -1, set the mode with the currently bound fb */
- if (crtc_req->fb_id == -1) {
- list_for_each_entry(crtcfb,
- &dev->mode_config.crtc_list, head) {
- if (crtcfb == crtc) {
- DRM_DEBUG_KMS("Using current fb for "
- "setmode\n");
- fb = crtc->fb;
- }
- }
- } else {
- obj = drm_mode_object_find(dev, crtc_req->fb_id,
- DRM_MODE_OBJECT_FB);
- if (!obj) {
- DRM_DEBUG_KMS("Unknown FB ID%d\n",
- crtc_req->fb_id);
- ret = -EINVAL;
- goto out;
- }
- fb = obj_to_fb(obj);
- }
-
- mode = drm_mode_create(dev);
- drm_crtc_convert_umode(mode, &crtc_req->mode);
- drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
- }
-
- if (crtc_req->count_connectors == 0 && mode) {
- DRM_DEBUG_KMS("Count connectors is 0 but mode set\n");
- ret = -EINVAL;
- goto out;
- }
-
- if (crtc_req->count_connectors > 0 && (!mode || !fb)) {
- DRM_DEBUG_KMS("Count connectors is %d but no mode or fb set\n",
- crtc_req->count_connectors);
- ret = -EINVAL;
- goto out;
- }
-
- if (crtc_req->count_connectors > 0) {
- u32 out_id;
-
- /* Avoid unbounded kernel memory allocation */
- if (crtc_req->count_connectors > config->num_connector) {
- ret = -EINVAL;
- goto out;
- }
-
- connector_set = kmalloc(crtc_req->count_connectors *
- sizeof(struct drm_connector *),
- GFP_KERNEL);
- if (!connector_set) {
- ret = -ENOMEM;
- goto out;
- }
-
- for (i = 0; i < crtc_req->count_connectors; i++) {
- set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
- if (get_user(out_id, &set_connectors_ptr[i])) {
- ret = -EFAULT;
- goto out;
- }
-
- obj = drm_mode_object_find(dev, out_id,
- DRM_MODE_OBJECT_CONNECTOR);
- if (!obj) {
- DRM_DEBUG_KMS("Connector id %d unknown\n",
- out_id);
- ret = -EINVAL;
- goto out;
- }
- connector = obj_to_connector(obj);
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
- connector->base.id,
- drm_get_connector_name(connector));
-
- connector_set[i] = connector;
- }
- }
-
- set.crtc = crtc;
- set.x = crtc_req->x;
- set.y = crtc_req->y;
- set.mode = mode;
- set.connectors = connector_set;
- set.num_connectors = crtc_req->count_connectors;
- set.fb = fb;
- ret = crtc->funcs->set_config(&set);
-
-out:
- kfree(connector_set);
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-}
-
-int drm_mode_cursor_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_cursor *req = data;
- struct drm_mode_object *obj;
- struct drm_crtc *crtc;
- int ret = 0;
-
- if (!req->flags) {
- DRM_ERROR("no operation set\n");
- return -EINVAL;
- }
-
- mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC);
- if (!obj) {
- DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id);
- ret = -EINVAL;
- goto out;
- }
- crtc = obj_to_crtc(obj);
-
- if (req->flags & DRM_MODE_CURSOR_BO) {
- if (!crtc->funcs->cursor_set) {
- DRM_ERROR("crtc does not support cursor\n");
- ret = -ENXIO;
- goto out;
- }
- /* Turns off the cursor if handle is 0 */
- ret = crtc->funcs->cursor_set(crtc, file_priv, req->handle,
- req->width, req->height);
- }
-
- if (req->flags & DRM_MODE_CURSOR_MOVE) {
- if (crtc->funcs->cursor_move) {
- ret = crtc->funcs->cursor_move(crtc, req->x, req->y);
- } else {
- DRM_ERROR("crtc does not support cursor\n");
- ret = -EFAULT;
- goto out;
- }
- }
-out:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-}
-
-/**
- * drm_mode_addfb - add an FB to the graphics configuration
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
- *
- * LOCKING:
- * Takes mode config lock.
- *
- * Add a new FB to the specified CRTC, given a user request.
- *
- * Called by the user via ioctl.
- *
- * RETURNS:
- * Zero on success, errno on failure.
- */
-int drm_mode_addfb(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_fb_cmd *r = data;
- struct drm_mode_config *config = &dev->mode_config;
- struct drm_framebuffer *fb;
- int ret = 0;
-
- if ((config->min_width > r->width) || (r->width > config->max_width)) {
- DRM_ERROR("mode new framebuffer width not within limits\n");
- return -EINVAL;
- }
- if ((config->min_height > r->height) || (r->height > config->max_height)) {
- DRM_ERROR("mode new framebuffer height not within limits\n");
- return -EINVAL;
- }
-
- mutex_lock(&dev->mode_config.mutex);
-
- /* TODO check buffer is sufficiently large */
- /* TODO setup destructor callback */
-
- fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
- if (IS_ERR(fb)) {
- DRM_ERROR("could not create framebuffer\n");
- ret = PTR_ERR(fb);
- goto out;
- }
-
- r->fb_id = fb->base.id;
- list_add(&fb->filp_head, &file_priv->fbs);
- DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
-
-out:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-}
-
-/**
- * drm_mode_rmfb - remove an FB from the configuration
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
- *
- * LOCKING:
- * Takes mode config lock.
- *
- * Remove the FB specified by the user.
- *
- * Called by the user via ioctl.
- *
- * RETURNS:
- * Zero on success, errno on failure.
- */
-int drm_mode_rmfb(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_object *obj;
- struct drm_framebuffer *fb = NULL;
- struct drm_framebuffer *fbl = NULL;
- uint32_t *id = data;
- int ret = 0;
- int found = 0;
-
- mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, *id, DRM_MODE_OBJECT_FB);
- /* TODO check that we really get a framebuffer back. */
- if (!obj) {
- DRM_ERROR("mode invalid framebuffer id\n");
- ret = -EINVAL;
- goto out;
- }
- fb = obj_to_fb(obj);
-
- list_for_each_entry(fbl, &file_priv->fbs, filp_head)
- if (fb == fbl)
- found = 1;
-
- if (!found) {
- DRM_ERROR("tried to remove a fb that we didn't own\n");
- ret = -EINVAL;
- goto out;
- }
-
- /* TODO release all crtc connected to the framebuffer */
- /* TODO unhook the destructor from the buffer object */
-
- list_del(&fb->filp_head);
- fb->funcs->destroy(fb);
-
-out:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-}
-
-/**
- * drm_mode_getfb - get FB info
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
- *
- * LOCKING:
- * Caller? (FIXME)
- *
- * Lookup the FB given its ID and return info about it.
- *
- * Called by the user via ioctl.
- *
- * RETURNS:
- * Zero on success, errno on failure.
- */
-int drm_mode_getfb(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_fb_cmd *r = data;
- struct drm_mode_object *obj;
- struct drm_framebuffer *fb;
- int ret = 0;
-
- mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
- if (!obj) {
- DRM_ERROR("invalid framebuffer id\n");
- ret = -EINVAL;
- goto out;
- }
- fb = obj_to_fb(obj);
-
- r->height = fb->height;
- r->width = fb->width;
- r->depth = fb->depth;
- r->bpp = fb->bits_per_pixel;
- r->pitch = fb->pitch;
- fb->funcs->create_handle(fb, file_priv, &r->handle);
-
-out:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-}
-
-int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_clip_rect __user *clips_ptr;
- struct drm_clip_rect *clips = NULL;
- struct drm_mode_fb_dirty_cmd *r = data;
- struct drm_mode_object *obj;
- struct drm_framebuffer *fb;
- unsigned flags;
- int num_clips;
- int ret = 0;
-
- mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
- if (!obj) {
- DRM_ERROR("invalid framebuffer id\n");
- ret = -EINVAL;
- goto out_err1;
- }
- fb = obj_to_fb(obj);
-
- num_clips = r->num_clips;
- clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
-
- if (!num_clips != !clips_ptr) {
- ret = -EINVAL;
- goto out_err1;
- }
-
- flags = DRM_MODE_FB_DIRTY_FLAGS & r->flags;
-
- /* If userspace annotates copy, clips must come in pairs */
- if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY && (num_clips % 2)) {
- ret = -EINVAL;
- goto out_err1;
- }
-
- if (num_clips && clips_ptr) {
- clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
- if (!clips) {
- ret = -ENOMEM;
- goto out_err1;
- }
-
- ret = copy_from_user(clips, clips_ptr,
- num_clips * sizeof(*clips));
- if (ret) {
- ret = -EFAULT;
- goto out_err2;
- }
- }
-
- if (fb->funcs->dirty) {
- ret = fb->funcs->dirty(fb, file_priv, flags, r->color,
- clips, num_clips);
- } else {
- ret = -ENOSYS;
- goto out_err2;
- }
-
-out_err2:
- kfree(clips);
-out_err1:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-}
-
-
-/**
- * drm_fb_release - remove and free the FBs on this file
- * @priv: DRM file whose framebuffers should be destroyed
- *
- * LOCKING:
- * Takes mode config lock.
- *
- * Destroy all the FBs associated with @priv.
- *
- * Called when the DRM file is closed; there is no return value.
- */
-void drm_fb_release(struct drm_file *priv)
-{
- struct drm_device *dev = priv->minor->dev;
- struct drm_framebuffer *fb, *tfb;
-
- mutex_lock(&dev->mode_config.mutex);
- list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
- list_del(&fb->filp_head);
- fb->funcs->destroy(fb);
- }
- mutex_unlock(&dev->mode_config.mutex);
-}
-
-/**
- * drm_mode_attachmode - add a mode to the user mode list
- * @dev: DRM device
- * @connector: connector to add the mode to
- * @mode: mode to add
- *
- * Add @mode to @connector's user mode list.
- */
-static int drm_mode_attachmode(struct drm_device *dev,
- struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- int ret = 0;
-
- list_add_tail(&mode->head, &connector->user_modes);
- return ret;
-}
-
-int drm_mode_attachmode_crtc(struct drm_device *dev, struct drm_crtc *crtc,
- struct drm_display_mode *mode)
-{
- struct drm_connector *connector;
- int ret = 0;
- struct drm_display_mode *dup_mode;
- int need_dup = 0;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (!connector->encoder)
- break;
- if (connector->encoder->crtc == crtc) {
- if (need_dup)
- dup_mode = drm_mode_duplicate(dev, mode);
- else
- dup_mode = mode;
- ret = drm_mode_attachmode(dev, connector, dup_mode);
- if (ret)
- return ret;
- need_dup = 1;
- }
- }
- return 0;
-}
-EXPORT_SYMBOL(drm_mode_attachmode_crtc);
-
-static int drm_mode_detachmode(struct drm_device *dev,
- struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- int found = 0;
- int ret = 0;
- struct drm_display_mode *match_mode, *t;
-
- list_for_each_entry_safe(match_mode, t, &connector->user_modes, head) {
- if (drm_mode_equal(match_mode, mode)) {
- list_del(&match_mode->head);
- drm_mode_destroy(dev, match_mode);
- found = 1;
- break;
- }
- }
-
- if (!found)
- ret = -EINVAL;
-
- return ret;
-}
-
-int drm_mode_detachmode_crtc(struct drm_device *dev, struct drm_display_mode *mode)
-{
- struct drm_connector *connector;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- drm_mode_detachmode(dev, connector, mode);
- }
- return 0;
-}
-EXPORT_SYMBOL(drm_mode_detachmode_crtc);
-
-/**
- * drm_mode_attachmode_ioctl - attach a user mode to a connector
- * @dev: DRM device for the ioctl
- * @data: pointer to a struct drm_mode_mode_cmd
- * @file_priv: DRM file for the ioctl call
- *
- * This attaches a user-specified mode to a connector.
- * Called by the user via ioctl.
- *
- * RETURNS:
- * Zero on success, errno on failure.
- */
-int drm_mode_attachmode_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_mode_cmd *mode_cmd = data;
- struct drm_connector *connector;
- struct drm_display_mode *mode;
- struct drm_mode_object *obj;
- struct drm_mode_modeinfo *umode = &mode_cmd->mode;
- int ret = 0;
-
- mutex_lock(&dev->mode_config.mutex);
-
- obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR);
- if (!obj) {
- ret = -EINVAL;
- goto out;
- }
- connector = obj_to_connector(obj);
-
- mode = drm_mode_create(dev);
- if (!mode) {
- ret = -ENOMEM;
- goto out;
- }
-
- drm_crtc_convert_umode(mode, umode);
-
- ret = drm_mode_attachmode(dev, connector, mode);
-out:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-}
-
-
-/**
- * drm_mode_detachmode_ioctl - detach a user-specified mode from a connector
- * @dev: DRM device for the ioctl
- * @data: pointer to a struct drm_mode_mode_cmd
- * @file_priv: DRM file for the ioctl call
- *
- * Called by the user via ioctl.
- *
- * RETURNS:
- * Zero on success, errno on failure.
- */
-int drm_mode_detachmode_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_object *obj;
- struct drm_mode_mode_cmd *mode_cmd = data;
- struct drm_connector *connector;
- struct drm_display_mode mode;
- struct drm_mode_modeinfo *umode = &mode_cmd->mode;
- int ret = 0;
-
- mutex_lock(&dev->mode_config.mutex);
-
- obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR);
- if (!obj) {
- ret = -EINVAL;
- goto out;
- }
- connector = obj_to_connector(obj);
-
- drm_crtc_convert_umode(&mode, umode);
- ret = drm_mode_detachmode(dev, connector, &mode);
-out:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-}
-
-struct drm_property *drm_property_create(struct drm_device *dev, int flags,
- const char *name, int num_values)
-{
- struct drm_property *property = NULL;
-
- property = kzalloc(sizeof(struct drm_property), GFP_KERNEL);
- if (!property)
- return NULL;
-
- if (num_values) {
- property->values = kzalloc(sizeof(uint64_t)*num_values, GFP_KERNEL);
- if (!property->values)
- goto fail;
- }
-
- drm_mode_object_get(dev, &property->base, DRM_MODE_OBJECT_PROPERTY);
- property->flags = flags;
- property->num_values = num_values;
- INIT_LIST_HEAD(&property->enum_blob_list);
-
- if (name)
- strncpy(property->name, name, DRM_PROP_NAME_LEN);
-
- list_add_tail(&property->head, &dev->mode_config.property_list);
- return property;
-fail:
- kfree(property);
- return NULL;
-}
-EXPORT_SYMBOL(drm_property_create);
-
-int drm_property_add_enum(struct drm_property *property, int index,
- uint64_t value, const char *name)
-{
- struct drm_property_enum *prop_enum;
-
- if (!(property->flags & DRM_MODE_PROP_ENUM))
- return -EINVAL;
-
- if (!list_empty(&property->enum_blob_list)) {
- list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
- if (prop_enum->value == value) {
- strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
- prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
- return 0;
- }
- }
- }
-
- prop_enum = kzalloc(sizeof(struct drm_property_enum), GFP_KERNEL);
- if (!prop_enum)
- return -ENOMEM;
-
- strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
- prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
- prop_enum->value = value;
-
- property->values[index] = value;
- list_add_tail(&prop_enum->head, &property->enum_blob_list);
- return 0;
-}
-EXPORT_SYMBOL(drm_property_add_enum);
-
-void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
-{
- struct drm_property_enum *prop_enum, *pt;
-
- list_for_each_entry_safe(prop_enum, pt, &property->enum_blob_list, head) {
- list_del(&prop_enum->head);
- kfree(prop_enum);
- }
-
- if (property->num_values)
- kfree(property->values);
- drm_mode_object_put(dev, &property->base);
- list_del(&property->head);
- kfree(property);
-}
-EXPORT_SYMBOL(drm_property_destroy);
-
-int drm_connector_attach_property(struct drm_connector *connector,
- struct drm_property *property, uint64_t init_val)
-{
- int i;
-
- for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
- if (connector->property_ids[i] == 0) {
- connector->property_ids[i] = property->base.id;
- connector->property_values[i] = init_val;
- break;
- }
- }
-
- if (i == DRM_CONNECTOR_MAX_PROPERTY)
- return -EINVAL;
- return 0;
-}
-EXPORT_SYMBOL(drm_connector_attach_property);
-
-int drm_connector_property_set_value(struct drm_connector *connector,
- struct drm_property *property, uint64_t value)
-{
- int i;
-
- for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
- if (connector->property_ids[i] == property->base.id) {
- connector->property_values[i] = value;
- break;
- }
- }
-
- if (i == DRM_CONNECTOR_MAX_PROPERTY)
- return -EINVAL;
- return 0;
-}
-EXPORT_SYMBOL(drm_connector_property_set_value);
-
-int drm_connector_property_get_value(struct drm_connector *connector,
- struct drm_property *property, uint64_t *val)
-{
- int i;
-
- for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
- if (connector->property_ids[i] == property->base.id) {
- *val = connector->property_values[i];
- break;
- }
- }
-
- if (i == DRM_CONNECTOR_MAX_PROPERTY)
- return -EINVAL;
- return 0;
-}
-EXPORT_SYMBOL(drm_connector_property_get_value);
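For orientation only — the following is an editorial sketch and not part of the removed file — a KMS driver would typically combine the property helpers above roughly as follows; the connector pointer and the "scaling mode" name are placeholders:

/* Editorial sketch (not from the original source): create an enum property,
 * describe its two values, and attach it to a connector with a default.
 */
static struct drm_property *example_scaling_prop;

static void example_create_scaling_property(struct drm_device *dev,
					    struct drm_connector *connector)
{
	example_scaling_prop = drm_property_create(dev, DRM_MODE_PROP_ENUM,
						   "scaling mode", 2);
	if (!example_scaling_prop)
		return;

	drm_property_add_enum(example_scaling_prop, 0, 0, "None");
	drm_property_add_enum(example_scaling_prop, 1, 1, "Full");

	/* expose the property on the connector, defaulting to "None" */
	drm_connector_attach_property(connector, example_scaling_prop, 0);
}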
-
-int drm_mode_getproperty_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_object *obj;
- struct drm_mode_get_property *out_resp = data;
- struct drm_property *property;
- int enum_count = 0;
- int blob_count = 0;
- int value_count = 0;
- int ret = 0, i;
- int copied;
- struct drm_property_enum *prop_enum;
- struct drm_mode_property_enum __user *enum_ptr;
- struct drm_property_blob *prop_blob;
- uint32_t *blob_id_ptr;
- uint64_t __user *values_ptr;
- uint32_t __user *blob_length_ptr;
-
- mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
- if (!obj) {
- ret = -EINVAL;
- goto done;
- }
- property = obj_to_property(obj);
-
- if (property->flags & DRM_MODE_PROP_ENUM) {
- list_for_each_entry(prop_enum, &property->enum_blob_list, head)
- enum_count++;
- } else if (property->flags & DRM_MODE_PROP_BLOB) {
- list_for_each_entry(prop_blob, &property->enum_blob_list, head)
- blob_count++;
- }
-
- value_count = property->num_values;
-
- strncpy(out_resp->name, property->name, DRM_PROP_NAME_LEN);
- out_resp->name[DRM_PROP_NAME_LEN-1] = 0;
- out_resp->flags = property->flags;
-
- if ((out_resp->count_values >= value_count) && value_count) {
- values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
- for (i = 0; i < value_count; i++) {
- if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
- ret = -EFAULT;
- goto done;
- }
- }
- }
- out_resp->count_values = value_count;
-
- if (property->flags & DRM_MODE_PROP_ENUM) {
- if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
- copied = 0;
- enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
- list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
-
- if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
- ret = -EFAULT;
- goto done;
- }
-
- if (copy_to_user(&enum_ptr[copied].name,
- &prop_enum->name, DRM_PROP_NAME_LEN)) {
- ret = -EFAULT;
- goto done;
- }
- copied++;
- }
- }
- out_resp->count_enum_blobs = enum_count;
- }
-
- if (property->flags & DRM_MODE_PROP_BLOB) {
- if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
- copied = 0;
- blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
- blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
-
- list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
- if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
- ret = -EFAULT;
- goto done;
- }
-
- if (put_user(prop_blob->length, blob_length_ptr + copied)) {
- ret = -EFAULT;
- goto done;
- }
-
- copied++;
- }
- }
- out_resp->count_enum_blobs = blob_count;
- }
-done:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-}
-
-static struct drm_property_blob *drm_property_create_blob(struct drm_device *dev, int length,
- void *data)
-{
- struct drm_property_blob *blob;
-
- if (!length || !data)
- return NULL;
-
- blob = kzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL);
- if (!blob)
- return NULL;
-
- blob->data = (void *)((char *)blob + sizeof(struct drm_property_blob));
- blob->length = length;
-
- memcpy(blob->data, data, length);
-
- drm_mode_object_get(dev, &blob->base, DRM_MODE_OBJECT_BLOB);
-
- list_add_tail(&blob->head, &dev->mode_config.property_blob_list);
- return blob;
-}
-
-static void drm_property_destroy_blob(struct drm_device *dev,
- struct drm_property_blob *blob)
-{
- drm_mode_object_put(dev, &blob->base);
- list_del(&blob->head);
- kfree(blob);
-}
-
-int drm_mode_getblob_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_object *obj;
- struct drm_mode_get_blob *out_resp = data;
- struct drm_property_blob *blob;
- int ret = 0;
- void *blob_ptr;
-
- mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, out_resp->blob_id, DRM_MODE_OBJECT_BLOB);
- if (!obj) {
- ret = -EINVAL;
- goto done;
- }
- blob = obj_to_blob(obj);
-
- if (out_resp->length == blob->length) {
- blob_ptr = (void *)(unsigned long)out_resp->data;
- if (copy_to_user(blob_ptr, blob->data, blob->length)){
- ret = -EFAULT;
- goto done;
- }
- }
- out_resp->length = blob->length;
-
-done:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-}
-
-int drm_mode_connector_update_edid_property(struct drm_connector *connector,
- struct edid *edid)
-{
- struct drm_device *dev = connector->dev;
- int ret = 0, size;
-
- if (connector->edid_blob_ptr)
- drm_property_destroy_blob(dev, connector->edid_blob_ptr);
-
-	/* If there is no new EDID, just clear the property. */
- if (!edid) {
- connector->edid_blob_ptr = NULL;
- ret = drm_connector_property_set_value(connector, dev->mode_config.edid_property, 0);
- return ret;
- }
-
- size = EDID_LENGTH * (1 + edid->extensions);
- connector->edid_blob_ptr = drm_property_create_blob(connector->dev,
- size, edid);
-
- ret = drm_connector_property_set_value(connector,
- dev->mode_config.edid_property,
- connector->edid_blob_ptr->base.id);
-
- return ret;
-}
-EXPORT_SYMBOL(drm_mode_connector_update_edid_property);
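As a hedged illustration (not part of this patch), the usual caller of drm_mode_connector_update_edid_property() is a connector's get_modes hook; example_read_edid() below stands in for the driver's own EDID fetch:

/* Editorial sketch: publish (or clear) the EDID property, then build the
 * connector's mode list from it.  example_read_edid() is a placeholder.
 */
static int example_connector_get_modes(struct drm_connector *connector)
{
	struct edid *edid = example_read_edid(connector);
	int count = 0;

	drm_mode_connector_update_edid_property(connector, edid);
	if (edid) {
		count = drm_add_edid_modes(connector, edid);
		kfree(edid);
	}
	return count;
}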
-
-int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_connector_set_property *out_resp = data;
- struct drm_mode_object *obj;
- struct drm_property *property;
- struct drm_connector *connector;
- int ret = -EINVAL;
- int i;
-
- mutex_lock(&dev->mode_config.mutex);
-
- obj = drm_mode_object_find(dev, out_resp->connector_id, DRM_MODE_OBJECT_CONNECTOR);
- if (!obj) {
- goto out;
- }
- connector = obj_to_connector(obj);
-
- for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
- if (connector->property_ids[i] == out_resp->prop_id)
- break;
- }
-
- if (i == DRM_CONNECTOR_MAX_PROPERTY) {
- goto out;
- }
-
- obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
- if (!obj) {
- goto out;
- }
- property = obj_to_property(obj);
-
- if (property->flags & DRM_MODE_PROP_IMMUTABLE)
- goto out;
-
- if (property->flags & DRM_MODE_PROP_RANGE) {
- if (out_resp->value < property->values[0])
- goto out;
-
- if (out_resp->value > property->values[1])
- goto out;
- } else {
- int found = 0;
- for (i = 0; i < property->num_values; i++) {
- if (property->values[i] == out_resp->value) {
- found = 1;
- break;
- }
- }
- if (!found) {
- goto out;
- }
- }
-
- /* Do DPMS ourselves */
- if (property == connector->dev->mode_config.dpms_property) {
- if (connector->funcs->dpms)
- (*connector->funcs->dpms)(connector, (int) out_resp->value);
- ret = 0;
- } else if (connector->funcs->set_property)
- ret = connector->funcs->set_property(connector, property, out_resp->value);
-
- /* store the property value if successful */
- if (!ret)
- drm_connector_property_set_value(connector, property, out_resp->value);
-out:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-}
-
-int drm_mode_connector_attach_encoder(struct drm_connector *connector,
- struct drm_encoder *encoder)
-{
- int i;
-
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == 0) {
- connector->encoder_ids[i] = encoder->base.id;
- return 0;
- }
- }
- return -ENOMEM;
-}
-EXPORT_SYMBOL(drm_mode_connector_attach_encoder);
-
-void drm_mode_connector_detach_encoder(struct drm_connector *connector,
- struct drm_encoder *encoder)
-{
- int i;
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == encoder->base.id) {
- connector->encoder_ids[i] = 0;
- if (connector->encoder == encoder)
- connector->encoder = NULL;
- break;
- }
- }
-}
-EXPORT_SYMBOL(drm_mode_connector_detach_encoder);
-
-bool drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
- int gamma_size)
-{
- crtc->gamma_size = gamma_size;
-
- crtc->gamma_store = kzalloc(gamma_size * sizeof(uint16_t) * 3, GFP_KERNEL);
- if (!crtc->gamma_store) {
- crtc->gamma_size = 0;
- return false;
- }
-
- return true;
-}
-EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size);
-
-int drm_mode_gamma_set_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_crtc_lut *crtc_lut = data;
- struct drm_mode_object *obj;
- struct drm_crtc *crtc;
- void *r_base, *g_base, *b_base;
- int size;
- int ret = 0;
-
- mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
- if (!obj) {
- ret = -EINVAL;
- goto out;
- }
- crtc = obj_to_crtc(obj);
-
- /* memcpy into gamma store */
- if (crtc_lut->gamma_size != crtc->gamma_size) {
- ret = -EINVAL;
- goto out;
- }
-
- size = crtc_lut->gamma_size * (sizeof(uint16_t));
- r_base = crtc->gamma_store;
- if (copy_from_user(r_base, (void __user *)(unsigned long)crtc_lut->red, size)) {
- ret = -EFAULT;
- goto out;
- }
-
- g_base = r_base + size;
- if (copy_from_user(g_base, (void __user *)(unsigned long)crtc_lut->green, size)) {
- ret = -EFAULT;
- goto out;
- }
-
- b_base = g_base + size;
- if (copy_from_user(b_base, (void __user *)(unsigned long)crtc_lut->blue, size)) {
- ret = -EFAULT;
- goto out;
- }
-
- crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size);
-
-out:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-
-}
-
-int drm_mode_gamma_get_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_crtc_lut *crtc_lut = data;
- struct drm_mode_object *obj;
- struct drm_crtc *crtc;
- void *r_base, *g_base, *b_base;
- int size;
- int ret = 0;
-
- mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
- if (!obj) {
- ret = -EINVAL;
- goto out;
- }
- crtc = obj_to_crtc(obj);
-
- /* memcpy into gamma store */
- if (crtc_lut->gamma_size != crtc->gamma_size) {
- ret = -EINVAL;
- goto out;
- }
-
- size = crtc_lut->gamma_size * (sizeof(uint16_t));
- r_base = crtc->gamma_store;
- if (copy_to_user((void __user *)(unsigned long)crtc_lut->red, r_base, size)) {
- ret = -EFAULT;
- goto out;
- }
-
- g_base = r_base + size;
- if (copy_to_user((void __user *)(unsigned long)crtc_lut->green, g_base, size)) {
- ret = -EFAULT;
- goto out;
- }
-
- b_base = g_base + size;
- if (copy_to_user((void __user *)(unsigned long)crtc_lut->blue, b_base, size)) {
- ret = -EFAULT;
- goto out;
- }
-out:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-}
-
-int drm_mode_page_flip_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_crtc_page_flip *page_flip = data;
- struct drm_mode_object *obj;
- struct drm_crtc *crtc;
- struct drm_framebuffer *fb;
- struct drm_pending_vblank_event *e = NULL;
- unsigned long flags;
- int ret = -EINVAL;
-
- if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
- page_flip->reserved != 0)
- return -EINVAL;
-
- mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC);
- if (!obj)
- goto out;
- crtc = obj_to_crtc(obj);
-
- if (crtc->fb == NULL) {
- /* The framebuffer is currently unbound, presumably
-		 * due to a hotplug event that userspace has not
- * yet discovered.
- */
- ret = -EBUSY;
- goto out;
- }
-
- if (crtc->funcs->page_flip == NULL)
- goto out;
-
- obj = drm_mode_object_find(dev, page_flip->fb_id, DRM_MODE_OBJECT_FB);
- if (!obj)
- goto out;
- fb = obj_to_fb(obj);
-
- if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
- ret = -ENOMEM;
- spin_lock_irqsave(&dev->event_lock, flags);
- if (file_priv->event_space < sizeof e->event) {
- spin_unlock_irqrestore(&dev->event_lock, flags);
- goto out;
- }
- file_priv->event_space -= sizeof e->event;
- spin_unlock_irqrestore(&dev->event_lock, flags);
-
- e = kzalloc(sizeof *e, GFP_KERNEL);
- if (e == NULL) {
- spin_lock_irqsave(&dev->event_lock, flags);
- file_priv->event_space += sizeof e->event;
- spin_unlock_irqrestore(&dev->event_lock, flags);
- goto out;
- }
-
- e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
- e->event.base.length = sizeof e->event;
- e->event.user_data = page_flip->user_data;
- e->base.event = &e->event.base;
- e->base.file_priv = file_priv;
- e->base.destroy =
- (void (*) (struct drm_pending_event *)) kfree;
- }
-
- ret = crtc->funcs->page_flip(crtc, fb, e);
- if (ret) {
- spin_lock_irqsave(&dev->event_lock, flags);
- file_priv->event_space += sizeof e->event;
- spin_unlock_irqrestore(&dev->event_lock, flags);
- kfree(e);
- }
-
-out:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-}
+++ /dev/null
-/*
- * Copyright (c) 2006-2008 Intel Corporation
- * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
- *
- * DRM core CRTC related functions
- *
- * Permission to use, copy, modify, distribute, and sell this software and its
- * documentation for any purpose is hereby granted without fee, provided that
- * the above copyright notice appear in all copies and that both that copyright
- * notice and this permission notice appear in supporting documentation, and
- * that the name of the copyright holders not be used in advertising or
- * publicity pertaining to distribution of the software without specific,
- * written prior permission. The copyright holders make no representations
- * about the suitability of this software for any purpose. It is provided "as
- * is" without express or implied warranty.
- *
- * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
- * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
- * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
- * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
- * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
- * OF THIS SOFTWARE.
- *
- * Authors:
- * Keith Packard
- * Eric Anholt <eric@anholt.net>
- * Dave Airlie <airlied@linux.ie>
- * Jesse Barnes <jesse.barnes@intel.com>
- */
-
-#include "drmP.h"
-#include "drm_crtc.h"
-#include "drm_crtc_helper.h"
-#include "drm_fb_helper.h"
-
-static bool drm_kms_helper_poll = true;
-module_param_named(poll, drm_kms_helper_poll, bool, 0600);
-
-static void drm_mode_validate_flag(struct drm_connector *connector,
- int flags)
-{
- struct drm_display_mode *mode, *t;
-
- if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE))
- return;
-
- list_for_each_entry_safe(mode, t, &connector->modes, head) {
- if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
- !(flags & DRM_MODE_FLAG_INTERLACE))
- mode->status = MODE_NO_INTERLACE;
- if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) &&
- !(flags & DRM_MODE_FLAG_DBLSCAN))
- mode->status = MODE_NO_DBLESCAN;
- }
-
- return;
-}
-
-/**
- * drm_helper_probe_single_connector_modes - get complete set of display modes
- * @connector: connector to probe
- * @maxX: max width for modes
- * @maxY: max height for modes
- *
- * LOCKING:
- * Caller must hold mode config lock.
- *
- * Probe @connector for the modes it supports. Modes will first be added to
- * the connector's probed_modes list, then culled (based on validity and the
- * @maxX, @maxY parameters) and put into the normal modes list.
- *
- * Intended to be used either at bootup time or when major configuration
- * changes have occurred.
- *
- * FIXME: take into account monitor limits
- *
- * RETURNS:
- * Number of modes found on @connector.
- */
-int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
- uint32_t maxX, uint32_t maxY)
-{
- struct drm_device *dev = connector->dev;
- struct drm_display_mode *mode, *t;
- struct drm_connector_helper_funcs *connector_funcs =
- connector->helper_private;
- int count = 0;
- int mode_flags = 0;
-
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
- drm_get_connector_name(connector));
- /* set all modes to the unverified state */
- list_for_each_entry_safe(mode, t, &connector->modes, head)
- mode->status = MODE_UNVERIFIED;
-
- if (connector->force) {
- if (connector->force == DRM_FORCE_ON)
- connector->status = connector_status_connected;
- else
- connector->status = connector_status_disconnected;
- if (connector->funcs->force)
- connector->funcs->force(connector);
- } else {
- connector->status = connector->funcs->detect(connector, true);
- drm_kms_helper_poll_enable(dev);
- }
-
- if (connector->status == connector_status_disconnected) {
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
- connector->base.id, drm_get_connector_name(connector));
- drm_mode_connector_update_edid_property(connector, NULL);
- goto prune;
- }
-
- count = (*connector_funcs->get_modes)(connector);
- if (count == 0 && connector->status == connector_status_connected)
- count = drm_add_modes_noedid(connector, 1024, 768);
- if (count == 0)
- goto prune;
-
- drm_mode_connector_list_update(connector);
-
- if (maxX && maxY)
- drm_mode_validate_size(dev, &connector->modes, maxX,
- maxY, 0);
-
- if (connector->interlace_allowed)
- mode_flags |= DRM_MODE_FLAG_INTERLACE;
- if (connector->doublescan_allowed)
- mode_flags |= DRM_MODE_FLAG_DBLSCAN;
- drm_mode_validate_flag(connector, mode_flags);
-
- list_for_each_entry_safe(mode, t, &connector->modes, head) {
- if (mode->status == MODE_OK)
- mode->status = connector_funcs->mode_valid(connector,
- mode);
- }
-
-prune:
- drm_mode_prune_invalid(dev, &connector->modes, true);
-
- if (list_empty(&connector->modes))
- return 0;
-
- drm_mode_sort(&connector->modes);
-
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] probed modes :\n", connector->base.id,
- drm_get_connector_name(connector));
- list_for_each_entry_safe(mode, t, &connector->modes, head) {
- mode->vrefresh = drm_mode_vrefresh(mode);
-
- drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
- drm_mode_debug_printmodeline(mode);
- }
-
- return count;
-}
-EXPORT_SYMBOL(drm_helper_probe_single_connector_modes);
-
-/**
- * drm_helper_encoder_in_use - check if a given encoder is in use
- * @encoder: encoder to check
- *
- * LOCKING:
- * Caller must hold mode config lock.
- *
- * Walk @encoder's DRM device's mode_config and see if it's in use.
- *
- * RETURNS:
- * True if @encoder is currently attached to a connector, false otherwise.
- */
-bool drm_helper_encoder_in_use(struct drm_encoder *encoder)
-{
- struct drm_connector *connector;
- struct drm_device *dev = encoder->dev;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head)
- if (connector->encoder == encoder)
- return true;
- return false;
-}
-EXPORT_SYMBOL(drm_helper_encoder_in_use);
-
-/**
- * drm_helper_crtc_in_use - check if a given CRTC is in a mode_config
- * @crtc: CRTC to check
- *
- * LOCKING:
- * Caller must hold mode config lock.
- *
- * Walk @crtc's DRM device's mode_config and see if it's in use.
- *
- * RETURNS:
- * True if @crtc is currently driving an encoder that is in use, false otherwise.
- */
-bool drm_helper_crtc_in_use(struct drm_crtc *crtc)
-{
- struct drm_encoder *encoder;
- struct drm_device *dev = crtc->dev;
- /* FIXME: Locking around list access? */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
- if (encoder->crtc == crtc && drm_helper_encoder_in_use(encoder))
- return true;
- return false;
-}
-EXPORT_SYMBOL(drm_helper_crtc_in_use);
-
-static void
-drm_encoder_disable(struct drm_encoder *encoder)
-{
- struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
-
- if (encoder_funcs->disable)
- (*encoder_funcs->disable)(encoder);
- else
- (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
-}
-
-/**
- * drm_helper_disable_unused_functions - disable unused objects
- * @dev: DRM device
- *
- * LOCKING:
- * Caller must hold mode config lock.
- *
- * If a connector or CRTC isn't part of @dev's mode_config, it can be disabled
- * by calling its dpms function, which should power it off.
- */
-void drm_helper_disable_unused_functions(struct drm_device *dev)
-{
- struct drm_encoder *encoder;
- struct drm_connector *connector;
- struct drm_crtc *crtc;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (!connector->encoder)
- continue;
- if (connector->status == connector_status_disconnected)
- connector->encoder = NULL;
- }
-
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (!drm_helper_encoder_in_use(encoder)) {
- drm_encoder_disable(encoder);
-			/* disconnect the encoder from its crtc */
- encoder->crtc = NULL;
- }
- }
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
- crtc->enabled = drm_helper_crtc_in_use(crtc);
- if (!crtc->enabled) {
- if (crtc_funcs->disable)
- (*crtc_funcs->disable)(crtc);
- else
- (*crtc_funcs->dpms)(crtc, DRM_MODE_DPMS_OFF);
- crtc->fb = NULL;
- }
- }
-}
-EXPORT_SYMBOL(drm_helper_disable_unused_functions);
-
-/**
- * drm_encoder_crtc_ok - can a given crtc drive a given encoder?
- * @encoder: encoder to test
- * @crtc: crtc to test
- *
- * Return false if @encoder can't be driven by @crtc, true otherwise.
- */
-static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
- struct drm_crtc *crtc)
-{
- struct drm_device *dev;
- struct drm_crtc *tmp;
- int crtc_mask = 1;
-
- WARN(!crtc, "checking null crtc?\n");
-
- dev = crtc->dev;
-
- list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) {
- if (tmp == crtc)
- break;
- crtc_mask <<= 1;
- }
-
- if (encoder->possible_crtcs & crtc_mask)
- return true;
- return false;
-}
-
-/*
- * Check the CRTC we're going to map each output to vs. its current
- * CRTC. If they don't match, we have to disable the output and the CRTC
- * since the driver will have to re-route things.
- */
-static void
-drm_crtc_prepare_encoders(struct drm_device *dev)
-{
- struct drm_encoder_helper_funcs *encoder_funcs;
- struct drm_encoder *encoder;
-
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- encoder_funcs = encoder->helper_private;
- /* Disable unused encoders */
- if (encoder->crtc == NULL)
- drm_encoder_disable(encoder);
- /* Disable encoders whose CRTC is about to change */
- if (encoder_funcs->get_crtc &&
- encoder->crtc != (*encoder_funcs->get_crtc)(encoder))
- drm_encoder_disable(encoder);
- }
-}
-
-/**
- * drm_crtc_helper_set_mode - set a mode on a CRTC
- * @crtc: CRTC to program
- * @mode: mode to use
- * @x: horizontal offset of the CRTC into the framebuffer
- * @y: vertical offset of the CRTC into the framebuffer
- * @old_fb: previous framebuffer
- *
- * LOCKING:
- * Caller must hold mode config lock.
- *
- * Try to set @mode on @crtc. Give @crtc and its associated connectors a chance
- * to fixup or reject the mode prior to trying to set it.
- *
- * RETURNS:
- * True if the mode was set successfully, or false otherwise.
- */
-bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- int x, int y,
- struct drm_framebuffer *old_fb)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_display_mode *adjusted_mode, saved_mode;
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
- struct drm_encoder_helper_funcs *encoder_funcs;
- int saved_x, saved_y;
- struct drm_encoder *encoder;
- bool ret = true;
-
- adjusted_mode = drm_mode_duplicate(dev, mode);
-
- crtc->enabled = drm_helper_crtc_in_use(crtc);
-
- if (!crtc->enabled)
- return true;
-
- saved_mode = crtc->mode;
- saved_x = crtc->x;
- saved_y = crtc->y;
-
- /* Update crtc values up front so the driver can rely on them for mode
- * setting.
- */
- crtc->mode = *mode;
- crtc->x = x;
- crtc->y = y;
-
- /* Pass our mode to the connectors and the CRTC to give them a chance to
- * adjust it according to limitations or connector properties, and also
- * a chance to reject the mode entirely.
- */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-
- if (encoder->crtc != crtc)
- continue;
- encoder_funcs = encoder->helper_private;
- if (!(ret = encoder_funcs->mode_fixup(encoder, mode,
- adjusted_mode))) {
- goto done;
- }
- }
-
- if (!(ret = crtc_funcs->mode_fixup(crtc, mode, adjusted_mode))) {
- goto done;
- }
- DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
-
- /* Prepare the encoders and CRTCs before setting the mode. */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-
- if (encoder->crtc != crtc)
- continue;
- encoder_funcs = encoder->helper_private;
- /* Disable the encoders as the first thing we do. */
- encoder_funcs->prepare(encoder);
- }
-
- drm_crtc_prepare_encoders(dev);
-
- crtc_funcs->prepare(crtc);
-
- /* Set up the DPLL and any encoders state that needs to adjust or depend
- * on the DPLL.
- */
- ret = !crtc_funcs->mode_set(crtc, mode, adjusted_mode, x, y, old_fb);
- if (!ret)
- goto done;
-
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-
- if (encoder->crtc != crtc)
- continue;
-
- DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
- encoder->base.id, drm_get_encoder_name(encoder),
- mode->base.id, mode->name);
- encoder_funcs = encoder->helper_private;
- encoder_funcs->mode_set(encoder, mode, adjusted_mode);
- }
-
- /* Now enable the clocks, plane, pipe, and connectors that we set up. */
- crtc_funcs->commit(crtc);
-
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-
- if (encoder->crtc != crtc)
- continue;
-
- encoder_funcs = encoder->helper_private;
- encoder_funcs->commit(encoder);
-
- }
-
-	/* Done with the adjusted mode; free it. */
- drm_mode_destroy(dev, adjusted_mode);
- /* FIXME: add subpixel order */
-done:
- if (!ret) {
- crtc->mode = saved_mode;
- crtc->x = saved_x;
- crtc->y = saved_y;
- }
-
- return ret;
-}
-EXPORT_SYMBOL(drm_crtc_helper_set_mode);
-
-
-/**
- * drm_crtc_helper_set_config - set a new config from userspace
- * @set: mode set configuration (CRTC, mode, x/y offsets, connectors and fb)
- *
- * LOCKING:
- * Caller must hold mode config lock.
- *
- * Set up a new configuration, provided by the user in @set, and enable it.
- *
- * RETURNS:
- * Zero on success, negative errno on failure.
- */
-int drm_crtc_helper_set_config(struct drm_mode_set *set)
-{
- struct drm_device *dev;
- struct drm_crtc *save_crtcs, *new_crtc, *crtc;
- struct drm_encoder *save_encoders, *new_encoder, *encoder;
- struct drm_framebuffer *old_fb = NULL;
- bool mode_changed = false; /* if true do a full mode set */
- bool fb_changed = false; /* if true and !mode_changed just do a flip */
- struct drm_connector *save_connectors, *connector;
- int count = 0, ro, fail = 0;
- struct drm_crtc_helper_funcs *crtc_funcs;
- int ret = 0;
- int i;
-
- DRM_DEBUG_KMS("\n");
-
- if (!set)
- return -EINVAL;
-
- if (!set->crtc)
- return -EINVAL;
-
- if (!set->crtc->helper_private)
- return -EINVAL;
-
- crtc_funcs = set->crtc->helper_private;
-
- if (set->fb) {
- DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
- set->crtc->base.id, set->fb->base.id,
- (int)set->num_connectors, set->x, set->y);
- } else {
- DRM_DEBUG_KMS("[CRTC:%d] [NOFB] #connectors=%d (x y) (%i %i)\n",
- set->crtc->base.id, (int)set->num_connectors,
- set->x, set->y);
- }
-
- dev = set->crtc->dev;
-
- /* Allocate space for the backup of all (non-pointer) crtc, encoder and
- * connector data. */
- save_crtcs = kzalloc(dev->mode_config.num_crtc *
- sizeof(struct drm_crtc), GFP_KERNEL);
- if (!save_crtcs)
- return -ENOMEM;
-
- save_encoders = kzalloc(dev->mode_config.num_encoder *
- sizeof(struct drm_encoder), GFP_KERNEL);
- if (!save_encoders) {
- kfree(save_crtcs);
- return -ENOMEM;
- }
-
- save_connectors = kzalloc(dev->mode_config.num_connector *
- sizeof(struct drm_connector), GFP_KERNEL);
- if (!save_connectors) {
- kfree(save_crtcs);
- kfree(save_encoders);
- return -ENOMEM;
- }
-
- /* Copy data. Note that driver private data is not affected.
- * Should anything bad happen only the expected state is
-	 * restored, not the driver's personal bookkeeping.
- */
- count = 0;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- save_crtcs[count++] = *crtc;
- }
-
- count = 0;
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- save_encoders[count++] = *encoder;
- }
-
- count = 0;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- save_connectors[count++] = *connector;
- }
-
- /* We should be able to check here if the fb has the same properties
- * and then just flip_or_move it */
- if (set->crtc->fb != set->fb) {
- /* If we have no fb then treat it as a full mode set */
- if (set->crtc->fb == NULL) {
- DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
- mode_changed = true;
- } else if (set->fb == NULL) {
- mode_changed = true;
- } else
- fb_changed = true;
- }
-
- if (set->x != set->crtc->x || set->y != set->crtc->y)
- fb_changed = true;
-
- if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
- DRM_DEBUG_KMS("modes are different, full mode set\n");
- drm_mode_debug_printmodeline(&set->crtc->mode);
- drm_mode_debug_printmodeline(set->mode);
- mode_changed = true;
- }
-
- /* a) traverse passed in connector list and get encoders for them */
- count = 0;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct drm_connector_helper_funcs *connector_funcs =
- connector->helper_private;
- new_encoder = connector->encoder;
- for (ro = 0; ro < set->num_connectors; ro++) {
- if (set->connectors[ro] == connector) {
- new_encoder = connector_funcs->best_encoder(connector);
- /* if we can't get an encoder for a connector
- we are setting now - then fail */
- if (new_encoder == NULL)
-					/* don't break so the fail path works correctly */
- fail = 1;
- break;
- }
- }
-
- if (new_encoder != connector->encoder) {
- DRM_DEBUG_KMS("encoder changed, full mode switch\n");
- mode_changed = true;
- /* If the encoder is reused for another connector, then
- * the appropriate crtc will be set later.
- */
- if (connector->encoder)
- connector->encoder->crtc = NULL;
- connector->encoder = new_encoder;
- }
- }
-
- if (fail) {
- ret = -EINVAL;
- goto fail;
- }
-
- count = 0;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (!connector->encoder)
- continue;
-
- if (connector->encoder->crtc == set->crtc)
- new_crtc = NULL;
- else
- new_crtc = connector->encoder->crtc;
-
- for (ro = 0; ro < set->num_connectors; ro++) {
- if (set->connectors[ro] == connector)
- new_crtc = set->crtc;
- }
-
- /* Make sure the new CRTC will work with the encoder */
- if (new_crtc &&
- !drm_encoder_crtc_ok(connector->encoder, new_crtc)) {
- ret = -EINVAL;
- goto fail;
- }
- if (new_crtc != connector->encoder->crtc) {
- DRM_DEBUG_KMS("crtc changed, full mode switch\n");
- mode_changed = true;
- connector->encoder->crtc = new_crtc;
- }
- if (new_crtc) {
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
- connector->base.id, drm_get_connector_name(connector),
- new_crtc->base.id);
- } else {
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
- connector->base.id, drm_get_connector_name(connector));
- }
- }
-
- /* mode_set_base is not a required function */
- if (fb_changed && !crtc_funcs->mode_set_base)
- mode_changed = true;
-
- if (mode_changed) {
- set->crtc->enabled = (set->mode != NULL);
- if (set->mode != NULL) {
- DRM_DEBUG_KMS("attempting to set mode from"
- " userspace\n");
- drm_mode_debug_printmodeline(set->mode);
- old_fb = set->crtc->fb;
- set->crtc->fb = set->fb;
- if (!drm_crtc_helper_set_mode(set->crtc, set->mode,
- set->x, set->y,
- old_fb)) {
- DRM_ERROR("failed to set mode on [CRTC:%d]\n",
- set->crtc->base.id);
- ret = -EINVAL;
- goto fail;
- }
- }
- drm_helper_disable_unused_functions(dev);
- } else if (fb_changed) {
- set->crtc->x = set->x;
- set->crtc->y = set->y;
-
- old_fb = set->crtc->fb;
- if (set->crtc->fb != set->fb)
- set->crtc->fb = set->fb;
- ret = crtc_funcs->mode_set_base(set->crtc,
- set->x, set->y, old_fb);
- if (ret != 0)
- goto fail;
- }
- DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
- for (i = 0; i < set->num_connectors; i++) {
- DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
- drm_get_connector_name(set->connectors[i]));
- set->connectors[i]->dpms = DRM_MODE_DPMS_ON;
- }
-
- kfree(save_connectors);
- kfree(save_encoders);
- kfree(save_crtcs);
- return 0;
-
-fail:
- /* Restore all previous data. */
- count = 0;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- *crtc = save_crtcs[count++];
- }
-
- count = 0;
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- *encoder = save_encoders[count++];
- }
-
- count = 0;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- *connector = save_connectors[count++];
- }
-
- kfree(save_connectors);
- kfree(save_encoders);
- kfree(save_crtcs);
- return ret;
-}
-EXPORT_SYMBOL(drm_crtc_helper_set_config);
-
-static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder)
-{
- int dpms = DRM_MODE_DPMS_OFF;
- struct drm_connector *connector;
- struct drm_device *dev = encoder->dev;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head)
- if (connector->encoder == encoder)
- if (connector->dpms < dpms)
- dpms = connector->dpms;
- return dpms;
-}
-
-static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
-{
- int dpms = DRM_MODE_DPMS_OFF;
- struct drm_connector *connector;
- struct drm_device *dev = crtc->dev;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head)
- if (connector->encoder && connector->encoder->crtc == crtc)
- if (connector->dpms < dpms)
- dpms = connector->dpms;
- return dpms;
-}
-
-/**
- * drm_helper_connector_dpms
- * @connector: affected connector
- * @mode: DPMS mode
- *
- * Calls the low-level connector DPMS function, then
- * calls appropriate encoder and crtc DPMS functions as well
- */
-void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
-{
- struct drm_encoder *encoder = connector->encoder;
- struct drm_crtc *crtc = encoder ? encoder->crtc : NULL;
- int old_dpms;
-
- if (mode == connector->dpms)
- return;
-
- old_dpms = connector->dpms;
- connector->dpms = mode;
-
- /* from off to on, do crtc then encoder */
- if (mode < old_dpms) {
- if (crtc) {
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
- if (crtc_funcs->dpms)
- (*crtc_funcs->dpms) (crtc,
- drm_helper_choose_crtc_dpms(crtc));
- }
- if (encoder) {
- struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
- if (encoder_funcs->dpms)
- (*encoder_funcs->dpms) (encoder,
- drm_helper_choose_encoder_dpms(encoder));
- }
- }
-
- /* from on to off, do encoder then crtc */
- if (mode > old_dpms) {
- if (encoder) {
- struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
- if (encoder_funcs->dpms)
- (*encoder_funcs->dpms) (encoder,
- drm_helper_choose_encoder_dpms(encoder));
- }
- if (crtc) {
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
- if (crtc_funcs->dpms)
- (*crtc_funcs->dpms) (crtc,
- drm_helper_choose_crtc_dpms(crtc));
- }
- }
-
- return;
-}
-EXPORT_SYMBOL(drm_helper_connector_dpms);
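To show how this helper is normally consumed (an illustrative sketch, not from the removed file), a driver simply points its connector funcs at it; example_connector_detect() is a placeholder for the driver's own detect callback:

/* Editorial sketch: typical connector funcs wiring for the DPMS helper */
static const struct drm_connector_funcs example_connector_funcs = {
	.dpms		= drm_helper_connector_dpms,
	.detect		= example_connector_detect,
	.fill_modes	= drm_helper_probe_single_connector_modes,
	.destroy	= drm_connector_cleanup,
};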
-
-int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
- struct drm_mode_fb_cmd *mode_cmd)
-{
- fb->width = mode_cmd->width;
- fb->height = mode_cmd->height;
- fb->pitch = mode_cmd->pitch;
- fb->bits_per_pixel = mode_cmd->bpp;
- fb->depth = mode_cmd->depth;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct);
-
-int drm_helper_resume_force_mode(struct drm_device *dev)
-{
- struct drm_crtc *crtc;
- struct drm_encoder *encoder;
- struct drm_encoder_helper_funcs *encoder_funcs;
- struct drm_crtc_helper_funcs *crtc_funcs;
- int ret;
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-
- if (!crtc->enabled)
- continue;
-
- ret = drm_crtc_helper_set_mode(crtc, &crtc->mode,
- crtc->x, crtc->y, crtc->fb);
-
- if (ret == false)
- DRM_ERROR("failed to set mode on crtc %p\n", crtc);
-
- /* Turn off outputs that were already powered off */
- if (drm_helper_choose_crtc_dpms(crtc)) {
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-
- if(encoder->crtc != crtc)
- continue;
-
- encoder_funcs = encoder->helper_private;
- if (encoder_funcs->dpms)
- (*encoder_funcs->dpms) (encoder,
- drm_helper_choose_encoder_dpms(encoder));
- }
-
- crtc_funcs = crtc->helper_private;
- if (crtc_funcs->dpms)
- (*crtc_funcs->dpms) (crtc,
- drm_helper_choose_crtc_dpms(crtc));
- }
- }
- /* disable the unused connectors while restoring the modesetting */
- drm_helper_disable_unused_functions(dev);
- return 0;
-}
-EXPORT_SYMBOL(drm_helper_resume_force_mode);
-
-#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
-static void output_poll_execute(struct work_struct *work)
-{
- struct delayed_work *delayed_work = to_delayed_work(work);
- struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_work);
- struct drm_connector *connector;
- enum drm_connector_status old_status;
- bool repoll = false, changed = false;
-
- if (!drm_kms_helper_poll)
- return;
-
- mutex_lock(&dev->mode_config.mutex);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-
-		/* skip connectors that are not flagged for polling at all -
-		   TV out for instance */
- if (!connector->polled)
- continue;
-
- else if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT))
- repoll = true;
-
- old_status = connector->status;
- /* if we are connected and don't want to poll for disconnect
- skip it */
- if (old_status == connector_status_connected &&
- !(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT) &&
- !(connector->polled & DRM_CONNECTOR_POLL_HPD))
- continue;
-
- connector->status = connector->funcs->detect(connector, false);
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
- connector->base.id,
- drm_get_connector_name(connector),
- old_status, connector->status);
- if (old_status != connector->status)
- changed = true;
- }
-
- mutex_unlock(&dev->mode_config.mutex);
-
- if (changed) {
- /* send a uevent + call fbdev */
- drm_sysfs_hotplug_event(dev);
- if (dev->mode_config.funcs->output_poll_changed)
- dev->mode_config.funcs->output_poll_changed(dev);
- }
-
- if (repoll)
- queue_delayed_work(system_nrt_wq, delayed_work, DRM_OUTPUT_POLL_PERIOD);
-}
-
-void drm_kms_helper_poll_disable(struct drm_device *dev)
-{
- if (!dev->mode_config.poll_enabled)
- return;
- cancel_delayed_work_sync(&dev->mode_config.output_poll_work);
-}
-EXPORT_SYMBOL(drm_kms_helper_poll_disable);
-
-void drm_kms_helper_poll_enable(struct drm_device *dev)
-{
- bool poll = false;
- struct drm_connector *connector;
-
- if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll)
- return;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (connector->polled)
- poll = true;
- }
-
- if (poll)
- queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
-}
-EXPORT_SYMBOL(drm_kms_helper_poll_enable);
-
-void drm_kms_helper_poll_init(struct drm_device *dev)
-{
- INIT_DELAYED_WORK(&dev->mode_config.output_poll_work, output_poll_execute);
- dev->mode_config.poll_enabled = true;
-
- drm_kms_helper_poll_enable(dev);
-}
-EXPORT_SYMBOL(drm_kms_helper_poll_init);
-
-void drm_kms_helper_poll_fini(struct drm_device *dev)
-{
- drm_kms_helper_poll_disable(dev);
-}
-EXPORT_SYMBOL(drm_kms_helper_poll_fini);
-
-void drm_helper_hpd_irq_event(struct drm_device *dev)
-{
- if (!dev->mode_config.poll_enabled)
- return;
-
- /* kill timer and schedule immediate execution, this doesn't block */
- cancel_delayed_work(&dev->mode_config.output_poll_work);
- if (drm_kms_helper_poll)
- queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0);
-}
-EXPORT_SYMBOL(drm_helper_hpd_irq_event);
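For reference only (an editorial sketch, not part of this patch), a driver enables the polling machinery above by flagging the connectors that need it and calling the init/fini helpers from its load and unload paths; the example_* names are placeholders:

/* Editorial sketch: wire up output polling for a connector without HPD */
static void example_output_poll_setup(struct drm_device *dev,
				      struct drm_connector *vga_connector)
{
	/* VGA has no hotplug interrupt, so ask the helper to poll it */
	vga_connector->polled = DRM_CONNECTOR_POLL_CONNECT |
				DRM_CONNECTOR_POLL_DISCONNECT;

	drm_kms_helper_poll_init(dev);
}

static void example_output_poll_teardown(struct drm_device *dev)
{
	drm_kms_helper_poll_fini(dev);
}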
+++ /dev/null
-/**
- * \file drm_debugfs.c
- * debugfs support for DRM
- *
- * \author Ben Gamari <bgamari@gmail.com>
- */
-
-/*
- * Created: Sun Dec 21 13:08:50 2008 by bgamari@gmail.com
- *
- * Copyright 2008 Ben Gamari <bgamari@gmail.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/slab.h>
-#include "drmP.h"
-
-#if defined(CONFIG_DEBUG_FS)
-
-/***************************************************
- * Initialization, etc.
- **************************************************/
-
-static struct drm_info_list drm_debugfs_list[] = {
- {"name", drm_name_info, 0},
- {"vm", drm_vm_info, 0},
- {"clients", drm_clients_info, 0},
- {"queues", drm_queues_info, 0},
- {"bufs", drm_bufs_info, 0},
- {"gem_names", drm_gem_name_info, DRIVER_GEM},
-#if DRM_DEBUG_CODE
- {"vma", drm_vma_info, 0},
-#endif
-};
-#define DRM_DEBUGFS_ENTRIES ARRAY_SIZE(drm_debugfs_list)
-
-
-static int drm_debugfs_open(struct inode *inode, struct file *file)
-{
- struct drm_info_node *node = inode->i_private;
-
- return single_open(file, node->info_ent->show, node);
-}
-
-
-static const struct file_operations drm_debugfs_fops = {
- .owner = THIS_MODULE,
- .open = drm_debugfs_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-
-/**
- * Initialize a given set of debugfs files for a device
- *
- * \param files The array of files to create
- * \param count The number of files given
- * \param root DRI debugfs dir entry.
- * \param minor DRM minor the files belong to
- * \return Zero on success, non-zero on failure
- *
- * Create a given set of debugfs files represented by an array of
- * struct drm_info_list entries in the given root directory.
- */
-int drm_debugfs_create_files(struct drm_info_list *files, int count,
- struct dentry *root, struct drm_minor *minor)
-{
- struct drm_device *dev = minor->dev;
- struct dentry *ent;
- struct drm_info_node *tmp;
- int i, ret;
-
- for (i = 0; i < count; i++) {
- u32 features = files[i].driver_features;
-
- if (features != 0 &&
- (dev->driver->driver_features & features) != features)
- continue;
-
- tmp = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
- if (tmp == NULL) {
- ret = -1;
- goto fail;
- }
- ent = debugfs_create_file(files[i].name, S_IFREG | S_IRUGO,
- root, tmp, &drm_debugfs_fops);
- if (!ent) {
-			DRM_ERROR("Cannot create /sys/kernel/debug/dri/%s/%s\n",
-				  root->d_name.name, files[i].name);
- kfree(tmp);
- ret = -1;
- goto fail;
- }
-
- tmp->minor = minor;
- tmp->dent = ent;
- tmp->info_ent = &files[i];
- list_add(&(tmp->list), &(minor->debugfs_nodes.list));
- }
- return 0;
-
-fail:
- drm_debugfs_remove_files(files, count, minor);
- return ret;
-}
-EXPORT_SYMBOL(drm_debugfs_create_files);
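Purely as an illustrative sketch (not part of the removed file), a driver's debugfs_init/debugfs_cleanup hooks would typically use these helpers like so; example_state_show() is a placeholder seq_file callback:

/* Editorial sketch: per-driver debugfs entries built on the helpers above */
static struct drm_info_list example_debugfs_list[] = {
	{ "example_state", example_state_show, 0 },
};

static int example_debugfs_init(struct drm_minor *minor)
{
	return drm_debugfs_create_files(example_debugfs_list,
					ARRAY_SIZE(example_debugfs_list),
					minor->debugfs_root, minor);
}

static void example_debugfs_cleanup(struct drm_minor *minor)
{
	drm_debugfs_remove_files(example_debugfs_list,
				 ARRAY_SIZE(example_debugfs_list), minor);
}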
-
-/**
- * Initialize the DRI debugfs filesystem for a device
- *
- * \param minor DRM minor to initialize
- * \param minor_id minor number, used to name the debugfs directory
- * \param root DRI debugfs dir entry.
- *
- * Create the DRI debugfs root entry "/sys/kernel/debug/dri", the device debugfs root entry
- * "/sys/kernel/debug/dri/%minor%/", and each entry in debugfs_list as
- * "/sys/kernel/debug/dri/%minor%/%name%".
- */
-int drm_debugfs_init(struct drm_minor *minor, int minor_id,
- struct dentry *root)
-{
- struct drm_device *dev = minor->dev;
- char name[64];
- int ret;
-
- INIT_LIST_HEAD(&minor->debugfs_nodes.list);
- sprintf(name, "%d", minor_id);
- minor->debugfs_root = debugfs_create_dir(name, root);
- if (!minor->debugfs_root) {
- DRM_ERROR("Cannot create /sys/kernel/debug/dri/%s\n", name);
- return -1;
- }
-
- ret = drm_debugfs_create_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES,
- minor->debugfs_root, minor);
- if (ret) {
- debugfs_remove(minor->debugfs_root);
- minor->debugfs_root = NULL;
- DRM_ERROR("Failed to create core drm debugfs files\n");
- return ret;
- }
-
- if (dev->driver->debugfs_init) {
- ret = dev->driver->debugfs_init(minor);
- if (ret) {
- DRM_ERROR("DRM: Driver failed to initialize "
- "/sys/kernel/debug/dri.\n");
- return ret;
- }
- }
- return 0;
-}
-
-
-/**
- * Remove a list of debugfs files
- *
- * \param files The list of files
- * \param count The number of files
- * \param minor The DRM minor from which the files should be removed
- * \return always zero.
- *
- * Remove the given debugfs entries and free their bookkeeping structures.
- */
-int drm_debugfs_remove_files(struct drm_info_list *files, int count,
- struct drm_minor *minor)
-{
- struct list_head *pos, *q;
- struct drm_info_node *tmp;
- int i;
-
- for (i = 0; i < count; i++) {
- list_for_each_safe(pos, q, &minor->debugfs_nodes.list) {
- tmp = list_entry(pos, struct drm_info_node, list);
- if (tmp->info_ent == &files[i]) {
- debugfs_remove(tmp->dent);
- list_del(pos);
- kfree(tmp);
- }
- }
- }
- return 0;
-}
-EXPORT_SYMBOL(drm_debugfs_remove_files);
-
-/**
- * Cleanup the debugfs filesystem resources.
- *
- * \param minor DRM minor to clean up.
- * \return always zero.
- *
- * Remove all debugfs entries created by debugfs_init().
- */
-int drm_debugfs_cleanup(struct drm_minor *minor)
-{
- struct drm_device *dev = minor->dev;
-
- if (!minor->debugfs_root)
- return 0;
-
- if (dev->driver->debugfs_cleanup)
- dev->driver->debugfs_cleanup(minor);
-
- drm_debugfs_remove_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES, minor);
-
- debugfs_remove(minor->debugfs_root);
- minor->debugfs_root = NULL;
-
- return 0;
-}
-
-#endif /* CONFIG_DEBUG_FS */
-
+++ /dev/null
-/**
- * \file drm_dma.c
- * DMA IOCTL and function support
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
- *
- * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "drmP.h"
-
-/**
- * Initialize the DMA data.
- *
- * \param dev DRM device.
- * \return zero on success or a negative value on failure.
- *
- * Allocate and initialize a drm_device_dma structure.
- */
-int drm_dma_setup(struct drm_device *dev)
-{
- int i;
-
- dev->dma = kzalloc(sizeof(*dev->dma), GFP_KERNEL);
- if (!dev->dma)
- return -ENOMEM;
-
- for (i = 0; i <= DRM_MAX_ORDER; i++)
- memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0]));
-
- return 0;
-}
-
-/**
- * Cleanup the DMA resources.
- *
- * \param dev DRM device.
- *
- * Free all pages associated with DMA buffers, the buffers and pages lists, and
- * finally the drm_device::dma structure itself.
- */
-void drm_dma_takedown(struct drm_device *dev)
-{
- struct drm_device_dma *dma = dev->dma;
- int i, j;
-
- if (!dma)
- return;
-
- /* Clear dma buffers */
- for (i = 0; i <= DRM_MAX_ORDER; i++) {
- if (dma->bufs[i].seg_count) {
- DRM_DEBUG("order %d: buf_count = %d,"
- " seg_count = %d\n",
- i,
- dma->bufs[i].buf_count,
- dma->bufs[i].seg_count);
- for (j = 0; j < dma->bufs[i].seg_count; j++) {
- if (dma->bufs[i].seglist[j]) {
- drm_pci_free(dev, dma->bufs[i].seglist[j]);
- }
- }
- kfree(dma->bufs[i].seglist);
- }
- if (dma->bufs[i].buf_count) {
- for (j = 0; j < dma->bufs[i].buf_count; j++) {
- kfree(dma->bufs[i].buflist[j].dev_private);
- }
- kfree(dma->bufs[i].buflist);
- }
- }
-
- kfree(dma->buflist);
- kfree(dma->pagelist);
- kfree(dev->dma);
- dev->dma = NULL;
-}
-
-/**
- * Free a buffer.
- *
- * \param dev DRM device.
- * \param buf buffer to free.
- *
- * Resets the fields of \p buf.
- */
-void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf)
-{
- if (!buf)
- return;
-
- buf->waiting = 0;
- buf->pending = 0;
- buf->file_priv = NULL;
- buf->used = 0;
-
- if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE)
- && waitqueue_active(&buf->dma_wait)) {
- wake_up_interruptible(&buf->dma_wait);
- }
-}
-
-/**
- * Reclaim the buffers.
- *
- * \param dev DRM device.
- * \param file_priv DRM file private.
- *
- * Frees each buffer associated with \p file_priv not already on the hardware.
- */
-void drm_core_reclaim_buffers(struct drm_device *dev,
- struct drm_file *file_priv)
-{
- struct drm_device_dma *dma = dev->dma;
- int i;
-
- if (!dma)
- return;
- for (i = 0; i < dma->buf_count; i++) {
- if (dma->buflist[i]->file_priv == file_priv) {
- switch (dma->buflist[i]->list) {
- case DRM_LIST_NONE:
- drm_free_buffer(dev, dma->buflist[i]);
- break;
- case DRM_LIST_WAIT:
- dma->buflist[i]->list = DRM_LIST_RECLAIM;
- break;
- default:
- /* Buffer already on hardware. */
- break;
- }
- }
- }
-}
-
-EXPORT_SYMBOL(drm_core_reclaim_buffers);
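For orientation, a legacy (pre-GEM) driver of this vintage would normally hand buffer reclamation straight to the helper above from its driver descriptor; a minimal sketch under that assumption (the xxx_ names are placeholders, not taken from this patch):

/* Sketch only: wiring the core reclaim helper into a legacy driver. */
static struct drm_driver xxx_driver = {
        .driver_features = DRIVER_HAVE_DMA,
        /* Called per file handle on close, so buffers it still owns and
         * that are not busy on the hardware return to the free pool. */
        .reclaim_buffers = drm_core_reclaim_buffers,
        /* .open, .fops, .ioctls, ... elided */
};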
+++ /dev/null
-/*
- * Copyright © 2009 Keith Packard
- *
- * Permission to use, copy, modify, distribute, and sell this software and its
- * documentation for any purpose is hereby granted without fee, provided that
- * the above copyright notice appear in all copies and that both that copyright
- * notice and this permission notice appear in supporting documentation, and
- * that the name of the copyright holders not be used in advertising or
- * publicity pertaining to distribution of the software without specific,
- * written prior permission. The copyright holders make no representations
- * about the suitability of this software for any purpose. It is provided "as
- * is" without express or implied warranty.
- *
- * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
- * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
- * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
- * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
- * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
- * OF THIS SOFTWARE.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/i2c.h>
-#include "drm_dp_helper.h"
-#include "drmP.h"
-
-/* Run a single AUX_CH I2C transaction, writing/reading data as necessary */
-static int
-i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
- uint8_t write_byte, uint8_t *read_byte)
-{
- struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
- int ret;
-
- ret = (*algo_data->aux_ch)(adapter, mode,
- write_byte, read_byte);
- return ret;
-}
-
-/*
- * I2C over AUX CH
- */
-
-/*
- * Send the address. If the I2C link is running, this 'restarts'
- * the connection with the new address; this is used for doing
- * a write followed by a read (as needed for DDC)
- */
-static int
-i2c_algo_dp_aux_address(struct i2c_adapter *adapter, u16 address, bool reading)
-{
- struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
- int mode = MODE_I2C_START;
- int ret;
-
- if (reading)
- mode |= MODE_I2C_READ;
- else
- mode |= MODE_I2C_WRITE;
- algo_data->address = address;
- algo_data->running = true;
- ret = i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
- return ret;
-}
-
-/*
- * Stop the I2C transaction. This closes out the link, sending
- * a bare address packet with the MOT bit turned off
- */
-static void
-i2c_algo_dp_aux_stop(struct i2c_adapter *adapter, bool reading)
-{
- struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
- int mode = MODE_I2C_STOP;
-
- if (reading)
- mode |= MODE_I2C_READ;
- else
- mode |= MODE_I2C_WRITE;
- if (algo_data->running) {
- (void) i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
- algo_data->running = false;
- }
-}
-
-/*
- * Write a single byte to the current I2C address; the
- * I2C link must be running or this returns -EIO
- */
-static int
-i2c_algo_dp_aux_put_byte(struct i2c_adapter *adapter, u8 byte)
-{
- struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
- int ret;
-
- if (!algo_data->running)
- return -EIO;
-
- ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_WRITE, byte, NULL);
- return ret;
-}
-
-/*
- * Read a single byte from the current I2C address; the
- * I2C link must be running or this returns -EIO
- */
-static int
-i2c_algo_dp_aux_get_byte(struct i2c_adapter *adapter, u8 *byte_ret)
-{
- struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
- int ret;
-
- if (!algo_data->running)
- return -EIO;
-
- ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_READ, 0, byte_ret);
- return ret;
-}
-
-static int
-i2c_algo_dp_aux_xfer(struct i2c_adapter *adapter,
- struct i2c_msg *msgs,
- int num)
-{
- int ret = 0;
- bool reading = false;
- int m;
- int b;
-
- for (m = 0; m < num; m++) {
- u16 len = msgs[m].len;
- u8 *buf = msgs[m].buf;
- reading = (msgs[m].flags & I2C_M_RD) != 0;
- ret = i2c_algo_dp_aux_address(adapter, msgs[m].addr, reading);
- if (ret < 0)
- break;
- if (reading) {
- for (b = 0; b < len; b++) {
- ret = i2c_algo_dp_aux_get_byte(adapter, &buf[b]);
- if (ret < 0)
- break;
- }
- } else {
- for (b = 0; b < len; b++) {
- ret = i2c_algo_dp_aux_put_byte(adapter, buf[b]);
- if (ret < 0)
- break;
- }
- }
- if (ret < 0)
- break;
- }
- if (ret >= 0)
- ret = num;
- i2c_algo_dp_aux_stop(adapter, reading);
- DRM_DEBUG_KMS("dp_aux_xfer return %d\n", ret);
- return ret;
-}
-
-static u32
-i2c_algo_dp_aux_functionality(struct i2c_adapter *adapter)
-{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
- I2C_FUNC_SMBUS_READ_BLOCK_DATA |
- I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
- I2C_FUNC_10BIT_ADDR;
-}
-
-static const struct i2c_algorithm i2c_dp_aux_algo = {
- .master_xfer = i2c_algo_dp_aux_xfer,
- .functionality = i2c_algo_dp_aux_functionality,
-};
-
-static void
-i2c_dp_aux_reset_bus(struct i2c_adapter *adapter)
-{
- (void) i2c_algo_dp_aux_address(adapter, 0, false);
- i2c_algo_dp_aux_stop(adapter, false);
-}
-
-static int
-i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter)
-{
- adapter->algo = &i2c_dp_aux_algo;
- adapter->retries = 3;
- i2c_dp_aux_reset_bus(adapter);
- return 0;
-}
-
-int
-i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
-{
- int error;
-
- error = i2c_dp_aux_prepare_bus(adapter);
- if (error)
- return error;
- error = i2c_add_adapter(adapter);
- return error;
-}
-EXPORT_SYMBOL(i2c_dp_aux_add_bus);
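A hedged sketch of how a display driver is expected to sit on top of this helper: it supplies the hardware-specific AUX transaction in an i2c_algo_dp_aux_data, points the adapter's algo_data at it, and registers through i2c_dp_aux_add_bus(). The my_ names below are placeholders; only the aux_ch callback shape is taken from the code above.

/* Sketch only: one AUX transaction per call, honouring the MODE_I2C_START/
 * MODE_I2C_WRITE/MODE_I2C_READ/MODE_I2C_STOP modes dispatched above.
 * Return a negative errno on failure. */
static int my_aux_ch(struct i2c_adapter *adapter, int mode,
                     uint8_t write_byte, uint8_t *read_byte)
{
        /* talk to the sink over the DisplayPort AUX channel here */
        return 0;
}

static struct i2c_algo_dp_aux_data my_dp_algo = {
        .running = false,
        .address = 0,
        .aux_ch  = my_aux_ch,
};

static struct i2c_adapter my_dp_adapter = {
        .owner     = THIS_MODULE,
        .class     = I2C_CLASS_DDC,
        .name      = "DP AUX channel",
        .algo_data = &my_dp_algo,
};

/* During connector setup: i2c_dp_aux_add_bus(&my_dp_adapter); after that,
 * ordinary i2c_transfer() calls (EDID reads, for example) are translated
 * into AUX transactions by the algorithm above. */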
+++ /dev/null
-/**
- * \file drm_drawable.c
- * IOCTLs for drawables
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- * \author Michel Dänzer <michel@tungstengraphics.com>
- */
-
-/*
- * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, North Dakota.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "drmP.h"
-
-/**
- * Allocate drawable ID and memory to store information about it.
- */
-int drm_adddraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- unsigned long irqflags;
- struct drm_draw *draw = data;
- int new_id = 0;
- int ret;
-
-again:
- if (idr_pre_get(&dev->drw_idr, GFP_KERNEL) == 0) {
- DRM_ERROR("Out of memory expanding drawable idr\n");
- return -ENOMEM;
- }
-
- spin_lock_irqsave(&dev->drw_lock, irqflags);
- ret = idr_get_new_above(&dev->drw_idr, NULL, 1, &new_id);
- if (ret == -EAGAIN) {
- spin_unlock_irqrestore(&dev->drw_lock, irqflags);
- goto again;
- }
-
- spin_unlock_irqrestore(&dev->drw_lock, irqflags);
-
- draw->handle = new_id;
-
- DRM_DEBUG("%d\n", draw->handle);
-
- return 0;
-}
-
-/**
- * Free drawable ID and memory to store information about it.
- */
-int drm_rmdraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- struct drm_draw *draw = data;
- unsigned long irqflags;
- struct drm_drawable_info *info;
-
- spin_lock_irqsave(&dev->drw_lock, irqflags);
-
- info = drm_get_drawable_info(dev, draw->handle);
- if (info == NULL) {
- spin_unlock_irqrestore(&dev->drw_lock, irqflags);
- return -EINVAL;
- }
- kfree(info->rects);
- kfree(info);
-
- idr_remove(&dev->drw_idr, draw->handle);
-
- spin_unlock_irqrestore(&dev->drw_lock, irqflags);
- DRM_DEBUG("%d\n", draw->handle);
- return 0;
-}
-
-int drm_update_drawable_info(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- struct drm_update_draw *update = data;
- unsigned long irqflags;
- struct drm_clip_rect *rects;
- struct drm_drawable_info *info;
- int err;
-
- info = idr_find(&dev->drw_idr, update->handle);
- if (!info) {
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
- return -ENOMEM;
- if (IS_ERR(idr_replace(&dev->drw_idr, info, update->handle))) {
- DRM_ERROR("No such drawable %d\n", update->handle);
- kfree(info);
- return -EINVAL;
- }
- }
-
- switch (update->type) {
- case DRM_DRAWABLE_CLIPRECTS:
- if (update->num == 0)
- rects = NULL;
- else if (update->num != info->num_rects) {
- rects = kmalloc(update->num *
- sizeof(struct drm_clip_rect),
- GFP_KERNEL);
- } else
- rects = info->rects;
-
- if (update->num && !rects) {
- DRM_ERROR("Failed to allocate cliprect memory\n");
- err = -ENOMEM;
- goto error;
- }
-
- if (update->num && DRM_COPY_FROM_USER(rects,
- (struct drm_clip_rect __user *)
- (unsigned long)update->data,
- update->num *
- sizeof(*rects))) {
- DRM_ERROR("Failed to copy cliprects from userspace\n");
- err = -EFAULT;
- goto error;
- }
-
- spin_lock_irqsave(&dev->drw_lock, irqflags);
-
- if (rects != info->rects) {
- kfree(info->rects);
- }
-
- info->rects = rects;
- info->num_rects = update->num;
-
- spin_unlock_irqrestore(&dev->drw_lock, irqflags);
-
- DRM_DEBUG("Updated %d cliprects for drawable %d\n",
- info->num_rects, update->handle);
- break;
- default:
- DRM_ERROR("Invalid update type %d\n", update->type);
- return -EINVAL;
- }
-
- return 0;
-
-error:
- if (rects != info->rects)
- kfree(rects);
-
- return err;
-}
-
-/**
- * Caller must hold the drawable spinlock!
- */
-struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev, drm_drawable_t id)
-{
- return idr_find(&dev->drw_idr, id);
-}
-EXPORT_SYMBOL(drm_get_drawable_info);
-
-static int drm_drawable_free(int idr, void *p, void *data)
-{
- struct drm_drawable_info *info = p;
-
- if (info) {
- kfree(info->rects);
- kfree(info);
- }
-
- return 0;
-}
-
-void drm_drawable_free_all(struct drm_device *dev)
-{
- idr_for_each(&dev->drw_idr, drm_drawable_free, NULL);
- idr_remove_all(&dev->drw_idr);
-}
+++ /dev/null
-/**
- * \file drm_drv.c
- * Generic driver template
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- *
- * To use this template, you must at least define the following (samples
- * given for the MGA driver):
- *
- * \code
- * #define DRIVER_AUTHOR "VA Linux Systems, Inc."
- *
- * #define DRIVER_NAME "mga"
- * #define DRIVER_DESC "Matrox G200/G400"
- * #define DRIVER_DATE "20001127"
- *
- * #define drm_x mga_##x
- * \endcode
- */
-
-/*
- * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
- *
- * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/debugfs.h>
-#include <linux/slab.h>
-#include "drmP.h"
-#include "drm_core.h"
-
-
-static int drm_version(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-
-#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
- [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0}
-
-/** Ioctl table */
-static struct drm_ioctl_desc drm_ioctls[] = {
- DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
-
- DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER),
-
- DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),
-
- DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH),
-
- DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY),
-
- DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH),
-
- DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-
- DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),
-
- DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),
-
- DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
- /* The DRM_IOCTL_DMA ioctl should be defined by the driver. */
- DRM_IOCTL_DEF(DRM_IOCTL_DMA, NULL, DRM_AUTH),
-
- DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-
-#if __OS_HAS_AGP
- DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-#endif
-
- DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-
- DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0),
-
- DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
-
- DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-
- DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),
-
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED)
-};
-
-#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
-
-/**
- * Take down the DRM device.
- *
- * \param dev DRM device structure.
- *
- * Frees every resource in \p dev.
- *
- * \sa drm_device
- */
-int drm_lastclose(struct drm_device * dev)
-{
- struct drm_vma_entry *vma, *vma_temp;
- int i;
-
- DRM_DEBUG("\n");
-
- if (dev->driver->lastclose)
- dev->driver->lastclose(dev);
- DRM_DEBUG("driver lastclose completed\n");
-
- if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET))
- drm_irq_uninstall(dev);
-
- mutex_lock(&dev->struct_mutex);
-
- /* Clear AGP information */
- if (drm_core_has_AGP(dev) && dev->agp &&
- !drm_core_check_feature(dev, DRIVER_MODESET)) {
- struct drm_agp_mem *entry, *tempe;
-
- /* Remove AGP resources, but leave dev->agp
- intact until drv_cleanup is called. */
- list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
- if (entry->bound)
- drm_unbind_agp(entry->memory);
- drm_free_agp(entry->memory, entry->pages);
- kfree(entry);
- }
- INIT_LIST_HEAD(&dev->agp->memory);
-
- if (dev->agp->acquired)
- drm_agp_release(dev);
-
- dev->agp->acquired = 0;
- dev->agp->enabled = 0;
- }
- if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg &&
- !drm_core_check_feature(dev, DRIVER_MODESET)) {
- drm_sg_cleanup(dev->sg);
- dev->sg = NULL;
- }
-
- /* Clear vma list (only built for debugging) */
- list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
- list_del(&vma->head);
- kfree(vma);
- }
-
- if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist) {
- for (i = 0; i < dev->queue_count; i++) {
- kfree(dev->queuelist[i]);
- dev->queuelist[i] = NULL;
- }
- kfree(dev->queuelist);
- dev->queuelist = NULL;
- }
- dev->queue_count = 0;
-
- if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
- !drm_core_check_feature(dev, DRIVER_MODESET))
- drm_dma_takedown(dev);
-
- dev->dev_mapping = NULL;
- mutex_unlock(&dev->struct_mutex);
-
- DRM_DEBUG("lastclose completed\n");
- return 0;
-}
-
-/**
- * Module initialization. Called via init_module at module load time, or via
- * linux/init/main.c (this is not currently supported).
- *
- * \return zero on success or a negative number on failure.
- *
- * Initializes an array of drm_device structures, and attempts to
- * initialize all available devices, using consecutive minors, registering the
- * stubs and initializing the device.
- *
- * Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
- * after the initialization for driver customization.
- */
-int drm_init(struct drm_driver *driver)
-{
- DRM_DEBUG("\n");
- INIT_LIST_HEAD(&driver->device_list);
-
- if (driver->driver_features & DRIVER_USE_PLATFORM_DEVICE)
- return drm_platform_init(driver);
- else
- return drm_pci_init(driver);
-}
-
-EXPORT_SYMBOL(drm_init);
-
-void drm_exit(struct drm_driver *driver)
-{
- struct drm_device *dev, *tmp;
- DRM_DEBUG("\n");
-
- if (driver->driver_features & DRIVER_MODESET) {
- pci_unregister_driver(&driver->pci_driver);
- } else {
- list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
- drm_put_dev(dev);
- }
-
- DRM_INFO("Module unloaded\n");
-}
-
-EXPORT_SYMBOL(drm_exit);
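For context, module glue of this era is just a thin wrapper around these two entry points; a minimal sketch (the mydrv_ names are placeholders):

/* Sketch only: a driver module entering and leaving the DRM core. */
static struct drm_driver mydrv_driver = {
        /* .driver_features, .fops, .pci_driver / platform bits elided */
};

static int __init mydrv_module_init(void)
{
        return drm_init(&mydrv_driver); /* registers the PCI or platform devices */
}

static void __exit mydrv_module_exit(void)
{
        drm_exit(&mydrv_driver);
}

module_init(mydrv_module_init);
module_exit(mydrv_module_exit);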
-
-/** File operations structure */
-static const struct file_operations drm_stub_fops = {
- .owner = THIS_MODULE,
- .open = drm_stub_open,
- .llseek = noop_llseek,
-};
-
-static int __init drm_core_init(void)
-{
- int ret = -ENOMEM;
-
- drm_global_init();
- idr_init(&drm_minors_idr);
-
- if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
- goto err_p1;
-
- drm_class = drm_sysfs_create(THIS_MODULE, "drm");
- if (IS_ERR(drm_class)) {
- printk(KERN_ERR "DRM: Error creating drm class.\n");
- ret = PTR_ERR(drm_class);
- goto err_p2;
- }
-
- drm_proc_root = proc_mkdir("dri", NULL);
- if (!drm_proc_root) {
- DRM_ERROR("Cannot create /proc/dri\n");
- ret = -1;
- goto err_p3;
- }
-
- drm_debugfs_root = debugfs_create_dir("dri", NULL);
- if (!drm_debugfs_root) {
- DRM_ERROR("Cannot create /sys/kernel/debug/dri\n");
- ret = -1;
- goto err_p3;
- }
-
- DRM_INFO("Initialized %s %d.%d.%d %s\n",
- CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
- return 0;
-err_p3:
- drm_sysfs_destroy();
-err_p2:
- unregister_chrdev(DRM_MAJOR, "drm");
-
- idr_destroy(&drm_minors_idr);
-err_p1:
- return ret;
-}
-
-static void __exit drm_core_exit(void)
-{
- remove_proc_entry("dri", NULL);
- debugfs_remove(drm_debugfs_root);
- drm_sysfs_destroy();
-
- unregister_chrdev(DRM_MAJOR, "drm");
-
- idr_remove_all(&drm_minors_idr);
- idr_destroy(&drm_minors_idr);
-}
-
-module_init(drm_core_init);
-module_exit(drm_core_exit);
-
-/**
- * Copy an IOCTL return string to user space
- */
-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
-{
- int len;
-
- /* don't overflow userbuf */
- len = strlen(value);
- if (len > *buf_len)
- len = *buf_len;
-
- /* let userspace know exact length of driver value (which could be
- * larger than the userspace-supplied buffer) */
- *buf_len = strlen(value);
-
- /* finally, try filling in the userbuf */
- if (len && buf)
- if (copy_to_user(buf, value, len))
- return -EFAULT;
- return 0;
-}
-
-/**
- * Get version information
- *
- * \param dev DRM device.
- * \param data user argument, pointing to a drm_version structure.
- * \param file_priv DRM file private.
- * \return zero on success or negative number on failure.
- *
- * Fills in the version information in \p data.
- */
-static int drm_version(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_version *version = data;
- int err;
-
- version->version_major = dev->driver->major;
- version->version_minor = dev->driver->minor;
- version->version_patchlevel = dev->driver->patchlevel;
- err = drm_copy_field(version->name, &version->name_len,
- dev->driver->name);
- if (!err)
- err = drm_copy_field(version->date, &version->date_len,
- dev->driver->date);
- if (!err)
- err = drm_copy_field(version->desc, &version->desc_len,
- dev->driver->desc);
-
- return err;
-}
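Because drm_copy_field() reports the full string length back in each *_len field even when it truncates, userspace is expected to issue the ioctl twice: once to learn the lengths, then again with buffers of that size. A rough userspace sketch of that pattern (header paths and error handling elided; this is an illustration, not libdrm code):

static void query_drm_version(int fd)
{
        struct drm_version v;

        memset(&v, 0, sizeof(v));
        ioctl(fd, DRM_IOCTL_VERSION, &v);       /* pass 1: kernel fills *_len only */

        v.name = malloc(v.name_len + 1);
        v.date = malloc(v.date_len + 1);
        v.desc = malloc(v.desc_len + 1);
        ioctl(fd, DRM_IOCTL_VERSION, &v);       /* pass 2: strings copied out */
        v.name[v.name_len] = '\0';              /* kernel does not NUL-terminate */
}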
-
-/**
- * Called whenever a process performs an ioctl on /dev/drm.
- *
- * \param filp file pointer.
- * \param cmd command.
- * \param arg user argument.
- * \return zero on success or negative number on failure.
- *
- * Looks up the ioctl function in the ::ioctls table, checking for root
- * privileges if so required, and dispatches to the respective function.
- */
-long drm_ioctl(struct file *filp,
- unsigned int cmd, unsigned long arg)
-{
- struct drm_file *file_priv = filp->private_data;
- struct drm_device *dev;
- struct drm_ioctl_desc *ioctl;
- drm_ioctl_t *func;
- unsigned int nr = DRM_IOCTL_NR(cmd);
- int retcode = -EINVAL;
- char stack_kdata[128];
- char *kdata = NULL;
- unsigned int usize, asize;
-
- dev = file_priv->minor->dev;
- atomic_inc(&dev->ioctl_count);
- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
- ++file_priv->ioctl_count;
-
- DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
- task_pid_nr(current), cmd, nr,
- (long)old_encode_dev(file_priv->minor->device),
- file_priv->authenticated);
-
- if ((nr >= DRM_CORE_IOCTL_COUNT) &&
- ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
- goto err_i1;
- if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) &&
- (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
- u32 drv_size;
- ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
- drv_size = _IOC_SIZE(ioctl->cmd_drv);
- usize = asize = _IOC_SIZE(cmd);
- if (drv_size > asize)
- asize = drv_size;
- }
- else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
- ioctl = &drm_ioctls[nr];
- cmd = ioctl->cmd;
- usize = asize = _IOC_SIZE(cmd);
- } else
- goto err_i1;
-
- /* Do not trust userspace, use our own definition */
- func = ioctl->func;
- /* is there a local override? */
- if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl)
- func = dev->driver->dma_ioctl;
-
- if (!func) {
- DRM_DEBUG("no function\n");
- retcode = -EINVAL;
- } else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) ||
- ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) ||
- ((ioctl->flags & DRM_MASTER) && !file_priv->is_master) ||
- (!(ioctl->flags & DRM_CONTROL_ALLOW) && (file_priv->minor->type == DRM_MINOR_CONTROL))) {
- retcode = -EACCES;
- } else {
- if (cmd & (IOC_IN | IOC_OUT)) {
- if (asize <= sizeof(stack_kdata)) {
- kdata = stack_kdata;
- } else {
- kdata = kmalloc(asize, GFP_KERNEL);
- if (!kdata) {
- retcode = -ENOMEM;
- goto err_i1;
- }
- }
- }
-
- if (cmd & IOC_IN) {
- if (copy_from_user(kdata, (void __user *)arg,
- usize) != 0) {
- retcode = -EFAULT;
- goto err_i1;
- }
- } else
- memset(kdata, 0, usize);
-
- if (ioctl->flags & DRM_UNLOCKED)
- retcode = func(dev, kdata, file_priv);
- else {
- mutex_lock(&drm_global_mutex);
- retcode = func(dev, kdata, file_priv);
- mutex_unlock(&drm_global_mutex);
- }
-
- if (cmd & IOC_OUT) {
- if (copy_to_user((void __user *)arg, kdata,
- usize) != 0)
- retcode = -EFAULT;
- }
- }
-
- err_i1:
- if (kdata != stack_kdata)
- kfree(kdata);
- atomic_dec(&dev->ioctl_count);
- if (retcode)
- DRM_DEBUG("ret = %x\n", retcode);
- return retcode;
-}
-
-EXPORT_SYMBOL(drm_ioctl);
-
-struct drm_local_map *drm_getsarea(struct drm_device *dev)
-{
- struct drm_map_list *entry;
-
- list_for_each_entry(entry, &dev->maplist, head) {
- if (entry->map && entry->map->type == _DRM_SHM &&
- (entry->map->flags & _DRM_CONTAINS_LOCK)) {
- return entry->map;
- }
- }
- return NULL;
-}
-EXPORT_SYMBOL(drm_getsarea);
+++ /dev/null
-/*
- * Copyright (c) 2006 Luc Verhaegen (quirks list)
- * Copyright (c) 2007-2008 Intel Corporation
- * Jesse Barnes <jesse.barnes@intel.com>
- * Copyright 2010 Red Hat, Inc.
- *
- * DDC probing routines (drm_ddc_read & drm_do_probe_ddc_edid) originally from
- * FB layer.
- * Copyright (C) 2006 Dennis Munsie <dmunsie@cecropia.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/i2c.h>
-#include "drmP.h"
-#include "drm_edid.h"
-#include "drm_edid_modes.h"
-
-#define version_greater(edid, maj, min) \
- (((edid)->version > (maj)) || \
- ((edid)->version == (maj) && (edid)->revision > (min)))
-
-#define EDID_EST_TIMINGS 16
-#define EDID_STD_TIMINGS 8
-#define EDID_DETAILED_TIMINGS 4
-
-/*
- * EDID blocks out in the wild have a variety of bugs; try to collect
- * them here (note that userspace may work around broken monitors first,
- * but fixes should make their way here so that the kernel "just works"
- * on as many displays as possible).
- */
-
-/* First detailed mode wrong, use largest 60Hz mode */
-#define EDID_QUIRK_PREFER_LARGE_60 (1 << 0)
-/* Reported 135MHz pixel clock is too high, needs adjustment */
-#define EDID_QUIRK_135_CLOCK_TOO_HIGH (1 << 1)
-/* Prefer the largest mode at 75 Hz */
-#define EDID_QUIRK_PREFER_LARGE_75 (1 << 2)
-/* Detail timing is in cm not mm */
-#define EDID_QUIRK_DETAILED_IN_CM (1 << 3)
-/* Detailed timing descriptors have bogus size values, so just take the
- * maximum size and use that.
- */
-#define EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE (1 << 4)
-/* Monitor forgot to set the first detailed is preferred bit. */
-#define EDID_QUIRK_FIRST_DETAILED_PREFERRED (1 << 5)
-/* use +hsync +vsync for detailed mode */
-#define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6)
-
-struct detailed_mode_closure {
- struct drm_connector *connector;
- struct edid *edid;
- bool preferred;
- u32 quirks;
- int modes;
-};
-
-#define LEVEL_DMT 0
-#define LEVEL_GTF 1
-#define LEVEL_GTF2 2
-#define LEVEL_CVT 3
-
-static struct edid_quirk {
- char *vendor;
- int product_id;
- u32 quirks;
-} edid_quirk_list[] = {
- /* Acer AL1706 */
- { "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 },
- /* Acer F51 */
- { "API", 0x7602, EDID_QUIRK_PREFER_LARGE_60 },
- /* Unknown Acer */
- { "ACR", 2423, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
-
- /* Belinea 10 15 55 */
- { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
- { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },
-
- /* Envision Peripherals, Inc. EN-7100e */
- { "EPI", 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH },
- /* Envision EN2028 */
- { "EPI", 8232, EDID_QUIRK_PREFER_LARGE_60 },
-
- /* Funai Electronics PM36B */
- { "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 |
- EDID_QUIRK_DETAILED_IN_CM },
-
- /* LG Philips LCD LP154W01-A5 */
- { "LPL", 0, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
- { "LPL", 0x2a00, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
-
- /* Philips 107p5 CRT */
- { "PHL", 57364, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
-
- /* Proview AY765C */
- { "PTS", 765, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
-
- /* Samsung SyncMaster 205BW. Note: irony */
- { "SAM", 541, EDID_QUIRK_DETAILED_SYNC_PP },
- /* Samsung SyncMaster 22[5-6]BW */
- { "SAM", 596, EDID_QUIRK_PREFER_LARGE_60 },
- { "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 },
-};
-
-/*** DDC fetch and block validation ***/
-
-static const u8 edid_header[] = {
- 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
-};
-
-/*
- * Sanity check the EDID block (base or extension). Return 0 if the block
- * doesn't check out, or 1 if it's valid.
- */
-static bool
-drm_edid_block_valid(u8 *raw_edid)
-{
- int i;
- u8 csum = 0;
- struct edid *edid = (struct edid *)raw_edid;
-
- if (raw_edid[0] == 0x00) {
- int score = 0;
-
- for (i = 0; i < sizeof(edid_header); i++)
- if (raw_edid[i] == edid_header[i])
- score++;
-
- if (score == 8) ;
- else if (score >= 6) {
- DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
- memcpy(raw_edid, edid_header, sizeof(edid_header));
- } else {
- goto bad;
- }
- }
-
- for (i = 0; i < EDID_LENGTH; i++)
- csum += raw_edid[i];
- if (csum) {
- DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum);
-
- /* allow CEA to slide through, switches mangle this */
- if (raw_edid[0] != 0x02)
- goto bad;
- }
-
- /* per-block-type checks */
- switch (raw_edid[0]) {
- case 0: /* base */
- if (edid->version != 1) {
- DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
- goto bad;
- }
-
- if (edid->revision > 4)
- DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n");
- break;
-
- default:
- break;
- }
-
- return 1;
-
-bad:
- if (raw_edid) {
- DRM_ERROR("Raw EDID:\n");
- print_hex_dump_bytes(KERN_ERR, DUMP_PREFIX_NONE, raw_edid, EDID_LENGTH);
- printk("\n");
- }
- return 0;
-}
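The checksum loop above relies on the EDID rule that every 128-byte block sums to zero modulo 256: the last byte of a conformant block is the two's complement of the sum of the first 127. As a short illustration of how a generator would pick it (block is an assumed u8[EDID_LENGTH] buffer):

/* Illustration only: how a conformant block's final byte is derived. */
u8 sum = 0;
int i;
for (i = 0; i < EDID_LENGTH - 1; i++)
        sum += block[i];
block[EDID_LENGTH - 1] = (u8)(0x100 - sum);     /* total now wraps to zero */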
-
-/**
- * drm_edid_is_valid - sanity check EDID data
- * @edid: EDID data
- *
- * Sanity-check an entire EDID record (including extensions)
- */
-bool drm_edid_is_valid(struct edid *edid)
-{
- int i;
- u8 *raw = (u8 *)edid;
-
- if (!edid)
- return false;
-
- for (i = 0; i <= edid->extensions; i++)
- if (!drm_edid_block_valid(raw + i * EDID_LENGTH))
- return false;
-
- return true;
-}
-EXPORT_SYMBOL(drm_edid_is_valid);
-
-#define DDC_ADDR 0x50
-#define DDC_SEGMENT_ADDR 0x30
-/**
- * Get EDID information via I2C.
- *
- * \param adapter : i2c device adaptor
- * \param buf : EDID data buffer to be filled
- * \param block : index of the 128-byte EDID block to fetch (0 = base block)
- * \param len : EDID data buffer length
- * \return 0 on success or -1 on failure.
- *
- * Try to fetch EDID information by calling i2c driver function.
- */
-static int
-drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
- int block, int len)
-{
- unsigned char start = block * EDID_LENGTH;
- struct i2c_msg msgs[] = {
- {
- .addr = DDC_ADDR,
- .flags = 0,
- .len = 1,
- .buf = &start,
- }, {
- .addr = DDC_ADDR,
- .flags = I2C_M_RD,
- .len = len,
- .buf = buf,
- }
- };
-
- if (i2c_transfer(adapter, msgs, 2) == 2)
- return 0;
-
- return -1;
-}
-
-static u8 *
-drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
-{
- int i, j = 0, valid_extensions = 0;
- u8 *block, *new;
-
- if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
- return NULL;
-
- /* base block fetch */
- for (i = 0; i < 4; i++) {
- if (drm_do_probe_ddc_edid(adapter, block, 0, EDID_LENGTH))
- goto out;
- if (drm_edid_block_valid(block))
- break;
- }
- if (i == 4)
- goto carp;
-
- /* if there's no extensions, we're done */
- if (block[0x7e] == 0)
- return block;
-
- new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL);
- if (!new)
- goto out;
- block = new;
-
- for (j = 1; j <= block[0x7e]; j++) {
- for (i = 0; i < 4; i++) {
- if (drm_do_probe_ddc_edid(adapter,
- block + (valid_extensions + 1) * EDID_LENGTH,
- j, EDID_LENGTH))
- goto out;
- if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH)) {
- valid_extensions++;
- break;
- }
- }
- if (i == 4)
- dev_warn(connector->dev->dev,
- "%s: Ignoring invalid EDID block %d.\n",
- drm_get_connector_name(connector), j);
- }
-
- if (valid_extensions != block[0x7e]) {
- block[EDID_LENGTH-1] += block[0x7e] - valid_extensions;
- block[0x7e] = valid_extensions;
- new = krealloc(block, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
- if (!new)
- goto out;
- block = new;
- }
-
- return block;
-
-carp:
- dev_warn(connector->dev->dev, "%s: EDID block %d invalid.\n",
- drm_get_connector_name(connector), j);
-
-out:
- kfree(block);
- return NULL;
-}
-
-/**
- * Probe DDC presence.
- *
- * \param adapter : i2c device adaptor
- * \return 1 on success
- */
-static bool
-drm_probe_ddc(struct i2c_adapter *adapter)
-{
- unsigned char out;
-
- return (drm_do_probe_ddc_edid(adapter, &out, 0, 1) == 0);
-}
-
-/**
- * drm_get_edid - get EDID data, if available
- * @connector: connector we're probing
- * @adapter: i2c adapter to use for DDC
- *
- * Poke the given i2c channel to grab EDID data if possible. If found,
- * attach it to the connector.
- *
- * Return edid data or NULL if we couldn't find any.
- */
-struct edid *drm_get_edid(struct drm_connector *connector,
- struct i2c_adapter *adapter)
-{
- struct edid *edid = NULL;
-
- if (drm_probe_ddc(adapter))
- edid = (struct edid *)drm_do_get_edid(connector, adapter);
-
- connector->display_info.raw_edid = (char *)edid;
-
- return edid;
-
-}
-EXPORT_SYMBOL(drm_get_edid);
-
-/*** EDID parsing ***/
-
-/**
- * edid_vendor - match a string against EDID's obfuscated vendor field
- * @edid: EDID to match
- * @vendor: vendor string
- *
- * Returns true if @vendor is in @edid, false otherwise
- */
-static bool edid_vendor(struct edid *edid, char *vendor)
-{
- char edid_vendor[3];
-
- edid_vendor[0] = ((edid->mfg_id[0] & 0x7c) >> 2) + '@';
- edid_vendor[1] = (((edid->mfg_id[0] & 0x3) << 3) |
- ((edid->mfg_id[1] & 0xe0) >> 5)) + '@';
- edid_vendor[2] = (edid->mfg_id[1] & 0x1f) + '@';
-
- return !strncmp(edid_vendor, vendor, 3);
-}
-
-/**
- * edid_get_quirks - return quirk flags for a given EDID
- * @edid: EDID to process
- *
- * This tells subsequent routines what fixes they need to apply.
- */
-static u32 edid_get_quirks(struct edid *edid)
-{
- struct edid_quirk *quirk;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(edid_quirk_list); i++) {
- quirk = &edid_quirk_list[i];
-
- if (edid_vendor(edid, quirk->vendor) &&
- (EDID_PRODUCT_ID(edid) == quirk->product_id))
- return quirk->quirks;
- }
-
- return 0;
-}
-
-#define MODE_SIZE(m) ((m)->hdisplay * (m)->vdisplay)
-#define MODE_REFRESH_DIFF(m,r) (abs((m)->vrefresh - (r)))
-
-/**
- * edid_fixup_preferred - set preferred modes based on quirk list
- * @connector: has mode list to fix up
- * @quirks: quirks list
- *
- * Walk the mode list for @connector, clearing the preferred status
- * on existing modes and setting it anew for the right mode ala @quirks.
- */
-static void edid_fixup_preferred(struct drm_connector *connector,
- u32 quirks)
-{
- struct drm_display_mode *t, *cur_mode, *preferred_mode;
- int target_refresh = 0;
-
- if (list_empty(&connector->probed_modes))
- return;
-
- if (quirks & EDID_QUIRK_PREFER_LARGE_60)
- target_refresh = 60;
- if (quirks & EDID_QUIRK_PREFER_LARGE_75)
- target_refresh = 75;
-
- preferred_mode = list_first_entry(&connector->probed_modes,
- struct drm_display_mode, head);
-
- list_for_each_entry_safe(cur_mode, t, &connector->probed_modes, head) {
- cur_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
-
- if (cur_mode == preferred_mode)
- continue;
-
- /* Largest mode is preferred */
- if (MODE_SIZE(cur_mode) > MODE_SIZE(preferred_mode))
- preferred_mode = cur_mode;
-
- /* At a given size, try to get closest to target refresh */
- if ((MODE_SIZE(cur_mode) == MODE_SIZE(preferred_mode)) &&
- MODE_REFRESH_DIFF(cur_mode, target_refresh) <
- MODE_REFRESH_DIFF(preferred_mode, target_refresh)) {
- preferred_mode = cur_mode;
- }
- }
-
- preferred_mode->type |= DRM_MODE_TYPE_PREFERRED;
-}
-
-struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
- int hsize, int vsize, int fresh)
-{
- int i;
- struct drm_display_mode *ptr, *mode;
-
- mode = NULL;
- for (i = 0; i < drm_num_dmt_modes; i++) {
- ptr = &drm_dmt_modes[i];
- if (hsize == ptr->hdisplay &&
- vsize == ptr->vdisplay &&
- fresh == drm_mode_vrefresh(ptr)) {
- /* get the expected default mode */
- mode = drm_mode_duplicate(dev, ptr);
- break;
- }
- }
- return mode;
-}
-EXPORT_SYMBOL(drm_mode_find_dmt);
-
-typedef void detailed_cb(struct detailed_timing *timing, void *closure);
-
-static void
-cea_for_each_detailed_block(u8 *ext, detailed_cb *cb, void *closure)
-{
- int i, n = 0;
- u8 rev = ext[0x01], d = ext[0x02];
- u8 *det_base = ext + d;
-
- switch (rev) {
- case 0:
- /* can't happen */
- return;
- case 1:
- /* have to infer how many blocks we have, check pixel clock */
- for (i = 0; i < 6; i++)
- if (det_base[18*i] || det_base[18*i+1])
- n++;
- break;
- default:
- /* explicit count */
- n = min(ext[0x03] & 0x0f, 6);
- break;
- }
-
- for (i = 0; i < n; i++)
- cb((struct detailed_timing *)(det_base + 18 * i), closure);
-}
-
-static void
-vtb_for_each_detailed_block(u8 *ext, detailed_cb *cb, void *closure)
-{
- unsigned int i, n = min((int)ext[0x02], 6);
- u8 *det_base = ext + 5;
-
- if (ext[0x01] != 1)
- return; /* unknown version */
-
- for (i = 0; i < n; i++)
- cb((struct detailed_timing *)(det_base + 18 * i), closure);
-}
-
-static void
-drm_for_each_detailed_block(u8 *raw_edid, detailed_cb *cb, void *closure)
-{
- int i;
- struct edid *edid = (struct edid *)raw_edid;
-
- if (edid == NULL)
- return;
-
- for (i = 0; i < EDID_DETAILED_TIMINGS; i++)
- cb(&(edid->detailed_timings[i]), closure);
-
- for (i = 1; i <= raw_edid[0x7e]; i++) {
- u8 *ext = raw_edid + (i * EDID_LENGTH);
- switch (*ext) {
- case CEA_EXT:
- cea_for_each_detailed_block(ext, cb, closure);
- break;
- case VTB_EXT:
- vtb_for_each_detailed_block(ext, cb, closure);
- break;
- default:
- break;
- }
- }
-}
-
-static void
-is_rb(struct detailed_timing *t, void *data)
-{
- u8 *r = (u8 *)t;
- if (r[3] == EDID_DETAIL_MONITOR_RANGE)
- if (r[15] & 0x10)
- *(bool *)data = true;
-}
-
-/* EDID 1.4 defines this explicitly. For EDID 1.3, we guess, badly. */
-static bool
-drm_monitor_supports_rb(struct edid *edid)
-{
- if (edid->revision >= 4) {
- bool ret = false;
- drm_for_each_detailed_block((u8 *)edid, is_rb, &ret);
- return ret;
- }
-
- return ((edid->input & DRM_EDID_INPUT_DIGITAL) != 0);
-}
-
-static void
-find_gtf2(struct detailed_timing *t, void *data)
-{
- u8 *r = (u8 *)t;
- if (r[3] == EDID_DETAIL_MONITOR_RANGE && r[10] == 0x02)
- *(u8 **)data = r;
-}
-
-/* Secondary GTF curve kicks in above some break frequency */
-static int
-drm_gtf2_hbreak(struct edid *edid)
-{
- u8 *r = NULL;
- drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
- return r ? (r[12] * 2) : 0;
-}
-
-static int
-drm_gtf2_2c(struct edid *edid)
-{
- u8 *r = NULL;
- drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
- return r ? r[13] : 0;
-}
-
-static int
-drm_gtf2_m(struct edid *edid)
-{
- u8 *r = NULL;
- drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
- return r ? (r[15] << 8) + r[14] : 0;
-}
-
-static int
-drm_gtf2_k(struct edid *edid)
-{
- u8 *r = NULL;
- drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
- return r ? r[16] : 0;
-}
-
-static int
-drm_gtf2_2j(struct edid *edid)
-{
- u8 *r = NULL;
- drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
- return r ? r[17] : 0;
-}
-
-/**
- * standard_timing_level - get std. timing level (CVT/GTF/DMT)
- * @edid: EDID block to scan
- */
-static int standard_timing_level(struct edid *edid)
-{
- if (edid->revision >= 2) {
- if (edid->revision >= 4 && (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF))
- return LEVEL_CVT;
- if (drm_gtf2_hbreak(edid))
- return LEVEL_GTF2;
- return LEVEL_GTF;
- }
- return LEVEL_DMT;
-}
-
-/*
- * 0 is reserved. The spec says 0x01 fill for unused timings. Some old
- * monitors fill with ascii space (0x20) instead.
- */
-static int
-bad_std_timing(u8 a, u8 b)
-{
- return (a == 0x00 && b == 0x00) ||
- (a == 0x01 && b == 0x01) ||
- (a == 0x20 && b == 0x20);
-}
-
-/**
- * drm_mode_std - convert standard mode info (width, height, refresh) into mode
- * @connector: connector the new mode will be probed against
- * @edid: EDID block containing the standard timing
- * @t: standard timing params
- * @revision: EDID revision of the block containing @t
- *
- * Take the standard timing params (in this case width, aspect, and refresh)
- * and convert them into a real mode using CVT/GTF/DMT.
- */
-static struct drm_display_mode *
-drm_mode_std(struct drm_connector *connector, struct edid *edid,
- struct std_timing *t, int revision)
-{
- struct drm_device *dev = connector->dev;
- struct drm_display_mode *m, *mode = NULL;
- int hsize, vsize;
- int vrefresh_rate;
- unsigned aspect_ratio = (t->vfreq_aspect & EDID_TIMING_ASPECT_MASK)
- >> EDID_TIMING_ASPECT_SHIFT;
- unsigned vfreq = (t->vfreq_aspect & EDID_TIMING_VFREQ_MASK)
- >> EDID_TIMING_VFREQ_SHIFT;
- int timing_level = standard_timing_level(edid);
-
- if (bad_std_timing(t->hsize, t->vfreq_aspect))
- return NULL;
-
- /* According to the EDID spec, the hdisplay = hsize * 8 + 248 */
- hsize = t->hsize * 8 + 248;
- /* vrefresh_rate = vfreq + 60 */
- vrefresh_rate = vfreq + 60;
- /* the vdisplay is calculated based on the aspect ratio */
- if (aspect_ratio == 0) {
- if (revision < 3)
- vsize = hsize;
- else
- vsize = (hsize * 10) / 16;
- } else if (aspect_ratio == 1)
- vsize = (hsize * 3) / 4;
- else if (aspect_ratio == 2)
- vsize = (hsize * 4) / 5;
- else
- vsize = (hsize * 9) / 16;
-
- /* HDTV hack, part 1 */
- if (vrefresh_rate == 60 &&
- ((hsize == 1360 && vsize == 765) ||
- (hsize == 1368 && vsize == 769))) {
- hsize = 1366;
- vsize = 768;
- }
-
- /*
- * If this connector already has a mode for this size and refresh
- * rate (because it came from detailed or CVT info), use that
- * instead. This way we don't have to guess at interlace or
- * reduced blanking.
- */
- list_for_each_entry(m, &connector->probed_modes, head)
- if (m->hdisplay == hsize && m->vdisplay == vsize &&
- drm_mode_vrefresh(m) == vrefresh_rate)
- return NULL;
-
- /* HDTV hack, part 2 */
- if (hsize == 1366 && vsize == 768 && vrefresh_rate == 60) {
- mode = drm_cvt_mode(dev, 1366, 768, vrefresh_rate, 0, 0,
- false);
- mode->hdisplay = 1366;
- mode->hsync_start = mode->hsync_start - 1;
- mode->hsync_end = mode->hsync_end - 1;
- return mode;
- }
-
- /* check whether it can be found in default mode table */
- mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate);
- if (mode)
- return mode;
-
- switch (timing_level) {
- case LEVEL_DMT:
- break;
- case LEVEL_GTF:
- mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
- break;
- case LEVEL_GTF2:
- /*
- * This is potentially wrong if there's ever a monitor with
- * more than one ranges section, each claiming a different
- * secondary GTF curve. Please don't do that.
- */
- mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
- if (drm_mode_hsync(mode) > drm_gtf2_hbreak(edid)) {
- kfree(mode);
- mode = drm_gtf_mode_complex(dev, hsize, vsize,
- vrefresh_rate, 0, 0,
- drm_gtf2_m(edid),
- drm_gtf2_2c(edid),
- drm_gtf2_k(edid),
- drm_gtf2_2j(edid));
- }
- break;
- case LEVEL_CVT:
- mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0,
- false);
- break;
- }
- return mode;
-}
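The packed arithmetic above is easier to follow with numbers in it; a worked example, assuming a standard-timing byte pair of 0x81 and 0x40 in an EDID 1.3+ block:

/* t->hsize        = 0x81 (129) ->  hsize = 129 * 8 + 248   = 1280
 * t->vfreq_aspect = 0x40       ->  aspect bits = 1 (4:3):
 *                                  vsize = (1280 * 3) / 4  = 960
 *                                  vfreq bits = 0:
 *                                  vrefresh_rate = 0 + 60  = 60 Hz
 * i.e. the descriptor requests 1280x960@60, which is then looked up in the
 * DMT table or synthesised via GTF/CVT depending on timing_level. */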
-
-/*
- * EDID is delightfully ambiguous about how interlaced modes are to be
- * encoded. Our internal representation is of frame height, but some
- * HDTV detailed timings are encoded as field height.
- *
- * The format list here is from CEA, in frame size. Technically we
- * should be checking refresh rate too. Whatever.
- */
-static void
-drm_mode_do_interlace_quirk(struct drm_display_mode *mode,
- struct detailed_pixel_timing *pt)
-{
- int i;
- static const struct {
- int w, h;
- } cea_interlaced[] = {
- { 1920, 1080 },
- { 720, 480 },
- { 1440, 480 },
- { 2880, 480 },
- { 720, 576 },
- { 1440, 576 },
- { 2880, 576 },
- };
-
- if (!(pt->misc & DRM_EDID_PT_INTERLACED))
- return;
-
- for (i = 0; i < ARRAY_SIZE(cea_interlaced); i++) {
- if ((mode->hdisplay == cea_interlaced[i].w) &&
- (mode->vdisplay == cea_interlaced[i].h / 2)) {
- mode->vdisplay *= 2;
- mode->vsync_start *= 2;
- mode->vsync_end *= 2;
- mode->vtotal *= 2;
- mode->vtotal |= 1;
- }
- }
-
- mode->flags |= DRM_MODE_FLAG_INTERLACE;
-}
-
-/**
- * drm_mode_detailed - create a new mode from an EDID detailed timing section
- * @dev: DRM device (needed to create new mode)
- * @edid: EDID block
- * @timing: EDID detailed timing info
- * @quirks: quirks to apply
- *
- * An EDID detailed timing block contains enough info for us to create and
- * return a new struct drm_display_mode.
- */
-static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
- struct edid *edid,
- struct detailed_timing *timing,
- u32 quirks)
-{
- struct drm_display_mode *mode;
- struct detailed_pixel_timing *pt = &timing->data.pixel_data;
- unsigned hactive = (pt->hactive_hblank_hi & 0xf0) << 4 | pt->hactive_lo;
- unsigned vactive = (pt->vactive_vblank_hi & 0xf0) << 4 | pt->vactive_lo;
- unsigned hblank = (pt->hactive_hblank_hi & 0xf) << 8 | pt->hblank_lo;
- unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo;
- unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) << 2 | pt->hsync_offset_lo;
- unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) << 4 | pt->hsync_pulse_width_lo;
- unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) >> 2 | pt->vsync_offset_pulse_width_lo >> 4;
- unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 4 | (pt->vsync_offset_pulse_width_lo & 0xf);
-
- /* ignore tiny modes */
- if (hactive < 64 || vactive < 64)
- return NULL;
-
- if (pt->misc & DRM_EDID_PT_STEREO) {
- printk(KERN_WARNING "stereo mode not supported\n");
- return NULL;
- }
- if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) {
- printk(KERN_WARNING "composite sync not supported\n");
- }
-
- /* it is incorrect if hsync/vsync width is zero */
- if (!hsync_pulse_width || !vsync_pulse_width) {
- DRM_DEBUG_KMS("Incorrect Detailed timing. "
- "Wrong Hsync/Vsync pulse width\n");
- return NULL;
- }
- mode = drm_mode_create(dev);
- if (!mode)
- return NULL;
-
- mode->type = DRM_MODE_TYPE_DRIVER;
-
- if (quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH)
- timing->pixel_clock = cpu_to_le16(1088);
-
- mode->clock = le16_to_cpu(timing->pixel_clock) * 10;
-
- mode->hdisplay = hactive;
- mode->hsync_start = mode->hdisplay + hsync_offset;
- mode->hsync_end = mode->hsync_start + hsync_pulse_width;
- mode->htotal = mode->hdisplay + hblank;
-
- mode->vdisplay = vactive;
- mode->vsync_start = mode->vdisplay + vsync_offset;
- mode->vsync_end = mode->vsync_start + vsync_pulse_width;
- mode->vtotal = mode->vdisplay + vblank;
-
- /* Some EDIDs have bogus h/vtotal values */
- if (mode->hsync_end > mode->htotal)
- mode->htotal = mode->hsync_end + 1;
- if (mode->vsync_end > mode->vtotal)
- mode->vtotal = mode->vsync_end + 1;
-
- drm_mode_do_interlace_quirk(mode, pt);
-
- drm_mode_set_name(mode);
-
- if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
- pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
- }
-
- mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ?
- DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
- mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
- DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
-
- mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4;
- mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf) << 8;
-
- if (quirks & EDID_QUIRK_DETAILED_IN_CM) {
- mode->width_mm *= 10;
- mode->height_mm *= 10;
- }
-
- if (quirks & EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE) {
- mode->width_mm = edid->width_cm * 10;
- mode->height_mm = edid->height_cm * 10;
- }
-
- return mode;
-}
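The nibble splitting at the top of this function is easiest to check against a concrete descriptor. Decoding the ubiquitous 1080p detailed timing (first twelve bytes 02 3A 80 18 71 38 2D 40 58 2C 45 00, assumed here purely for illustration) with the expressions above gives:

/* pixel_clock = 0x3a02                    -> 14850 * 10 = 148500 kHz
 * hactive     = (0x71 & 0xf0) << 4 | 0x80 -> 0x780      = 1920
 * hblank      = (0x71 & 0x0f) << 8 | 0x18 -> 0x118      = 280   (htotal 2200)
 * vactive     = (0x40 & 0xf0) << 4 | 0x38 -> 0x438      = 1080
 * vblank      = (0x40 & 0x0f) << 8 | 0x2d -> 0x02d      = 45    (vtotal 1125)
 * hsync offset/width = 0x58, 0x2c         -> 88 / 44
 * vsync offset/width = 0x45 >> 4, 0x45 & 0xf -> 4 / 5
 * i.e. 1920x1080 at 60 Hz with a 148.5 MHz pixel clock. */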
-
-static bool
-mode_is_rb(struct drm_display_mode *mode)
-{
- return (mode->htotal - mode->hdisplay == 160) &&
- (mode->hsync_end - mode->hdisplay == 80) &&
- (mode->hsync_end - mode->hsync_start == 32) &&
- (mode->vsync_start - mode->vdisplay == 3);
-}
-
-static bool
-mode_in_hsync_range(struct drm_display_mode *mode, struct edid *edid, u8 *t)
-{
- int hsync, hmin, hmax;
-
- hmin = t[7];
- if (edid->revision >= 4)
- hmin += ((t[4] & 0x04) ? 255 : 0);
- hmax = t[8];
- if (edid->revision >= 4)
- hmax += ((t[4] & 0x08) ? 255 : 0);
- hsync = drm_mode_hsync(mode);
-
- return (hsync <= hmax && hsync >= hmin);
-}
-
-static bool
-mode_in_vsync_range(struct drm_display_mode *mode, struct edid *edid, u8 *t)
-{
- int vsync, vmin, vmax;
-
- vmin = t[5];
- if (edid->revision >= 4)
- vmin += ((t[4] & 0x01) ? 255 : 0);
- vmax = t[6];
- if (edid->revision >= 4)
- vmax += ((t[4] & 0x02) ? 255 : 0);
- vsync = drm_mode_vrefresh(mode);
-
- return (vsync <= vmax && vsync >= vmin);
-}
-
-static u32
-range_pixel_clock(struct edid *edid, u8 *t)
-{
- /* unspecified */
- if (t[9] == 0 || t[9] == 255)
- return 0;
-
- /* 1.4 with CVT support gives us real precision, yay */
- if (edid->revision >= 4 && t[10] == 0x04)
- return (t[9] * 10000) - ((t[12] >> 2) * 250);
-
- /* 1.3 is pathetic, so fuzz up a bit */
- return t[9] * 10000 + 5001;
-}
-
-static bool
-mode_in_range(struct drm_display_mode *mode, struct edid *edid,
- struct detailed_timing *timing)
-{
- u32 max_clock;
- u8 *t = (u8 *)timing;
-
- if (!mode_in_hsync_range(mode, edid, t))
- return false;
-
- if (!mode_in_vsync_range(mode, edid, t))
- return false;
-
- if ((max_clock = range_pixel_clock(edid, t)))
- if (mode->clock > max_clock)
- return false;
-
- /* 1.4 max horizontal check */
- if (edid->revision >= 4 && t[10] == 0x04)
- if (t[13] && mode->hdisplay > 8 * (t[13] + (256 * (t[12]&0x3))))
- return false;
-
- if (mode_is_rb(mode) && !drm_monitor_supports_rb(edid))
- return false;
-
- return true;
-}
-
-/*
- * XXX If drm_dmt_modes ever regrows the CVT-R modes (and it will) this will
- * need to account for them.
- */
-static int
-drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
- struct detailed_timing *timing)
-{
- int i, modes = 0;
- struct drm_display_mode *newmode;
- struct drm_device *dev = connector->dev;
-
- for (i = 0; i < drm_num_dmt_modes; i++) {
- if (mode_in_range(drm_dmt_modes + i, edid, timing)) {
- newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
- if (newmode) {
- drm_mode_probed_add(connector, newmode);
- modes++;
- }
- }
- }
-
- return modes;
-}
-
-static void
-do_inferred_modes(struct detailed_timing *timing, void *c)
-{
- struct detailed_mode_closure *closure = c;
- struct detailed_non_pixel *data = &timing->data.other_data;
- int gtf = (closure->edid->features & DRM_EDID_FEATURE_DEFAULT_GTF);
-
- if (gtf && data->type == EDID_DETAIL_MONITOR_RANGE)
- closure->modes += drm_gtf_modes_for_range(closure->connector,
- closure->edid,
- timing);
-}
-
-static int
-add_inferred_modes(struct drm_connector *connector, struct edid *edid)
-{
- struct detailed_mode_closure closure = {
- connector, edid, 0, 0, 0
- };
-
- if (version_greater(edid, 1, 0))
- drm_for_each_detailed_block((u8 *)edid, do_inferred_modes,
- &closure);
-
- return closure.modes;
-}
-
-static int
-drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing)
-{
- int i, j, m, modes = 0;
- struct drm_display_mode *mode;
- u8 *est = ((u8 *)timing) + 5;
-
- for (i = 0; i < 6; i++) {
- for (j = 7; j > 0; j--) {
- m = (i * 8) + (7 - j);
- if (m >= ARRAY_SIZE(est3_modes))
- break;
- if (est[i] & (1 << j)) {
- mode = drm_mode_find_dmt(connector->dev,
- est3_modes[m].w,
- est3_modes[m].h,
- est3_modes[m].r
- /*, est3_modes[m].rb */);
- if (mode) {
- drm_mode_probed_add(connector, mode);
- modes++;
- }
- }
- }
- }
-
- return modes;
-}
-
-static void
-do_established_modes(struct detailed_timing *timing, void *c)
-{
- struct detailed_mode_closure *closure = c;
- struct detailed_non_pixel *data = &timing->data.other_data;
-
- if (data->type == EDID_DETAIL_EST_TIMINGS)
- closure->modes += drm_est3_modes(closure->connector, timing);
-}
-
-/**
- * add_established_modes - get est. modes from EDID and add them
- * @connector: connector to add the modes to
- * @edid: EDID block to scan
- *
- * Each EDID block contains a bitmap of the supported "established modes" list
- * (defined above). Tease them out and add them to the global modes list.
- */
-static int
-add_established_modes(struct drm_connector *connector, struct edid *edid)
-{
- struct drm_device *dev = connector->dev;
- unsigned long est_bits = edid->established_timings.t1 |
- (edid->established_timings.t2 << 8) |
- ((edid->established_timings.mfg_rsvd & 0x80) << 9);
- int i, modes = 0;
- struct detailed_mode_closure closure = {
- connector, edid, 0, 0, 0
- };
-
- for (i = 0; i <= EDID_EST_TIMINGS; i++) {
- if (est_bits & (1<<i)) {
- struct drm_display_mode *newmode;
- newmode = drm_mode_duplicate(dev, &edid_est_modes[i]);
- if (newmode) {
- drm_mode_probed_add(connector, newmode);
- modes++;
- }
- }
- }
-
- if (version_greater(edid, 1, 0))
- drm_for_each_detailed_block((u8 *)edid,
- do_established_modes, &closure);
-
- return modes + closure.modes;
-}
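
A quick standalone illustration of how the three established-timing bytes are folded into one bitmask, mirroring the expression in add_established_modes() above. The sample byte values are hypothetical; bit 16 comes from the manufacturer-reserved byte.

#include <stdio.h>

int main(void)
{
	/* hypothetical established-timing bytes from an EDID base block */
	unsigned char t1 = 0x21, t2 = 0x08, mfg_rsvd = 0x80;
	unsigned long est_bits = t1 | (t2 << 8) | ((mfg_rsvd & 0x80) << 9);
	int i;

	for (i = 0; i <= 16; i++)	/* same bit range the loop above walks */
		if (est_bits & (1UL << i))
			printf("established timing bit %d set\n", i);
	return 0;
}
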
-
-static void
-do_standard_modes(struct detailed_timing *timing, void *c)
-{
- struct detailed_mode_closure *closure = c;
- struct detailed_non_pixel *data = &timing->data.other_data;
- struct drm_connector *connector = closure->connector;
- struct edid *edid = closure->edid;
-
- if (data->type == EDID_DETAIL_STD_MODES) {
- int i;
- for (i = 0; i < 6; i++) {
- struct std_timing *std;
- struct drm_display_mode *newmode;
-
- std = &data->data.timings[i];
- newmode = drm_mode_std(connector, edid, std,
- edid->revision);
- if (newmode) {
- drm_mode_probed_add(connector, newmode);
- closure->modes++;
- }
- }
- }
-}
-
-/**
- * add_standard_modes - get std. modes from EDID and add them
- * @connector: connector to add the modes to
- * @edid: EDID block to scan
- *
- * Standard modes can be calculated using the appropriate standard (DMT,
- * GTF or CVT). Grab them from @edid and add them to the list.
- */
-static int
-add_standard_modes(struct drm_connector *connector, struct edid *edid)
-{
- int i, modes = 0;
- struct detailed_mode_closure closure = {
- connector, edid, 0, 0, 0
- };
-
- for (i = 0; i < EDID_STD_TIMINGS; i++) {
- struct drm_display_mode *newmode;
-
- newmode = drm_mode_std(connector, edid,
- &edid->standard_timings[i],
- edid->revision);
- if (newmode) {
- drm_mode_probed_add(connector, newmode);
- modes++;
- }
- }
-
- if (version_greater(edid, 1, 0))
- drm_for_each_detailed_block((u8 *)edid, do_standard_modes,
- &closure);
-
- /* XXX should also look for standard codes in VTB blocks */
-
- return modes + closure.modes;
-}
-
-static int drm_cvt_modes(struct drm_connector *connector,
- struct detailed_timing *timing)
-{
- int i, j, modes = 0;
- struct drm_display_mode *newmode;
- struct drm_device *dev = connector->dev;
- struct cvt_timing *cvt;
- const int rates[] = { 60, 85, 75, 60, 50 };
- const u8 empty[3] = { 0, 0, 0 };
-
- for (i = 0; i < 4; i++) {
- int uninitialized_var(width), height;
- cvt = &(timing->data.other_data.data.cvt[i]);
-
- if (!memcmp(cvt->code, empty, 3))
- continue;
-
- height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 4) + 1) * 2;
- switch (cvt->code[1] & 0x0c) {
- case 0x00:
- width = height * 4 / 3;
- break;
- case 0x04:
- width = height * 16 / 9;
- break;
- case 0x08:
- width = height * 16 / 10;
- break;
- case 0x0c:
- width = height * 15 / 9;
- break;
- }
-
- for (j = 1; j < 5; j++) {
- if (cvt->code[2] & (1 << j)) {
- newmode = drm_cvt_mode(dev, width, height,
- rates[j], j == 0,
- false, false);
- if (newmode) {
- drm_mode_probed_add(connector, newmode);
- modes++;
- }
- }
- }
- }
-
- return modes;
-}
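
The following standalone sketch decodes a single CVT 3-byte code the same way drm_cvt_modes() does above (vertical size, aspect ratio, then the supported-rate bits, using the same bit mapping as the loop above). The sample code bytes are hypothetical.

#include <stdio.h>

int main(void)
{
	/* hypothetical CVT 3-byte code: 1920x1080 with the 60 Hz bit flagged */
	unsigned char code[3] = { 0x1b, 0x24, 0x08 };
	const int rates[] = { 60, 85, 75, 60, 50 };
	int width, height, j;

	height = (code[0] + ((code[1] & 0xf0) << 4) + 1) * 2;
	switch (code[1] & 0x0c) {
	case 0x00: width = height * 4 / 3;   break;
	case 0x04: width = height * 16 / 9;  break;
	case 0x08: width = height * 16 / 10; break;
	default:   width = height * 15 / 9;  break;	/* 0x0c */
	}

	for (j = 1; j < 5; j++)
		if (code[2] & (1 << j))
			printf("%dx%d@%dHz\n", width, height, rates[j]);
	return 0;
}
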
-
-static void
-do_cvt_mode(struct detailed_timing *timing, void *c)
-{
- struct detailed_mode_closure *closure = c;
- struct detailed_non_pixel *data = &timing->data.other_data;
-
- if (data->type == EDID_DETAIL_CVT_3BYTE)
- closure->modes += drm_cvt_modes(closure->connector, timing);
-}
-
-static int
-add_cvt_modes(struct drm_connector *connector, struct edid *edid)
-{
- struct detailed_mode_closure closure = {
- connector, edid, 0, 0, 0
- };
-
- if (version_greater(edid, 1, 2))
- drm_for_each_detailed_block((u8 *)edid, do_cvt_mode, &closure);
-
- /* XXX should also look for CVT codes in VTB blocks */
-
- return closure.modes;
-}
-
-static void
-do_detailed_mode(struct detailed_timing *timing, void *c)
-{
- struct detailed_mode_closure *closure = c;
- struct drm_display_mode *newmode;
-
- if (timing->pixel_clock) {
- newmode = drm_mode_detailed(closure->connector->dev,
- closure->edid, timing,
- closure->quirks);
- if (!newmode)
- return;
-
- if (closure->preferred)
- newmode->type |= DRM_MODE_TYPE_PREFERRED;
-
- drm_mode_probed_add(closure->connector, newmode);
- closure->modes++;
- closure->preferred = 0;
- }
-}
-
-/*
- * add_detailed_modes - Add modes from detailed timings
- * @connector: attached connector
- * @edid: EDID block to scan
- * @quirks: quirks to apply
- */
-static int
-add_detailed_modes(struct drm_connector *connector, struct edid *edid,
- u32 quirks)
-{
- struct detailed_mode_closure closure = {
- connector,
- edid,
- 1,
- quirks,
- 0
- };
-
- if (closure.preferred && !version_greater(edid, 1, 3))
- closure.preferred =
- (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);
-
- drm_for_each_detailed_block((u8 *)edid, do_detailed_mode, &closure);
-
- return closure.modes;
-}
-
-#define HDMI_IDENTIFIER 0x000C03
-#define AUDIO_BLOCK 0x01
-#define VENDOR_BLOCK 0x03
-#define EDID_BASIC_AUDIO (1 << 6)
-
-/**
- * drm_find_cea_extension - search EDID for a CEA extension block
- */
-static u8 *drm_find_cea_extension(struct edid *edid)
-{
- u8 *edid_ext = NULL;
- int i;
-
- /* No EDID or EDID extensions */
- if (edid == NULL || edid->extensions == 0)
- return NULL;
-
- /* Find CEA extension */
- for (i = 0; i < edid->extensions; i++) {
- edid_ext = (u8 *)edid + EDID_LENGTH * (i + 1);
- if (edid_ext[0] == CEA_EXT)
- break;
- }
-
- if (i == edid->extensions)
- return NULL;
-
- return edid_ext;
-}
-
-/**
- * drm_detect_hdmi_monitor - detect whether monitor is hdmi.
- * @edid: monitor EDID information
- *
- * Parse the CEA extension according to CEA-861-B.
- * Return true if HDMI, false if not or unknown.
- */
-bool drm_detect_hdmi_monitor(struct edid *edid)
-{
- u8 *edid_ext;
- int i, hdmi_id;
- int start_offset, end_offset;
- bool is_hdmi = false;
-
- edid_ext = drm_find_cea_extension(edid);
- if (!edid_ext)
- goto end;
-
- /* Data block offset in CEA extension block */
- start_offset = 4;
- end_offset = edid_ext[2];
-
- /*
- * Because the HDMI identifier is in the Vendor Specific Block,
- * search for it in all the data blocks of the CEA extension.
- */
- for (i = start_offset; i < end_offset;
- /* Increased by data block len */
- i += ((edid_ext[i] & 0x1f) + 1)) {
- /* Find vendor specific block */
- if ((edid_ext[i] >> 5) == VENDOR_BLOCK) {
- hdmi_id = edid_ext[i + 1] | (edid_ext[i + 2] << 8) |
- edid_ext[i + 3] << 16;
- /* Find HDMI identifier */
- if (hdmi_id == HDMI_IDENTIFIER)
- is_hdmi = true;
- break;
- }
- }
-
-end:
- return is_hdmi;
-}
-EXPORT_SYMBOL(drm_detect_hdmi_monitor);
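
The vendor-specific data block scan above can be exercised on its own. This standalone sketch builds a hypothetical CEA extension block containing a vendor block with the HDMI OUI (stored LSB first) and walks the data blocks exactly as drm_detect_hdmi_monitor() does.

#include <stdio.h>

#define HDMI_IDENTIFIER 0x000C03
#define VENDOR_BLOCK    0x03

int main(void)
{
	/* hypothetical 128-byte CEA extension block; DTDs start at offset 8 */
	unsigned char ext[128] = { 0x02, 0x03, 0x08, 0x00 };
	int i, end = ext[2];

	/* vendor-specific data block: tag 3, length 3, OUI 03 0C 00 (LSB first) */
	ext[4] = (VENDOR_BLOCK << 5) | 3;
	ext[5] = 0x03; ext[6] = 0x0c; ext[7] = 0x00;

	for (i = 4; i < end; i += (ext[i] & 0x1f) + 1) {
		if ((ext[i] >> 5) == VENDOR_BLOCK) {
			int id = ext[i + 1] | (ext[i + 2] << 8) | (ext[i + 3] << 16);
			printf("vendor OUI 0x%06x -> %s\n", id,
			       id == HDMI_IDENTIFIER ? "HDMI sink" : "not HDMI");
			break;
		}
	}
	return 0;
}
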
-
-/**
- * drm_detect_monitor_audio - check monitor audio capability
- * @edid: monitor EDID information
- *
- * Monitor should have CEA extension block.
- * If monitor has 'basic audio', but no CEA audio blocks, it's 'basic
- * audio' only. If there is any audio extension block and supported
- * audio format, assume at least 'basic audio' support, even if 'basic
- * audio' is not defined in EDID.
- *
- */
-bool drm_detect_monitor_audio(struct edid *edid)
-{
- u8 *edid_ext;
- int i, j;
- bool has_audio = false;
- int start_offset, end_offset;
-
- edid_ext = drm_find_cea_extension(edid);
- if (!edid_ext)
- goto end;
-
- has_audio = ((edid_ext[3] & EDID_BASIC_AUDIO) != 0);
-
- if (has_audio) {
- DRM_DEBUG_KMS("Monitor has basic audio support\n");
- goto end;
- }
-
- /* Data block offset in CEA extension block */
- start_offset = 4;
- end_offset = edid_ext[2];
-
- for (i = start_offset; i < end_offset;
- i += ((edid_ext[i] & 0x1f) + 1)) {
- if ((edid_ext[i] >> 5) == AUDIO_BLOCK) {
- has_audio = true;
- for (j = 1; j < (edid_ext[i] & 0x1f); j += 3)
- DRM_DEBUG_KMS("CEA audio format %d\n",
- (edid_ext[i + j] >> 3) & 0xf);
- goto end;
- }
- }
-end:
- return has_audio;
-}
-EXPORT_SYMBOL(drm_detect_monitor_audio);
-
-/**
- * drm_add_edid_modes - add modes from EDID data, if available
- * @connector: connector we're probing
- * @edid: edid data
- *
- * Add the specified modes to the connector's mode list.
- *
- * Return number of modes added or 0 if we couldn't find any.
- */
-int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
-{
- int num_modes = 0;
- u32 quirks;
-
- if (edid == NULL) {
- return 0;
- }
- if (!drm_edid_is_valid(edid)) {
- dev_warn(connector->dev->dev, "%s: EDID invalid.\n",
- drm_get_connector_name(connector));
- return 0;
- }
-
- quirks = edid_get_quirks(edid);
-
- /*
- * EDID spec says modes should be preferred in this order:
- * - preferred detailed mode
- * - other detailed modes from base block
- * - detailed modes from extension blocks
- * - CVT 3-byte code modes
- * - standard timing codes
- * - established timing codes
- * - modes inferred from GTF or CVT range information
- *
- * We get this pretty much right.
- *
- * XXX order for additional mode types in extension blocks?
- */
- num_modes += add_detailed_modes(connector, edid, quirks);
- num_modes += add_cvt_modes(connector, edid);
- num_modes += add_standard_modes(connector, edid);
- num_modes += add_established_modes(connector, edid);
- num_modes += add_inferred_modes(connector, edid);
-
- if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
- edid_fixup_preferred(connector, quirks);
-
- connector->display_info.width_mm = edid->width_cm * 10;
- connector->display_info.height_mm = edid->height_cm * 10;
-
- return num_modes;
-}
-EXPORT_SYMBOL(drm_add_edid_modes);
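
A minimal sketch of how a KMS driver of this era typically wires drm_add_edid_modes() into its connector ->get_modes() helper, assuming the usual drm_get_edid() and drm_mode_connector_update_edid_property() helpers from the same DRM core. The mydrv_* names and the ddc field are hypothetical.

#include <linux/slab.h>
#include "drmP.h"
#include "drm_crtc_helper.h"
#include "drm_edid.h"

/* hypothetical driver connector wrapper; only the DDC adapter matters here */
struct mydrv_connector {
	struct drm_connector base;
	struct i2c_adapter *ddc;
};

static int mydrv_connector_get_modes(struct drm_connector *connector)
{
	struct mydrv_connector *mc =
		container_of(connector, struct mydrv_connector, base);
	struct edid *edid;
	int count = 0;

	edid = drm_get_edid(connector, mc->ddc);
	if (edid) {
		drm_mode_connector_update_edid_property(connector, edid);
		count = drm_add_edid_modes(connector, edid);	/* parse all blocks */
		kfree(edid);
	}
	return count;
}
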
-
-/**
- * drm_add_modes_noedid - add modes for the connectors without EDID
- * @connector: connector we're probing
- * @hdisplay: the horizontal display limit
- * @vdisplay: the vertical display limit
- *
- * Add the specified modes to the connector's mode list. A mode is only
- * added when its hdisplay/vdisplay do not exceed the given limits.
- *
- * Return number of modes added or 0 if we couldn't find any.
- */
-int drm_add_modes_noedid(struct drm_connector *connector,
- int hdisplay, int vdisplay)
-{
- int i, count, num_modes = 0;
- struct drm_display_mode *mode, *ptr;
- struct drm_device *dev = connector->dev;
-
- count = sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
- if (hdisplay < 0)
- hdisplay = 0;
- if (vdisplay < 0)
- vdisplay = 0;
-
- for (i = 0; i < count; i++) {
- ptr = &drm_dmt_modes[i];
- if (hdisplay && vdisplay) {
- /*
- * Only when both limits are valid are they used to decide
- * whether the mode should be added to the connector's
- * mode list.
- */
- if (ptr->hdisplay > hdisplay ||
- ptr->vdisplay > vdisplay)
- continue;
- }
- if (drm_mode_vrefresh(ptr) > 61)
- continue;
- mode = drm_mode_duplicate(dev, ptr);
- if (mode) {
- drm_mode_probed_add(connector, mode);
- num_modes++;
- }
- }
- return num_modes;
-}
-EXPORT_SYMBOL(drm_add_modes_noedid);
+++ /dev/null
-/*
- * Copyright (C) 2009 Francisco Jerez.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "drm_encoder_slave.h"
-
-/**
- * drm_i2c_encoder_init - Initialize an I2C slave encoder
- * @dev: DRM device.
- * @encoder: Encoder to be attached to the I2C device. You aren't
- * required to have called drm_encoder_init() before.
- * @adap: I2C adapter that will be used to communicate with
- * the device.
- * @info: Information that will be used to create the I2C device.
- * Required fields are @addr and @type.
- *
- * Create an I2C device on the specified bus (the module containing its
- * driver is transparently loaded) and attach it to the specified
- * &drm_encoder_slave. The @slave_funcs field will be initialized with
- * the hooks provided by the slave driver.
- *
- * If @info->platform_data is non-NULL it will be used as the initial
- * slave config.
- *
- * Returns 0 on success or a negative errno on failure, in particular,
- * -ENODEV is returned when no matching driver is found.
- */
-int drm_i2c_encoder_init(struct drm_device *dev,
- struct drm_encoder_slave *encoder,
- struct i2c_adapter *adap,
- const struct i2c_board_info *info)
-{
- char modalias[sizeof(I2C_MODULE_PREFIX)
- + I2C_NAME_SIZE];
- struct module *module = NULL;
- struct i2c_client *client;
- struct drm_i2c_encoder_driver *encoder_drv;
- int err = 0;
-
- snprintf(modalias, sizeof(modalias),
- "%s%s", I2C_MODULE_PREFIX, info->type);
- request_module(modalias);
-
- client = i2c_new_device(adap, info);
- if (!client) {
- err = -ENOMEM;
- goto fail;
- }
-
- if (!client->driver) {
- err = -ENODEV;
- goto fail_unregister;
- }
-
- module = client->driver->driver.owner;
- if (!try_module_get(module)) {
- err = -ENODEV;
- goto fail_unregister;
- }
-
- encoder->bus_priv = client;
-
- encoder_drv = to_drm_i2c_encoder_driver(client->driver);
-
- err = encoder_drv->encoder_init(client, dev, encoder);
- if (err)
- goto fail_unregister;
-
- if (info->platform_data)
- encoder->slave_funcs->set_config(&encoder->base,
- info->platform_data);
-
- return 0;
-
-fail_unregister:
- i2c_unregister_device(client);
- module_put(module);
-fail:
- return err;
-}
-EXPORT_SYMBOL(drm_i2c_encoder_init);
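
A short usage sketch for drm_i2c_encoder_init(): a driver attaching an external encoder chip on one of its I2C buses during initialization. The chip name "tv-encoder", the 0x45 address and the mydrv_* naming are hypothetical; only @type and @addr are required in the board info, as documented above.

#include <linux/i2c.h>
#include "drm_encoder_slave.h"

static int mydrv_attach_slave_encoder(struct drm_device *dev,
				      struct drm_encoder_slave *slave,
				      struct i2c_adapter *bus)
{
	/* hypothetical slave encoder chip at address 0x45 on this bus */
	const struct i2c_board_info info = {
		I2C_BOARD_INFO("tv-encoder", 0x45),
	};

	return drm_i2c_encoder_init(dev, slave, bus, &info);
}
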
-
-/**
- * drm_i2c_encoder_destroy - Unregister the I2C device backing an encoder
- * @drm_encoder: Encoder to be unregistered.
- *
- * This should be called from the @destroy method of an I2C slave
- * encoder driver once I2C access is no longer needed.
- */
-void drm_i2c_encoder_destroy(struct drm_encoder *drm_encoder)
-{
- struct drm_encoder_slave *encoder = to_encoder_slave(drm_encoder);
- struct i2c_client *client = drm_i2c_encoder_get_client(drm_encoder);
- struct module *module = client->driver->driver.owner;
-
- i2c_unregister_device(client);
- encoder->bus_priv = NULL;
-
- module_put(module);
-}
-EXPORT_SYMBOL(drm_i2c_encoder_destroy);
+++ /dev/null
-/*
- * Copyright (c) 2006-2009 Red Hat Inc.
- * Copyright (c) 2006-2008 Intel Corporation
- * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
- *
- * DRM framebuffer helper functions
- *
- * Permission to use, copy, modify, distribute, and sell this software and its
- * documentation for any purpose is hereby granted without fee, provided that
- * the above copyright notice appear in all copies and that both that copyright
- * notice and this permission notice appear in supporting documentation, and
- * that the name of the copyright holders not be used in advertising or
- * publicity pertaining to distribution of the software without specific,
- * written prior permission. The copyright holders make no representations
- * about the suitability of this software for any purpose. It is provided "as
- * is" without express or implied warranty.
- *
- * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
- * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
- * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
- * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
- * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
- * OF THIS SOFTWARE.
- *
- * Authors:
- * Dave Airlie <airlied@linux.ie>
- * Jesse Barnes <jesse.barnes@intel.com>
- */
-#include <linux/kernel.h>
-#include <linux/sysrq.h>
-#include <linux/slab.h>
-#include <linux/fb.h>
-#include "drmP.h"
-#include "drm_crtc.h"
-#include "drm_fb_helper.h"
-#include "drm_crtc_helper.h"
-
-MODULE_AUTHOR("David Airlie, Jesse Barnes");
-MODULE_DESCRIPTION("DRM KMS helper");
-MODULE_LICENSE("GPL and additional rights");
-
-static LIST_HEAD(kernel_fb_helper_list);
-
-/* simple single crtc case helper function */
-int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
-{
- struct drm_device *dev = fb_helper->dev;
- struct drm_connector *connector;
- int i;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct drm_fb_helper_connector *fb_helper_connector;
-
- fb_helper_connector = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL);
- if (!fb_helper_connector)
- goto fail;
-
- fb_helper_connector->connector = connector;
- fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector;
- }
- return 0;
-fail:
- for (i = 0; i < fb_helper->connector_count; i++) {
- kfree(fb_helper->connector_info[i]);
- fb_helper->connector_info[i] = NULL;
- }
- fb_helper->connector_count = 0;
- return -ENOMEM;
-}
-EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors);
-
-/**
- * drm_fb_helper_connector_parse_command_line - parse command line for connector
- * @fb_helper_conn: fb helper connector to parse the line for
- * @mode_option: per connector mode option
- *
- * This parses the connector-specific, then the generic, command line
- * options for modes and settings to configure the connector.
- *
- * This uses the same parameters as fb modedb.c, plus an extra force
- * suffix at the end:
- * <xres>x<yres>[M][R][-<bpp>][@<refresh>][i][m][eDd]
- *
- * where 'e' forces the output on, 'D' forces it on as digital and
- * 'd' forces it off.
- */
-static bool drm_fb_helper_connector_parse_command_line(struct drm_fb_helper_connector *fb_helper_conn,
- const char *mode_option)
-{
- const char *name;
- unsigned int namelen;
- int res_specified = 0, bpp_specified = 0, refresh_specified = 0;
- unsigned int xres = 0, yres = 0, bpp = 32, refresh = 0;
- int yres_specified = 0, cvt = 0, rb = 0, interlace = 0, margins = 0;
- int i;
- enum drm_connector_force force = DRM_FORCE_UNSPECIFIED;
- struct drm_fb_helper_cmdline_mode *cmdline_mode;
- struct drm_connector *connector;
-
- if (!fb_helper_conn)
- return false;
- connector = fb_helper_conn->connector;
-
- cmdline_mode = &fb_helper_conn->cmdline_mode;
- if (!mode_option)
- mode_option = fb_mode_option;
-
- if (!mode_option) {
- cmdline_mode->specified = false;
- return false;
- }
-
- name = mode_option;
- namelen = strlen(name);
- for (i = namelen-1; i >= 0; i--) {
- switch (name[i]) {
- case '@':
- namelen = i;
- if (!refresh_specified && !bpp_specified &&
- !yres_specified) {
- refresh = simple_strtol(&name[i+1], NULL, 10);
- refresh_specified = 1;
- if (cvt || rb)
- cvt = 0;
- } else
- goto done;
- break;
- case '-':
- namelen = i;
- if (!bpp_specified && !yres_specified) {
- bpp = simple_strtol(&name[i+1], NULL, 10);
- bpp_specified = 1;
- if (cvt || rb)
- cvt = 0;
- } else
- goto done;
- break;
- case 'x':
- if (!yres_specified) {
- yres = simple_strtol(&name[i+1], NULL, 10);
- yres_specified = 1;
- } else
- goto done;
- case '0' ... '9':
- break;
- case 'M':
- if (!yres_specified)
- cvt = 1;
- break;
- case 'R':
- if (cvt)
- rb = 1;
- break;
- case 'm':
- if (!cvt)
- margins = 1;
- break;
- case 'i':
- if (!cvt)
- interlace = 1;
- break;
- case 'e':
- force = DRM_FORCE_ON;
- break;
- case 'D':
- if ((connector->connector_type != DRM_MODE_CONNECTOR_DVII) &&
- (connector->connector_type != DRM_MODE_CONNECTOR_HDMIB))
- force = DRM_FORCE_ON;
- else
- force = DRM_FORCE_ON_DIGITAL;
- break;
- case 'd':
- force = DRM_FORCE_OFF;
- break;
- default:
- goto done;
- }
- }
- if (i < 0 && yres_specified) {
- xres = simple_strtol(name, NULL, 10);
- res_specified = 1;
- }
-done:
-
- DRM_DEBUG_KMS("cmdline mode for connector %s %dx%d@%dHz%s%s%s\n",
- drm_get_connector_name(connector), xres, yres,
- (refresh) ? refresh : 60, (rb) ? " reduced blanking" :
- "", (margins) ? " with margins" : "", (interlace) ?
- " interlaced" : "");
-
- if (force) {
- const char *s;
- switch (force) {
- case DRM_FORCE_OFF: s = "OFF"; break;
- case DRM_FORCE_ON_DIGITAL: s = "ON - dig"; break;
- default:
- case DRM_FORCE_ON: s = "ON"; break;
- }
-
- DRM_INFO("forcing %s connector %s\n",
- drm_get_connector_name(connector), s);
- connector->force = force;
- }
-
- if (res_specified) {
- cmdline_mode->specified = true;
- cmdline_mode->xres = xres;
- cmdline_mode->yres = yres;
- }
-
- if (refresh_specified) {
- cmdline_mode->refresh_specified = true;
- cmdline_mode->refresh = refresh;
- }
-
- if (bpp_specified) {
- cmdline_mode->bpp_specified = true;
- cmdline_mode->bpp = bpp;
- }
- cmdline_mode->rb = rb ? true : false;
- cmdline_mode->cvt = cvt ? true : false;
- cmdline_mode->interlace = interlace ? true : false;
-
- return true;
-}
-
-static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper)
-{
- struct drm_fb_helper_connector *fb_helper_conn;
- int i;
-
- for (i = 0; i < fb_helper->connector_count; i++) {
- char *option = NULL;
-
- fb_helper_conn = fb_helper->connector_info[i];
-
- /* do something on return - turn off connector maybe */
- if (fb_get_options(drm_get_connector_name(fb_helper_conn->connector), &option))
- continue;
-
- drm_fb_helper_connector_parse_command_line(fb_helper_conn, option);
- }
- return 0;
-}
-
-static void drm_fb_helper_save_lut_atomic(struct drm_crtc *crtc, struct drm_fb_helper *helper)
-{
- uint16_t *r_base, *g_base, *b_base;
- int i;
-
- r_base = crtc->gamma_store;
- g_base = r_base + crtc->gamma_size;
- b_base = g_base + crtc->gamma_size;
-
- for (i = 0; i < crtc->gamma_size; i++)
- helper->funcs->gamma_get(crtc, &r_base[i], &g_base[i], &b_base[i], i);
-}
-
-static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc)
-{
- uint16_t *r_base, *g_base, *b_base;
-
- r_base = crtc->gamma_store;
- g_base = r_base + crtc->gamma_size;
- b_base = g_base + crtc->gamma_size;
-
- crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size);
-}
-
-int drm_fb_helper_debug_enter(struct fb_info *info)
-{
- struct drm_fb_helper *helper = info->par;
- struct drm_crtc_helper_funcs *funcs;
- int i;
-
- if (list_empty(&kernel_fb_helper_list))
- return false;
-
- list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) {
- for (i = 0; i < helper->crtc_count; i++) {
- struct drm_mode_set *mode_set =
- &helper->crtc_info[i].mode_set;
-
- if (!mode_set->crtc->enabled)
- continue;
-
- funcs = mode_set->crtc->helper_private;
- drm_fb_helper_save_lut_atomic(mode_set->crtc, helper);
- funcs->mode_set_base_atomic(mode_set->crtc,
- mode_set->fb,
- mode_set->x,
- mode_set->y,
- ENTER_ATOMIC_MODE_SET);
- }
- }
-
- return 0;
-}
-EXPORT_SYMBOL(drm_fb_helper_debug_enter);
-
-/* Find the real fb for a given fb helper CRTC */
-static struct drm_framebuffer *drm_mode_config_fb(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_crtc *c;
-
- list_for_each_entry(c, &dev->mode_config.crtc_list, head) {
- if (crtc->base.id == c->base.id)
- return c->fb;
- }
-
- return NULL;
-}
-
-int drm_fb_helper_debug_leave(struct fb_info *info)
-{
- struct drm_fb_helper *helper = info->par;
- struct drm_crtc *crtc;
- struct drm_crtc_helper_funcs *funcs;
- struct drm_framebuffer *fb;
- int i;
-
- for (i = 0; i < helper->crtc_count; i++) {
- struct drm_mode_set *mode_set = &helper->crtc_info[i].mode_set;
- crtc = mode_set->crtc;
- funcs = crtc->helper_private;
- fb = drm_mode_config_fb(crtc);
-
- if (!crtc->enabled)
- continue;
-
- if (!fb) {
- DRM_ERROR("no fb to restore??\n");
- continue;
- }
-
- drm_fb_helper_restore_lut_atomic(mode_set->crtc);
- funcs->mode_set_base_atomic(mode_set->crtc, fb, crtc->x,
- crtc->y, LEAVE_ATOMIC_MODE_SET);
- }
-
- return 0;
-}
-EXPORT_SYMBOL(drm_fb_helper_debug_leave);
-
-bool drm_fb_helper_force_kernel_mode(void)
-{
- int i = 0;
- bool ret, error = false;
- struct drm_fb_helper *helper;
-
- if (list_empty(&kernel_fb_helper_list))
- return false;
-
- list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) {
- for (i = 0; i < helper->crtc_count; i++) {
- struct drm_mode_set *mode_set = &helper->crtc_info[i].mode_set;
- ret = drm_crtc_helper_set_config(mode_set);
- if (ret)
- error = true;
- }
- }
- return error;
-}
-
-int drm_fb_helper_panic(struct notifier_block *n, unsigned long unused,
- void *panic_str)
-{
- printk(KERN_ERR "panic occurred, switching back to text console\n");
- return drm_fb_helper_force_kernel_mode();
-}
-EXPORT_SYMBOL(drm_fb_helper_panic);
-
-static struct notifier_block paniced = {
- .notifier_call = drm_fb_helper_panic,
-};
-
-/**
- * drm_fb_helper_restore - restore the framebuffer console (kernel) config
- *
- * Restores the kernel's fbcon mode, used for lastclose & panic paths.
- */
-void drm_fb_helper_restore(void)
-{
- bool ret;
- ret = drm_fb_helper_force_kernel_mode();
- if (ret == true)
- DRM_ERROR("Failed to restore crtc configuration\n");
-}
-EXPORT_SYMBOL(drm_fb_helper_restore);
-
-#ifdef CONFIG_MAGIC_SYSRQ
-static void drm_fb_helper_restore_work_fn(struct work_struct *ignored)
-{
- drm_fb_helper_restore();
-}
-static DECLARE_WORK(drm_fb_helper_restore_work, drm_fb_helper_restore_work_fn);
-
-static void drm_fb_helper_sysrq(int dummy1)
-{
- schedule_work(&drm_fb_helper_restore_work);
-}
-
-static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = {
- .handler = drm_fb_helper_sysrq,
- .help_msg = "force-fb(V)",
- .action_msg = "Restore framebuffer console",
-};
-#else
-static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { };
-#endif
-
-static void drm_fb_helper_on(struct fb_info *info)
-{
- struct drm_fb_helper *fb_helper = info->par;
- struct drm_device *dev = fb_helper->dev;
- struct drm_crtc *crtc;
- struct drm_crtc_helper_funcs *crtc_funcs;
- struct drm_connector *connector;
- struct drm_encoder *encoder;
- int i, j;
-
- /*
- * For each CRTC in this fb, turn the crtc on then,
- * find all associated encoders and turn them on.
- */
- mutex_lock(&dev->mode_config.mutex);
- for (i = 0; i < fb_helper->crtc_count; i++) {
- crtc = fb_helper->crtc_info[i].mode_set.crtc;
- crtc_funcs = crtc->helper_private;
-
- if (!crtc->enabled)
- continue;
-
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
-
- /* Walk the connectors & encoders on this fb turning them on */
- for (j = 0; j < fb_helper->connector_count; j++) {
- connector = fb_helper->connector_info[j]->connector;
- connector->dpms = DRM_MODE_DPMS_ON;
- drm_connector_property_set_value(connector,
- dev->mode_config.dpms_property,
- DRM_MODE_DPMS_ON);
- }
- /* Found a CRTC on this fb, now find encoders */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (encoder->crtc == crtc) {
- struct drm_encoder_helper_funcs *encoder_funcs;
-
- encoder_funcs = encoder->helper_private;
- encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
- }
- }
- }
- mutex_unlock(&dev->mode_config.mutex);
-}
-
-static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
-{
- struct drm_fb_helper *fb_helper = info->par;
- struct drm_device *dev = fb_helper->dev;
- struct drm_crtc *crtc;
- struct drm_crtc_helper_funcs *crtc_funcs;
- struct drm_connector *connector;
- struct drm_encoder *encoder;
- int i, j;
-
- /*
- * For each CRTC in this fb, find all associated encoders
- * and turn them off, then turn off the CRTC.
- */
- mutex_lock(&dev->mode_config.mutex);
- for (i = 0; i < fb_helper->crtc_count; i++) {
- crtc = fb_helper->crtc_info[i].mode_set.crtc;
- crtc_funcs = crtc->helper_private;
-
- if (!crtc->enabled)
- continue;
-
- /* Walk the connectors on this fb and mark them off */
- for (j = 0; j < fb_helper->connector_count; j++) {
- connector = fb_helper->connector_info[j]->connector;
- connector->dpms = dpms_mode;
- drm_connector_property_set_value(connector,
- dev->mode_config.dpms_property,
- dpms_mode);
- }
- /* Found a CRTC on this fb, now find encoders */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (encoder->crtc == crtc) {
- struct drm_encoder_helper_funcs *encoder_funcs;
-
- encoder_funcs = encoder->helper_private;
- encoder_funcs->dpms(encoder, dpms_mode);
- }
- }
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
- }
- mutex_unlock(&dev->mode_config.mutex);
-}
-
-int drm_fb_helper_blank(int blank, struct fb_info *info)
-{
- switch (blank) {
- /* Display: On; HSync: On, VSync: On */
- case FB_BLANK_UNBLANK:
- drm_fb_helper_on(info);
- break;
- /* Display: Off; HSync: On, VSync: On */
- case FB_BLANK_NORMAL:
- drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY);
- break;
- /* Display: Off; HSync: Off, VSync: On */
- case FB_BLANK_HSYNC_SUSPEND:
- drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY);
- break;
- /* Display: Off; HSync: On, VSync: Off */
- case FB_BLANK_VSYNC_SUSPEND:
- drm_fb_helper_off(info, DRM_MODE_DPMS_SUSPEND);
- break;
- /* Display: Off; HSync: Off, VSync: Off */
- case FB_BLANK_POWERDOWN:
- drm_fb_helper_off(info, DRM_MODE_DPMS_OFF);
- break;
- }
- return 0;
-}
-EXPORT_SYMBOL(drm_fb_helper_blank);
-
-static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
-{
- int i;
-
- for (i = 0; i < helper->connector_count; i++)
- kfree(helper->connector_info[i]);
- kfree(helper->connector_info);
- for (i = 0; i < helper->crtc_count; i++)
- kfree(helper->crtc_info[i].mode_set.connectors);
- kfree(helper->crtc_info);
-}
-
-int drm_fb_helper_init(struct drm_device *dev,
- struct drm_fb_helper *fb_helper,
- int crtc_count, int max_conn_count)
-{
- struct drm_crtc *crtc;
- int ret = 0;
- int i;
-
- fb_helper->dev = dev;
-
- INIT_LIST_HEAD(&fb_helper->kernel_fb_list);
-
- fb_helper->crtc_info = kcalloc(crtc_count, sizeof(struct drm_fb_helper_crtc), GFP_KERNEL);
- if (!fb_helper->crtc_info)
- return -ENOMEM;
-
- fb_helper->crtc_count = crtc_count;
- fb_helper->connector_info = kcalloc(dev->mode_config.num_connector, sizeof(struct drm_fb_helper_connector *), GFP_KERNEL);
- if (!fb_helper->connector_info) {
- kfree(fb_helper->crtc_info);
- return -ENOMEM;
- }
- fb_helper->connector_count = 0;
-
- for (i = 0; i < crtc_count; i++) {
- fb_helper->crtc_info[i].mode_set.connectors =
- kcalloc(max_conn_count,
- sizeof(struct drm_connector *),
- GFP_KERNEL);
-
- if (!fb_helper->crtc_info[i].mode_set.connectors) {
- ret = -ENOMEM;
- goto out_free;
- }
- fb_helper->crtc_info[i].mode_set.num_connectors = 0;
- }
-
- i = 0;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- fb_helper->crtc_info[i].crtc_id = crtc->base.id;
- fb_helper->crtc_info[i].mode_set.crtc = crtc;
- i++;
- }
- fb_helper->conn_limit = max_conn_count;
- return 0;
-out_free:
- drm_fb_helper_crtc_free(fb_helper);
- return -ENOMEM;
-}
-EXPORT_SYMBOL(drm_fb_helper_init);
-
-void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
-{
- if (!list_empty(&fb_helper->kernel_fb_list)) {
- list_del(&fb_helper->kernel_fb_list);
- if (list_empty(&kernel_fb_helper_list)) {
- printk(KERN_INFO "drm: unregistered panic notifier\n");
- atomic_notifier_chain_unregister(&panic_notifier_list,
- &paniced);
- unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
- }
- }
-
- drm_fb_helper_crtc_free(fb_helper);
-
-}
-EXPORT_SYMBOL(drm_fb_helper_fini);
-
-static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
- u16 blue, u16 regno, struct fb_info *info)
-{
- struct drm_fb_helper *fb_helper = info->par;
- struct drm_framebuffer *fb = fb_helper->fb;
- int pindex;
-
- if (info->fix.visual == FB_VISUAL_TRUECOLOR) {
- u32 *palette;
- u32 value;
- /* place color in psuedopalette */
- if (regno > 16)
- return -EINVAL;
- palette = (u32 *)info->pseudo_palette;
- red >>= (16 - info->var.red.length);
- green >>= (16 - info->var.green.length);
- blue >>= (16 - info->var.blue.length);
- value = (red << info->var.red.offset) |
- (green << info->var.green.offset) |
- (blue << info->var.blue.offset);
- palette[regno] = value;
- return 0;
- }
-
- pindex = regno;
-
- if (fb->bits_per_pixel == 16) {
- pindex = regno << 3;
-
- if (fb->depth == 16 && regno > 63)
- return -EINVAL;
- if (fb->depth == 15 && regno > 31)
- return -EINVAL;
-
- if (fb->depth == 16) {
- u16 r, g, b;
- int i;
- if (regno < 32) {
- for (i = 0; i < 8; i++)
- fb_helper->funcs->gamma_set(crtc, red,
- green, blue, pindex + i);
- }
-
- fb_helper->funcs->gamma_get(crtc, &r,
- &g, &b,
- pindex >> 1);
-
- for (i = 0; i < 4; i++)
- fb_helper->funcs->gamma_set(crtc, r,
- green, b,
- (pindex >> 1) + i);
- }
- }
-
- if (fb->depth != 16)
- fb_helper->funcs->gamma_set(crtc, red, green, blue, pindex);
- return 0;
-}
-
-int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
-{
- struct drm_fb_helper *fb_helper = info->par;
- struct drm_crtc_helper_funcs *crtc_funcs;
- u16 *red, *green, *blue, *transp;
- struct drm_crtc *crtc;
- int i, j, rc = 0;
- int start;
-
- for (i = 0; i < fb_helper->crtc_count; i++) {
- crtc = fb_helper->crtc_info[i].mode_set.crtc;
- crtc_funcs = crtc->helper_private;
-
- red = cmap->red;
- green = cmap->green;
- blue = cmap->blue;
- transp = cmap->transp;
- start = cmap->start;
-
- for (j = 0; j < cmap->len; j++) {
- u16 hred, hgreen, hblue, htransp = 0xffff;
-
- hred = *red++;
- hgreen = *green++;
- hblue = *blue++;
-
- if (transp)
- htransp = *transp++;
-
- rc = setcolreg(crtc, hred, hgreen, hblue, start++, info);
- if (rc)
- return rc;
- }
- crtc_funcs->load_lut(crtc);
- }
- return rc;
-}
-EXPORT_SYMBOL(drm_fb_helper_setcmap);
-
-int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
- struct fb_info *info)
-{
- struct drm_fb_helper *fb_helper = info->par;
- struct drm_framebuffer *fb = fb_helper->fb;
- int depth;
-
- if (var->pixclock != 0 || in_dbg_master())
- return -EINVAL;
-
- /* Need to resize the fb object !!! */
- if (var->bits_per_pixel > fb->bits_per_pixel || var->xres > fb->width || var->yres > fb->height) {
- DRM_DEBUG("fb userspace requested width/height/bpp is greater than current fb "
- "object %dx%d-%d > %dx%d-%d\n", var->xres, var->yres, var->bits_per_pixel,
- fb->width, fb->height, fb->bits_per_pixel);
- return -EINVAL;
- }
-
- switch (var->bits_per_pixel) {
- case 16:
- depth = (var->green.length == 6) ? 16 : 15;
- break;
- case 32:
- depth = (var->transp.length > 0) ? 32 : 24;
- break;
- default:
- depth = var->bits_per_pixel;
- break;
- }
-
- switch (depth) {
- case 8:
- var->red.offset = 0;
- var->green.offset = 0;
- var->blue.offset = 0;
- var->red.length = 8;
- var->green.length = 8;
- var->blue.length = 8;
- var->transp.length = 0;
- var->transp.offset = 0;
- break;
- case 15:
- var->red.offset = 10;
- var->green.offset = 5;
- var->blue.offset = 0;
- var->red.length = 5;
- var->green.length = 5;
- var->blue.length = 5;
- var->transp.length = 1;
- var->transp.offset = 15;
- break;
- case 16:
- var->red.offset = 11;
- var->green.offset = 5;
- var->blue.offset = 0;
- var->red.length = 5;
- var->green.length = 6;
- var->blue.length = 5;
- var->transp.length = 0;
- var->transp.offset = 0;
- break;
- case 24:
- var->red.offset = 16;
- var->green.offset = 8;
- var->blue.offset = 0;
- var->red.length = 8;
- var->green.length = 8;
- var->blue.length = 8;
- var->transp.length = 0;
- var->transp.offset = 0;
- break;
- case 32:
- var->red.offset = 16;
- var->green.offset = 8;
- var->blue.offset = 0;
- var->red.length = 8;
- var->green.length = 8;
- var->blue.length = 8;
- var->transp.length = 8;
- var->transp.offset = 24;
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-EXPORT_SYMBOL(drm_fb_helper_check_var);
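
The depth-16 case above corresponds to the usual RGB565 packing. This small standalone sketch (not kernel code, just an illustration) shows how those offsets and lengths translate into a pixel value.

#include <stdio.h>

/* Pack 8-bit-per-channel colour into the depth-16 layout reported above:
 * red at offset 11/length 5, green at 5/6, blue at 0/5. */
static unsigned short pack_rgb565(unsigned r, unsigned g, unsigned b)
{
	return (unsigned short)(((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3));
}

int main(void)
{
	printf("white = 0x%04x, pure green = 0x%04x\n",
	       pack_rgb565(255, 255, 255), pack_rgb565(0, 255, 0));
	return 0;
}
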
-
-/* this will let fbcon do the mode init */
-int drm_fb_helper_set_par(struct fb_info *info)
-{
- struct drm_fb_helper *fb_helper = info->par;
- struct drm_device *dev = fb_helper->dev;
- struct fb_var_screeninfo *var = &info->var;
- struct drm_crtc *crtc;
- int ret;
- int i;
-
- if (var->pixclock != 0) {
- DRM_ERROR("PIXEL CLOCK SET\n");
- return -EINVAL;
- }
-
- mutex_lock(&dev->mode_config.mutex);
- for (i = 0; i < fb_helper->crtc_count; i++) {
- crtc = fb_helper->crtc_info[i].mode_set.crtc;
- ret = crtc->funcs->set_config(&fb_helper->crtc_info[i].mode_set);
- if (ret) {
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
- }
- }
- mutex_unlock(&dev->mode_config.mutex);
-
- if (fb_helper->delayed_hotplug) {
- fb_helper->delayed_hotplug = false;
- drm_fb_helper_hotplug_event(fb_helper);
- }
- return 0;
-}
-EXPORT_SYMBOL(drm_fb_helper_set_par);
-
-int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
- struct fb_info *info)
-{
- struct drm_fb_helper *fb_helper = info->par;
- struct drm_device *dev = fb_helper->dev;
- struct drm_mode_set *modeset;
- struct drm_crtc *crtc;
- int ret = 0;
- int i;
-
- mutex_lock(&dev->mode_config.mutex);
- for (i = 0; i < fb_helper->crtc_count; i++) {
- crtc = fb_helper->crtc_info[i].mode_set.crtc;
-
- modeset = &fb_helper->crtc_info[i].mode_set;
-
- modeset->x = var->xoffset;
- modeset->y = var->yoffset;
-
- if (modeset->num_connectors) {
- ret = crtc->funcs->set_config(modeset);
- if (!ret) {
- info->var.xoffset = var->xoffset;
- info->var.yoffset = var->yoffset;
- }
- }
- }
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-}
-EXPORT_SYMBOL(drm_fb_helper_pan_display);
-
-int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
- int preferred_bpp)
-{
- int new_fb = 0;
- int crtc_count = 0;
- int i;
- struct fb_info *info;
- struct drm_fb_helper_surface_size sizes;
- int gamma_size = 0;
-
- memset(&sizes, 0, sizeof(struct drm_fb_helper_surface_size));
- sizes.surface_depth = 24;
- sizes.surface_bpp = 32;
- sizes.fb_width = (unsigned)-1;
- sizes.fb_height = (unsigned)-1;
-
- /* if the driver picks 8 or 16 by default, use that
- * for both depth and bpp */
- if (preferred_bpp != sizes.surface_bpp) {
- sizes.surface_depth = sizes.surface_bpp = preferred_bpp;
- }
- /* first up get a count of crtcs now in use and new min/maxes width/heights */
- for (i = 0; i < fb_helper->connector_count; i++) {
- struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i];
- struct drm_fb_helper_cmdline_mode *cmdline_mode;
-
- cmdline_mode = &fb_helper_conn->cmdline_mode;
-
- if (cmdline_mode->bpp_specified) {
- switch (cmdline_mode->bpp) {
- case 8:
- sizes.surface_depth = sizes.surface_bpp = 8;
- break;
- case 15:
- sizes.surface_depth = 15;
- sizes.surface_bpp = 16;
- break;
- case 16:
- sizes.surface_depth = sizes.surface_bpp = 16;
- break;
- case 24:
- sizes.surface_depth = sizes.surface_bpp = 24;
- break;
- case 32:
- sizes.surface_depth = 24;
- sizes.surface_bpp = 32;
- break;
- }
- break;
- }
- }
-
- crtc_count = 0;
- for (i = 0; i < fb_helper->crtc_count; i++) {
- struct drm_display_mode *desired_mode;
- desired_mode = fb_helper->crtc_info[i].desired_mode;
-
- if (desired_mode) {
- if (gamma_size == 0)
- gamma_size = fb_helper->crtc_info[i].mode_set.crtc->gamma_size;
- if (desired_mode->hdisplay < sizes.fb_width)
- sizes.fb_width = desired_mode->hdisplay;
- if (desired_mode->vdisplay < sizes.fb_height)
- sizes.fb_height = desired_mode->vdisplay;
- if (desired_mode->hdisplay > sizes.surface_width)
- sizes.surface_width = desired_mode->hdisplay;
- if (desired_mode->vdisplay > sizes.surface_height)
- sizes.surface_height = desired_mode->vdisplay;
- crtc_count++;
- }
- }
-
- if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) {
- /* hmm everyone went away - assume VGA cable just fell out
- and will come back later. */
- DRM_INFO("Cannot find any crtc or sizes - going 1024x768\n");
- sizes.fb_width = sizes.surface_width = 1024;
- sizes.fb_height = sizes.surface_height = 768;
- }
-
- /* push down into drivers */
- new_fb = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes);
- if (new_fb < 0)
- return new_fb;
-
- info = fb_helper->fbdev;
-
- /* set the fb pointer */
- for (i = 0; i < fb_helper->crtc_count; i++) {
- fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb;
- }
-
- if (new_fb) {
- info->var.pixclock = 0;
- if (register_framebuffer(info) < 0) {
- return -EINVAL;
- }
-
- printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
- info->fix.id);
-
- } else {
- drm_fb_helper_set_par(info);
- }
-
- /* Switch back to kernel console on panic */
- /* multi card linked list maybe */
- if (list_empty(&kernel_fb_helper_list)) {
- printk(KERN_INFO "drm: registered panic notifier\n");
- atomic_notifier_chain_register(&panic_notifier_list,
- &paniced);
- register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
- }
- if (new_fb)
- list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list);
-
- return 0;
-}
-EXPORT_SYMBOL(drm_fb_helper_single_fb_probe);
-
-void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
- uint32_t depth)
-{
- info->fix.type = FB_TYPE_PACKED_PIXELS;
- info->fix.visual = depth == 8 ? FB_VISUAL_PSEUDOCOLOR :
- FB_VISUAL_TRUECOLOR;
- info->fix.type_aux = 0;
- info->fix.xpanstep = 1; /* doing it in hw */
- info->fix.ypanstep = 1; /* doing it in hw */
- info->fix.ywrapstep = 0;
- info->fix.accel = FB_ACCEL_NONE;
-
- info->fix.line_length = pitch;
- return;
-}
-EXPORT_SYMBOL(drm_fb_helper_fill_fix);
-
-void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
- uint32_t fb_width, uint32_t fb_height)
-{
- struct drm_framebuffer *fb = fb_helper->fb;
- info->pseudo_palette = fb_helper->pseudo_palette;
- info->var.xres_virtual = fb->width;
- info->var.yres_virtual = fb->height;
- info->var.bits_per_pixel = fb->bits_per_pixel;
- info->var.xoffset = 0;
- info->var.yoffset = 0;
- info->var.activate = FB_ACTIVATE_NOW;
- info->var.height = -1;
- info->var.width = -1;
-
- switch (fb->depth) {
- case 8:
- info->var.red.offset = 0;
- info->var.green.offset = 0;
- info->var.blue.offset = 0;
- info->var.red.length = 8; /* 8bit DAC */
- info->var.green.length = 8;
- info->var.blue.length = 8;
- info->var.transp.offset = 0;
- info->var.transp.length = 0;
- break;
- case 15:
- info->var.red.offset = 10;
- info->var.green.offset = 5;
- info->var.blue.offset = 0;
- info->var.red.length = 5;
- info->var.green.length = 5;
- info->var.blue.length = 5;
- info->var.transp.offset = 15;
- info->var.transp.length = 1;
- break;
- case 16:
- info->var.red.offset = 11;
- info->var.green.offset = 5;
- info->var.blue.offset = 0;
- info->var.red.length = 5;
- info->var.green.length = 6;
- info->var.blue.length = 5;
- info->var.transp.offset = 0;
- break;
- case 24:
- info->var.red.offset = 16;
- info->var.green.offset = 8;
- info->var.blue.offset = 0;
- info->var.red.length = 8;
- info->var.green.length = 8;
- info->var.blue.length = 8;
- info->var.transp.offset = 0;
- info->var.transp.length = 0;
- break;
- case 32:
- info->var.red.offset = 16;
- info->var.green.offset = 8;
- info->var.blue.offset = 0;
- info->var.red.length = 8;
- info->var.green.length = 8;
- info->var.blue.length = 8;
- info->var.transp.offset = 24;
- info->var.transp.length = 8;
- break;
- default:
- break;
- }
-
- info->var.xres = fb_width;
- info->var.yres = fb_height;
-}
-EXPORT_SYMBOL(drm_fb_helper_fill_var);
-
-static int drm_fb_helper_probe_connector_modes(struct drm_fb_helper *fb_helper,
- uint32_t maxX,
- uint32_t maxY)
-{
- struct drm_connector *connector;
- int count = 0;
- int i;
-
- for (i = 0; i < fb_helper->connector_count; i++) {
- connector = fb_helper->connector_info[i]->connector;
- count += connector->funcs->fill_modes(connector, maxX, maxY);
- }
-
- return count;
-}
-
-static struct drm_display_mode *drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector, int width, int height)
-{
- struct drm_display_mode *mode;
-
- list_for_each_entry(mode, &fb_connector->connector->modes, head) {
- if (drm_mode_width(mode) > width ||
- drm_mode_height(mode) > height)
- continue;
- if (mode->type & DRM_MODE_TYPE_PREFERRED)
- return mode;
- }
- return NULL;
-}
-
-static bool drm_has_cmdline_mode(struct drm_fb_helper_connector *fb_connector)
-{
- struct drm_fb_helper_cmdline_mode *cmdline_mode;
- cmdline_mode = &fb_connector->cmdline_mode;
- return cmdline_mode->specified;
-}
-
-static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
- int width, int height)
-{
- struct drm_fb_helper_cmdline_mode *cmdline_mode;
- struct drm_display_mode *mode = NULL;
-
- cmdline_mode = &fb_helper_conn->cmdline_mode;
- if (cmdline_mode->specified == false)
- return mode;
-
- /* attempt to find a matching mode in the list of modes we have
- * gotten so far; if there is none, add a CVT mode that conforms
- */
- if (cmdline_mode->rb || cmdline_mode->margins)
- goto create_mode;
-
- list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) {
- /* check width/height */
- if (mode->hdisplay != cmdline_mode->xres ||
- mode->vdisplay != cmdline_mode->yres)
- continue;
-
- if (cmdline_mode->refresh_specified) {
- if (mode->vrefresh != cmdline_mode->refresh)
- continue;
- }
-
- if (cmdline_mode->interlace) {
- if (!(mode->flags & DRM_MODE_FLAG_INTERLACE))
- continue;
- }
- return mode;
- }
-
-create_mode:
- if (cmdline_mode->cvt)
- mode = drm_cvt_mode(fb_helper_conn->connector->dev,
- cmdline_mode->xres, cmdline_mode->yres,
- cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
- cmdline_mode->rb, cmdline_mode->interlace,
- cmdline_mode->margins);
- else
- mode = drm_gtf_mode(fb_helper_conn->connector->dev,
- cmdline_mode->xres, cmdline_mode->yres,
- cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
- cmdline_mode->interlace,
- cmdline_mode->margins);
- drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
- list_add(&mode->head, &fb_helper_conn->connector->modes);
- return mode;
-}
-
-static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
-{
- bool enable;
-
- if (strict) {
- enable = connector->status == connector_status_connected;
- } else {
- enable = connector->status != connector_status_disconnected;
- }
- return enable;
-}
-
-static void drm_enable_connectors(struct drm_fb_helper *fb_helper,
- bool *enabled)
-{
- bool any_enabled = false;
- struct drm_connector *connector;
- int i = 0;
-
- for (i = 0; i < fb_helper->connector_count; i++) {
- connector = fb_helper->connector_info[i]->connector;
- enabled[i] = drm_connector_enabled(connector, true);
- DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id,
- enabled[i] ? "yes" : "no");
- any_enabled |= enabled[i];
- }
-
- if (any_enabled)
- return;
-
- for (i = 0; i < fb_helper->connector_count; i++) {
- connector = fb_helper->connector_info[i]->connector;
- enabled[i] = drm_connector_enabled(connector, false);
- }
-}
-
-static bool drm_target_cloned(struct drm_fb_helper *fb_helper,
- struct drm_display_mode **modes,
- bool *enabled, int width, int height)
-{
- int count, i, j;
- bool can_clone = false;
- struct drm_fb_helper_connector *fb_helper_conn;
- struct drm_display_mode *dmt_mode, *mode;
-
- /* only contemplate cloning in the single crtc case */
- if (fb_helper->crtc_count > 1)
- return false;
-
- count = 0;
- for (i = 0; i < fb_helper->connector_count; i++) {
- if (enabled[i])
- count++;
- }
-
- /* only contemplate cloning if more than one connector is enabled */
- if (count <= 1)
- return false;
-
- /* check the command line or if nothing common pick 1024x768 */
- can_clone = true;
- for (i = 0; i < fb_helper->connector_count; i++) {
- if (!enabled[i])
- continue;
- fb_helper_conn = fb_helper->connector_info[i];
- modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height);
- if (!modes[i]) {
- can_clone = false;
- break;
- }
- for (j = 0; j < i; j++) {
- if (!enabled[j])
- continue;
- if (!drm_mode_equal(modes[j], modes[i]))
- can_clone = false;
- }
- }
-
- if (can_clone) {
- DRM_DEBUG_KMS("can clone using command line\n");
- return true;
- }
-
- /* try and find a 1024x768 mode on each connector */
- can_clone = true;
- dmt_mode = drm_mode_find_dmt(fb_helper->dev, 1024, 768, 60);
-
- for (i = 0; i < fb_helper->connector_count; i++) {
-
- if (!enabled[i])
- continue;
-
- fb_helper_conn = fb_helper->connector_info[i];
- list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) {
- if (drm_mode_equal(mode, dmt_mode))
- modes[i] = mode;
- }
- if (!modes[i])
- can_clone = false;
- }
-
- if (can_clone) {
- DRM_DEBUG_KMS("can clone using 1024x768\n");
- return true;
- }
- DRM_INFO("kms: can't enable cloning when we probably wanted to.\n");
- return false;
-}
-
-static bool drm_target_preferred(struct drm_fb_helper *fb_helper,
- struct drm_display_mode **modes,
- bool *enabled, int width, int height)
-{
- struct drm_fb_helper_connector *fb_helper_conn;
- int i;
-
- for (i = 0; i < fb_helper->connector_count; i++) {
- fb_helper_conn = fb_helper->connector_info[i];
-
- if (enabled[i] == false)
- continue;
-
- DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
- fb_helper_conn->connector->base.id);
-
- /* go for the command line mode first */
- modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height);
- if (!modes[i]) {
- DRM_DEBUG_KMS("looking for preferred mode on connector %d\n",
- fb_helper_conn->connector->base.id);
- modes[i] = drm_has_preferred_mode(fb_helper_conn, width, height);
- }
- /* No preferred modes, pick one off the list */
- if (!modes[i] && !list_empty(&fb_helper_conn->connector->modes)) {
- list_for_each_entry(modes[i], &fb_helper_conn->connector->modes, head)
- break;
- }
- DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
- "none");
- }
- return true;
-}
-
-static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
- struct drm_fb_helper_crtc **best_crtcs,
- struct drm_display_mode **modes,
- int n, int width, int height)
-{
- int c, o;
- struct drm_device *dev = fb_helper->dev;
- struct drm_connector *connector;
- struct drm_connector_helper_funcs *connector_funcs;
- struct drm_encoder *encoder;
- struct drm_fb_helper_crtc *best_crtc;
- int my_score, best_score, score;
- struct drm_fb_helper_crtc **crtcs, *crtc;
- struct drm_fb_helper_connector *fb_helper_conn;
-
- if (n == fb_helper->connector_count)
- return 0;
-
- fb_helper_conn = fb_helper->connector_info[n];
- connector = fb_helper_conn->connector;
-
- best_crtcs[n] = NULL;
- best_crtc = NULL;
- best_score = drm_pick_crtcs(fb_helper, best_crtcs, modes, n+1, width, height);
- if (modes[n] == NULL)
- return best_score;
-
- crtcs = kzalloc(dev->mode_config.num_connector *
- sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
- if (!crtcs)
- return best_score;
-
- my_score = 1;
- if (connector->status == connector_status_connected)
- my_score++;
- if (drm_has_cmdline_mode(fb_helper_conn))
- my_score++;
- if (drm_has_preferred_mode(fb_helper_conn, width, height))
- my_score++;
-
- connector_funcs = connector->helper_private;
- encoder = connector_funcs->best_encoder(connector);
- if (!encoder)
- goto out;
-
- /* select a crtc for this connector and then attempt to configure
- remaining connectors */
- for (c = 0; c < fb_helper->crtc_count; c++) {
- crtc = &fb_helper->crtc_info[c];
-
- if ((encoder->possible_crtcs & (1 << c)) == 0) {
- continue;
- }
-
- for (o = 0; o < n; o++)
- if (best_crtcs[o] == crtc)
- break;
-
- if (o < n) {
- /* ignore cloning unless only a single crtc */
- if (fb_helper->crtc_count > 1)
- continue;
-
- if (!drm_mode_equal(modes[o], modes[n]))
- continue;
- }
-
- crtcs[n] = crtc;
- memcpy(crtcs, best_crtcs, n * sizeof(struct drm_fb_helper_crtc *));
- score = my_score + drm_pick_crtcs(fb_helper, crtcs, modes, n + 1,
- width, height);
- if (score > best_score) {
- best_crtc = crtc;
- best_score = score;
- memcpy(best_crtcs, crtcs,
- dev->mode_config.num_connector *
- sizeof(struct drm_fb_helper_crtc *));
- }
- }
-out:
- kfree(crtcs);
- return best_score;
-}
-
-static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
-{
- struct drm_device *dev = fb_helper->dev;
- struct drm_fb_helper_crtc **crtcs;
- struct drm_display_mode **modes;
- struct drm_encoder *encoder;
- struct drm_mode_set *modeset;
- bool *enabled;
- int width, height;
- int i, ret;
-
- DRM_DEBUG_KMS("\n");
-
- width = dev->mode_config.max_width;
- height = dev->mode_config.max_height;
-
- /* clean out all the encoder/crtc combos */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- encoder->crtc = NULL;
- }
-
- crtcs = kcalloc(dev->mode_config.num_connector,
- sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
- modes = kcalloc(dev->mode_config.num_connector,
- sizeof(struct drm_display_mode *), GFP_KERNEL);
- enabled = kcalloc(dev->mode_config.num_connector,
- sizeof(bool), GFP_KERNEL);
-
- drm_enable_connectors(fb_helper, enabled);
-
- ret = drm_target_cloned(fb_helper, modes, enabled, width, height);
- if (!ret) {
- ret = drm_target_preferred(fb_helper, modes, enabled, width, height);
- if (!ret)
- DRM_ERROR("Unable to find initial modes\n");
- }
-
- DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n", width, height);
-
- drm_pick_crtcs(fb_helper, crtcs, modes, 0, width, height);
-
- /* need to set the modesets up here for use later */
- /* fill out the connector<->crtc mappings into the modesets */
- for (i = 0; i < fb_helper->crtc_count; i++) {
- modeset = &fb_helper->crtc_info[i].mode_set;
- modeset->num_connectors = 0;
- }
-
- for (i = 0; i < fb_helper->connector_count; i++) {
- struct drm_display_mode *mode = modes[i];
- struct drm_fb_helper_crtc *fb_crtc = crtcs[i];
-
- if (mode && fb_crtc) {
- modeset = &fb_crtc->mode_set;
- DRM_DEBUG_KMS("desired mode %s set on crtc %d\n",
- mode->name, fb_crtc->mode_set.crtc->base.id);
- fb_crtc->desired_mode = mode;
- if (modeset->mode)
- drm_mode_destroy(dev, modeset->mode);
- modeset->mode = drm_mode_duplicate(dev,
- fb_crtc->desired_mode);
- modeset->connectors[modeset->num_connectors++] = fb_helper->connector_info[i]->connector;
- }
- }
-
- kfree(crtcs);
- kfree(modes);
- kfree(enabled);
-}
-
-/**
- * drm_fb_helper_initial_config - set up a sane initial connector configuration
- * @fb_helper: fb helper to configure
- * @bpp_sel: preferred bits per pixel for the fbdev framebuffer
- *
- * LOCKING:
- * Called at init time, must take mode config lock.
- *
- * Scan the CRTCs and connectors and try to put together an initial setup.
- * At the moment, this is a cloned configuration across all heads with
- * a new framebuffer object as the backing store.
- *
- * RETURNS:
- * Zero if everything went ok, nonzero otherwise.
- */
-bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
-{
- struct drm_device *dev = fb_helper->dev;
- int count = 0;
-
- /* disable all the possible outputs/crtcs before entering KMS mode */
- drm_helper_disable_unused_functions(fb_helper->dev);
-
- drm_fb_helper_parse_command_line(fb_helper);
-
- count = drm_fb_helper_probe_connector_modes(fb_helper,
- dev->mode_config.max_width,
- dev->mode_config.max_height);
- /*
- * we shouldn't end up with no modes here.
- */
- if (count == 0) {
- printk(KERN_INFO "No connectors reported connected with modes\n");
- }
- drm_setup_crtcs(fb_helper);
-
- return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
-}
-EXPORT_SYMBOL(drm_fb_helper_initial_config);
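
Putting the helpers together, this is a minimal sketch of the fbdev bring-up sequence a driver of this era would run at load time: allocate a helper, register it with drm_fb_helper_init(), add connectors, then let drm_fb_helper_initial_config() probe modes and create the console framebuffer. The mydrv_* names are hypothetical and error handling is simplified; the driver supplies .gamma_set, .gamma_get and .fb_probe in its drm_fb_helper_funcs.

#include <linux/slab.h>
#include "drmP.h"
#include "drm_fb_helper.h"

/* hypothetical ops table filled in elsewhere by the driver */
extern struct drm_fb_helper_funcs mydrv_fb_helper_funcs;

static int mydrv_fbdev_init(struct drm_device *dev)
{
	struct drm_fb_helper *helper;
	int ret;

	helper = kzalloc(sizeof(*helper), GFP_KERNEL);
	if (!helper)
		return -ENOMEM;

	helper->funcs = &mydrv_fb_helper_funcs;

	/* one slot per hardware CRTC, up to 4 cloned connectors per CRTC */
	ret = drm_fb_helper_init(dev, helper, dev->mode_config.num_crtc, 4);
	if (ret) {
		kfree(helper);
		return ret;
	}

	drm_fb_helper_single_add_all_connectors(helper);
	drm_fb_helper_initial_config(helper, 32);	/* 32 bpp console */
	return 0;
}
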
-
-bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
-{
- int count = 0;
- u32 max_width, max_height, bpp_sel;
- bool bound = false, crtcs_bound = false;
- struct drm_crtc *crtc;
-
- if (!fb_helper->fb)
- return false;
-
- list_for_each_entry(crtc, &fb_helper->dev->mode_config.crtc_list, head) {
- if (crtc->fb)
- crtcs_bound = true;
- if (crtc->fb == fb_helper->fb)
- bound = true;
- }
-
- if (!bound && crtcs_bound) {
- fb_helper->delayed_hotplug = true;
- return false;
- }
- DRM_DEBUG_KMS("\n");
-
- max_width = fb_helper->fb->width;
- max_height = fb_helper->fb->height;
- bpp_sel = fb_helper->fb->bits_per_pixel;
-
- count = drm_fb_helper_probe_connector_modes(fb_helper, max_width,
- max_height);
- drm_setup_crtcs(fb_helper);
-
- return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
-}
-EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
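/*
 * Illustrative sketch (not part of the removed file): how a KMS driver's
 * fbdev code would typically invoke the two entry points above. The
 * my_fbdev type and the my_* function names are assumptions for the
 * example; the fb_helper is presumed to have been initialised and its
 * connectors added elsewhere in the driver.
 */
struct my_fbdev {
	struct drm_fb_helper helper;	/* hypothetical driver fbdev wrapper */
};

static void my_fbdev_init(struct my_fbdev *fbdev)
{
	/* pick the initial mode set and bind a 32 bpp framebuffer to it */
	drm_fb_helper_initial_config(&fbdev->helper, 32);
}

static void my_output_poll_changed(struct my_fbdev *fbdev)
{
	/* re-probe connectors and re-run mode selection on hotplug */
	drm_fb_helper_hotplug_event(&fbdev->helper);
}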
-
+++ /dev/null
-/**
- * \file drm_fops.c
- * File operations for DRM
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Daryll Strauss <daryll@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "drmP.h"
-#include <linux/poll.h>
-#include <linux/slab.h>
-
-/* from BKL pushdown: note that nothing else serializes idr_find() */
-DEFINE_MUTEX(drm_global_mutex);
-EXPORT_SYMBOL(drm_global_mutex);
-
-static int drm_open_helper(struct inode *inode, struct file *filp,
- struct drm_device * dev);
-
-static int drm_setup(struct drm_device * dev)
-{
- int i;
- int ret;
-
- if (dev->driver->firstopen) {
- ret = dev->driver->firstopen(dev);
- if (ret != 0)
- return ret;
- }
-
- atomic_set(&dev->ioctl_count, 0);
- atomic_set(&dev->vma_count, 0);
-
- if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
- !drm_core_check_feature(dev, DRIVER_MODESET)) {
- dev->buf_use = 0;
- atomic_set(&dev->buf_alloc, 0);
-
- i = drm_dma_setup(dev);
- if (i < 0)
- return i;
- }
-
- for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
- atomic_set(&dev->counts[i], 0);
-
- dev->sigdata.lock = NULL;
-
- dev->queue_count = 0;
- dev->queue_reserved = 0;
- dev->queue_slots = 0;
- dev->queuelist = NULL;
- dev->context_flag = 0;
- dev->interrupt_flag = 0;
- dev->dma_flag = 0;
- dev->last_context = 0;
- dev->last_switch = 0;
- dev->last_checked = 0;
- init_waitqueue_head(&dev->context_wait);
- dev->if_version = 0;
-
- dev->ctx_start = 0;
- dev->lck_start = 0;
-
- dev->buf_async = NULL;
- init_waitqueue_head(&dev->buf_readers);
- init_waitqueue_head(&dev->buf_writers);
-
- DRM_DEBUG("\n");
-
- /*
- * The kernel's context could be created here, but is now created
- * in drm_dma_enqueue. This is more resource-efficient for
- * hardware that does not do DMA, but may mean that
- * drm_select_queue fails between the time the interrupt is
- * initialized and the time the queues are initialized.
- */
-
- return 0;
-}
-
-/**
- * Open file.
- *
- * \param inode device inode
- * \param filp file pointer.
- * \return zero on success or a negative number on failure.
- *
- * Searches for the DRM device with the same minor number, calls
- * drm_open_helper(), and increments the device open count. If the open count
- * was previously zero, i.e., this is the first time the device is opened,
- * then calls setup().
- */
-int drm_open(struct inode *inode, struct file *filp)
-{
- struct drm_device *dev = NULL;
- int minor_id = iminor(inode);
- struct drm_minor *minor;
- int retcode = 0;
-
- minor = idr_find(&drm_minors_idr, minor_id);
- if (!minor)
- return -ENODEV;
-
- if (!(dev = minor->dev))
- return -ENODEV;
-
- retcode = drm_open_helper(inode, filp, dev);
- if (!retcode) {
- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
- if (!dev->open_count++)
- retcode = drm_setup(dev);
- }
- if (!retcode) {
- mutex_lock(&dev->struct_mutex);
- if (minor->type == DRM_MINOR_LEGACY) {
- if (dev->dev_mapping == NULL)
- dev->dev_mapping = inode->i_mapping;
- else if (dev->dev_mapping != inode->i_mapping)
- retcode = -ENODEV;
- }
- mutex_unlock(&dev->struct_mutex);
- }
-
- return retcode;
-}
-EXPORT_SYMBOL(drm_open);
-
-/**
- * File \c open operation.
- *
- * \param inode device inode.
- * \param filp file pointer.
- *
- * Puts the dev->fops corresponding to the device minor number into
- * \p filp, calls the \c open method, and restores the file operations.
- */
-int drm_stub_open(struct inode *inode, struct file *filp)
-{
- struct drm_device *dev = NULL;
- struct drm_minor *minor;
- int minor_id = iminor(inode);
- int err = -ENODEV;
- const struct file_operations *old_fops;
-
- DRM_DEBUG("\n");
-
- mutex_lock(&drm_global_mutex);
- minor = idr_find(&drm_minors_idr, minor_id);
- if (!minor)
- goto out;
-
- if (!(dev = minor->dev))
- goto out;
-
- old_fops = filp->f_op;
- filp->f_op = fops_get(&dev->driver->fops);
- if (filp->f_op == NULL) {
- filp->f_op = old_fops;
- goto out;
- }
- if (filp->f_op->open && (err = filp->f_op->open(inode, filp))) {
- fops_put(filp->f_op);
- filp->f_op = fops_get(old_fops);
- }
- fops_put(old_fops);
-
-out:
- mutex_unlock(&drm_global_mutex);
- return err;
-}
-
-/**
- * Check whether DRI will run on this CPU.
- *
- * \return non-zero if the DRI will run on this CPU, or zero otherwise.
- */
-static int drm_cpu_valid(void)
-{
-#if defined(__i386__)
- if (boot_cpu_data.x86 == 3)
- return 0; /* No cmpxchg on a 386 */
-#endif
-#if defined(__sparc__) && !defined(__sparc_v9__)
- return 0; /* No cmpxchg before v9 sparc. */
-#endif
- return 1;
-}
-
-/**
- * Called whenever a process opens /dev/drm.
- *
- * \param inode device inode.
- * \param filp file pointer.
- * \param dev device.
- * \return zero on success or a negative number on failure.
- *
- * Creates and initializes a drm_file structure for the file private data in \p
- * filp and adds it into the doubly linked list in \p dev.
- */
-static int drm_open_helper(struct inode *inode, struct file *filp,
- struct drm_device * dev)
-{
- int minor_id = iminor(inode);
- struct drm_file *priv;
- int ret;
-
- if (filp->f_flags & O_EXCL)
- return -EBUSY; /* No exclusive opens */
- if (!drm_cpu_valid())
- return -EINVAL;
-
- DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id);
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- filp->private_data = priv;
- priv->filp = filp;
- priv->uid = current_euid();
- priv->pid = task_pid_nr(current);
- priv->minor = idr_find(&drm_minors_idr, minor_id);
- priv->ioctl_count = 0;
- /* for compatibility root is always authenticated */
- priv->authenticated = capable(CAP_SYS_ADMIN);
- priv->lock_count = 0;
-
- INIT_LIST_HEAD(&priv->lhead);
- INIT_LIST_HEAD(&priv->fbs);
- INIT_LIST_HEAD(&priv->event_list);
- init_waitqueue_head(&priv->event_wait);
- priv->event_space = 4096; /* set aside 4k for event buffer */
-
- if (dev->driver->driver_features & DRIVER_GEM)
- drm_gem_open(dev, priv);
-
- if (dev->driver->open) {
- ret = dev->driver->open(dev, priv);
- if (ret < 0)
- goto out_free;
- }
-
-
- /* if there is no current master make this fd it */
- mutex_lock(&dev->struct_mutex);
- if (!priv->minor->master) {
- /* create a new master */
- priv->minor->master = drm_master_create(priv->minor);
- if (!priv->minor->master) {
- mutex_unlock(&dev->struct_mutex);
- ret = -ENOMEM;
- goto out_free;
- }
-
- priv->is_master = 1;
- /* take another reference for the copy in the local file priv */
- priv->master = drm_master_get(priv->minor->master);
-
- priv->authenticated = 1;
-
- mutex_unlock(&dev->struct_mutex);
- if (dev->driver->master_create) {
- ret = dev->driver->master_create(dev, priv->master);
- if (ret) {
- mutex_lock(&dev->struct_mutex);
- /* drop both references if this fails */
- drm_master_put(&priv->minor->master);
- drm_master_put(&priv->master);
- mutex_unlock(&dev->struct_mutex);
- goto out_free;
- }
- }
- mutex_lock(&dev->struct_mutex);
- if (dev->driver->master_set) {
- ret = dev->driver->master_set(dev, priv, true);
- if (ret) {
- /* drop both references if this fails */
- drm_master_put(&priv->minor->master);
- drm_master_put(&priv->master);
- mutex_unlock(&dev->struct_mutex);
- goto out_free;
- }
- }
- mutex_unlock(&dev->struct_mutex);
- } else {
- /* get a reference to the master */
- priv->master = drm_master_get(priv->minor->master);
- mutex_unlock(&dev->struct_mutex);
- }
-
- mutex_lock(&dev->struct_mutex);
- list_add(&priv->lhead, &dev->filelist);
- mutex_unlock(&dev->struct_mutex);
-
-#ifdef __alpha__
- /*
- * Default the hose
- */
- if (!dev->hose) {
- struct pci_dev *pci_dev;
- pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
- if (pci_dev) {
- dev->hose = pci_dev->sysdata;
- pci_dev_put(pci_dev);
- }
- if (!dev->hose) {
- struct pci_bus *b = pci_bus_b(pci_root_buses.next);
- if (b)
- dev->hose = b->sysdata;
- }
- }
-#endif
-
- return 0;
- out_free:
- kfree(priv);
- filp->private_data = NULL;
- return ret;
-}
-
-/** No-op. */
-int drm_fasync(int fd, struct file *filp, int on)
-{
- struct drm_file *priv = filp->private_data;
- struct drm_device *dev = priv->minor->dev;
-
- DRM_DEBUG("fd = %d, device = 0x%lx\n", fd,
- (long)old_encode_dev(priv->minor->device));
- return fasync_helper(fd, filp, on, &dev->buf_async);
-}
-EXPORT_SYMBOL(drm_fasync);
-
-/*
- * Reclaim locked buffers; note that this may be a bad idea if the current
- * context doesn't have the hw lock...
- */
-static void drm_reclaim_locked_buffers(struct drm_device *dev, struct file *f)
-{
- struct drm_file *file_priv = f->private_data;
-
- if (drm_i_have_hw_lock(dev, file_priv)) {
- dev->driver->reclaim_buffers_locked(dev, file_priv);
- } else {
- unsigned long _end = jiffies + 3 * DRM_HZ;
- int locked = 0;
-
- drm_idlelock_take(&file_priv->master->lock);
-
- /*
- * Wait for a while.
- */
- do {
- spin_lock_bh(&file_priv->master->lock.spinlock);
- locked = file_priv->master->lock.idle_has_lock;
- spin_unlock_bh(&file_priv->master->lock.spinlock);
- if (locked)
- break;
- schedule();
- } while (!time_after_eq(jiffies, _end));
-
- if (!locked) {
- DRM_ERROR("reclaim_buffers_locked() deadlock. Please rework this\n"
- "\tdriver to use reclaim_buffers_idlelocked() instead.\n"
- "\tI will go on reclaiming the buffers anyway.\n");
- }
-
- dev->driver->reclaim_buffers_locked(dev, file_priv);
- drm_idlelock_release(&file_priv->master->lock);
- }
-}
-
-static void drm_master_release(struct drm_device *dev, struct file *filp)
-{
- struct drm_file *file_priv = filp->private_data;
-
- if (dev->driver->reclaim_buffers_locked &&
- file_priv->master->lock.hw_lock)
- drm_reclaim_locked_buffers(dev, filp);
-
- if (dev->driver->reclaim_buffers_idlelocked &&
- file_priv->master->lock.hw_lock) {
- drm_idlelock_take(&file_priv->master->lock);
- dev->driver->reclaim_buffers_idlelocked(dev, file_priv);
- drm_idlelock_release(&file_priv->master->lock);
- }
-
-
- if (drm_i_have_hw_lock(dev, file_priv)) {
- DRM_DEBUG("File %p released, freeing lock for context %d\n",
- filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
- drm_lock_free(&file_priv->master->lock,
- _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
- }
-
- if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
- !dev->driver->reclaim_buffers_locked) {
- dev->driver->reclaim_buffers(dev, file_priv);
- }
-}
-
-static void drm_events_release(struct drm_file *file_priv)
-{
- struct drm_device *dev = file_priv->minor->dev;
- struct drm_pending_event *e, *et;
- struct drm_pending_vblank_event *v, *vt;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->event_lock, flags);
-
- /* Remove pending flips */
- list_for_each_entry_safe(v, vt, &dev->vblank_event_list, base.link)
- if (v->base.file_priv == file_priv) {
- list_del(&v->base.link);
- drm_vblank_put(dev, v->pipe);
- v->base.destroy(&v->base);
- }
-
- /* Remove unconsumed events */
- list_for_each_entry_safe(e, et, &file_priv->event_list, link)
- e->destroy(e);
-
- spin_unlock_irqrestore(&dev->event_lock, flags);
-}
-
-/**
- * Release file.
- *
- * \param inode device inode
- * \param file_priv DRM file private.
- * \return zero on success or a negative number on failure.
- *
- * If the hardware lock is held then free it, and take it again for the kernel
- * context since it's necessary to reclaim buffers. Unlink the file private
- * data from its list and free it. Decreases the open count and if it reaches
- * zero calls drm_lastclose().
- */
-int drm_release(struct inode *inode, struct file *filp)
-{
- struct drm_file *file_priv = filp->private_data;
- struct drm_device *dev = file_priv->minor->dev;
- int retcode = 0;
-
- mutex_lock(&drm_global_mutex);
-
- DRM_DEBUG("open_count = %d\n", dev->open_count);
-
- if (dev->driver->preclose)
- dev->driver->preclose(dev, file_priv);
-
- /* ========================================================
- * Begin inline drm_release
- */
-
- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
- task_pid_nr(current),
- (long)old_encode_dev(file_priv->minor->device),
- dev->open_count);
-
- /* if the master has gone away we can't do anything with the lock */
- if (file_priv->minor->master)
- drm_master_release(dev, filp);
-
- drm_events_release(file_priv);
-
- if (dev->driver->driver_features & DRIVER_GEM)
- drm_gem_release(dev, file_priv);
-
- if (dev->driver->driver_features & DRIVER_MODESET)
- drm_fb_release(file_priv);
-
- mutex_lock(&dev->ctxlist_mutex);
- if (!list_empty(&dev->ctxlist)) {
- struct drm_ctx_list *pos, *n;
-
- list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
- if (pos->tag == file_priv &&
- pos->handle != DRM_KERNEL_CONTEXT) {
- if (dev->driver->context_dtor)
- dev->driver->context_dtor(dev,
- pos->handle);
-
- drm_ctxbitmap_free(dev, pos->handle);
-
- list_del(&pos->head);
- kfree(pos);
- --dev->ctx_count;
- }
- }
- }
- mutex_unlock(&dev->ctxlist_mutex);
-
- mutex_lock(&dev->struct_mutex);
-
- if (file_priv->is_master) {
- struct drm_master *master = file_priv->master;
- struct drm_file *temp;
- list_for_each_entry(temp, &dev->filelist, lhead) {
- if ((temp->master == file_priv->master) &&
- (temp != file_priv))
- temp->authenticated = 0;
- }
-
- /*
- * Since the master is going away, so is the
- * possibility to lock.
- */
-
- if (master->lock.hw_lock) {
- if (dev->sigdata.lock == master->lock.hw_lock)
- dev->sigdata.lock = NULL;
- master->lock.hw_lock = NULL;
- master->lock.file_priv = NULL;
- wake_up_interruptible_all(&master->lock.lock_queue);
- }
-
- if (file_priv->minor->master == file_priv->master) {
- /* drop the reference held by the minor */
- if (dev->driver->master_drop)
- dev->driver->master_drop(dev, file_priv, true);
- drm_master_put(&file_priv->minor->master);
- }
- }
-
- /* drop the reference held by the file priv */
- drm_master_put(&file_priv->master);
- file_priv->is_master = 0;
- list_del(&file_priv->lhead);
- mutex_unlock(&dev->struct_mutex);
-
- if (dev->driver->postclose)
- dev->driver->postclose(dev, file_priv);
- kfree(file_priv);
-
- /* ========================================================
- * End inline drm_release
- */
-
- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
- if (!--dev->open_count) {
- if (atomic_read(&dev->ioctl_count)) {
- DRM_ERROR("Device busy: %d\n",
- atomic_read(&dev->ioctl_count));
- retcode = -EBUSY;
- } else
- retcode = drm_lastclose(dev);
- }
- mutex_unlock(&drm_global_mutex);
-
- return retcode;
-}
-EXPORT_SYMBOL(drm_release);
-
-static bool
-drm_dequeue_event(struct drm_file *file_priv,
- size_t total, size_t max, struct drm_pending_event **out)
-{
- struct drm_device *dev = file_priv->minor->dev;
- struct drm_pending_event *e;
- unsigned long flags;
- bool ret = false;
-
- spin_lock_irqsave(&dev->event_lock, flags);
-
- *out = NULL;
- if (list_empty(&file_priv->event_list))
- goto out;
- e = list_first_entry(&file_priv->event_list,
- struct drm_pending_event, link);
- if (e->event->length + total > max)
- goto out;
-
- file_priv->event_space += e->event->length;
- list_del(&e->link);
- *out = e;
- ret = true;
-
-out:
- spin_unlock_irqrestore(&dev->event_lock, flags);
- return ret;
-}
-
-ssize_t drm_read(struct file *filp, char __user *buffer,
- size_t count, loff_t *offset)
-{
- struct drm_file *file_priv = filp->private_data;
- struct drm_pending_event *e;
- size_t total;
- ssize_t ret;
-
- ret = wait_event_interruptible(file_priv->event_wait,
- !list_empty(&file_priv->event_list));
- if (ret < 0)
- return ret;
-
- total = 0;
- while (drm_dequeue_event(file_priv, total, count, &e)) {
- if (copy_to_user(buffer + total,
- e->event, e->event->length)) {
- total = -EFAULT;
- break;
- }
-
- total += e->event->length;
- e->destroy(e);
- }
-
- return total;
-}
-EXPORT_SYMBOL(drm_read);
-
-unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)
-{
- struct drm_file *file_priv = filp->private_data;
- unsigned int mask = 0;
-
- poll_wait(filp, &file_priv->event_wait, wait);
-
- if (!list_empty(&file_priv->event_list))
- mask |= POLLIN | POLLRDNORM;
-
- return mask;
-}
-EXPORT_SYMBOL(drm_poll);
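/*
 * Illustrative sketch (not part of the removed file): the file_operations
 * block a DRM driver of this vintage typically embeds in its drm_driver so
 * that the entry points above are reached. The my_driver name is an
 * assumption, and real drivers fill in many more drm_driver fields.
 */
static struct drm_driver my_driver = {
	.fops = {
		.owner = THIS_MODULE,
		.open = drm_open,		/* first open runs drm_setup() */
		.release = drm_release,		/* last close runs drm_lastclose() */
		.unlocked_ioctl = drm_ioctl,
		.mmap = drm_gem_mmap,		/* or drm_mmap for non-GEM drivers */
		.poll = drm_poll,		/* event wakeups */
		.read = drm_read,		/* drains the per-file event queue */
		.fasync = drm_fasync,
	},
};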
+++ /dev/null
-/*
- * Copyright © 2008 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- * Eric Anholt <eric@anholt.net>
- *
- */
-
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/uaccess.h>
-#include <linux/fs.h>
-#include <linux/file.h>
-#include <linux/module.h>
-#include <linux/mman.h>
-#include <linux/pagemap.h>
-#include "drmP.h"
-
-/** @file drm_gem.c
- *
- * This file provides some of the base ioctls and library routines for
- * the graphics memory manager implemented by each device driver.
- *
- * Because various devices have different requirements in terms of
- * synchronization and migration strategies, implementing that is left up to
- * the driver, and all that the general API provides should be generic --
- * allocating objects, reading/writing data with the cpu, freeing objects.
- * Even there, platform-dependent optimizations for reading/writing data with
- * the CPU mean we'll likely hook those out to driver-specific calls. However,
- * the DRI2 implementation wants to have at least allocate/mmap be generic.
- *
- * The goal was to have swap-backed object allocation managed through
- * struct file. However, file descriptors as handles to a struct file have
- * two major failings:
- * - Process limits prevent more than 1024 or so being used at a time by
- * default.
- * - Inability to allocate high fds will aggravate the X Server's select()
- * handling, and likely that of many GL client applications as well.
- *
- * This led to a plan of using our own integer IDs (called handles, following
- * DRM terminology) to mimic fds, and implement the fd syscalls we need as
- * ioctls. The objects themselves will still include the struct file so
- * that we can transition to fds if the required kernel infrastructure shows
- * up at a later date, and as our interface with shmfs for memory allocation.
- */
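/*
 * Illustrative sketch (not part of the removed file): the shape of a driver
 * ioctl that creates an object and hands a handle back to userspace using
 * the helpers below. struct my_create_args and my_create_ioctl are
 * assumptions for the example; argument validation is omitted and size is
 * presumed page-aligned.
 */
struct my_create_args {
	uint64_t size;		/* in: object size in bytes (page-aligned) */
	uint32_t handle;	/* out: per-file handle */
};

static int my_create_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct my_create_args *args = data;
	struct drm_gem_object *obj;
	u32 handle;
	int ret;

	/* shmfs-backed allocation; see drm_gem_object_alloc() below */
	obj = drm_gem_object_alloc(dev, args->size);
	if (obj == NULL)
		return -ENOMEM;

	/* publish a handle; this takes its own reference on the object */
	ret = drm_gem_handle_create(file_priv, obj, &handle);
	/* drop the allocation reference; the handle now keeps the object alive */
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	return 0;
}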
-
-/*
- * We make up offsets for buffer objects so we can recognize them at
- * mmap time.
- */
-
-/* pgoff in mmap is an unsigned long, so we need to make sure that
- * the faked up offset will fit
- */
-
-#if BITS_PER_LONG == 64
-#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
-#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
-#else
-#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
-#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
-#endif
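/*
 * Added note: with the usual 4 KB pages (PAGE_SHIFT == 12) these constants
 * start the fake offsets at page 0x100000, i.e. byte offset 4 GB, and span
 * roughly 64 GB of offset space on 64-bit kernels; on 32-bit kernels they
 * start at 256 MB and span a little under 4 GB. Other page sizes shift the
 * figures accordingly.
 */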
-
-/**
- * Initialize the GEM device fields
- */
-
-int
-drm_gem_init(struct drm_device *dev)
-{
- struct drm_gem_mm *mm;
-
- spin_lock_init(&dev->object_name_lock);
- idr_init(&dev->object_name_idr);
-
- mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
- if (!mm) {
- DRM_ERROR("out of memory\n");
- return -ENOMEM;
- }
-
- dev->mm_private = mm;
-
- if (drm_ht_create(&mm->offset_hash, 19)) {
- kfree(mm);
- return -ENOMEM;
- }
-
- if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
- DRM_FILE_PAGE_OFFSET_SIZE)) {
- drm_ht_remove(&mm->offset_hash);
- kfree(mm);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-void
-drm_gem_destroy(struct drm_device *dev)
-{
- struct drm_gem_mm *mm = dev->mm_private;
-
- drm_mm_takedown(&mm->offset_manager);
- drm_ht_remove(&mm->offset_hash);
- kfree(mm);
- dev->mm_private = NULL;
-}
-
-/**
- * Initialize an already allocated GEM object of the specified size with
- * shmfs backing store.
- */
-int drm_gem_object_init(struct drm_device *dev,
- struct drm_gem_object *obj, size_t size)
-{
- BUG_ON((size & (PAGE_SIZE - 1)) != 0);
-
- obj->dev = dev;
- obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
- if (IS_ERR(obj->filp))
- return -ENOMEM;
-
- kref_init(&obj->refcount);
- atomic_set(&obj->handle_count, 0);
- obj->size = size;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_gem_object_init);
-
-/**
- * Allocate a GEM object of the specified size with shmfs backing store
- */
-struct drm_gem_object *
-drm_gem_object_alloc(struct drm_device *dev, size_t size)
-{
- struct drm_gem_object *obj;
-
- obj = kzalloc(sizeof(*obj), GFP_KERNEL);
- if (!obj)
- goto free;
-
- if (drm_gem_object_init(dev, obj, size) != 0)
- goto free;
-
- if (dev->driver->gem_init_object != NULL &&
- dev->driver->gem_init_object(obj) != 0) {
- goto fput;
- }
- return obj;
-fput:
- /* Object_init mangles the global counters - readjust them. */
- fput(obj->filp);
-free:
- kfree(obj);
- return NULL;
-}
-EXPORT_SYMBOL(drm_gem_object_alloc);
-
-/**
- * Removes the handle-to-object mapping for this file.
- */
-static int
-drm_gem_handle_delete(struct drm_file *filp, u32 handle)
-{
- struct drm_device *dev;
- struct drm_gem_object *obj;
-
- /* This is gross. The idr system doesn't let us try a delete and
- * return an error code. It just spews if you fail at deleting.
- * So, we have to grab a lock around finding the object and then
- * doing the delete on it and dropping the refcount, or the user
- * could race us to double-decrement the refcount and cause a
- * use-after-free later. Given the frequency of our handle lookups,
- * we may want to use ida for number allocation and a hash table
- * for the pointers, anyway.
- */
- spin_lock(&filp->table_lock);
-
- /* Check if we currently have a reference on the object */
- obj = idr_find(&filp->object_idr, handle);
- if (obj == NULL) {
- spin_unlock(&filp->table_lock);
- return -EINVAL;
- }
- dev = obj->dev;
-
- /* Release reference and decrement refcount. */
- idr_remove(&filp->object_idr, handle);
- spin_unlock(&filp->table_lock);
-
- drm_gem_object_handle_unreference_unlocked(obj);
-
- return 0;
-}
-
-/**
- * Create a handle for this object. This adds a handle reference
- * to the object, which includes a regular reference count. Callers
- * will likely want to drop their reference to the object afterwards.
- */
-int
-drm_gem_handle_create(struct drm_file *file_priv,
- struct drm_gem_object *obj,
- u32 *handlep)
-{
- int ret;
-
- /*
- * Get the user-visible handle using idr.
- */
-again:
- /* ensure there is space available to allocate a handle */
- if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
- return -ENOMEM;
-
- /* do the allocation under our spinlock */
- spin_lock(&file_priv->table_lock);
- ret = idr_get_new_above(&file_priv->object_idr, obj, 1, (int *)handlep);
- spin_unlock(&file_priv->table_lock);
- if (ret == -EAGAIN)
- goto again;
-
- if (ret != 0)
- return ret;
-
- drm_gem_object_handle_reference(obj);
- return 0;
-}
-EXPORT_SYMBOL(drm_gem_handle_create);
-
-/** Returns a reference to the object named by the handle. */
-struct drm_gem_object *
-drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
- u32 handle)
-{
- struct drm_gem_object *obj;
-
- spin_lock(&filp->table_lock);
-
- /* Check if we currently have a reference on the object */
- obj = idr_find(&filp->object_idr, handle);
- if (obj == NULL) {
- spin_unlock(&filp->table_lock);
- return NULL;
- }
-
- drm_gem_object_reference(obj);
-
- spin_unlock(&filp->table_lock);
-
- return obj;
-}
-EXPORT_SYMBOL(drm_gem_object_lookup);
-
-/**
- * Releases the handle to an mm object.
- */
-int
-drm_gem_close_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_gem_close *args = data;
- int ret;
-
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
- ret = drm_gem_handle_delete(file_priv, args->handle);
-
- return ret;
-}
-
-/**
- * Create a global name for an object, returning the name.
- *
- * Note that the name does not hold a reference; when the object
- * is freed, the name goes away.
- */
-int
-drm_gem_flink_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_gem_flink *args = data;
- struct drm_gem_object *obj;
- int ret;
-
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL)
- return -ENOENT;
-
-again:
- if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) {
- ret = -ENOMEM;
- goto err;
- }
-
- spin_lock(&dev->object_name_lock);
- if (!obj->name) {
- ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
- &obj->name);
- args->name = (uint64_t) obj->name;
- spin_unlock(&dev->object_name_lock);
-
- if (ret == -EAGAIN)
- goto again;
-
- if (ret != 0)
- goto err;
-
- /* Allocate a reference for the name table. */
- drm_gem_object_reference(obj);
- } else {
- args->name = (uint64_t) obj->name;
- spin_unlock(&dev->object_name_lock);
- ret = 0;
- }
-
-err:
- drm_gem_object_unreference_unlocked(obj);
- return ret;
-}
-
-/**
- * Open an object using the global name, returning a handle and the size.
- *
- * This handle (of course) holds a reference to the object, so the object
- * will not go away until the handle is deleted.
- */
-int
-drm_gem_open_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_gem_open *args = data;
- struct drm_gem_object *obj;
- int ret;
- u32 handle;
-
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
- spin_lock(&dev->object_name_lock);
- obj = idr_find(&dev->object_name_idr, (int) args->name);
- if (obj)
- drm_gem_object_reference(obj);
- spin_unlock(&dev->object_name_lock);
- if (!obj)
- return -ENOENT;
-
- ret = drm_gem_handle_create(file_priv, obj, &handle);
- drm_gem_object_unreference_unlocked(obj);
- if (ret)
- return ret;
-
- args->handle = handle;
- args->size = obj->size;
-
- return 0;
-}
-
-/**
- * Called at device open time, sets up the structure for handling refcounting
- * of mm objects.
- */
-void
-drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
-{
- idr_init(&file_private->object_idr);
- spin_lock_init(&file_private->table_lock);
-}
-
-/**
- * Called at device close to release the file's
- * handle references on objects.
- */
-static int
-drm_gem_object_release_handle(int id, void *ptr, void *data)
-{
- struct drm_gem_object *obj = ptr;
-
- drm_gem_object_handle_unreference_unlocked(obj);
-
- return 0;
-}
-
-/**
- * Called at close time when the filp is going away.
- *
- * Releases any remaining references on objects by this filp.
- */
-void
-drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
-{
- idr_for_each(&file_private->object_idr,
- &drm_gem_object_release_handle, NULL);
-
- idr_remove_all(&file_private->object_idr);
- idr_destroy(&file_private->object_idr);
-}
-
-void
-drm_gem_object_release(struct drm_gem_object *obj)
-{
- fput(obj->filp);
-}
-EXPORT_SYMBOL(drm_gem_object_release);
-
-/**
- * Called after the last reference to the object has been lost.
- * Must be called holding struct_mutex.
- *
- * Frees the object
- */
-void
-drm_gem_object_free(struct kref *kref)
-{
- struct drm_gem_object *obj = (struct drm_gem_object *) kref;
- struct drm_device *dev = obj->dev;
-
- BUG_ON(!mutex_is_locked(&dev->struct_mutex));
-
- if (dev->driver->gem_free_object != NULL)
- dev->driver->gem_free_object(obj);
-}
-EXPORT_SYMBOL(drm_gem_object_free);
-
-static void drm_gem_object_ref_bug(struct kref *list_kref)
-{
- BUG();
-}
-
-/**
- * Called after the last handle to the object has been closed
- *
- * Removes any name for the object. Note that this must be
- * called before drm_gem_object_free or we'll be touching
- * freed memory
- */
-void drm_gem_object_handle_free(struct drm_gem_object *obj)
-{
- struct drm_device *dev = obj->dev;
-
- /* Remove any name for this object */
- spin_lock(&dev->object_name_lock);
- if (obj->name) {
- idr_remove(&dev->object_name_idr, obj->name);
- obj->name = 0;
- spin_unlock(&dev->object_name_lock);
- /*
- * The object name held a reference to this object, drop
- * that now.
- *
- * This cannot be the last reference, since the handle holds one too.
- */
- kref_put(&obj->refcount, drm_gem_object_ref_bug);
- } else
- spin_unlock(&dev->object_name_lock);
-
-}
-EXPORT_SYMBOL(drm_gem_object_handle_free);
-
-void drm_gem_vm_open(struct vm_area_struct *vma)
-{
- struct drm_gem_object *obj = vma->vm_private_data;
-
- drm_gem_object_reference(obj);
-
- mutex_lock(&obj->dev->struct_mutex);
- drm_vm_open_locked(vma);
- mutex_unlock(&obj->dev->struct_mutex);
-}
-EXPORT_SYMBOL(drm_gem_vm_open);
-
-void drm_gem_vm_close(struct vm_area_struct *vma)
-{
- struct drm_gem_object *obj = vma->vm_private_data;
-
- mutex_lock(&obj->dev->struct_mutex);
- drm_vm_close_locked(vma);
- drm_gem_object_unreference(obj);
- mutex_unlock(&obj->dev->struct_mutex);
-}
-EXPORT_SYMBOL(drm_gem_vm_close);
-
-
-/**
- * drm_gem_mmap - memory map routine for GEM objects
- * @filp: DRM file pointer
- * @vma: VMA for the area to be mapped
- *
- * If a driver supports GEM object mapping, mmap calls on the DRM file
- * descriptor will end up here.
- *
- * If we find the object based on the offset passed in (vma->vm_pgoff will
- * contain the fake offset we created when the GTT map ioctl was called on
- * the object), we set up the driver fault handler so that any accesses
- * to the object can be trapped, to perform migration, GTT binding, surface
- * register allocation, or performance monitoring.
- */
-int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- struct drm_file *priv = filp->private_data;
- struct drm_device *dev = priv->minor->dev;
- struct drm_gem_mm *mm = dev->mm_private;
- struct drm_local_map *map = NULL;
- struct drm_gem_object *obj;
- struct drm_hash_item *hash;
- int ret = 0;
-
- mutex_lock(&dev->struct_mutex);
-
- if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
- mutex_unlock(&dev->struct_mutex);
- return drm_mmap(filp, vma);
- }
-
- map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
- if (!map ||
- ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
- ret = -EPERM;
- goto out_unlock;
- }
-
- /* Check for valid size. */
- if (map->size < vma->vm_end - vma->vm_start) {
- ret = -EINVAL;
- goto out_unlock;
- }
-
- obj = map->handle;
- if (!obj->dev->driver->gem_vm_ops) {
- ret = -EINVAL;
- goto out_unlock;
- }
-
- vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
- vma->vm_ops = obj->dev->driver->gem_vm_ops;
- vma->vm_private_data = map->handle;
- vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
-
- /* Take a ref for this mapping of the object, so that the fault
- * handler can dereference the mmap offset's pointer to the object.
- * This reference is cleaned up by the corresponding vm_close
- * (which should happen whether the vma was created by this call, or
- * by a vm_open due to mremap or partial unmap or whatever).
- */
- drm_gem_object_reference(obj);
-
- vma->vm_file = filp; /* Needed for drm_vm_open() */
- drm_vm_open_locked(vma);
-
-out_unlock:
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
-}
-EXPORT_SYMBOL(drm_gem_mmap);
+++ /dev/null
-/**************************************************************************
- *
- * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
- */
-
-#include <linux/mutex.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include "drm_global.h"
-
-struct drm_global_item {
- struct mutex mutex;
- void *object;
- int refcount;
-};
-
-static struct drm_global_item glob[DRM_GLOBAL_NUM];
-
-void drm_global_init(void)
-{
- int i;
-
- for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
- struct drm_global_item *item = &glob[i];
- mutex_init(&item->mutex);
- item->object = NULL;
- item->refcount = 0;
- }
-}
-
-void drm_global_release(void)
-{
- int i;
- for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
- struct drm_global_item *item = &glob[i];
- BUG_ON(item->object != NULL);
- BUG_ON(item->refcount != 0);
- }
-}
-
-int drm_global_item_ref(struct drm_global_reference *ref)
-{
- int ret;
- struct drm_global_item *item = &glob[ref->global_type];
- void *object;
-
- mutex_lock(&item->mutex);
- if (item->refcount == 0) {
- item->object = kzalloc(ref->size, GFP_KERNEL);
- if (unlikely(item->object == NULL)) {
- ret = -ENOMEM;
- goto out_err;
- }
-
- ref->object = item->object;
- ret = ref->init(ref);
- if (unlikely(ret != 0))
- goto out_err;
-
- }
- ++item->refcount;
- ref->object = item->object;
- object = item->object;
- mutex_unlock(&item->mutex);
- return 0;
-out_err:
- mutex_unlock(&item->mutex);
- item->object = NULL;
- return ret;
-}
-EXPORT_SYMBOL(drm_global_item_ref);
-
-void drm_global_item_unref(struct drm_global_reference *ref)
-{
- struct drm_global_item *item = &glob[ref->global_type];
-
- mutex_lock(&item->mutex);
- BUG_ON(item->refcount == 0);
- BUG_ON(ref->object != item->object);
- if (--item->refcount == 0) {
- ref->release(ref);
- item->object = NULL;
- }
- mutex_unlock(&item->mutex);
-}
-EXPORT_SYMBOL(drm_global_item_unref);
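/*
 * Illustrative sketch (not part of the removed file): a driver sharing one
 * global object through this interface. The DRM_GLOBAL_TTM_MEM slot, the
 * my_* names and struct my_global_state are assumptions for the example;
 * drivers pair each successful drm_global_item_ref() with a later
 * drm_global_item_unref() on the same reference.
 */
struct my_global_state {
	int users;		/* hypothetical shared state */
};

static int my_global_init(struct drm_global_reference *ref)
{
	struct my_global_state *state = ref->object;	/* freshly zeroed */

	state->users = 0;
	return 0;
}

static void my_global_release(struct drm_global_reference *ref)
{
	/* undo whatever my_global_init() set up; nothing to do here */
}

static int my_driver_take_global(struct drm_global_reference *ref)
{
	ref->global_type = DRM_GLOBAL_TTM_MEM;	/* assumed global slot */
	ref->size = sizeof(struct my_global_state);
	ref->init = &my_global_init;
	ref->release = &my_global_release;
	return drm_global_item_ref(ref);
}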
-
+++ /dev/null
-/**************************************************************************
- *
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- *
- **************************************************************************/
-/*
- * Simple open hash tab implementation.
- *
- * Authors:
- * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- */
-
-#include "drmP.h"
-#include "drm_hashtab.h"
-#include <linux/hash.h>
-#include <linux/slab.h>
-
-int drm_ht_create(struct drm_open_hash *ht, unsigned int order)
-{
- unsigned int i;
-
- ht->size = 1 << order;
- ht->order = order;
- ht->fill = 0;
- ht->table = NULL;
- ht->use_vmalloc = ((ht->size * sizeof(*ht->table)) > PAGE_SIZE);
- if (!ht->use_vmalloc) {
- ht->table = kcalloc(ht->size, sizeof(*ht->table), GFP_KERNEL);
- }
- if (!ht->table) {
- ht->use_vmalloc = 1;
- ht->table = vmalloc(ht->size*sizeof(*ht->table));
- }
- if (!ht->table) {
- DRM_ERROR("Out of memory for hash table\n");
- return -ENOMEM;
- }
- for (i = 0; i < ht->size; ++i) {
- INIT_HLIST_HEAD(&ht->table[i]);
- }
- return 0;
-}
-EXPORT_SYMBOL(drm_ht_create);
-
-void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
-{
- struct drm_hash_item *entry;
- struct hlist_head *h_list;
- struct hlist_node *list;
- unsigned int hashed_key;
- int count = 0;
-
- hashed_key = hash_long(key, ht->order);
- DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key);
- h_list = &ht->table[hashed_key];
- hlist_for_each(list, h_list) {
- entry = hlist_entry(list, struct drm_hash_item, head);
- DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key);
- }
-}
-
-static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
- unsigned long key)
-{
- struct drm_hash_item *entry;
- struct hlist_head *h_list;
- struct hlist_node *list;
- unsigned int hashed_key;
-
- hashed_key = hash_long(key, ht->order);
- h_list = &ht->table[hashed_key];
- hlist_for_each(list, h_list) {
- entry = hlist_entry(list, struct drm_hash_item, head);
- if (entry->key == key)
- return list;
- if (entry->key > key)
- break;
- }
- return NULL;
-}
-
-
-int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
-{
- struct drm_hash_item *entry;
- struct hlist_head *h_list;
- struct hlist_node *list, *parent;
- unsigned int hashed_key;
- unsigned long key = item->key;
-
- hashed_key = hash_long(key, ht->order);
- h_list = &ht->table[hashed_key];
- parent = NULL;
- hlist_for_each(list, h_list) {
- entry = hlist_entry(list, struct drm_hash_item, head);
- if (entry->key == key)
- return -EINVAL;
- if (entry->key > key)
- break;
- parent = list;
- }
- if (parent) {
- hlist_add_after(parent, &item->head);
- } else {
- hlist_add_head(&item->head, h_list);
- }
- return 0;
-}
-EXPORT_SYMBOL(drm_ht_insert_item);
-
-/*
- * Just insert an item and return any "bits" bit key that hasn't been
- * used before.
- */
-int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
- unsigned long seed, int bits, int shift,
- unsigned long add)
-{
- int ret;
- unsigned long mask = (1 << bits) - 1;
- unsigned long first, unshifted_key;
-
- unshifted_key = hash_long(seed, bits);
- first = unshifted_key;
- do {
- item->key = (unshifted_key << shift) + add;
- ret = drm_ht_insert_item(ht, item);
- if (ret)
- unshifted_key = (unshifted_key + 1) & mask;
- } while (ret && (unshifted_key != first));
-
- if (ret) {
- DRM_ERROR("Available key bit space exhausted\n");
- return -EINVAL;
- }
- return 0;
-}
-EXPORT_SYMBOL(drm_ht_just_insert_please);
-
-int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
- struct drm_hash_item **item)
-{
- struct hlist_node *list;
-
- list = drm_ht_find_key(ht, key);
- if (!list)
- return -EINVAL;
-
- *item = hlist_entry(list, struct drm_hash_item, head);
- return 0;
-}
-EXPORT_SYMBOL(drm_ht_find_item);
-
-int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
-{
- struct hlist_node *list;
-
- list = drm_ht_find_key(ht, key);
- if (list) {
- hlist_del_init(list);
- ht->fill--;
- return 0;
- }
- return -EINVAL;
-}
-
-int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
-{
- hlist_del_init(&item->head);
- ht->fill--;
- return 0;
-}
-EXPORT_SYMBOL(drm_ht_remove_item);
-
-void drm_ht_remove(struct drm_open_hash *ht)
-{
- if (ht->table) {
- if (ht->use_vmalloc)
- vfree(ht->table);
- else
- kfree(ht->table);
- ht->table = NULL;
- }
-}
-EXPORT_SYMBOL(drm_ht_remove);
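/*
 * Illustrative sketch (not part of the removed file): embedding a
 * drm_hash_item in a private structure keyed by an integer, the way the
 * GEM mmap offset code uses this table. struct my_node and the my_*
 * helpers are assumptions for the example.
 */
struct my_node {
	struct drm_hash_item hash;	/* key lives in hash.key */
	void *payload;
};

static int my_insert(struct drm_open_hash *ht, struct my_node *node,
		     unsigned long key)
{
	node->hash.key = key;
	/* returns -EINVAL if the key is already present */
	return drm_ht_insert_item(ht, &node->hash);
}

static struct my_node *my_lookup(struct drm_open_hash *ht, unsigned long key)
{
	struct drm_hash_item *item;

	if (drm_ht_find_item(ht, key, &item))
		return NULL;
	return drm_hash_entry(item, struct my_node, hash);
}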
+++ /dev/null
-/**
- * \file drm_info.c
- * DRM info file implementations
- *
- * \author Ben Gamari <bgamari@gmail.com>
- */
-
-/*
- * Created: Sun Dec 21 13:09:50 2008 by bgamari@gmail.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * Copyright 2008 Ben Gamari <bgamari@gmail.com>
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/seq_file.h>
-#include "drmP.h"
-
-/**
- * Called when "/proc/dri/.../name" is read.
- *
- * Prints the device name together with the bus id if available.
- */
-int drm_name_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_minor *minor = node->minor;
- struct drm_device *dev = minor->dev;
- struct drm_master *master = minor->master;
-
- if (!master)
- return 0;
-
- if (drm_core_check_feature(dev, DRIVER_USE_PLATFORM_DEVICE)) {
- if (master->unique) {
- seq_printf(m, "%s %s %s\n",
- dev->driver->platform_device->name,
- dev_name(dev->dev), master->unique);
- } else {
- seq_printf(m, "%s\n",
- dev->driver->platform_device->name);
- }
- } else {
- if (master->unique) {
- seq_printf(m, "%s %s %s\n",
- dev->driver->pci_driver.name,
- dev_name(dev->dev), master->unique);
- } else {
- seq_printf(m, "%s %s\n", dev->driver->pci_driver.name,
- dev_name(dev->dev));
- }
- }
-
- return 0;
-}
-
-/**
- * Called when "/proc/dri/.../vm" is read.
- *
- * Prints information about all mappings in drm_device::maplist.
- */
-int drm_vm_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_local_map *map;
- struct drm_map_list *r_list;
-
- /* Hardcoded from _DRM_FRAME_BUFFER,
- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
- const char *type;
- int i;
-
- mutex_lock(&dev->struct_mutex);
- seq_printf(m, "slot offset size type flags address mtrr\n\n");
- i = 0;
- list_for_each_entry(r_list, &dev->maplist, head) {
- map = r_list->map;
- if (!map)
- continue;
- if (map->type < 0 || map->type > 5)
- type = "??";
- else
- type = types[map->type];
-
- seq_printf(m, "%4d 0x%016llx 0x%08lx %4.4s 0x%02x 0x%08lx ",
- i,
- (unsigned long long)map->offset,
- map->size, type, map->flags,
- (unsigned long) r_list->user_token);
- if (map->mtrr < 0)
- seq_printf(m, "none\n");
- else
- seq_printf(m, "%4d\n", map->mtrr);
- i++;
- }
- mutex_unlock(&dev->struct_mutex);
- return 0;
-}
-
-/**
- * Called when "/proc/dri/.../queues" is read.
- */
-int drm_queues_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- int i;
- struct drm_queue *q;
-
- mutex_lock(&dev->struct_mutex);
- seq_printf(m, " ctx/flags use fin"
- " blk/rw/rwf wait flushed queued"
- " locks\n\n");
- for (i = 0; i < dev->queue_count; i++) {
- q = dev->queuelist[i];
- atomic_inc(&q->use_count);
- seq_printf(m, "%5d/0x%03x %5d %5d"
- " %5d/%c%c/%c%c%c %5Zd\n",
- i,
- q->flags,
- atomic_read(&q->use_count),
- atomic_read(&q->finalization),
- atomic_read(&q->block_count),
- atomic_read(&q->block_read) ? 'r' : '-',
- atomic_read(&q->block_write) ? 'w' : '-',
- waitqueue_active(&q->read_queue) ? 'r' : '-',
- waitqueue_active(&q->write_queue) ? 'w' : '-',
- waitqueue_active(&q->flush_queue) ? 'f' : '-',
- DRM_BUFCOUNT(&q->waitlist));
- atomic_dec(&q->use_count);
- }
- mutex_unlock(&dev->struct_mutex);
- return 0;
-}
-
-/**
- * Called when "/proc/dri/.../bufs" is read.
- */
-int drm_bufs_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_device_dma *dma;
- int i, seg_pages;
-
- mutex_lock(&dev->struct_mutex);
- dma = dev->dma;
- if (!dma) {
- mutex_unlock(&dev->struct_mutex);
- return 0;
- }
-
- seq_printf(m, " o size count free segs pages kB\n\n");
- for (i = 0; i <= DRM_MAX_ORDER; i++) {
- if (dma->bufs[i].buf_count) {
- seg_pages = dma->bufs[i].seg_count * (1 << dma->bufs[i].page_order);
- seq_printf(m, "%2d %8d %5d %5d %5d %5d %5ld\n",
- i,
- dma->bufs[i].buf_size,
- dma->bufs[i].buf_count,
- atomic_read(&dma->bufs[i].freelist.count),
- dma->bufs[i].seg_count,
- seg_pages,
- seg_pages * PAGE_SIZE / 1024);
- }
- }
- seq_printf(m, "\n");
- for (i = 0; i < dma->buf_count; i++) {
- if (i && !(i % 32))
- seq_printf(m, "\n");
- seq_printf(m, " %d", dma->buflist[i]->list);
- }
- seq_printf(m, "\n");
- mutex_unlock(&dev->struct_mutex);
- return 0;
-}
-
-/**
- * Called when "/proc/dri/.../vblank" is read.
- */
-int drm_vblank_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- int crtc;
-
- mutex_lock(&dev->struct_mutex);
- for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
- seq_printf(m, "CRTC %d enable: %d\n",
- crtc, atomic_read(&dev->vblank_refcount[crtc]));
- seq_printf(m, "CRTC %d counter: %d\n",
- crtc, drm_vblank_count(dev, crtc));
- seq_printf(m, "CRTC %d last wait: %d\n",
- crtc, dev->last_vblank_wait[crtc]);
- seq_printf(m, "CRTC %d in modeset: %d\n",
- crtc, dev->vblank_inmodeset[crtc]);
- }
- mutex_unlock(&dev->struct_mutex);
- return 0;
-}
-
-/**
- * Called when "/proc/dri/.../clients" is read.
- *
- */
-int drm_clients_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_file *priv;
-
- mutex_lock(&dev->struct_mutex);
- seq_printf(m, "a dev pid uid magic ioctls\n\n");
- list_for_each_entry(priv, &dev->filelist, lhead) {
- seq_printf(m, "%c %3d %5d %5d %10u %10lu\n",
- priv->authenticated ? 'y' : 'n',
- priv->minor->index,
- priv->pid,
- priv->uid, priv->magic, priv->ioctl_count);
- }
- mutex_unlock(&dev->struct_mutex);
- return 0;
-}
-
-
-int drm_gem_one_name_info(int id, void *ptr, void *data)
-{
- struct drm_gem_object *obj = ptr;
- struct seq_file *m = data;
-
- seq_printf(m, "name %d size %zd\n", obj->name, obj->size);
-
- seq_printf(m, "%6d %8zd %7d %8d\n",
- obj->name, obj->size,
- atomic_read(&obj->handle_count),
- atomic_read(&obj->refcount.refcount));
- return 0;
-}
-
-int drm_gem_name_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
-
- seq_printf(m, " name size handles refcount\n");
- idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, m);
- return 0;
-}
-
-#if DRM_DEBUG_CODE
-
-int drm_vma_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_vma_entry *pt;
- struct vm_area_struct *vma;
-#if defined(__i386__)
- unsigned int pgprot;
-#endif
-
- mutex_lock(&dev->struct_mutex);
- seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
- atomic_read(&dev->vma_count),
- high_memory, (u64)virt_to_phys(high_memory));
-
- list_for_each_entry(pt, &dev->vmalist, head) {
- vma = pt->vma;
- if (!vma)
- continue;
- seq_printf(m,
- "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
- pt->pid, vma->vm_start, vma->vm_end,
- vma->vm_flags & VM_READ ? 'r' : '-',
- vma->vm_flags & VM_WRITE ? 'w' : '-',
- vma->vm_flags & VM_EXEC ? 'x' : '-',
- vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
- vma->vm_flags & VM_LOCKED ? 'l' : '-',
- vma->vm_flags & VM_IO ? 'i' : '-',
- vma->vm_pgoff);
-
-#if defined(__i386__)
- pgprot = pgprot_val(vma->vm_page_prot);
- seq_printf(m, " %c%c%c%c%c%c%c%c%c",
- pgprot & _PAGE_PRESENT ? 'p' : '-',
- pgprot & _PAGE_RW ? 'w' : 'r',
- pgprot & _PAGE_USER ? 'u' : 's',
- pgprot & _PAGE_PWT ? 't' : 'b',
- pgprot & _PAGE_PCD ? 'u' : 'c',
- pgprot & _PAGE_ACCESSED ? 'a' : '-',
- pgprot & _PAGE_DIRTY ? 'd' : '-',
- pgprot & _PAGE_PSE ? 'm' : 'k',
- pgprot & _PAGE_GLOBAL ? 'g' : 'l');
-#endif
- seq_printf(m, "\n");
- }
- mutex_unlock(&dev->struct_mutex);
- return 0;
-}
-
-#endif
-
+++ /dev/null
-/**
- * \file drm_ioc32.c
- *
- * 32-bit ioctl compatibility routines for the DRM.
- *
- * \author Paul Mackerras <paulus@samba.org>
- *
- * Copyright (C) Paul Mackerras 2005.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-#include <linux/compat.h>
-
-#include "drmP.h"
-#include "drm_core.h"
-
-#define DRM_IOCTL_VERSION32 DRM_IOWR(0x00, drm_version32_t)
-#define DRM_IOCTL_GET_UNIQUE32 DRM_IOWR(0x01, drm_unique32_t)
-#define DRM_IOCTL_GET_MAP32 DRM_IOWR(0x04, drm_map32_t)
-#define DRM_IOCTL_GET_CLIENT32 DRM_IOWR(0x05, drm_client32_t)
-#define DRM_IOCTL_GET_STATS32 DRM_IOR( 0x06, drm_stats32_t)
-
-#define DRM_IOCTL_SET_UNIQUE32 DRM_IOW( 0x10, drm_unique32_t)
-#define DRM_IOCTL_ADD_MAP32 DRM_IOWR(0x15, drm_map32_t)
-#define DRM_IOCTL_ADD_BUFS32 DRM_IOWR(0x16, drm_buf_desc32_t)
-#define DRM_IOCTL_MARK_BUFS32 DRM_IOW( 0x17, drm_buf_desc32_t)
-#define DRM_IOCTL_INFO_BUFS32 DRM_IOWR(0x18, drm_buf_info32_t)
-#define DRM_IOCTL_MAP_BUFS32 DRM_IOWR(0x19, drm_buf_map32_t)
-#define DRM_IOCTL_FREE_BUFS32 DRM_IOW( 0x1a, drm_buf_free32_t)
-
-#define DRM_IOCTL_RM_MAP32 DRM_IOW( 0x1b, drm_map32_t)
-
-#define DRM_IOCTL_SET_SAREA_CTX32 DRM_IOW( 0x1c, drm_ctx_priv_map32_t)
-#define DRM_IOCTL_GET_SAREA_CTX32 DRM_IOWR(0x1d, drm_ctx_priv_map32_t)
-
-#define DRM_IOCTL_RES_CTX32 DRM_IOWR(0x26, drm_ctx_res32_t)
-#define DRM_IOCTL_DMA32 DRM_IOWR(0x29, drm_dma32_t)
-
-#define DRM_IOCTL_AGP_ENABLE32 DRM_IOW( 0x32, drm_agp_mode32_t)
-#define DRM_IOCTL_AGP_INFO32 DRM_IOR( 0x33, drm_agp_info32_t)
-#define DRM_IOCTL_AGP_ALLOC32 DRM_IOWR(0x34, drm_agp_buffer32_t)
-#define DRM_IOCTL_AGP_FREE32 DRM_IOW( 0x35, drm_agp_buffer32_t)
-#define DRM_IOCTL_AGP_BIND32 DRM_IOW( 0x36, drm_agp_binding32_t)
-#define DRM_IOCTL_AGP_UNBIND32 DRM_IOW( 0x37, drm_agp_binding32_t)
-
-#define DRM_IOCTL_SG_ALLOC32 DRM_IOW( 0x38, drm_scatter_gather32_t)
-#define DRM_IOCTL_SG_FREE32 DRM_IOW( 0x39, drm_scatter_gather32_t)
-
-#define DRM_IOCTL_UPDATE_DRAW32 DRM_IOW( 0x3f, drm_update_draw32_t)
-
-#define DRM_IOCTL_WAIT_VBLANK32 DRM_IOWR(0x3a, drm_wait_vblank32_t)
-
-typedef struct drm_version_32 {
- int version_major; /**< Major version */
- int version_minor; /**< Minor version */
- int version_patchlevel; /**< Patch level */
- u32 name_len; /**< Length of name buffer */
- u32 name; /**< Name of driver */
- u32 date_len; /**< Length of date buffer */
- u32 date; /**< User-space buffer to hold date */
- u32 desc_len; /**< Length of desc buffer */
- u32 desc; /**< User-space buffer to hold desc */
-} drm_version32_t;
-
-static int compat_drm_version(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_version32_t v32;
- struct drm_version __user *version;
- int err;
-
- if (copy_from_user(&v32, (void __user *)arg, sizeof(v32)))
- return -EFAULT;
-
- version = compat_alloc_user_space(sizeof(*version));
- if (!access_ok(VERIFY_WRITE, version, sizeof(*version)))
- return -EFAULT;
- if (__put_user(v32.name_len, &version->name_len)
- || __put_user((void __user *)(unsigned long)v32.name,
- &version->name)
- || __put_user(v32.date_len, &version->date_len)
- || __put_user((void __user *)(unsigned long)v32.date,
- &version->date)
- || __put_user(v32.desc_len, &version->desc_len)
- || __put_user((void __user *)(unsigned long)v32.desc,
- &version->desc))
- return -EFAULT;
-
- err = drm_ioctl(file,
- DRM_IOCTL_VERSION, (unsigned long)version);
- if (err)
- return err;
-
- if (__get_user(v32.version_major, &version->version_major)
- || __get_user(v32.version_minor, &version->version_minor)
- || __get_user(v32.version_patchlevel, &version->version_patchlevel)
- || __get_user(v32.name_len, &version->name_len)
- || __get_user(v32.date_len, &version->date_len)
- || __get_user(v32.desc_len, &version->desc_len))
- return -EFAULT;
-
- if (copy_to_user((void __user *)arg, &v32, sizeof(v32)))
- return -EFAULT;
- return 0;
-}
-
-typedef struct drm_unique32 {
- u32 unique_len; /**< Length of unique */
- u32 unique; /**< Unique name for driver instantiation */
-} drm_unique32_t;
-
-static int compat_drm_getunique(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_unique32_t uq32;
- struct drm_unique __user *u;
- int err;
-
- if (copy_from_user(&uq32, (void __user *)arg, sizeof(uq32)))
- return -EFAULT;
-
- u = compat_alloc_user_space(sizeof(*u));
- if (!access_ok(VERIFY_WRITE, u, sizeof(*u)))
- return -EFAULT;
- if (__put_user(uq32.unique_len, &u->unique_len)
- || __put_user((void __user *)(unsigned long)uq32.unique,
- &u->unique))
- return -EFAULT;
-
- err = drm_ioctl(file, DRM_IOCTL_GET_UNIQUE, (unsigned long)u);
- if (err)
- return err;
-
- if (__get_user(uq32.unique_len, &u->unique_len))
- return -EFAULT;
- if (copy_to_user((void __user *)arg, &uq32, sizeof(uq32)))
- return -EFAULT;
- return 0;
-}
-
-static int compat_drm_setunique(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_unique32_t uq32;
- struct drm_unique __user *u;
-
- if (copy_from_user(&uq32, (void __user *)arg, sizeof(uq32)))
- return -EFAULT;
-
- u = compat_alloc_user_space(sizeof(*u));
- if (!access_ok(VERIFY_WRITE, u, sizeof(*u)))
- return -EFAULT;
- if (__put_user(uq32.unique_len, &u->unique_len)
- || __put_user((void __user *)(unsigned long)uq32.unique,
- &u->unique))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_SET_UNIQUE, (unsigned long)u);
-}
-
-typedef struct drm_map32 {
- u32 offset; /**< Requested physical address (0 for SAREA)*/
- u32 size; /**< Requested physical size (bytes) */
- enum drm_map_type type; /**< Type of memory to map */
- enum drm_map_flags flags; /**< Flags */
- u32 handle; /**< User-space: "Handle" to pass to mmap() */
- int mtrr; /**< MTRR slot used */
-} drm_map32_t;
-
-static int compat_drm_getmap(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_map32_t __user *argp = (void __user *)arg;
- drm_map32_t m32;
- struct drm_map __user *map;
- int idx, err;
- void *handle;
-
- if (get_user(idx, &argp->offset))
- return -EFAULT;
-
- map = compat_alloc_user_space(sizeof(*map));
- if (!access_ok(VERIFY_WRITE, map, sizeof(*map)))
- return -EFAULT;
- if (__put_user(idx, &map->offset))
- return -EFAULT;
-
- err = drm_ioctl(file, DRM_IOCTL_GET_MAP, (unsigned long)map);
- if (err)
- return err;
-
- if (__get_user(m32.offset, &map->offset)
- || __get_user(m32.size, &map->size)
- || __get_user(m32.type, &map->type)
- || __get_user(m32.flags, &map->flags)
- || __get_user(handle, &map->handle)
- || __get_user(m32.mtrr, &map->mtrr))
- return -EFAULT;
-
- m32.handle = (unsigned long)handle;
- if (copy_to_user(argp, &m32, sizeof(m32)))
- return -EFAULT;
- return 0;
-
-}
-
-static int compat_drm_addmap(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_map32_t __user *argp = (void __user *)arg;
- drm_map32_t m32;
- struct drm_map __user *map;
- int err;
- void *handle;
-
- if (copy_from_user(&m32, argp, sizeof(m32)))
- return -EFAULT;
-
- map = compat_alloc_user_space(sizeof(*map));
- if (!access_ok(VERIFY_WRITE, map, sizeof(*map)))
- return -EFAULT;
- if (__put_user(m32.offset, &map->offset)
- || __put_user(m32.size, &map->size)
- || __put_user(m32.type, &map->type)
- || __put_user(m32.flags, &map->flags))
- return -EFAULT;
-
- err = drm_ioctl(file, DRM_IOCTL_ADD_MAP, (unsigned long)map);
- if (err)
- return err;
-
- if (__get_user(m32.offset, &map->offset)
- || __get_user(m32.mtrr, &map->mtrr)
- || __get_user(handle, &map->handle))
- return -EFAULT;
-
- m32.handle = (unsigned long)handle;
- if (m32.handle != (unsigned long)handle && printk_ratelimit())
- printk(KERN_ERR "compat_drm_addmap truncated handle"
- " %p for type %d offset %x\n",
- handle, m32.type, m32.offset);
-
- if (copy_to_user(argp, &m32, sizeof(m32)))
- return -EFAULT;
-
- return 0;
-}
-
-static int compat_drm_rmmap(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_map32_t __user *argp = (void __user *)arg;
- struct drm_map __user *map;
- u32 handle;
-
- if (get_user(handle, &argp->handle))
- return -EFAULT;
-
- map = compat_alloc_user_space(sizeof(*map));
- if (!access_ok(VERIFY_WRITE, map, sizeof(*map)))
- return -EFAULT;
- if (__put_user((void *)(unsigned long)handle, &map->handle))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_RM_MAP, (unsigned long)map);
-}
-
-typedef struct drm_client32 {
- int idx; /**< Which client desired? */
- int auth; /**< Is client authenticated? */
- u32 pid; /**< Process ID */
- u32 uid; /**< User ID */
- u32 magic; /**< Magic */
- u32 iocs; /**< Ioctl count */
-} drm_client32_t;
-
-static int compat_drm_getclient(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_client32_t c32;
- drm_client32_t __user *argp = (void __user *)arg;
- struct drm_client __user *client;
- int idx, err;
-
- if (get_user(idx, &argp->idx))
- return -EFAULT;
-
- client = compat_alloc_user_space(sizeof(*client));
- if (!access_ok(VERIFY_WRITE, client, sizeof(*client)))
- return -EFAULT;
- if (__put_user(idx, &client->idx))
- return -EFAULT;
-
- err = drm_ioctl(file, DRM_IOCTL_GET_CLIENT, (unsigned long)client);
- if (err)
- return err;
-
- if (__get_user(c32.auth, &client->auth)
- || __get_user(c32.pid, &client->pid)
- || __get_user(c32.uid, &client->uid)
- || __get_user(c32.magic, &client->magic)
- || __get_user(c32.iocs, &client->iocs))
- return -EFAULT;
-
- if (copy_to_user(argp, &c32, sizeof(c32)))
- return -EFAULT;
- return 0;
-}
-
-typedef struct drm_stats32 {
- u32 count;
- struct {
- u32 value;
- enum drm_stat_type type;
- } data[15];
-} drm_stats32_t;
-
-static int compat_drm_getstats(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_stats32_t s32;
- drm_stats32_t __user *argp = (void __user *)arg;
- struct drm_stats __user *stats;
- int i, err;
-
- stats = compat_alloc_user_space(sizeof(*stats));
- if (!access_ok(VERIFY_WRITE, stats, sizeof(*stats)))
- return -EFAULT;
-
- err = drm_ioctl(file, DRM_IOCTL_GET_STATS, (unsigned long)stats);
- if (err)
- return err;
-
- if (__get_user(s32.count, &stats->count))
- return -EFAULT;
- for (i = 0; i < 15; ++i)
- if (__get_user(s32.data[i].value, &stats->data[i].value)
- || __get_user(s32.data[i].type, &stats->data[i].type))
- return -EFAULT;
-
- if (copy_to_user(argp, &s32, sizeof(s32)))
- return -EFAULT;
- return 0;
-}
-
-typedef struct drm_buf_desc32 {
- int count; /**< Number of buffers of this size */
- int size; /**< Size in bytes */
- int low_mark; /**< Low water mark */
- int high_mark; /**< High water mark */
- int flags;
- u32 agp_start; /**< Start address in the AGP aperture */
-} drm_buf_desc32_t;
-
-static int compat_drm_addbufs(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_buf_desc32_t __user *argp = (void __user *)arg;
- struct drm_buf_desc __user *buf;
- int err;
- unsigned long agp_start;
-
- buf = compat_alloc_user_space(sizeof(*buf));
- if (!access_ok(VERIFY_WRITE, buf, sizeof(*buf))
- || !access_ok(VERIFY_WRITE, argp, sizeof(*argp)))
- return -EFAULT;
-
- if (__copy_in_user(buf, argp, offsetof(drm_buf_desc32_t, agp_start))
- || __get_user(agp_start, &argp->agp_start)
- || __put_user(agp_start, &buf->agp_start))
- return -EFAULT;
-
- err = drm_ioctl(file, DRM_IOCTL_ADD_BUFS, (unsigned long)buf);
- if (err)
- return err;
-
- if (__copy_in_user(argp, buf, offsetof(drm_buf_desc32_t, agp_start))
- || __get_user(agp_start, &buf->agp_start)
- || __put_user(agp_start, &argp->agp_start))
- return -EFAULT;
-
- return 0;
-}
-
-static int compat_drm_markbufs(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_buf_desc32_t b32;
- drm_buf_desc32_t __user *argp = (void __user *)arg;
- struct drm_buf_desc __user *buf;
-
- if (copy_from_user(&b32, argp, sizeof(b32)))
- return -EFAULT;
-
- buf = compat_alloc_user_space(sizeof(*buf));
- if (!access_ok(VERIFY_WRITE, buf, sizeof(*buf)))
- return -EFAULT;
-
- if (__put_user(b32.size, &buf->size)
- || __put_user(b32.low_mark, &buf->low_mark)
- || __put_user(b32.high_mark, &buf->high_mark))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_MARK_BUFS, (unsigned long)buf);
-}
-
-typedef struct drm_buf_info32 {
- int count; /**< Entries in list */
- u32 list;
-} drm_buf_info32_t;
-
-static int compat_drm_infobufs(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_buf_info32_t req32;
- drm_buf_info32_t __user *argp = (void __user *)arg;
- drm_buf_desc32_t __user *to;
- struct drm_buf_info __user *request;
- struct drm_buf_desc __user *list;
- size_t nbytes;
- int i, err;
- int count, actual;
-
- if (copy_from_user(&req32, argp, sizeof(req32)))
- return -EFAULT;
-
- count = req32.count;
- to = (drm_buf_desc32_t __user *) (unsigned long)req32.list;
- if (count < 0)
- count = 0;
- if (count > 0
- && !access_ok(VERIFY_WRITE, to, count * sizeof(drm_buf_desc32_t)))
- return -EFAULT;
-
- nbytes = sizeof(*request) + count * sizeof(struct drm_buf_desc);
- request = compat_alloc_user_space(nbytes);
- if (!access_ok(VERIFY_WRITE, request, nbytes))
- return -EFAULT;
- list = (struct drm_buf_desc *) (request + 1);
-
- if (__put_user(count, &request->count)
- || __put_user(list, &request->list))
- return -EFAULT;
-
- err = drm_ioctl(file, DRM_IOCTL_INFO_BUFS, (unsigned long)request);
- if (err)
- return err;
-
- if (__get_user(actual, &request->count))
- return -EFAULT;
- if (count >= actual)
- for (i = 0; i < actual; ++i)
- if (__copy_in_user(&to[i], &list[i],
- offsetof(struct drm_buf_desc, flags)))
- return -EFAULT;
-
- if (__put_user(actual, &argp->count))
- return -EFAULT;
-
- return 0;
-}
-
-typedef struct drm_buf_pub32 {
- int idx; /**< Index into the master buffer list */
- int total; /**< Buffer size */
- int used; /**< Amount of buffer in use (for DMA) */
- u32 address; /**< Address of buffer */
-} drm_buf_pub32_t;
-
-typedef struct drm_buf_map32 {
- int count; /**< Length of the buffer list */
- u32 virtual; /**< Mmap'd area in user-virtual */
- u32 list; /**< Buffer information */
-} drm_buf_map32_t;
-
-static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_buf_map32_t __user *argp = (void __user *)arg;
- drm_buf_map32_t req32;
- drm_buf_pub32_t __user *list32;
- struct drm_buf_map __user *request;
- struct drm_buf_pub __user *list;
- int i, err;
- int count, actual;
- size_t nbytes;
- void __user *addr;
-
- if (copy_from_user(&req32, argp, sizeof(req32)))
- return -EFAULT;
- count = req32.count;
- list32 = (void __user *)(unsigned long)req32.list;
-
- if (count < 0)
- return -EINVAL;
- nbytes = sizeof(*request) + count * sizeof(struct drm_buf_pub);
- request = compat_alloc_user_space(nbytes);
- if (!access_ok(VERIFY_WRITE, request, nbytes))
- return -EFAULT;
- list = (struct drm_buf_pub *) (request + 1);
-
- if (__put_user(count, &request->count)
- || __put_user(list, &request->list))
- return -EFAULT;
-
- err = drm_ioctl(file, DRM_IOCTL_MAP_BUFS, (unsigned long)request);
- if (err)
- return err;
-
- if (__get_user(actual, &request->count))
- return -EFAULT;
- if (count >= actual)
- for (i = 0; i < actual; ++i)
- if (__copy_in_user(&list32[i], &list[i],
- offsetof(struct drm_buf_pub, address))
- || __get_user(addr, &list[i].address)
- || __put_user((unsigned long)addr,
- &list32[i].address))
- return -EFAULT;
-
- if (__put_user(actual, &argp->count)
- || __get_user(addr, &request->virtual)
- || __put_user((unsigned long)addr, &argp->virtual))
- return -EFAULT;
-
- return 0;
-}
-
-typedef struct drm_buf_free32 {
- int count;
- u32 list;
-} drm_buf_free32_t;
-
-static int compat_drm_freebufs(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_buf_free32_t req32;
- struct drm_buf_free __user *request;
- drm_buf_free32_t __user *argp = (void __user *)arg;
-
- if (copy_from_user(&req32, argp, sizeof(req32)))
- return -EFAULT;
-
- request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request)))
- return -EFAULT;
- if (__put_user(req32.count, &request->count)
- || __put_user((int __user *)(unsigned long)req32.list,
- &request->list))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_FREE_BUFS, (unsigned long)request);
-}
-
-typedef struct drm_ctx_priv_map32 {
- unsigned int ctx_id; /**< Context requesting private mapping */
- u32 handle; /**< Handle of map */
-} drm_ctx_priv_map32_t;
-
-static int compat_drm_setsareactx(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_ctx_priv_map32_t req32;
- struct drm_ctx_priv_map __user *request;
- drm_ctx_priv_map32_t __user *argp = (void __user *)arg;
-
- if (copy_from_user(&req32, argp, sizeof(req32)))
- return -EFAULT;
-
- request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request)))
- return -EFAULT;
- if (__put_user(req32.ctx_id, &request->ctx_id)
- || __put_user((void *)(unsigned long)req32.handle,
- &request->handle))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_SET_SAREA_CTX, (unsigned long)request);
-}
-
-static int compat_drm_getsareactx(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- struct drm_ctx_priv_map __user *request;
- drm_ctx_priv_map32_t __user *argp = (void __user *)arg;
- int err;
- unsigned int ctx_id;
- void *handle;
-
- if (!access_ok(VERIFY_WRITE, argp, sizeof(*argp))
- || __get_user(ctx_id, &argp->ctx_id))
- return -EFAULT;
-
- request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request)))
- return -EFAULT;
- if (__put_user(ctx_id, &request->ctx_id))
- return -EFAULT;
-
- err = drm_ioctl(file, DRM_IOCTL_GET_SAREA_CTX, (unsigned long)request);
- if (err)
- return err;
-
- if (__get_user(handle, &request->handle)
- || __put_user((unsigned long)handle, &argp->handle))
- return -EFAULT;
-
- return 0;
-}
-
-typedef struct drm_ctx_res32 {
- int count;
- u32 contexts;
-} drm_ctx_res32_t;
-
-static int compat_drm_resctx(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_ctx_res32_t __user *argp = (void __user *)arg;
- drm_ctx_res32_t res32;
- struct drm_ctx_res __user *res;
- int err;
-
- if (copy_from_user(&res32, argp, sizeof(res32)))
- return -EFAULT;
-
- res = compat_alloc_user_space(sizeof(*res));
- if (!access_ok(VERIFY_WRITE, res, sizeof(*res)))
- return -EFAULT;
- if (__put_user(res32.count, &res->count)
- || __put_user((struct drm_ctx __user *) (unsigned long)res32.contexts,
- &res->contexts))
- return -EFAULT;
-
- err = drm_ioctl(file, DRM_IOCTL_RES_CTX, (unsigned long)res);
- if (err)
- return err;
-
- if (__get_user(res32.count, &res->count)
- || __put_user(res32.count, &argp->count))
- return -EFAULT;
-
- return 0;
-}
-
-typedef struct drm_dma32 {
- int context; /**< Context handle */
- int send_count; /**< Number of buffers to send */
- u32 send_indices; /**< List of handles to buffers */
- u32 send_sizes; /**< Lengths of data to send */
- enum drm_dma_flags flags; /**< Flags */
- int request_count; /**< Number of buffers requested */
- int request_size; /**< Desired size for buffers */
- u32 request_indices; /**< Buffer information */
- u32 request_sizes;
- int granted_count; /**< Number of buffers granted */
-} drm_dma32_t;
-
-static int compat_drm_dma(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_dma32_t d32;
- drm_dma32_t __user *argp = (void __user *)arg;
- struct drm_dma __user *d;
- int err;
-
- if (copy_from_user(&d32, argp, sizeof(d32)))
- return -EFAULT;
-
- d = compat_alloc_user_space(sizeof(*d));
- if (!access_ok(VERIFY_WRITE, d, sizeof(*d)))
- return -EFAULT;
-
- if (__put_user(d32.context, &d->context)
- || __put_user(d32.send_count, &d->send_count)
- || __put_user((int __user *)(unsigned long)d32.send_indices,
- &d->send_indices)
- || __put_user((int __user *)(unsigned long)d32.send_sizes,
- &d->send_sizes)
- || __put_user(d32.flags, &d->flags)
- || __put_user(d32.request_count, &d->request_count)
- || __put_user((int __user *)(unsigned long)d32.request_indices,
- &d->request_indices)
- || __put_user((int __user *)(unsigned long)d32.request_sizes,
- &d->request_sizes))
- return -EFAULT;
-
- err = drm_ioctl(file, DRM_IOCTL_DMA, (unsigned long)d);
- if (err)
- return err;
-
- if (__get_user(d32.request_size, &d->request_size)
- || __get_user(d32.granted_count, &d->granted_count)
- || __put_user(d32.request_size, &argp->request_size)
- || __put_user(d32.granted_count, &argp->granted_count))
- return -EFAULT;
-
- return 0;
-}
-
-#if __OS_HAS_AGP
-typedef struct drm_agp_mode32 {
- u32 mode; /**< AGP mode */
-} drm_agp_mode32_t;
-
-static int compat_drm_agp_enable(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_agp_mode32_t __user *argp = (void __user *)arg;
- drm_agp_mode32_t m32;
- struct drm_agp_mode __user *mode;
-
- if (get_user(m32.mode, &argp->mode))
- return -EFAULT;
-
- mode = compat_alloc_user_space(sizeof(*mode));
- if (put_user(m32.mode, &mode->mode))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_AGP_ENABLE, (unsigned long)mode);
-}
-
-typedef struct drm_agp_info32 {
- int agp_version_major;
- int agp_version_minor;
- u32 mode;
- u32 aperture_base; /* physical address */
- u32 aperture_size; /* bytes */
- u32 memory_allowed; /* bytes */
- u32 memory_used;
-
- /* PCI information */
- unsigned short id_vendor;
- unsigned short id_device;
-} drm_agp_info32_t;
-
-static int compat_drm_agp_info(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_agp_info32_t __user *argp = (void __user *)arg;
- drm_agp_info32_t i32;
- struct drm_agp_info __user *info;
- int err;
-
- info = compat_alloc_user_space(sizeof(*info));
- if (!access_ok(VERIFY_WRITE, info, sizeof(*info)))
- return -EFAULT;
-
- err = drm_ioctl(file, DRM_IOCTL_AGP_INFO, (unsigned long)info);
- if (err)
- return err;
-
- if (__get_user(i32.agp_version_major, &info->agp_version_major)
- || __get_user(i32.agp_version_minor, &info->agp_version_minor)
- || __get_user(i32.mode, &info->mode)
- || __get_user(i32.aperture_base, &info->aperture_base)
- || __get_user(i32.aperture_size, &info->aperture_size)
- || __get_user(i32.memory_allowed, &info->memory_allowed)
- || __get_user(i32.memory_used, &info->memory_used)
- || __get_user(i32.id_vendor, &info->id_vendor)
- || __get_user(i32.id_device, &info->id_device))
- return -EFAULT;
-
- if (copy_to_user(argp, &i32, sizeof(i32)))
- return -EFAULT;
-
- return 0;
-}
-
-typedef struct drm_agp_buffer32 {
- u32 size; /**< In bytes -- will round to page boundary */
- u32 handle; /**< Used for binding / unbinding */
- u32 type; /**< Type of memory to allocate */
- u32 physical; /**< Physical used by i810 */
-} drm_agp_buffer32_t;
-
-static int compat_drm_agp_alloc(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_agp_buffer32_t __user *argp = (void __user *)arg;
- drm_agp_buffer32_t req32;
- struct drm_agp_buffer __user *request;
- int err;
-
- if (copy_from_user(&req32, argp, sizeof(req32)))
- return -EFAULT;
-
- request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
- || __put_user(req32.size, &request->size)
- || __put_user(req32.type, &request->type))
- return -EFAULT;
-
- err = drm_ioctl(file, DRM_IOCTL_AGP_ALLOC, (unsigned long)request);
- if (err)
- return err;
-
- if (__get_user(req32.handle, &request->handle)
- || __get_user(req32.physical, &request->physical)
- || copy_to_user(argp, &req32, sizeof(req32))) {
- drm_ioctl(file, DRM_IOCTL_AGP_FREE, (unsigned long)request);
- return -EFAULT;
- }
-
- return 0;
-}
-
-static int compat_drm_agp_free(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_agp_buffer32_t __user *argp = (void __user *)arg;
- struct drm_agp_buffer __user *request;
- u32 handle;
-
- request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
- || get_user(handle, &argp->handle)
- || __put_user(handle, &request->handle))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_AGP_FREE, (unsigned long)request);
-}
-
-typedef struct drm_agp_binding32 {
- u32 handle; /**< From drm_agp_buffer */
- u32 offset; /**< In bytes -- will round to page boundary */
-} drm_agp_binding32_t;
-
-static int compat_drm_agp_bind(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_agp_binding32_t __user *argp = (void __user *)arg;
- drm_agp_binding32_t req32;
- struct drm_agp_binding __user *request;
-
- if (copy_from_user(&req32, argp, sizeof(req32)))
- return -EFAULT;
-
- request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
- || __put_user(req32.handle, &request->handle)
- || __put_user(req32.offset, &request->offset))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_AGP_BIND, (unsigned long)request);
-}
-
-static int compat_drm_agp_unbind(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_agp_binding32_t __user *argp = (void __user *)arg;
- struct drm_agp_binding __user *request;
- u32 handle;
-
- request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
- || get_user(handle, &argp->handle)
- || __put_user(handle, &request->handle))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_AGP_UNBIND, (unsigned long)request);
-}
-#endif /* __OS_HAS_AGP */
-
-typedef struct drm_scatter_gather32 {
- u32 size; /**< In bytes -- will round to page boundary */
- u32 handle; /**< Used for mapping / unmapping */
-} drm_scatter_gather32_t;
-
-static int compat_drm_sg_alloc(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_scatter_gather32_t __user *argp = (void __user *)arg;
- struct drm_scatter_gather __user *request;
- int err;
- unsigned long x;
-
- request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
- || !access_ok(VERIFY_WRITE, argp, sizeof(*argp))
- || __get_user(x, &argp->size)
- || __put_user(x, &request->size))
- return -EFAULT;
-
- err = drm_ioctl(file, DRM_IOCTL_SG_ALLOC, (unsigned long)request);
- if (err)
- return err;
-
- /* XXX not sure about the handle conversion here... */
- if (__get_user(x, &request->handle)
- || __put_user(x >> PAGE_SHIFT, &argp->handle))
- return -EFAULT;
-
- return 0;
-}
-
-static int compat_drm_sg_free(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_scatter_gather32_t __user *argp = (void __user *)arg;
- struct drm_scatter_gather __user *request;
- unsigned long x;
-
- request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
- || !access_ok(VERIFY_WRITE, argp, sizeof(*argp))
- || __get_user(x, &argp->handle)
- || __put_user(x << PAGE_SHIFT, &request->handle))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_SG_FREE, (unsigned long)request);
-}
-
-#if defined(CONFIG_X86) || defined(CONFIG_IA64)
-typedef struct drm_update_draw32 {
- drm_drawable_t handle;
- unsigned int type;
- unsigned int num;
- /* 64-bit version has a 32-bit pad here */
- u64 data; /**< Pointer */
-} __attribute__((packed)) drm_update_draw32_t;
-
-static int compat_drm_update_draw(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_update_draw32_t update32;
- struct drm_update_draw __user *request;
- int err;
-
- if (copy_from_user(&update32, (void __user *)arg, sizeof(update32)))
- return -EFAULT;
-
- request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) ||
- __put_user(update32.handle, &request->handle) ||
- __put_user(update32.type, &request->type) ||
- __put_user(update32.num, &request->num) ||
- __put_user(update32.data, &request->data))
- return -EFAULT;
-
- err = drm_ioctl(file, DRM_IOCTL_UPDATE_DRAW, (unsigned long)request);
- return err;
-}
-#endif
-
-struct drm_wait_vblank_request32 {
- enum drm_vblank_seq_type type;
- unsigned int sequence;
- u32 signal;
-};
-
-struct drm_wait_vblank_reply32 {
- enum drm_vblank_seq_type type;
- unsigned int sequence;
- s32 tval_sec;
- s32 tval_usec;
-};
-
-typedef union drm_wait_vblank32 {
- struct drm_wait_vblank_request32 request;
- struct drm_wait_vblank_reply32 reply;
-} drm_wait_vblank32_t;
-
-static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_wait_vblank32_t __user *argp = (void __user *)arg;
- drm_wait_vblank32_t req32;
- union drm_wait_vblank __user *request;
- int err;
-
- if (copy_from_user(&req32, argp, sizeof(req32)))
- return -EFAULT;
-
- request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
- || __put_user(req32.request.type, &request->request.type)
- || __put_user(req32.request.sequence, &request->request.sequence)
- || __put_user(req32.request.signal, &request->request.signal))
- return -EFAULT;
-
- err = drm_ioctl(file, DRM_IOCTL_WAIT_VBLANK, (unsigned long)request);
- if (err)
- return err;
-
- if (__get_user(req32.reply.type, &request->reply.type)
- || __get_user(req32.reply.sequence, &request->reply.sequence)
- || __get_user(req32.reply.tval_sec, &request->reply.tval_sec)
- || __get_user(req32.reply.tval_usec, &request->reply.tval_usec))
- return -EFAULT;
-
- if (copy_to_user(argp, &req32, sizeof(req32)))
- return -EFAULT;
-
- return 0;
-}
-
-drm_ioctl_compat_t *drm_compat_ioctls[] = {
- [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
- [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
- [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
- [DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT32)] = compat_drm_getclient,
- [DRM_IOCTL_NR(DRM_IOCTL_GET_STATS32)] = compat_drm_getstats,
- [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE32)] = compat_drm_setunique,
- [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP32)] = compat_drm_addmap,
- [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS32)] = compat_drm_addbufs,
- [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS32)] = compat_drm_markbufs,
- [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS32)] = compat_drm_infobufs,
- [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS32)] = compat_drm_mapbufs,
- [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS32)] = compat_drm_freebufs,
- [DRM_IOCTL_NR(DRM_IOCTL_RM_MAP32)] = compat_drm_rmmap,
- [DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX32)] = compat_drm_setsareactx,
- [DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX32)] = compat_drm_getsareactx,
- [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX32)] = compat_drm_resctx,
- [DRM_IOCTL_NR(DRM_IOCTL_DMA32)] = compat_drm_dma,
-#if __OS_HAS_AGP
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE32)] = compat_drm_agp_enable,
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO32)] = compat_drm_agp_info,
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC32)] = compat_drm_agp_alloc,
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE32)] = compat_drm_agp_free,
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND32)] = compat_drm_agp_bind,
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND32)] = compat_drm_agp_unbind,
-#endif
- [DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC32)] = compat_drm_sg_alloc,
- [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE32)] = compat_drm_sg_free,
-#if defined(CONFIG_X86) || defined(CONFIG_IA64)
- [DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW32)] = compat_drm_update_draw,
-#endif
- [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK32)] = compat_drm_wait_vblank,
-};
-
-/**
- * Called whenever a 32-bit process running under a 64-bit kernel
- * performs an ioctl on /dev/drm.
- *
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument.
- * \return zero on success or negative number on failure.
- */
-long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
- unsigned int nr = DRM_IOCTL_NR(cmd);
- drm_ioctl_compat_t *fn;
- int ret;
-
- /* Assume that ioctls without an explicit compat routine will just
- * work. This may not always be a good assumption, but it's better
- * than always failing.
- */
- if (nr >= ARRAY_SIZE(drm_compat_ioctls))
- return drm_ioctl(filp, cmd, arg);
-
- fn = drm_compat_ioctls[nr];
-
- if (fn != NULL)
- ret = (*fn) (filp, cmd, arg);
- else
- ret = drm_ioctl(filp, cmd, arg);
-
- return ret;
-}
-
-EXPORT_SYMBOL(drm_compat_ioctl);
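
The table above is indexed by DRM_IOCTL_NR(cmd), so a 32-bit command number selects the translator for the matching native ioctl, and anything without an entry is passed straight through to drm_ioctl(). Drivers of this kernel generation typically expose the whole mechanism by pointing their compat_ioctl file operation at drm_compat_ioctl; an illustrative wiring, not specific to any one driver:

/* Illustrative fops wiring; the surrounding callbacks are the standard
 * DRM helpers of this era, shown here only as an example. */
static const struct file_operations example_drm_fops = {
	.owner		= THIS_MODULE,
	.open		= drm_open,
	.release	= drm_release,
	.unlocked_ioctl	= drm_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= drm_compat_ioctl,	/* 32-bit processes land here */
#endif
	.mmap		= drm_mmap,
	.poll		= drm_poll,
};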
+++ /dev/null
-/**
- * \file drm_ioctl.c
- * IOCTL processing for DRM
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Fri Jan 8 09:01:26 1999 by faith@valinux.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "drmP.h"
-#include "drm_core.h"
-
-#include "linux/pci.h"
-
-/**
- * Get the bus id.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_unique structure.
- * \return zero on success or a negative number on failure.
- *
- * Copies the bus id from drm_device::unique into user space.
- */
-int drm_getunique(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_unique *u = data;
- struct drm_master *master = file_priv->master;
-
- if (u->unique_len >= master->unique_len) {
- if (copy_to_user(u->unique, master->unique, master->unique_len))
- return -EFAULT;
- }
- u->unique_len = master->unique_len;
-
- return 0;
-}
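
Because the copy above only happens when the caller's buffer is at least master->unique_len bytes, while unique_len is always written back, userspace normally issues the ioctl twice: once to learn the length and once to fetch the string. A userspace-side sketch (fragment only; error handling and the usual includes such as <sys/ioctl.h> and the DRM uapi header are omitted):

struct drm_unique u = { .unique_len = 0, .unique = NULL };

ioctl(fd, DRM_IOCTL_GET_UNIQUE, &u);	/* 1st call: only the length comes back */
u.unique = malloc(u.unique_len + 1);
ioctl(fd, DRM_IOCTL_GET_UNIQUE, &u);	/* 2nd call: bus id is copied out */
u.unique[u.unique_len] = '\0';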
-
-static void
-drm_unset_busid(struct drm_device *dev,
- struct drm_master *master)
-{
- kfree(dev->devname);
- dev->devname = NULL;
-
- kfree(master->unique);
- master->unique = NULL;
- master->unique_len = 0;
- master->unique_size = 0;
-}
-
-/**
- * Set the bus id.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_unique structure.
- * \return zero on success or a negative number on failure.
- *
- * Copies the bus id from userspace into drm_device::unique, and verifies that
- * it matches the device this DRM is attached to (EINVAL otherwise). Deprecated
- * in interface version 1.1 and will return EBUSY when setversion has requested
- * version 1.1 or greater.
- */
-int drm_setunique(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_unique *u = data;
- struct drm_master *master = file_priv->master;
- int domain, bus, slot, func, ret;
-
- if (master->unique_len || master->unique)
- return -EBUSY;
-
- if (!u->unique_len || u->unique_len > 1024)
- return -EINVAL;
-
- master->unique_len = u->unique_len;
- master->unique_size = u->unique_len + 1;
- master->unique = kmalloc(master->unique_size, GFP_KERNEL);
- if (!master->unique) {
- ret = -ENOMEM;
- goto err;
- }
-
- if (copy_from_user(master->unique, u->unique, master->unique_len)) {
- ret = -EFAULT;
- goto err;
- }
-
- master->unique[master->unique_len] = '\0';
-
- dev->devname = kmalloc(strlen(dev->driver->pci_driver.name) +
- strlen(master->unique) + 2, GFP_KERNEL);
- if (!dev->devname) {
- ret = -ENOMEM;
- goto err;
- }
-
- sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name,
- master->unique);
-
- /* Return error if the busid submitted doesn't match the device's actual
- * busid.
- */
- ret = sscanf(master->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
- if (ret != 3) {
- ret = -EINVAL;
- goto err;
- }
-
- domain = bus >> 8;
- bus &= 0xff;
-
- if ((domain != drm_get_pci_domain(dev)) ||
- (bus != dev->pdev->bus->number) ||
- (slot != PCI_SLOT(dev->pdev->devfn)) ||
- (func != PCI_FUNC(dev->pdev->devfn))) {
- ret = -EINVAL;
- goto err;
- }
-
- return 0;
-
-err:
- drm_unset_busid(dev, master);
- return ret;
-}
-
-static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
-{
- struct drm_master *master = file_priv->master;
- int len, ret;
-
- if (master->unique != NULL)
- drm_unset_busid(dev, master);
-
- if (drm_core_check_feature(dev, DRIVER_USE_PLATFORM_DEVICE)) {
- master->unique_len = 10 + strlen(dev->platformdev->name);
- master->unique = kmalloc(master->unique_len + 1, GFP_KERNEL);
-
- if (master->unique == NULL)
- return -ENOMEM;
-
- len = snprintf(master->unique, master->unique_len,
- "platform:%s", dev->platformdev->name);
-
- if (len > master->unique_len) {
- DRM_ERROR("Unique buffer overflowed\n");
- ret = -EINVAL;
- goto err;
- }
-
- dev->devname =
- kmalloc(strlen(dev->platformdev->name) +
- master->unique_len + 2, GFP_KERNEL);
-
- if (dev->devname == NULL) {
- ret = -ENOMEM;
- goto err;
- }
-
- sprintf(dev->devname, "%s@%s", dev->platformdev->name,
- master->unique);
-
- } else {
- master->unique_len = 40;
- master->unique_size = master->unique_len;
- master->unique = kmalloc(master->unique_size, GFP_KERNEL);
- if (master->unique == NULL)
- return -ENOMEM;
-
- len = snprintf(master->unique, master->unique_len,
- "pci:%04x:%02x:%02x.%d",
- drm_get_pci_domain(dev),
- dev->pdev->bus->number,
- PCI_SLOT(dev->pdev->devfn),
- PCI_FUNC(dev->pdev->devfn));
- if (len >= master->unique_len) {
- DRM_ERROR("buffer overflow");
- ret = -EINVAL;
- goto err;
- } else
- master->unique_len = len;
-
- dev->devname =
- kmalloc(strlen(dev->driver->pci_driver.name) +
- master->unique_len + 2, GFP_KERNEL);
-
- if (dev->devname == NULL) {
- ret = -ENOMEM;
- goto err;
- }
-
- sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name,
- master->unique);
- }
-
- return 0;
-
-err:
- drm_unset_busid(dev, master);
- return ret;
-}
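
For reference, the two id formats generated above look like this (values assumed, not taken from a real device):

/*
 * PCI path:      "pci:%04x:%02x:%02x.%d" -> e.g. "pci:0000:01:00.0"
 *                (domain 0, bus 1, slot 0, function 0)
 * Platform path: "platform:%s"           -> e.g. "platform:exampledrm"
 */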
-
-/**
- * Get a mapping information.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_map structure.
- *
- * \return zero on success or a negative number on failure.
- *
- * Searches for the mapping with the specified offset and copies its information
- * into userspace
- */
-int drm_getmap(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_map *map = data;
- struct drm_map_list *r_list = NULL;
- struct list_head *list;
- int idx;
- int i;
-
- idx = map->offset;
-
- mutex_lock(&dev->struct_mutex);
- if (idx < 0) {
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
- }
-
- i = 0;
- list_for_each(list, &dev->maplist) {
- if (i == idx) {
- r_list = list_entry(list, struct drm_map_list, head);
- break;
- }
- i++;
- }
- if (!r_list || !r_list->map) {
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
- }
-
- map->offset = r_list->map->offset;
- map->size = r_list->map->size;
- map->type = r_list->map->type;
- map->flags = r_list->map->flags;
- map->handle = (void *)(unsigned long) r_list->user_token;
- map->mtrr = r_list->map->mtrr;
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
-/**
- * Get client information.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_client structure.
- *
- * \return zero on success or a negative number on failure.
- *
- * Searches for the client with the specified index and copies its information
- * into userspace
- */
-int drm_getclient(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_client *client = data;
- struct drm_file *pt;
- int idx;
- int i;
-
- idx = client->idx;
- mutex_lock(&dev->struct_mutex);
-
- i = 0;
- list_for_each_entry(pt, &dev->filelist, lhead) {
- if (i++ >= idx) {
- client->auth = pt->authenticated;
- client->pid = pt->pid;
- client->uid = pt->uid;
- client->magic = pt->magic;
- client->iocs = pt->ioctl_count;
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
- }
- }
- mutex_unlock(&dev->struct_mutex);
-
- return -EINVAL;
-}
-
-/**
- * Get statistics information.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_stats structure.
- *
- * \return zero on success or a negative number on failure.
- */
-int drm_getstats(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_stats *stats = data;
- int i;
-
- memset(stats, 0, sizeof(*stats));
-
- mutex_lock(&dev->struct_mutex);
-
- for (i = 0; i < dev->counters; i++) {
- if (dev->types[i] == _DRM_STAT_LOCK)
- stats->data[i].value =
- (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
- else
- stats->data[i].value = atomic_read(&dev->counts[i]);
- stats->data[i].type = dev->types[i];
- }
-
- stats->count = dev->counters;
-
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
-/**
- * Setversion ioctl.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_lock structure.
- * \return zero on success or negative number on failure.
- *
- * Sets the requested interface version
- */
-int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- struct drm_set_version *sv = data;
- int if_version, retcode = 0;
-
- if (sv->drm_di_major != -1) {
- if (sv->drm_di_major != DRM_IF_MAJOR ||
- sv->drm_di_minor < 0 || sv->drm_di_minor > DRM_IF_MINOR) {
- retcode = -EINVAL;
- goto done;
- }
- if_version = DRM_IF_VERSION(sv->drm_di_major,
- sv->drm_di_minor);
- dev->if_version = max(if_version, dev->if_version);
- if (sv->drm_di_minor >= 1) {
- /*
- * Version 1.1 includes tying of DRM to specific device
- * Version 1.4 has proper PCI domain support
- */
- retcode = drm_set_busid(dev, file_priv);
- if (retcode)
- goto done;
- }
- }
-
- if (sv->drm_dd_major != -1) {
- if (sv->drm_dd_major != dev->driver->major ||
- sv->drm_dd_minor < 0 || sv->drm_dd_minor >
- dev->driver->minor) {
- retcode = -EINVAL;
- goto done;
- }
-
- if (dev->driver->set_version)
- dev->driver->set_version(dev, sv);
- }
-
-done:
- sv->drm_di_major = DRM_IF_MAJOR;
- sv->drm_di_minor = DRM_IF_MINOR;
- sv->drm_dd_major = dev->driver->major;
- sv->drm_dd_minor = dev->driver->minor;
-
- return retcode;
-}
-
-/** No-op ioctl. */
-int drm_noop(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- DRM_DEBUG("\n");
- return 0;
-}
+++ /dev/null
-/**
- * \file drm_irq.c
- * IRQ support
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
- *
- * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "drmP.h"
-#include "drm_trace.h"
-
-#include <linux/interrupt.h> /* For task queue support */
-#include <linux/slab.h>
-
-#include <linux/vgaarb.h>
-/**
- * Get interrupt from bus id.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_irq_busid structure.
- * \return zero on success or a negative number on failure.
- *
- * Finds the PCI device with the specified bus id and gets its IRQ number.
- * This IOCTL is deprecated, and will now return EINVAL for any busid not equal
- * to that of the device that this DRM instance attached to.
- * to that of the device that this DRM instance is attached to.
- */
-int drm_irq_by_busid(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_irq_busid *p = data;
-
- if (drm_core_check_feature(dev, DRIVER_USE_PLATFORM_DEVICE))
- return -EINVAL;
-
- if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
- return -EINVAL;
-
- if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
- (p->busnum & 0xff) != dev->pdev->bus->number ||
- p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
- return -EINVAL;
-
- p->irq = dev->pdev->irq;
-
- DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
- p->irq);
-
- return 0;
-}
-
-static void vblank_disable_fn(unsigned long arg)
-{
- struct drm_device *dev = (struct drm_device *)arg;
- unsigned long irqflags;
- int i;
-
- if (!dev->vblank_disable_allowed)
- return;
-
- for (i = 0; i < dev->num_crtcs; i++) {
- spin_lock_irqsave(&dev->vbl_lock, irqflags);
- if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
- dev->vblank_enabled[i]) {
- DRM_DEBUG("disabling vblank on crtc %d\n", i);
- dev->last_vblank[i] =
- dev->driver->get_vblank_counter(dev, i);
- dev->driver->disable_vblank(dev, i);
- dev->vblank_enabled[i] = 0;
- }
- spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
- }
-}
-
-void drm_vblank_cleanup(struct drm_device *dev)
-{
- /* Bail if the driver didn't call drm_vblank_init() */
- if (dev->num_crtcs == 0)
- return;
-
- del_timer(&dev->vblank_disable_timer);
-
- vblank_disable_fn((unsigned long)dev);
-
- kfree(dev->vbl_queue);
- kfree(dev->_vblank_count);
- kfree(dev->vblank_refcount);
- kfree(dev->vblank_enabled);
- kfree(dev->last_vblank);
- kfree(dev->last_vblank_wait);
- kfree(dev->vblank_inmodeset);
-
- dev->num_crtcs = 0;
-}
-EXPORT_SYMBOL(drm_vblank_cleanup);
-
-int drm_vblank_init(struct drm_device *dev, int num_crtcs)
-{
- int i, ret = -ENOMEM;
-
- setup_timer(&dev->vblank_disable_timer, vblank_disable_fn,
- (unsigned long)dev);
- spin_lock_init(&dev->vbl_lock);
- dev->num_crtcs = num_crtcs;
-
- dev->vbl_queue = kmalloc(sizeof(wait_queue_head_t) * num_crtcs,
- GFP_KERNEL);
- if (!dev->vbl_queue)
- goto err;
-
- dev->_vblank_count = kmalloc(sizeof(atomic_t) * num_crtcs, GFP_KERNEL);
- if (!dev->_vblank_count)
- goto err;
-
- dev->vblank_refcount = kmalloc(sizeof(atomic_t) * num_crtcs,
- GFP_KERNEL);
- if (!dev->vblank_refcount)
- goto err;
-
- dev->vblank_enabled = kcalloc(num_crtcs, sizeof(int), GFP_KERNEL);
- if (!dev->vblank_enabled)
- goto err;
-
- dev->last_vblank = kcalloc(num_crtcs, sizeof(u32), GFP_KERNEL);
- if (!dev->last_vblank)
- goto err;
-
- dev->last_vblank_wait = kcalloc(num_crtcs, sizeof(u32), GFP_KERNEL);
- if (!dev->last_vblank_wait)
- goto err;
-
- dev->vblank_inmodeset = kcalloc(num_crtcs, sizeof(int), GFP_KERNEL);
- if (!dev->vblank_inmodeset)
- goto err;
-
- /* Zero per-crtc vblank stuff */
- for (i = 0; i < num_crtcs; i++) {
- init_waitqueue_head(&dev->vbl_queue[i]);
- atomic_set(&dev->_vblank_count[i], 0);
- atomic_set(&dev->vblank_refcount[i], 0);
- }
-
- dev->vblank_disable_allowed = 0;
- return 0;
-
-err:
- drm_vblank_cleanup(dev);
- return ret;
-}
-EXPORT_SYMBOL(drm_vblank_init);
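
A driver is expected to call drm_vblank_init() once at load time with its CRTC count; the error path above reuses drm_vblank_cleanup(), which is safe on partially allocated state because kfree(NULL) is a no-op. A minimal sketch of the call site (the function name and CRTC count are assumptions):

static int example_driver_load(struct drm_device *dev, unsigned long flags)
{
	int ret;

	ret = drm_vblank_init(dev, 2);	/* e.g. a device with two CRTCs */
	if (ret)
		return ret;

	/* ... remaining device setup ... */
	return 0;
}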
-
-static void drm_irq_vgaarb_nokms(void *cookie, bool state)
-{
- struct drm_device *dev = cookie;
-
- if (dev->driver->vgaarb_irq) {
- dev->driver->vgaarb_irq(dev, state);
- return;
- }
-
- if (!dev->irq_enabled)
- return;
-
- if (state)
- dev->driver->irq_uninstall(dev);
- else {
- dev->driver->irq_preinstall(dev);
- dev->driver->irq_postinstall(dev);
- }
-}
-
-/**
- * Install IRQ handler.
- *
- * \param dev DRM device.
- *
- * Initializes the IRQ related data. Installs the handler, calling the driver
- * \c drm_driver_irq_preinstall() and \c drm_driver_irq_postinstall() functions
- * before and after the installation.
- */
-int drm_irq_install(struct drm_device *dev)
-{
- int ret = 0;
- unsigned long sh_flags = 0;
- char *irqname;
-
- if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
- return -EINVAL;
-
- if (drm_dev_to_irq(dev) == 0)
- return -EINVAL;
-
- mutex_lock(&dev->struct_mutex);
-
- /* Driver must have been initialized */
- if (!dev->dev_private) {
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
- }
-
- if (dev->irq_enabled) {
- mutex_unlock(&dev->struct_mutex);
- return -EBUSY;
- }
- dev->irq_enabled = 1;
- mutex_unlock(&dev->struct_mutex);
-
- DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev));
-
- /* Before installing handler */
- dev->driver->irq_preinstall(dev);
-
- /* Install handler */
- if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
- sh_flags = IRQF_SHARED;
-
- if (dev->devname)
- irqname = dev->devname;
- else
- irqname = dev->driver->name;
-
- ret = request_irq(drm_dev_to_irq(dev), dev->driver->irq_handler,
- sh_flags, irqname, dev);
-
- if (ret < 0) {
- mutex_lock(&dev->struct_mutex);
- dev->irq_enabled = 0;
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
-
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- vga_client_register(dev->pdev, (void *)dev, drm_irq_vgaarb_nokms, NULL);
-
- /* After installing handler */
- ret = dev->driver->irq_postinstall(dev);
- if (ret < 0) {
- mutex_lock(&dev->struct_mutex);
- dev->irq_enabled = 0;
- mutex_unlock(&dev->struct_mutex);
- }
-
- return ret;
-}
-EXPORT_SYMBOL(drm_irq_install);
-
-/**
- * Uninstall the IRQ handler.
- *
- * \param dev DRM device.
- *
- * Calls the driver's \c drm_driver_irq_uninstall() function, and stops the irq.
- */
-int drm_irq_uninstall(struct drm_device * dev)
-{
- unsigned long irqflags;
- int irq_enabled, i;
-
- if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
- return -EINVAL;
-
- mutex_lock(&dev->struct_mutex);
- irq_enabled = dev->irq_enabled;
- dev->irq_enabled = 0;
- mutex_unlock(&dev->struct_mutex);
-
- /*
- * Wake up any waiters so they don't hang.
- */
- spin_lock_irqsave(&dev->vbl_lock, irqflags);
- for (i = 0; i < dev->num_crtcs; i++) {
- DRM_WAKEUP(&dev->vbl_queue[i]);
- dev->vblank_enabled[i] = 0;
- dev->last_vblank[i] = dev->driver->get_vblank_counter(dev, i);
- }
- spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
-
- if (!irq_enabled)
- return -EINVAL;
-
- DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev));
-
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- vga_client_register(dev->pdev, NULL, NULL, NULL);
-
- dev->driver->irq_uninstall(dev);
-
- free_irq(drm_dev_to_irq(dev), dev);
-
- return 0;
-}
-EXPORT_SYMBOL(drm_irq_uninstall);
-
-/**
- * IRQ control ioctl.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_control structure.
- * \return zero on success or a negative number on failure.
- *
- * Calls irq_install() or irq_uninstall() according to \p arg.
- */
-int drm_control(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_control *ctl = data;
-
-	/* if we don't have an IRQ, we fall back for compatibility reasons - this used to be a separate function in drm_dma.h */
-
-
- switch (ctl->func) {
- case DRM_INST_HANDLER:
- if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
- return 0;
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return 0;
- if (dev->if_version < DRM_IF_VERSION(1, 2) &&
- ctl->irq != drm_dev_to_irq(dev))
- return -EINVAL;
- return drm_irq_install(dev);
- case DRM_UNINST_HANDLER:
- if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
- return 0;
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return 0;
- return drm_irq_uninstall(dev);
- default:
- return -EINVAL;
- }
-}
-
-/**
- * drm_vblank_count - retrieve "cooked" vblank counter value
- * @dev: DRM device
- * @crtc: which counter to retrieve
- *
- * Fetches the "cooked" vblank count value that represents the number of
- * vblank events since the system was booted, including lost events due to
- * modesetting activity.
- */
-u32 drm_vblank_count(struct drm_device *dev, int crtc)
-{
- return atomic_read(&dev->_vblank_count[crtc]);
-}
-EXPORT_SYMBOL(drm_vblank_count);
-
-/**
- * drm_update_vblank_count - update the master vblank counter
- * @dev: DRM device
- * @crtc: counter to update
- *
- * Call back into the driver to update the appropriate vblank counter
- * (specified by @crtc). Deal with wraparound, if it occurred, and
- * update the last read value so we can deal with wraparound on the next
- * call if necessary.
- *
- * Only necessary when going from off->on, to account for frames we
- * didn't get an interrupt for.
- *
- * Note: caller must hold dev->vbl_lock since this reads & writes
- * device vblank fields.
- */
-static void drm_update_vblank_count(struct drm_device *dev, int crtc)
-{
- u32 cur_vblank, diff;
-
- /*
- * Interrupts were disabled prior to this call, so deal with counter
- * wrap if needed.
- * NOTE! It's possible we lost a full dev->max_vblank_count events
- * here if the register is small or we had vblank interrupts off for
- * a long time.
- */
- cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
- diff = cur_vblank - dev->last_vblank[crtc];
- if (cur_vblank < dev->last_vblank[crtc]) {
- diff += dev->max_vblank_count;
-
- DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
- crtc, dev->last_vblank[crtc], cur_vblank, diff);
- }
-
- DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
- crtc, diff);
-
- atomic_add(diff, &dev->_vblank_count[crtc]);
-}
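
Since cur_vblank, last_vblank[] and diff are all u32, the subtraction already wraps modulo 2^32; the explicit max_vblank_count correction only matters when the hardware counter is narrower than 32 bits. A worked example with assumed numbers:

/*
 * Assume a 24-bit hardware counter, i.e. dev->max_vblank_count == 0x00ffffff:
 *
 *   last_vblank[crtc] = 0x00fffffe, cur_vblank = 0x00000001
 *   diff = 0x00000001 - 0x00fffffe   = 0xff000003  (u32 wrap, far too large)
 *   cur_vblank < last_vblank, so diff += 0x00ffffff -> 0x00000002
 *
 * i.e. two vblanks elapsed while the interrupt was off.
 */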
-
-/**
- * drm_vblank_get - get a reference count on vblank events
- * @dev: DRM device
- * @crtc: which CRTC to own
- *
- * Acquire a reference count on vblank events to avoid having them disabled
- * while in use.
- *
- * RETURNS
- * Zero on success, nonzero on failure.
- */
-int drm_vblank_get(struct drm_device *dev, int crtc)
-{
- unsigned long irqflags;
- int ret = 0;
-
- spin_lock_irqsave(&dev->vbl_lock, irqflags);
- /* Going from 0->1 means we have to enable interrupts again */
- if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) {
- if (!dev->vblank_enabled[crtc]) {
- ret = dev->driver->enable_vblank(dev, crtc);
- DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
- if (ret)
- atomic_dec(&dev->vblank_refcount[crtc]);
- else {
- dev->vblank_enabled[crtc] = 1;
- drm_update_vblank_count(dev, crtc);
- }
- }
- } else {
- if (!dev->vblank_enabled[crtc]) {
- atomic_dec(&dev->vblank_refcount[crtc]);
- ret = -EINVAL;
- }
- }
- spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
-
- return ret;
-}
-EXPORT_SYMBOL(drm_vblank_get);
-
-/**
- * drm_vblank_put - give up ownership of vblank events
- * @dev: DRM device
- * @crtc: which counter to give up
- *
- * Release ownership of a given vblank counter, turning off interrupts
- * if possible.
- */
-void drm_vblank_put(struct drm_device *dev, int crtc)
-{
- BUG_ON (atomic_read (&dev->vblank_refcount[crtc]) == 0);
-
- /* Last user schedules interrupt disable */
- if (atomic_dec_and_test(&dev->vblank_refcount[crtc]))
- mod_timer(&dev->vblank_disable_timer, jiffies + 5*DRM_HZ);
-}
-EXPORT_SYMBOL(drm_vblank_put);
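
The two calls are strictly paired: each successful drm_vblank_get() keeps the interrupt enabled until the matching drm_vblank_put(), and the final put only arms the disable timer rather than switching the interrupt off immediately. The usual pattern, sketched below; drm_wait_vblank() later in this file is the canonical in-tree user:

ret = drm_vblank_get(dev, crtc);
if (ret == 0) {
	/* ... wait for, or consume, vblank events on this crtc ... */
	drm_vblank_put(dev, crtc);	/* last put arms vblank_disable_timer */
}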
-
-void drm_vblank_off(struct drm_device *dev, int crtc)
-{
- unsigned long irqflags;
-
- spin_lock_irqsave(&dev->vbl_lock, irqflags);
- dev->driver->disable_vblank(dev, crtc);
- DRM_WAKEUP(&dev->vbl_queue[crtc]);
- dev->vblank_enabled[crtc] = 0;
- dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
- spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
-}
-EXPORT_SYMBOL(drm_vblank_off);
-
-/**
- * drm_vblank_pre_modeset - account for vblanks across mode sets
- * @dev: DRM device
- * @crtc: CRTC in question
- * @post: post or pre mode set?
- *
- * Account for vblank events across mode setting events, which will likely
- * reset the hardware frame counter.
- */
-void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
-{
- /* vblank is not initialized (IRQ not installed ?) */
- if (!dev->num_crtcs)
- return;
- /*
- * To avoid all the problems that might happen if interrupts
- * were enabled/disabled around or between these calls, we just
- * have the kernel take a reference on the CRTC (just once though
- * to avoid corrupting the count if multiple, mismatch calls occur),
- * so that interrupts remain enabled in the interim.
- */
- if (!dev->vblank_inmodeset[crtc]) {
- dev->vblank_inmodeset[crtc] = 0x1;
- if (drm_vblank_get(dev, crtc) == 0)
- dev->vblank_inmodeset[crtc] |= 0x2;
- }
-}
-EXPORT_SYMBOL(drm_vblank_pre_modeset);
-
-void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
-{
- unsigned long irqflags;
-
- if (dev->vblank_inmodeset[crtc]) {
- spin_lock_irqsave(&dev->vbl_lock, irqflags);
- dev->vblank_disable_allowed = 1;
- spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
-
- if (dev->vblank_inmodeset[crtc] & 0x2)
- drm_vblank_put(dev, crtc);
-
- dev->vblank_inmodeset[crtc] = 0;
- }
-}
-EXPORT_SYMBOL(drm_vblank_post_modeset);
-
-/**
- * drm_modeset_ctl - handle vblank event counter changes across mode switch
- * @DRM_IOCTL_ARGS: standard ioctl arguments
- *
- * Applications should call the %_DRM_PRE_MODESET and %_DRM_POST_MODESET
- * ioctls around modesetting so that any lost vblank events are accounted for.
- *
- * Generally the counter will reset across mode sets. If interrupts are
- * enabled around this call, we don't have to do anything since the counter
- * will have already been incremented.
- */
-int drm_modeset_ctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_modeset_ctl *modeset = data;
- int crtc, ret = 0;
-
- /* If drm_vblank_init() hasn't been called yet, just no-op */
- if (!dev->num_crtcs)
- goto out;
-
- crtc = modeset->crtc;
- if (crtc >= dev->num_crtcs) {
- ret = -EINVAL;
- goto out;
- }
-
- switch (modeset->cmd) {
- case _DRM_PRE_MODESET:
- drm_vblank_pre_modeset(dev, crtc);
- break;
- case _DRM_POST_MODESET:
- drm_vblank_post_modeset(dev, crtc);
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
-out:
- return ret;
-}
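
From userspace, the two commands bracket the actual mode programming; a short sketch using the corresponding uapi ioctl (error handling omitted):

struct drm_modeset_ctl ctl = { .crtc = 0 };

ctl.cmd = _DRM_PRE_MODESET;
ioctl(fd, DRM_IOCTL_MODESET_CTL, &ctl);
/* ... program the new mode ... */
ctl.cmd = _DRM_POST_MODESET;
ioctl(fd, DRM_IOCTL_MODESET_CTL, &ctl);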
-
-static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
- union drm_wait_vblank *vblwait,
- struct drm_file *file_priv)
-{
- struct drm_pending_vblank_event *e;
- struct timeval now;
- unsigned long flags;
- unsigned int seq;
- int ret;
-
- e = kzalloc(sizeof *e, GFP_KERNEL);
- if (e == NULL) {
- ret = -ENOMEM;
- goto err_put;
- }
-
- e->pipe = pipe;
- e->base.pid = current->pid;
- e->event.base.type = DRM_EVENT_VBLANK;
- e->event.base.length = sizeof e->event;
- e->event.user_data = vblwait->request.signal;
- e->base.event = &e->event.base;
- e->base.file_priv = file_priv;
- e->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
-
- do_gettimeofday(&now);
- spin_lock_irqsave(&dev->event_lock, flags);
-
- if (file_priv->event_space < sizeof e->event) {
- ret = -EBUSY;
- goto err_unlock;
- }
-
- file_priv->event_space -= sizeof e->event;
- seq = drm_vblank_count(dev, pipe);
- if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) &&
- (seq - vblwait->request.sequence) <= (1 << 23)) {
- vblwait->request.sequence = seq + 1;
- vblwait->reply.sequence = vblwait->request.sequence;
- }
-
- DRM_DEBUG("event on vblank count %d, current %d, crtc %d\n",
- vblwait->request.sequence, seq, pipe);
-
- trace_drm_vblank_event_queued(current->pid, pipe,
- vblwait->request.sequence);
-
- e->event.sequence = vblwait->request.sequence;
- if ((seq - vblwait->request.sequence) <= (1 << 23)) {
- e->event.tv_sec = now.tv_sec;
- e->event.tv_usec = now.tv_usec;
- drm_vblank_put(dev, pipe);
- list_add_tail(&e->base.link, &e->base.file_priv->event_list);
- wake_up_interruptible(&e->base.file_priv->event_wait);
- trace_drm_vblank_event_delivered(current->pid, pipe,
- vblwait->request.sequence);
- } else {
- list_add_tail(&e->base.link, &dev->vblank_event_list);
- }
-
- spin_unlock_irqrestore(&dev->event_lock, flags);
-
- return 0;
-
-err_unlock:
- spin_unlock_irqrestore(&dev->event_lock, flags);
- kfree(e);
-err_put:
- drm_vblank_put(dev, pipe);
- return ret;
-}
-
-/**
- * Wait for VBLANK.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param data user argument, pointing to a drm_wait_vblank structure.
- * \return zero on success or a negative number on failure.
- *
- * This function enables the vblank interrupt on the pipe requested, then
- * sleeps waiting for the requested sequence number to occur, and drops
- * the vblank interrupt refcount afterwards. (vblank irq disable follows that
- * after a timeout with no further vblank waits scheduled).
- */
-int drm_wait_vblank(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- union drm_wait_vblank *vblwait = data;
- int ret = 0;
- unsigned int flags, seq, crtc;
-
- if ((!drm_dev_to_irq(dev)) || (!dev->irq_enabled))
- return -EINVAL;
-
- if (vblwait->request.type & _DRM_VBLANK_SIGNAL)
- return -EINVAL;
-
- if (vblwait->request.type &
- ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) {
- DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n",
- vblwait->request.type,
- (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK));
- return -EINVAL;
- }
-
- flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
- crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
-
- if (crtc >= dev->num_crtcs)
- return -EINVAL;
-
- ret = drm_vblank_get(dev, crtc);
- if (ret) {
- DRM_DEBUG("failed to acquire vblank counter, %d\n", ret);
- return ret;
- }
- seq = drm_vblank_count(dev, crtc);
-
- switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
- case _DRM_VBLANK_RELATIVE:
- vblwait->request.sequence += seq;
-		vblwait->request.type &= ~_DRM_VBLANK_RELATIVE;
-		/* fall through to the absolute case */
- case _DRM_VBLANK_ABSOLUTE:
- break;
- default:
- ret = -EINVAL;
- goto done;
- }
-
- if (flags & _DRM_VBLANK_EVENT)
- return drm_queue_vblank_event(dev, crtc, vblwait, file_priv);
-
- if ((flags & _DRM_VBLANK_NEXTONMISS) &&
- (seq - vblwait->request.sequence) <= (1<<23)) {
- vblwait->request.sequence = seq + 1;
- }
-
- DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
- vblwait->request.sequence, crtc);
- dev->last_vblank_wait[crtc] = vblwait->request.sequence;
- DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
- (((drm_vblank_count(dev, crtc) -
- vblwait->request.sequence) <= (1 << 23)) ||
- !dev->irq_enabled));
-
- if (ret != -EINTR) {
- struct timeval now;
-
- do_gettimeofday(&now);
-
- vblwait->reply.tval_sec = now.tv_sec;
- vblwait->reply.tval_usec = now.tv_usec;
- vblwait->reply.sequence = drm_vblank_count(dev, crtc);
- DRM_DEBUG("returning %d to client\n",
- vblwait->reply.sequence);
- } else {
- DRM_DEBUG("vblank wait interrupted by signal\n");
- }
-
-done:
- drm_vblank_put(dev, crtc);
- return ret;
-}
-
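For reference, a minimal userspace sketch of driving the wait-for-vblank path above through the raw ioctl; the /dev/dri/card0 node and the header path are illustrative and error handling is trimmed:

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <drm/drm.h>	/* union drm_wait_vblank, DRM_IOCTL_WAIT_VBLANK; path may vary */

int main(void)
{
	union drm_wait_vblank vbl;
	int fd = open("/dev/dri/card0", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&vbl, 0, sizeof(vbl));
	vbl.request.type = _DRM_VBLANK_RELATIVE;	/* sequence is relative to the current count */
	vbl.request.sequence = 1;			/* i.e. the next vblank */

	if (ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl) == 0)
		printf("vblank %u at %ld.%06lds\n", vbl.reply.sequence,
		       (long)vbl.reply.tval_sec, (long)vbl.reply.tval_usec);

	close(fd);
	return 0;
}

Setting _DRM_VBLANK_EVENT in the request type instead makes the completion arrive asynchronously as a DRM_EVENT_VBLANK read from the file descriptor, which is the path implemented by drm_queue_vblank_event() above.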
-void drm_handle_vblank_events(struct drm_device *dev, int crtc)
-{
- struct drm_pending_vblank_event *e, *t;
- struct timeval now;
- unsigned long flags;
- unsigned int seq;
-
- do_gettimeofday(&now);
- seq = drm_vblank_count(dev, crtc);
-
- spin_lock_irqsave(&dev->event_lock, flags);
-
- list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
- if (e->pipe != crtc)
- continue;
- if ((seq - e->event.sequence) > (1<<23))
- continue;
-
- DRM_DEBUG("vblank event on %d, current %d\n",
- e->event.sequence, seq);
-
- e->event.sequence = seq;
- e->event.tv_sec = now.tv_sec;
- e->event.tv_usec = now.tv_usec;
- drm_vblank_put(dev, e->pipe);
- list_move_tail(&e->base.link, &e->base.file_priv->event_list);
- wake_up_interruptible(&e->base.file_priv->event_wait);
- trace_drm_vblank_event_delivered(e->base.pid, e->pipe,
- e->event.sequence);
- }
-
- spin_unlock_irqrestore(&dev->event_lock, flags);
-
- trace_drm_vblank_event(crtc, seq);
-}
-
-/**
- * drm_handle_vblank - handle a vblank event
- * @dev: DRM device
- * @crtc: where this event occurred
- *
- * Drivers should call this routine in their vblank interrupt handlers to
- * update the vblank counter and send any signals that may be pending.
- */
-void drm_handle_vblank(struct drm_device *dev, int crtc)
-{
- if (!dev->num_crtcs)
- return;
-
- atomic_inc(&dev->_vblank_count[crtc]);
- DRM_WAKEUP(&dev->vbl_queue[crtc]);
- drm_handle_vblank_events(dev, crtc);
-}
-EXPORT_SYMBOL(drm_handle_vblank);
+++ /dev/null
-/**
- * \file drm_lock.c
- * IOCTLs for locking
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "drmP.h"
-
-static int drm_notifier(void *priv);
-
-static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
-
-/**
- * Lock ioctl.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_lock structure.
- * \return zero on success or negative number on failure.
- *
- * Add the current task to the lock wait queue, and attempt to take the lock.
- */
-int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- DECLARE_WAITQUEUE(entry, current);
- struct drm_lock *lock = data;
- struct drm_master *master = file_priv->master;
- int ret = 0;
-
- ++file_priv->lock_count;
-
- if (lock->context == DRM_KERNEL_CONTEXT) {
- DRM_ERROR("Process %d using kernel context %d\n",
- task_pid_nr(current), lock->context);
- return -EINVAL;
- }
-
- DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
- lock->context, task_pid_nr(current),
- master->lock.hw_lock->lock, lock->flags);
-
- if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE))
- if (lock->context < 0)
- return -EINVAL;
-
- add_wait_queue(&master->lock.lock_queue, &entry);
- spin_lock_bh(&master->lock.spinlock);
- master->lock.user_waiters++;
- spin_unlock_bh(&master->lock.spinlock);
-
- for (;;) {
- __set_current_state(TASK_INTERRUPTIBLE);
- if (!master->lock.hw_lock) {
- /* Device has been unregistered */
- send_sig(SIGTERM, current, 0);
- ret = -EINTR;
- break;
- }
- if (drm_lock_take(&master->lock, lock->context)) {
- master->lock.file_priv = file_priv;
- master->lock.lock_time = jiffies;
- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
- break; /* Got lock */
- }
-
- /* Contention */
- mutex_unlock(&drm_global_mutex);
- schedule();
- mutex_lock(&drm_global_mutex);
- if (signal_pending(current)) {
- ret = -EINTR;
- break;
- }
- }
- spin_lock_bh(&master->lock.spinlock);
- master->lock.user_waiters--;
- spin_unlock_bh(&master->lock.spinlock);
- __set_current_state(TASK_RUNNING);
- remove_wait_queue(&master->lock.lock_queue, &entry);
-
- DRM_DEBUG("%d %s\n", lock->context,
- ret ? "interrupted" : "has lock");
- if (ret) return ret;
-
-	/* don't block all signals on the master process for now;
-	 * this is probably not the correct answer, but it lets us debug the
-	 * xkb/X server for now */
- if (!file_priv->is_master) {
- sigemptyset(&dev->sigmask);
- sigaddset(&dev->sigmask, SIGSTOP);
- sigaddset(&dev->sigmask, SIGTSTP);
- sigaddset(&dev->sigmask, SIGTTIN);
- sigaddset(&dev->sigmask, SIGTTOU);
- dev->sigdata.context = lock->context;
- dev->sigdata.lock = master->lock.hw_lock;
- block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);
- }
-
- if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT))
- {
- if (dev->driver->dma_quiescent(dev)) {
- DRM_DEBUG("%d waiting for DMA quiescent\n",
- lock->context);
- return -EBUSY;
- }
- }
-
- return 0;
-}
-
-/**
- * Unlock ioctl.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_lock structure.
- * \return zero on success or negative number on failure.
- *
- * Transfer and free the lock.
- */
-int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- struct drm_lock *lock = data;
- struct drm_master *master = file_priv->master;
-
- if (lock->context == DRM_KERNEL_CONTEXT) {
- DRM_ERROR("Process %d using kernel context %d\n",
- task_pid_nr(current), lock->context);
- return -EINVAL;
- }
-
- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
-
- if (drm_lock_free(&master->lock, lock->context)) {
- /* FIXME: Should really bail out here. */
- }
-
- unblock_all_signals();
- return 0;
-}
-
-/**
- * Take the heavyweight lock.
- *
- * \param lock lock pointer.
- * \param context locking context.
- * \return one if the lock is held, or zero otherwise.
- *
- * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
- */
-static
-int drm_lock_take(struct drm_lock_data *lock_data,
- unsigned int context)
-{
- unsigned int old, new, prev;
- volatile unsigned int *lock = &lock_data->hw_lock->lock;
-
- spin_lock_bh(&lock_data->spinlock);
- do {
- old = *lock;
- if (old & _DRM_LOCK_HELD)
- new = old | _DRM_LOCK_CONT;
- else {
- new = context | _DRM_LOCK_HELD |
- ((lock_data->user_waiters + lock_data->kernel_waiters > 1) ?
- _DRM_LOCK_CONT : 0);
- }
- prev = cmpxchg(lock, old, new);
- } while (prev != old);
- spin_unlock_bh(&lock_data->spinlock);
-
- if (_DRM_LOCKING_CONTEXT(old) == context) {
- if (old & _DRM_LOCK_HELD) {
- if (context != DRM_KERNEL_CONTEXT) {
- DRM_ERROR("%d holds heavyweight lock\n",
- context);
- }
- return 0;
- }
- }
-
- if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) {
- /* Have lock */
- return 1;
- }
- return 0;
-}
-
-/**
- * This takes a lock forcibly and hands it to context. Should ONLY be used
- * inside *_unlock to give the lock to the kernel before calling *_dma_schedule.
- *
- * \param dev DRM device.
- * \param lock lock pointer.
- * \param context locking context.
- * \return always one.
- *
- * Resets the lock file pointer.
- * Marks the lock as held by the given context, via the \p cmpxchg instruction.
- */
-static int drm_lock_transfer(struct drm_lock_data *lock_data,
- unsigned int context)
-{
- unsigned int old, new, prev;
- volatile unsigned int *lock = &lock_data->hw_lock->lock;
-
- lock_data->file_priv = NULL;
- do {
- old = *lock;
- new = context | _DRM_LOCK_HELD;
- prev = cmpxchg(lock, old, new);
- } while (prev != old);
- return 1;
-}
-
-/**
- * Free lock.
- *
- * \param dev DRM device.
- * \param lock lock.
- * \param context context.
- *
- * Resets the lock file pointer.
- * Marks the lock as not held, via the \p cmpxchg instruction. Wakes any task
- * waiting on the lock queue.
- */
-int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context)
-{
- unsigned int old, new, prev;
- volatile unsigned int *lock = &lock_data->hw_lock->lock;
-
- spin_lock_bh(&lock_data->spinlock);
- if (lock_data->kernel_waiters != 0) {
- drm_lock_transfer(lock_data, 0);
- lock_data->idle_has_lock = 1;
- spin_unlock_bh(&lock_data->spinlock);
- return 1;
- }
- spin_unlock_bh(&lock_data->spinlock);
-
- do {
- old = *lock;
- new = _DRM_LOCKING_CONTEXT(old);
- prev = cmpxchg(lock, old, new);
- } while (prev != old);
-
- if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
- DRM_ERROR("%d freed heavyweight lock held by %d\n",
- context, _DRM_LOCKING_CONTEXT(old));
- return 1;
- }
- wake_up_interruptible(&lock_data->lock_queue);
- return 0;
-}
-
-/**
- * If we get here, it means that the process has called DRM_IOCTL_LOCK
- * without calling DRM_IOCTL_UNLOCK.
- *
- * If the lock is not held, then let the signal proceed as usual. If the lock
- * is held, then set the contended flag and keep the signal blocked.
- *
- * \param priv pointer to a drm_sigdata structure.
- * \return one if the signal should be delivered normally, or zero if the
- * signal should be blocked.
- */
-static int drm_notifier(void *priv)
-{
- struct drm_sigdata *s = (struct drm_sigdata *) priv;
- unsigned int old, new, prev;
-
- /* Allow signal delivery if lock isn't held */
- if (!s->lock || !_DRM_LOCK_IS_HELD(s->lock->lock)
- || _DRM_LOCKING_CONTEXT(s->lock->lock) != s->context)
- return 1;
-
-	/* Otherwise, set flag to force call to drmUnlock */
- do {
- old = s->lock->lock;
- new = old | _DRM_LOCK_CONT;
- prev = cmpxchg(&s->lock->lock, old, new);
- } while (prev != old);
- return 0;
-}
-
-/**
- * This function returns immediately and takes the hw lock with the kernel
- * context if it is free, otherwise it gets the highest priority when and if
- * it is eventually released.
- *
- * This guarantees that the kernel will _eventually_ have the lock _unless_
- * it is held by a blocked process. (In the latter case an explicit wait for
- * the hardware lock would cause a deadlock, which is why the "idlelock" was
- * invented).
- *
- * This should be sufficient to wait for GPU idle without having to worry
- * about starvation.
- */
-
-void drm_idlelock_take(struct drm_lock_data *lock_data)
-{
- int ret = 0;
-
- spin_lock_bh(&lock_data->spinlock);
- lock_data->kernel_waiters++;
- if (!lock_data->idle_has_lock) {
-
- spin_unlock_bh(&lock_data->spinlock);
- ret = drm_lock_take(lock_data, DRM_KERNEL_CONTEXT);
- spin_lock_bh(&lock_data->spinlock);
-
- if (ret == 1)
- lock_data->idle_has_lock = 1;
- }
- spin_unlock_bh(&lock_data->spinlock);
-}
-
-void drm_idlelock_release(struct drm_lock_data *lock_data)
-{
- unsigned int old, prev;
- volatile unsigned int *lock = &lock_data->hw_lock->lock;
-
- spin_lock_bh(&lock_data->spinlock);
- if (--lock_data->kernel_waiters == 0) {
- if (lock_data->idle_has_lock) {
- do {
- old = *lock;
- prev = cmpxchg(lock, old, DRM_KERNEL_CONTEXT);
- } while (prev != old);
- wake_up_interruptible(&lock_data->lock_queue);
- lock_data->idle_has_lock = 0;
- }
- }
- spin_unlock_bh(&lock_data->spinlock);
-}
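A hedged sketch of the intended take/release pairing around a wait for engine idle; my_engine_idle() is a placeholder for a driver-specific idle check, not an API defined here:

#include <linux/delay.h>
#include "drmP.h"

static bool my_engine_idle(struct drm_device *dev);	/* placeholder */

static void my_wait_for_idle(struct drm_device *dev, struct drm_master *master)
{
	/* Take the idlelock so the kernel context eventually gets the HW lock
	 * without deadlocking against a blocked lock holder. */
	drm_idlelock_take(&master->lock);

	while (!my_engine_idle(dev))
		msleep(1);

	drm_idlelock_release(&master->lock);
}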
-
-int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
-{
- struct drm_master *master = file_priv->master;
- return (file_priv->lock_count && master->lock.hw_lock &&
- _DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) &&
- master->lock.file_priv == file_priv);
-}
+++ /dev/null
-/**
- * \file drm_memory.c
- * Memory management wrappers for DRM
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Thu Feb 4 14:00:34 1999 by faith@valinux.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/highmem.h>
-#include "drmP.h"
-
-/**
- * Called when "/proc/dri/%dev%/mem" is read.
- *
- * \param buf output buffer.
- * \param start start of output data.
- * \param offset requested start offset.
- * \param len requested number of bytes.
- * \param eof whether there is no more data to return.
- * \param data private data.
- * \return number of written bytes.
- *
- * No-op.
- */
-int drm_mem_info(char *buf, char **start, off_t offset,
- int len, int *eof, void *data)
-{
- return 0;
-}
-
-#if __OS_HAS_AGP
-static void *agp_remap(unsigned long offset, unsigned long size,
- struct drm_device * dev)
-{
- unsigned long i, num_pages =
- PAGE_ALIGN(size) / PAGE_SIZE;
- struct drm_agp_mem *agpmem;
- struct page **page_map;
- struct page **phys_page_map;
- void *addr;
-
- size = PAGE_ALIGN(size);
-
-#ifdef __alpha__
- offset -= dev->hose->mem_space->start;
-#endif
-
- list_for_each_entry(agpmem, &dev->agp->memory, head)
- if (agpmem->bound <= offset
- && (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >=
- (offset + size))
- break;
- if (&agpmem->head == &dev->agp->memory)
- return NULL;
-
- /*
- * OK, we're mapping AGP space on a chipset/platform on which memory accesses by
- * the CPU do not get remapped by the GART. We fix this by using the kernel's
- * page-table instead (that's probably faster anyhow...).
- */
- /* note: use vmalloc() because num_pages could be large... */
- page_map = vmalloc(num_pages * sizeof(struct page *));
- if (!page_map)
- return NULL;
-
- phys_page_map = (agpmem->memory->pages + (offset - agpmem->bound) / PAGE_SIZE);
- for (i = 0; i < num_pages; ++i)
- page_map[i] = phys_page_map[i];
- addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP);
- vfree(page_map);
-
- return addr;
-}
-
-/** Wrapper around agp_free_memory() */
-void drm_free_agp(DRM_AGP_MEM * handle, int pages)
-{
- agp_free_memory(handle);
-}
-EXPORT_SYMBOL(drm_free_agp);
-
-/** Wrapper around agp_bind_memory() */
-int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
-{
- return agp_bind_memory(handle, start);
-}
-
-/** Wrapper around agp_unbind_memory() */
-int drm_unbind_agp(DRM_AGP_MEM * handle)
-{
- return agp_unbind_memory(handle);
-}
-EXPORT_SYMBOL(drm_unbind_agp);
-
-#else /* __OS_HAS_AGP */
-static inline void *agp_remap(unsigned long offset, unsigned long size,
- struct drm_device * dev)
-{
- return NULL;
-}
-
-#endif /* agp */
-
-void drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev)
-{
- if (drm_core_has_AGP(dev) &&
- dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
- map->handle = agp_remap(map->offset, map->size, dev);
- else
- map->handle = ioremap(map->offset, map->size);
-}
-EXPORT_SYMBOL(drm_core_ioremap);
-
-void drm_core_ioremap_wc(struct drm_local_map *map, struct drm_device *dev)
-{
- if (drm_core_has_AGP(dev) &&
- dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
- map->handle = agp_remap(map->offset, map->size, dev);
- else
- map->handle = ioremap_wc(map->offset, map->size);
-}
-EXPORT_SYMBOL(drm_core_ioremap_wc);
-
-void drm_core_ioremapfree(struct drm_local_map *map, struct drm_device *dev)
-{
- if (!map->handle || !map->size)
- return;
-
- if (drm_core_has_AGP(dev) &&
- dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
- vunmap(map->handle);
- else
- iounmap(map->handle);
-}
-EXPORT_SYMBOL(drm_core_ioremapfree);
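A hedged sketch of how a driver typically pairs these wrappers around a register map; the my_* function names are illustrative only:

#include "drmP.h"

static int my_map_registers(struct drm_device *dev, struct drm_local_map *map)
{
	drm_core_ioremap(map, dev);
	if (!map->handle)
		return -ENOMEM;		/* ioremap (or agp_remap) failed */
	return 0;
}

static void my_unmap_registers(struct drm_device *dev, struct drm_local_map *map)
{
	drm_core_ioremapfree(map, dev);
	map->handle = NULL;
}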
+++ /dev/null
-/**************************************************************************
- *
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- *
- **************************************************************************/
-
-/*
- * Generic simple memory manager implementation. Intended to be used as a base
- * class implementation for more advanced memory managers.
- *
- * Note that the algorithm used is quite simple: there might be substantial
- * performance gains if a smarter free list is implemented. Currently the free
- * list is just an unordered stack of free regions; switching to an RB-tree
- * could easily improve it, at least under heavy fragmentation.
- *
- * Aligned allocations can also see improvement.
- *
- * Authors:
- * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- */
-
-#include "drmP.h"
-#include "drm_mm.h"
-#include <linux/slab.h>
-#include <linux/seq_file.h>
-
-#define MM_UNUSED_TARGET 4
-
-static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
-{
- struct drm_mm_node *child;
-
- if (atomic)
- child = kzalloc(sizeof(*child), GFP_ATOMIC);
- else
- child = kzalloc(sizeof(*child), GFP_KERNEL);
-
- if (unlikely(child == NULL)) {
- spin_lock(&mm->unused_lock);
- if (list_empty(&mm->unused_nodes))
- child = NULL;
- else {
- child =
- list_entry(mm->unused_nodes.next,
- struct drm_mm_node, free_stack);
- list_del(&child->free_stack);
- --mm->num_unused;
- }
- spin_unlock(&mm->unused_lock);
- }
- return child;
-}
-
-/* drm_mm_pre_get() - pre-allocate drm_mm_node structures
- * drm_mm: memory manager struct we are pre-allocating for
- *
- * Returns 0 on success or -ENOMEM if allocation fails.
- */
-int drm_mm_pre_get(struct drm_mm *mm)
-{
- struct drm_mm_node *node;
-
- spin_lock(&mm->unused_lock);
- while (mm->num_unused < MM_UNUSED_TARGET) {
- spin_unlock(&mm->unused_lock);
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- spin_lock(&mm->unused_lock);
-
- if (unlikely(node == NULL)) {
- int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
- spin_unlock(&mm->unused_lock);
- return ret;
- }
- ++mm->num_unused;
- list_add_tail(&node->free_stack, &mm->unused_nodes);
- }
- spin_unlock(&mm->unused_lock);
- return 0;
-}
-EXPORT_SYMBOL(drm_mm_pre_get);
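A hedged sketch of the intended pattern: refill the node cache outside the critical section so the atomic allocation under the driver's spinlock cannot fail on kmalloc. drm_mm_get_block_atomic() is assumed to be the atomic inline wrapper from drm_mm.h of this era; the my_* names and lock are placeholders:

static struct drm_mm_node *my_alloc_atomic(struct drm_mm *mm, spinlock_t *lock,
					   unsigned long size)
{
	struct drm_mm_node *hole, *node = NULL;

	if (drm_mm_pre_get(mm))
		return NULL;			/* -ENOMEM from kzalloc */

	spin_lock(lock);
	hole = drm_mm_search_free(mm, size, 0, 0);
	if (hole)
		node = drm_mm_get_block_atomic(hole, size, 0);
	spin_unlock(lock);

	return node;
}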
-
-static int drm_mm_create_tail_node(struct drm_mm *mm,
- unsigned long start,
- unsigned long size, int atomic)
-{
- struct drm_mm_node *child;
-
- child = drm_mm_kmalloc(mm, atomic);
- if (unlikely(child == NULL))
- return -ENOMEM;
-
- child->free = 1;
- child->size = size;
- child->start = start;
- child->mm = mm;
-
- list_add_tail(&child->node_list, &mm->node_list);
- list_add_tail(&child->free_stack, &mm->free_stack);
-
- return 0;
-}
-
-static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
- unsigned long size,
- int atomic)
-{
- struct drm_mm_node *child;
-
- child = drm_mm_kmalloc(parent->mm, atomic);
- if (unlikely(child == NULL))
- return NULL;
-
- INIT_LIST_HEAD(&child->free_stack);
-
- child->size = size;
- child->start = parent->start;
- child->mm = parent->mm;
-
- list_add_tail(&child->node_list, &parent->node_list);
- INIT_LIST_HEAD(&child->free_stack);
-
- parent->size -= size;
- parent->start += size;
- return child;
-}
-
-
-struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
- unsigned long size,
- unsigned alignment,
- int atomic)
-{
-
- struct drm_mm_node *align_splitoff = NULL;
- unsigned tmp = 0;
-
- if (alignment)
- tmp = node->start % alignment;
-
- if (tmp) {
- align_splitoff =
- drm_mm_split_at_start(node, alignment - tmp, atomic);
- if (unlikely(align_splitoff == NULL))
- return NULL;
- }
-
- if (node->size == size) {
- list_del_init(&node->free_stack);
- node->free = 0;
- } else {
- node = drm_mm_split_at_start(node, size, atomic);
- }
-
- if (align_splitoff)
- drm_mm_put_block(align_splitoff);
-
- return node;
-}
-EXPORT_SYMBOL(drm_mm_get_block_generic);
-
-struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node,
- unsigned long size,
- unsigned alignment,
- unsigned long start,
- unsigned long end,
- int atomic)
-{
- struct drm_mm_node *align_splitoff = NULL;
- unsigned tmp = 0;
- unsigned wasted = 0;
-
- if (node->start < start)
- wasted += start - node->start;
- if (alignment)
- tmp = ((node->start + wasted) % alignment);
-
- if (tmp)
- wasted += alignment - tmp;
- if (wasted) {
- align_splitoff = drm_mm_split_at_start(node, wasted, atomic);
- if (unlikely(align_splitoff == NULL))
- return NULL;
- }
-
- if (node->size == size) {
- list_del_init(&node->free_stack);
- node->free = 0;
- } else {
- node = drm_mm_split_at_start(node, size, atomic);
- }
-
- if (align_splitoff)
- drm_mm_put_block(align_splitoff);
-
- return node;
-}
-EXPORT_SYMBOL(drm_mm_get_block_range_generic);
-
-/*
- * Put a block. Merge with the previous and / or next block if they are free.
- * Otherwise add to the free stack.
- */
-
-void drm_mm_put_block(struct drm_mm_node *cur)
-{
-
- struct drm_mm *mm = cur->mm;
- struct list_head *cur_head = &cur->node_list;
- struct list_head *root_head = &mm->node_list;
- struct drm_mm_node *prev_node = NULL;
- struct drm_mm_node *next_node;
-
- int merged = 0;
-
- BUG_ON(cur->scanned_block || cur->scanned_prev_free
- || cur->scanned_next_free);
-
- if (cur_head->prev != root_head) {
- prev_node =
- list_entry(cur_head->prev, struct drm_mm_node, node_list);
- if (prev_node->free) {
- prev_node->size += cur->size;
- merged = 1;
- }
- }
- if (cur_head->next != root_head) {
- next_node =
- list_entry(cur_head->next, struct drm_mm_node, node_list);
- if (next_node->free) {
- if (merged) {
- prev_node->size += next_node->size;
- list_del(&next_node->node_list);
- list_del(&next_node->free_stack);
- spin_lock(&mm->unused_lock);
- if (mm->num_unused < MM_UNUSED_TARGET) {
- list_add(&next_node->free_stack,
- &mm->unused_nodes);
- ++mm->num_unused;
- } else
- kfree(next_node);
- spin_unlock(&mm->unused_lock);
- } else {
- next_node->size += cur->size;
- next_node->start = cur->start;
- merged = 1;
- }
- }
- }
- if (!merged) {
- cur->free = 1;
- list_add(&cur->free_stack, &mm->free_stack);
- } else {
- list_del(&cur->node_list);
- spin_lock(&mm->unused_lock);
- if (mm->num_unused < MM_UNUSED_TARGET) {
- list_add(&cur->free_stack, &mm->unused_nodes);
- ++mm->num_unused;
- } else
- kfree(cur);
- spin_unlock(&mm->unused_lock);
- }
-}
-
-EXPORT_SYMBOL(drm_mm_put_block);
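A hedged sketch of the basic two-step allocation pattern built from drm_mm_put_block() above together with drm_mm_search_free() defined below: search for a large-enough hole, carve a block out of it with the drm_mm_get_block() wrapper from drm_mm.h, and later return the block. Sizes and the function name are illustrative:

static int my_mm_smoke_test(struct drm_mm *mm)
{
	struct drm_mm_node *hole, *node;

	hole = drm_mm_search_free(mm, 4096, 0, 0 /* first fit */);
	if (!hole)
		return -ENOSPC;

	node = drm_mm_get_block(hole, 4096, 0);
	if (!node)
		return -ENOMEM;

	/* ... use the range [node->start, node->start + node->size) ... */

	drm_mm_put_block(node);
	return 0;
}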
-
-static int check_free_hole(unsigned long start, unsigned long end,
- unsigned long size, unsigned alignment)
-{
- unsigned wasted = 0;
-
- if (end - start < size)
- return 0;
-
- if (alignment) {
- unsigned tmp = start % alignment;
- if (tmp)
- wasted = alignment - tmp;
- }
-
- if (end >= start + size + wasted) {
- return 1;
- }
-
- return 0;
-}
-
-struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
- unsigned long size,
- unsigned alignment, int best_match)
-{
- struct drm_mm_node *entry;
- struct drm_mm_node *best;
- unsigned long best_size;
-
- BUG_ON(mm->scanned_blocks);
-
- best = NULL;
- best_size = ~0UL;
-
- list_for_each_entry(entry, &mm->free_stack, free_stack) {
- if (!check_free_hole(entry->start, entry->start + entry->size,
- size, alignment))
- continue;
-
- if (!best_match)
- return entry;
-
- if (entry->size < best_size) {
- best = entry;
- best_size = entry->size;
- }
- }
-
- return best;
-}
-EXPORT_SYMBOL(drm_mm_search_free);
-
-struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
- unsigned long size,
- unsigned alignment,
- unsigned long start,
- unsigned long end,
- int best_match)
-{
- struct drm_mm_node *entry;
- struct drm_mm_node *best;
- unsigned long best_size;
-
- BUG_ON(mm->scanned_blocks);
-
- best = NULL;
- best_size = ~0UL;
-
- list_for_each_entry(entry, &mm->free_stack, free_stack) {
- unsigned long adj_start = entry->start < start ?
- start : entry->start;
- unsigned long adj_end = entry->start + entry->size > end ?
- end : entry->start + entry->size;
-
- if (!check_free_hole(adj_start, adj_end, size, alignment))
- continue;
-
- if (!best_match)
- return entry;
-
- if (entry->size < best_size) {
- best = entry;
- best_size = entry->size;
- }
- }
-
- return best;
-}
-EXPORT_SYMBOL(drm_mm_search_free_in_range);
-
-/**
- * Initialize LRU scanning.
- *
- * This simply sets up the scanning routines with the parameters for the desired
- * hole.
- *
- * Warning: As long as the scan list is non-empty, no other operations than
- * adding/removing nodes to/from the scan list are allowed.
- */
-void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
- unsigned alignment)
-{
- mm->scan_alignment = alignment;
- mm->scan_size = size;
- mm->scanned_blocks = 0;
- mm->scan_hit_start = 0;
- mm->scan_hit_size = 0;
-}
-EXPORT_SYMBOL(drm_mm_init_scan);
-
-/**
- * Add a node to the scan list that might be freed to make space for the desired
- * hole.
- *
- * Returns non-zero if a hole has been found, zero otherwise.
- */
-int drm_mm_scan_add_block(struct drm_mm_node *node)
-{
- struct drm_mm *mm = node->mm;
- struct list_head *prev_free, *next_free;
- struct drm_mm_node *prev_node, *next_node;
-
- mm->scanned_blocks++;
-
- prev_free = next_free = NULL;
-
- BUG_ON(node->free);
- node->scanned_block = 1;
- node->free = 1;
-
- if (node->node_list.prev != &mm->node_list) {
- prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
- node_list);
-
- if (prev_node->free) {
- list_del(&prev_node->node_list);
-
- node->start = prev_node->start;
- node->size += prev_node->size;
-
- prev_node->scanned_prev_free = 1;
-
- prev_free = &prev_node->free_stack;
- }
- }
-
- if (node->node_list.next != &mm->node_list) {
- next_node = list_entry(node->node_list.next, struct drm_mm_node,
- node_list);
-
- if (next_node->free) {
- list_del(&next_node->node_list);
-
- node->size += next_node->size;
-
- next_node->scanned_next_free = 1;
-
- next_free = &next_node->free_stack;
- }
- }
-
-	/* The free_stack list is not used for allocated objects, so these two
-	 * pointers can be abused (as long as no allocations in this memory
-	 * manager happen). */
- node->free_stack.prev = prev_free;
- node->free_stack.next = next_free;
-
- if (check_free_hole(node->start, node->start + node->size,
- mm->scan_size, mm->scan_alignment)) {
- mm->scan_hit_start = node->start;
- mm->scan_hit_size = node->size;
-
- return 1;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(drm_mm_scan_add_block);
-
-/**
- * Remove a node from the scan list.
- *
- * Nodes _must_ be removed in the exact same order from the scan list as they
- * have been added, otherwise the internal state of the memory manager will be
- * corrupted.
- *
- * When the scan list is empty, the selected memory nodes can be freed. An
- * immediately following drm_mm_search_free with best_match = 0 will then
- * return the just freed block (because it's at the top of the free_stack
- * list).
- *
- * Returns one if this block should be evicted, zero otherwise. Will always
- * return zero when no hole has been found.
- */
-int drm_mm_scan_remove_block(struct drm_mm_node *node)
-{
- struct drm_mm *mm = node->mm;
- struct drm_mm_node *prev_node, *next_node;
-
- mm->scanned_blocks--;
-
- BUG_ON(!node->scanned_block);
- node->scanned_block = 0;
- node->free = 0;
-
- prev_node = list_entry(node->free_stack.prev, struct drm_mm_node,
- free_stack);
- next_node = list_entry(node->free_stack.next, struct drm_mm_node,
- free_stack);
-
- if (prev_node) {
- BUG_ON(!prev_node->scanned_prev_free);
- prev_node->scanned_prev_free = 0;
-
- list_add_tail(&prev_node->node_list, &node->node_list);
-
- node->start = prev_node->start + prev_node->size;
- node->size -= prev_node->size;
- }
-
- if (next_node) {
- BUG_ON(!next_node->scanned_next_free);
- next_node->scanned_next_free = 0;
-
- list_add(&next_node->node_list, &node->node_list);
-
- node->size -= next_node->size;
- }
-
- INIT_LIST_HEAD(&node->free_stack);
-
-	/* Only need to check for containment because start & size for the
-	 * complete resulting free block (not just the desired part) are
-	 * stored. */
- if (node->start >= mm->scan_hit_start &&
- node->start + node->size
- <= mm->scan_hit_start + mm->scan_hit_size) {
- return 1;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(drm_mm_scan_remove_block);
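A hedged sketch of the scan/evict protocol described above, modeled loosely on how a GEM driver drives it: add candidate nodes in LRU order until a hole is reported, then remove every added node in exact reverse order and unbind only those flagged for eviction. struct my_obj, my_obj_unbind() and the LRU list are placeholders:

struct my_obj {
	struct drm_mm_node *node;
	struct list_head lru;	/* membership on the driver's LRU list */
	struct list_head scan;	/* temporary membership on the scan list */
};

static void my_obj_unbind(struct my_obj *obj);	/* placeholder: frees obj->node */

static int my_evict_for_hole(struct drm_mm *mm, struct list_head *lru_list,
			     unsigned long size, unsigned alignment)
{
	struct my_obj *obj, *next;
	LIST_HEAD(scan_list);
	int found = 0;

	drm_mm_init_scan(mm, size, alignment);

	/* Phase 1: feed LRU candidates into the scan until a hole appears.
	 * list_add() prepends, so scan_list ends up in reverse add order. */
	list_for_each_entry(obj, lru_list, lru) {
		list_add(&obj->scan, &scan_list);
		if (drm_mm_scan_add_block(obj->node)) {
			found = 1;
			break;
		}
	}

	/* Phase 2: remove in reverse order of addition, as required above;
	 * evict only the blocks the scan flags for the hole. */
	list_for_each_entry_safe(obj, next, &scan_list, scan) {
		int evict = drm_mm_scan_remove_block(obj->node);

		list_del(&obj->scan);
		if (found && evict)
			my_obj_unbind(obj);
	}

	return found ? 0 : -ENOSPC;
}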
-
-int drm_mm_clean(struct drm_mm * mm)
-{
- struct list_head *head = &mm->node_list;
-
- return (head->next->next == head);
-}
-EXPORT_SYMBOL(drm_mm_clean);
-
-int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
-{
- INIT_LIST_HEAD(&mm->node_list);
- INIT_LIST_HEAD(&mm->free_stack);
- INIT_LIST_HEAD(&mm->unused_nodes);
- mm->num_unused = 0;
- mm->scanned_blocks = 0;
- spin_lock_init(&mm->unused_lock);
-
- return drm_mm_create_tail_node(mm, start, size, 0);
-}
-EXPORT_SYMBOL(drm_mm_init);
-
-void drm_mm_takedown(struct drm_mm * mm)
-{
- struct list_head *bnode = mm->free_stack.next;
- struct drm_mm_node *entry;
- struct drm_mm_node *next;
-
- entry = list_entry(bnode, struct drm_mm_node, free_stack);
-
- if (entry->node_list.next != &mm->node_list ||
- entry->free_stack.next != &mm->free_stack) {
- DRM_ERROR("Memory manager not clean. Delaying takedown\n");
- return;
- }
-
- list_del(&entry->free_stack);
- list_del(&entry->node_list);
- kfree(entry);
-
- spin_lock(&mm->unused_lock);
- list_for_each_entry_safe(entry, next, &mm->unused_nodes, free_stack) {
- list_del(&entry->free_stack);
- kfree(entry);
- --mm->num_unused;
- }
- spin_unlock(&mm->unused_lock);
-
- BUG_ON(mm->num_unused != 0);
-}
-EXPORT_SYMBOL(drm_mm_takedown);
-
-void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
-{
- struct drm_mm_node *entry;
- int total_used = 0, total_free = 0, total = 0;
-
- list_for_each_entry(entry, &mm->node_list, node_list) {
- printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8ld: %s\n",
- prefix, entry->start, entry->start + entry->size,
- entry->size, entry->free ? "free" : "used");
- total += entry->size;
- if (entry->free)
- total_free += entry->size;
- else
- total_used += entry->size;
- }
- printk(KERN_DEBUG "%s total: %d, used %d free %d\n", prefix, total,
- total_used, total_free);
-}
-EXPORT_SYMBOL(drm_mm_debug_table);
-
-#if defined(CONFIG_DEBUG_FS)
-int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
-{
- struct drm_mm_node *entry;
- int total_used = 0, total_free = 0, total = 0;
-
- list_for_each_entry(entry, &mm->node_list, node_list) {
- seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: %s\n", entry->start, entry->start + entry->size, entry->size, entry->free ? "free" : "used");
- total += entry->size;
- if (entry->free)
- total_free += entry->size;
- else
- total_used += entry->size;
- }
- seq_printf(m, "total: %d, used %d free %d\n", total, total_used, total_free);
- return 0;
-}
-EXPORT_SYMBOL(drm_mm_dump_table);
-#endif
+++ /dev/null
-/*
- * Copyright © 1997-2003 by The XFree86 Project, Inc.
- * Copyright © 2007 Dave Airlie
- * Copyright © 2007-2008 Intel Corporation
- * Jesse Barnes <jesse.barnes@intel.com>
- * Copyright 2005-2006 Luc Verhaegen
- * Copyright (c) 2001, Andy Ritger aritger@nvidia.com
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Except as contained in this notice, the name of the copyright holder(s)
- * and author(s) shall not be used in advertising or otherwise to promote
- * the sale, use or other dealings in this Software without prior written
- * authorization from the copyright holder(s) and author(s).
- */
-
-#include <linux/list.h>
-#include <linux/list_sort.h>
-#include "drmP.h"
-#include "drm.h"
-#include "drm_crtc.h"
-
-/**
- * drm_mode_debug_printmodeline - debug print a mode
- * @dev: DRM device
- * @mode: mode to print
- *
- * LOCKING:
- * None.
- *
- * Describe @mode using DRM_DEBUG.
- */
-void drm_mode_debug_printmodeline(struct drm_display_mode *mode)
-{
- DRM_DEBUG_KMS("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d "
- "0x%x 0x%x\n",
- mode->base.id, mode->name, mode->vrefresh, mode->clock,
- mode->hdisplay, mode->hsync_start,
- mode->hsync_end, mode->htotal,
- mode->vdisplay, mode->vsync_start,
- mode->vsync_end, mode->vtotal, mode->type, mode->flags);
-}
-EXPORT_SYMBOL(drm_mode_debug_printmodeline);
-
-/**
- * drm_cvt_mode - create a modeline based on the CVT algorithm
- * @dev: DRM device
- * @hdisplay: hdisplay size
- * @vdisplay: vdisplay size
- * @vrefresh: vrefresh rate
- * @reduced: whether the reduced-blanking variant of the calculation is used
- * @interlaced: whether interlace is supported
- * @margins: whether margins (borders) are requested
- *
- * LOCKING:
- * none.
- *
- * Returns the modeline based on the CVT algorithm.
- *
- * This function generates a modeline for the given hdisplay, vdisplay and
- * vrefresh using the CVT algorithm. It is based on the VESA(TM) Coordinated
- * Video Timing Generator by Graham Loveridge, April 9, 2003, available at
- * http://www.elo.utfsm.cl/~elo212/docs/CVTd6r1.xls
- *
- * It is adapted from xf86CVTmode in xserver/hw/xfree86/modes/xf86cvt.c,
- * translated to use integer calculation.
- */
-#define HV_FACTOR 1000
-struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
- int vdisplay, int vrefresh,
- bool reduced, bool interlaced, bool margins)
-{
- /* 1) top/bottom margin size (% of height) - default: 1.8, */
-#define CVT_MARGIN_PERCENTAGE 18
- /* 2) character cell horizontal granularity (pixels) - default 8 */
-#define CVT_H_GRANULARITY 8
- /* 3) Minimum vertical porch (lines) - default 3 */
-#define CVT_MIN_V_PORCH 3
- /* 4) Minimum number of vertical back porch lines - default 6 */
-#define CVT_MIN_V_BPORCH 6
- /* Pixel Clock step (kHz) */
-#define CVT_CLOCK_STEP 250
- struct drm_display_mode *drm_mode;
- unsigned int vfieldrate, hperiod;
- int hdisplay_rnd, hmargin, vdisplay_rnd, vmargin, vsync;
- int interlace;
-
-	/* allocate the drm_display_mode structure; if that fails,
-	 * return NULL directly
-	 */
- drm_mode = drm_mode_create(dev);
- if (!drm_mode)
- return NULL;
-
- /* the CVT default refresh rate is 60Hz */
- if (!vrefresh)
- vrefresh = 60;
-
-	/* the required field refresh rate */
- if (interlaced)
- vfieldrate = vrefresh * 2;
- else
- vfieldrate = vrefresh;
-
- /* horizontal pixels */
- hdisplay_rnd = hdisplay - (hdisplay % CVT_H_GRANULARITY);
-
- /* determine the left&right borders */
- hmargin = 0;
- if (margins) {
- hmargin = hdisplay_rnd * CVT_MARGIN_PERCENTAGE / 1000;
- hmargin -= hmargin % CVT_H_GRANULARITY;
- }
- /* find the total active pixels */
- drm_mode->hdisplay = hdisplay_rnd + 2 * hmargin;
-
- /* find the number of lines per field */
- if (interlaced)
- vdisplay_rnd = vdisplay / 2;
- else
- vdisplay_rnd = vdisplay;
-
- /* find the top & bottom borders */
- vmargin = 0;
- if (margins)
- vmargin = vdisplay_rnd * CVT_MARGIN_PERCENTAGE / 1000;
-
- drm_mode->vdisplay = vdisplay + 2 * vmargin;
-
- /* Interlaced */
- if (interlaced)
- interlace = 1;
- else
- interlace = 0;
-
- /* Determine VSync Width from aspect ratio */
- if (!(vdisplay % 3) && ((vdisplay * 4 / 3) == hdisplay))
- vsync = 4;
- else if (!(vdisplay % 9) && ((vdisplay * 16 / 9) == hdisplay))
- vsync = 5;
- else if (!(vdisplay % 10) && ((vdisplay * 16 / 10) == hdisplay))
- vsync = 6;
- else if (!(vdisplay % 4) && ((vdisplay * 5 / 4) == hdisplay))
- vsync = 7;
- else if (!(vdisplay % 9) && ((vdisplay * 15 / 9) == hdisplay))
- vsync = 7;
- else /* custom */
- vsync = 10;
-
- if (!reduced) {
- /* simplify the GTF calculation */
- /* 4) Minimum time of vertical sync + back porch interval (µs)
- * default 550.0
- */
- int tmp1, tmp2;
-#define CVT_MIN_VSYNC_BP 550
- /* 3) Nominal HSync width (% of line period) - default 8 */
-#define CVT_HSYNC_PERCENTAGE 8
- unsigned int hblank_percentage;
- int vsyncandback_porch, vback_porch, hblank;
-
- /* estimated the horizontal period */
- tmp1 = HV_FACTOR * 1000000 -
- CVT_MIN_VSYNC_BP * HV_FACTOR * vfieldrate;
- tmp2 = (vdisplay_rnd + 2 * vmargin + CVT_MIN_V_PORCH) * 2 +
- interlace;
- hperiod = tmp1 * 2 / (tmp2 * vfieldrate);
-
- tmp1 = CVT_MIN_VSYNC_BP * HV_FACTOR / hperiod + 1;
- /* 9. Find number of lines in sync + backporch */
- if (tmp1 < (vsync + CVT_MIN_V_PORCH))
- vsyncandback_porch = vsync + CVT_MIN_V_PORCH;
- else
- vsyncandback_porch = tmp1;
- /* 10. Find number of lines in back porch */
- vback_porch = vsyncandback_porch - vsync;
- drm_mode->vtotal = vdisplay_rnd + 2 * vmargin +
- vsyncandback_porch + CVT_MIN_V_PORCH;
- /* 5) Definition of Horizontal blanking time limitation */
- /* Gradient (%/kHz) - default 600 */
-#define CVT_M_FACTOR 600
- /* Offset (%) - default 40 */
-#define CVT_C_FACTOR 40
- /* Blanking time scaling factor - default 128 */
-#define CVT_K_FACTOR 128
- /* Scaling factor weighting - default 20 */
-#define CVT_J_FACTOR 20
-#define CVT_M_PRIME (CVT_M_FACTOR * CVT_K_FACTOR / 256)
-#define CVT_C_PRIME ((CVT_C_FACTOR - CVT_J_FACTOR) * CVT_K_FACTOR / 256 + \
- CVT_J_FACTOR)
- /* 12. Find ideal blanking duty cycle from formula */
- hblank_percentage = CVT_C_PRIME * HV_FACTOR - CVT_M_PRIME *
- hperiod / 1000;
- /* 13. Blanking time */
- if (hblank_percentage < 20 * HV_FACTOR)
- hblank_percentage = 20 * HV_FACTOR;
- hblank = drm_mode->hdisplay * hblank_percentage /
- (100 * HV_FACTOR - hblank_percentage);
- hblank -= hblank % (2 * CVT_H_GRANULARITY);
-		/* 14. find the total pixels per line */
- drm_mode->htotal = drm_mode->hdisplay + hblank;
- drm_mode->hsync_end = drm_mode->hdisplay + hblank / 2;
- drm_mode->hsync_start = drm_mode->hsync_end -
- (drm_mode->htotal * CVT_HSYNC_PERCENTAGE) / 100;
- drm_mode->hsync_start += CVT_H_GRANULARITY -
- drm_mode->hsync_start % CVT_H_GRANULARITY;
- /* fill the Vsync values */
- drm_mode->vsync_start = drm_mode->vdisplay + CVT_MIN_V_PORCH;
- drm_mode->vsync_end = drm_mode->vsync_start + vsync;
- } else {
- /* Reduced blanking */
- /* Minimum vertical blanking interval time (µs)- default 460 */
-#define CVT_RB_MIN_VBLANK 460
- /* Fixed number of clocks for horizontal sync */
-#define CVT_RB_H_SYNC 32
- /* Fixed number of clocks for horizontal blanking */
-#define CVT_RB_H_BLANK 160
- /* Fixed number of lines for vertical front porch - default 3*/
-#define CVT_RB_VFPORCH 3
- int vbilines;
- int tmp1, tmp2;
- /* 8. Estimate Horizontal period. */
- tmp1 = HV_FACTOR * 1000000 -
- CVT_RB_MIN_VBLANK * HV_FACTOR * vfieldrate;
- tmp2 = vdisplay_rnd + 2 * vmargin;
- hperiod = tmp1 / (tmp2 * vfieldrate);
- /* 9. Find number of lines in vertical blanking */
- vbilines = CVT_RB_MIN_VBLANK * HV_FACTOR / hperiod + 1;
- /* 10. Check if vertical blanking is sufficient */
- if (vbilines < (CVT_RB_VFPORCH + vsync + CVT_MIN_V_BPORCH))
- vbilines = CVT_RB_VFPORCH + vsync + CVT_MIN_V_BPORCH;
- /* 11. Find total number of lines in vertical field */
- drm_mode->vtotal = vdisplay_rnd + 2 * vmargin + vbilines;
- /* 12. Find total number of pixels in a line */
- drm_mode->htotal = drm_mode->hdisplay + CVT_RB_H_BLANK;
- /* Fill in HSync values */
- drm_mode->hsync_end = drm_mode->hdisplay + CVT_RB_H_BLANK / 2;
- drm_mode->hsync_start = drm_mode->hsync_end - CVT_RB_H_SYNC;
- /* Fill in VSync values */
- drm_mode->vsync_start = drm_mode->vdisplay + CVT_RB_VFPORCH;
- drm_mode->vsync_end = drm_mode->vsync_start + vsync;
- }
- /* 15/13. Find pixel clock frequency (kHz for xf86) */
- drm_mode->clock = drm_mode->htotal * HV_FACTOR * 1000 / hperiod;
- drm_mode->clock -= drm_mode->clock % CVT_CLOCK_STEP;
- /* 18/16. Find actual vertical frame frequency */
- /* ignore - just set the mode flag for interlaced */
- if (interlaced) {
- drm_mode->vtotal *= 2;
- drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
- }
- /* Fill the mode line name */
- drm_mode_set_name(drm_mode);
- if (reduced)
- drm_mode->flags |= (DRM_MODE_FLAG_PHSYNC |
- DRM_MODE_FLAG_NVSYNC);
- else
- drm_mode->flags |= (DRM_MODE_FLAG_PVSYNC |
- DRM_MODE_FLAG_NHSYNC);
-
- return drm_mode;
-}
-EXPORT_SYMBOL(drm_cvt_mode);
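A hedged usage sketch: request a reduced-blanking 1920x1080@60 CVT mode, for example from a KMS connector's ->get_modes() hook, and add it to the probed mode list; the hook name is illustrative:

static int my_connector_get_modes(struct drm_connector *connector)
{
	struct drm_display_mode *mode;

	mode = drm_cvt_mode(connector->dev, 1920, 1080, 60,
			    true /* reduced blanking */, false, false);
	if (!mode)
		return 0;

	drm_mode_probed_add(connector, mode);
	return 1;
}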
-
-/**
- * drm_gtf_mode_complex - create the modeline based on full GTF algorithm
- *
- * @dev: drm device
- * @hdisplay: hdisplay size
- * @vdisplay: vdisplay size
- * @vrefresh: vrefresh rate
- * @interlaced: whether interlace is supported
- * @margins: desired margin size
- * @GTF_[MCKJ]: extended GTF formula parameters
- *
- * LOCKING:
- * none.
- *
- * Returns the modeline based on the full GTF algorithm.
- *
- * GTF feature blocks specify C and J in multiples of 0.5, so we pass them
- * in here multiplied by two. For a C of 40, pass in 80.
- */
-struct drm_display_mode *
-drm_gtf_mode_complex(struct drm_device *dev, int hdisplay, int vdisplay,
- int vrefresh, bool interlaced, int margins,
- int GTF_M, int GTF_2C, int GTF_K, int GTF_2J)
-{
-	/* 1) top/bottom margin size (% of height) - default: 1.8, */
-#define GTF_MARGIN_PERCENTAGE 18
- /* 2) character cell horizontal granularity (pixels) - default 8 */
-#define GTF_CELL_GRAN 8
- /* 3) Minimum vertical porch (lines) - default 3 */
-#define GTF_MIN_V_PORCH 1
- /* width of vsync in lines */
-#define V_SYNC_RQD 3
- /* width of hsync as % of total line */
-#define H_SYNC_PERCENT 8
- /* min time of vsync + back porch (microsec) */
-#define MIN_VSYNC_PLUS_BP 550
- /* C' and M' are part of the Blanking Duty Cycle computation */
-#define GTF_C_PRIME ((((GTF_2C - GTF_2J) * GTF_K / 256) + GTF_2J) / 2)
-#define GTF_M_PRIME (GTF_K * GTF_M / 256)
- struct drm_display_mode *drm_mode;
- unsigned int hdisplay_rnd, vdisplay_rnd, vfieldrate_rqd;
- int top_margin, bottom_margin;
- int interlace;
- unsigned int hfreq_est;
- int vsync_plus_bp, vback_porch;
- unsigned int vtotal_lines, vfieldrate_est, hperiod;
- unsigned int vfield_rate, vframe_rate;
- int left_margin, right_margin;
- unsigned int total_active_pixels, ideal_duty_cycle;
- unsigned int hblank, total_pixels, pixel_freq;
- int hsync, hfront_porch, vodd_front_porch_lines;
- unsigned int tmp1, tmp2;
-
- drm_mode = drm_mode_create(dev);
- if (!drm_mode)
- return NULL;
-
- /* 1. In order to give correct results, the number of horizontal
- * pixels requested is first processed to ensure that it is divisible
- * by the character size, by rounding it to the nearest character
- * cell boundary:
- */
- hdisplay_rnd = (hdisplay + GTF_CELL_GRAN / 2) / GTF_CELL_GRAN;
- hdisplay_rnd = hdisplay_rnd * GTF_CELL_GRAN;
-
- /* 2. If interlace is requested, the number of vertical lines assumed
- * by the calculation must be halved, as the computation calculates
- * the number of vertical lines per field.
- */
- if (interlaced)
- vdisplay_rnd = vdisplay / 2;
- else
- vdisplay_rnd = vdisplay;
-
- /* 3. Find the frame rate required: */
- if (interlaced)
- vfieldrate_rqd = vrefresh * 2;
- else
- vfieldrate_rqd = vrefresh;
-
- /* 4. Find number of lines in Top margin: */
- top_margin = 0;
- if (margins)
- top_margin = (vdisplay_rnd * GTF_MARGIN_PERCENTAGE + 500) /
- 1000;
- /* 5. Find number of lines in bottom margin: */
- bottom_margin = top_margin;
-
- /* 6. If interlace is required, then set variable interlace: */
- if (interlaced)
- interlace = 1;
- else
- interlace = 0;
-
- /* 7. Estimate the Horizontal frequency */
- {
- tmp1 = (1000000 - MIN_VSYNC_PLUS_BP * vfieldrate_rqd) / 500;
- tmp2 = (vdisplay_rnd + 2 * top_margin + GTF_MIN_V_PORCH) *
- 2 + interlace;
- hfreq_est = (tmp2 * 1000 * vfieldrate_rqd) / tmp1;
- }
-
- /* 8. Find the number of lines in V sync + back porch */
- /* [V SYNC+BP] = RINT(([MIN VSYNC+BP] * hfreq_est / 1000000)) */
- vsync_plus_bp = MIN_VSYNC_PLUS_BP * hfreq_est / 1000;
- vsync_plus_bp = (vsync_plus_bp + 500) / 1000;
- /* 9. Find the number of lines in V back porch alone: */
- vback_porch = vsync_plus_bp - V_SYNC_RQD;
- /* 10. Find the total number of lines in Vertical field period: */
- vtotal_lines = vdisplay_rnd + top_margin + bottom_margin +
- vsync_plus_bp + GTF_MIN_V_PORCH;
- /* 11. Estimate the Vertical field frequency: */
- vfieldrate_est = hfreq_est / vtotal_lines;
- /* 12. Find the actual horizontal period: */
- hperiod = 1000000 / (vfieldrate_rqd * vtotal_lines);
-
- /* 13. Find the actual Vertical field frequency: */
- vfield_rate = hfreq_est / vtotal_lines;
- /* 14. Find the Vertical frame frequency: */
- if (interlaced)
- vframe_rate = vfield_rate / 2;
- else
- vframe_rate = vfield_rate;
- /* 15. Find number of pixels in left margin: */
- if (margins)
- left_margin = (hdisplay_rnd * GTF_MARGIN_PERCENTAGE + 500) /
- 1000;
- else
- left_margin = 0;
-
- /* 16.Find number of pixels in right margin: */
- right_margin = left_margin;
- /* 17.Find total number of active pixels in image and left and right */
- total_active_pixels = hdisplay_rnd + left_margin + right_margin;
- /* 18.Find the ideal blanking duty cycle from blanking duty cycle */
- ideal_duty_cycle = GTF_C_PRIME * 1000 -
- (GTF_M_PRIME * 1000000 / hfreq_est);
- /* 19.Find the number of pixels in the blanking time to the nearest
- * double character cell: */
- hblank = total_active_pixels * ideal_duty_cycle /
- (100000 - ideal_duty_cycle);
- hblank = (hblank + GTF_CELL_GRAN) / (2 * GTF_CELL_GRAN);
- hblank = hblank * 2 * GTF_CELL_GRAN;
- /* 20.Find total number of pixels: */
- total_pixels = total_active_pixels + hblank;
- /* 21.Find pixel clock frequency: */
- pixel_freq = total_pixels * hfreq_est / 1000;
- /* Stage 1 computations are now complete; I should really pass
- * the results to another function and do the Stage 2 computations,
- * but I only need a few more values so I'll just append the
- * computations here for now */
- /* 17. Find the number of pixels in the horizontal sync period: */
- hsync = H_SYNC_PERCENT * total_pixels / 100;
- hsync = (hsync + GTF_CELL_GRAN / 2) / GTF_CELL_GRAN;
- hsync = hsync * GTF_CELL_GRAN;
- /* 18. Find the number of pixels in horizontal front porch period */
- hfront_porch = hblank / 2 - hsync;
- /* 36. Find the number of lines in the odd front porch period: */
- vodd_front_porch_lines = GTF_MIN_V_PORCH ;
-
- /* finally, pack the results in the mode struct */
- drm_mode->hdisplay = hdisplay_rnd;
- drm_mode->hsync_start = hdisplay_rnd + hfront_porch;
- drm_mode->hsync_end = drm_mode->hsync_start + hsync;
- drm_mode->htotal = total_pixels;
- drm_mode->vdisplay = vdisplay_rnd;
- drm_mode->vsync_start = vdisplay_rnd + vodd_front_porch_lines;
- drm_mode->vsync_end = drm_mode->vsync_start + V_SYNC_RQD;
- drm_mode->vtotal = vtotal_lines;
-
- drm_mode->clock = pixel_freq;
-
- if (interlaced) {
- drm_mode->vtotal *= 2;
- drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
- }
-
- drm_mode_set_name(drm_mode);
- if (GTF_M == 600 && GTF_2C == 80 && GTF_K == 128 && GTF_2J == 40)
- drm_mode->flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC;
- else
- drm_mode->flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC;
-
- return drm_mode;
-}
-EXPORT_SYMBOL(drm_gtf_mode_complex);
-
-/**
- * drm_gtf_mode - create the modeline based on GTF algorithm
- *
- * @dev: drm device
- * @hdisplay: hdisplay size
- * @vdisplay: vdisplay size
- * @vrefresh: vrefresh rate
- * @interlaced: whether interlace is supported
- * @margins: whether margins are requested
- *
- * LOCKING:
- * none.
- *
- * Returns the modeline based on the GTF algorithm.
- *
- * This function creates a modeline based on the GTF algorithm.
- * The Generalized Timing Formula is derived from:
- * GTF Spreadsheet by Andy Morrish (1/5/97)
- * available at http://www.vesa.org
- *
- * It is adapted from xserver/hw/xfree86/modes/xf86gtf.c, translated to use
- * integer calculation, and also draws on fb_get_mode in
- * drivers/video/fbmon.c.
- *
- * Standard GTF parameters:
- * M = 600
- * C = 40
- * K = 128
- * J = 20
- */
-struct drm_display_mode *
-drm_gtf_mode(struct drm_device *dev, int hdisplay, int vdisplay, int vrefresh,
- bool lace, int margins)
-{
- return drm_gtf_mode_complex(dev, hdisplay, vdisplay, vrefresh, lace,
- margins, 600, 40 * 2, 128, 20 * 2);
-}
-EXPORT_SYMBOL(drm_gtf_mode);
-
-/**
- * drm_mode_set_name - set the name on a mode
- * @mode: name will be set in this mode
- *
- * LOCKING:
- * None.
- *
- * Set the name of @mode to a standard format.
- */
-void drm_mode_set_name(struct drm_display_mode *mode)
-{
- bool interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
-
- snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d%s",
- mode->hdisplay, mode->vdisplay,
- interlaced ? "i" : "");
-}
-EXPORT_SYMBOL(drm_mode_set_name);
-
-/**
- * drm_mode_list_concat - move modes from one list to another
- * @head: source list
- * @new: dst list
- *
- * LOCKING:
- * Caller must ensure both lists are locked.
- *
- * Move all the modes from @head to @new.
- */
-void drm_mode_list_concat(struct list_head *head, struct list_head *new)
-{
-
- struct list_head *entry, *tmp;
-
- list_for_each_safe(entry, tmp, head) {
- list_move_tail(entry, new);
- }
-}
-EXPORT_SYMBOL(drm_mode_list_concat);
-
-/**
- * drm_mode_width - get the width of a mode
- * @mode: mode
- *
- * LOCKING:
- * None.
- *
- * Return @mode's width (hdisplay) value.
- *
- * FIXME: is this needed?
- *
- * RETURNS:
- * @mode->hdisplay
- */
-int drm_mode_width(struct drm_display_mode *mode)
-{
- return mode->hdisplay;
-
-}
-EXPORT_SYMBOL(drm_mode_width);
-
-/**
- * drm_mode_height - get the height of a mode
- * @mode: mode
- *
- * LOCKING:
- * None.
- *
- * Return @mode's height (vdisplay) value.
- *
- * FIXME: is this needed?
- *
- * RETURNS:
- * @mode->vdisplay
- */
-int drm_mode_height(struct drm_display_mode *mode)
-{
- return mode->vdisplay;
-}
-EXPORT_SYMBOL(drm_mode_height);
-
-/**
- * drm_mode_hsync - get the hsync of a mode
- * @mode: mode
- *
- * LOCKING:
- * None.
- *
- * Return @mode's hsync rate in kHz, rounded to the nearest integer.
- */
-int drm_mode_hsync(struct drm_display_mode *mode)
-{
- unsigned int calc_val;
-
- if (mode->hsync)
- return mode->hsync;
-
-	if (mode->htotal <= 0)	/* avoid division by zero below */
- return 0;
-
- calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */
- calc_val += 500; /* round to 1000Hz */
- calc_val /= 1000; /* truncate to kHz */
-
- return calc_val;
-}
-EXPORT_SYMBOL(drm_mode_hsync);
-
-/**
- * drm_mode_vrefresh - get the vrefresh of a mode
- * @mode: mode
- *
- * LOCKING:
- * None.
- *
- * Return @mode's vrefresh rate in Hz or calculate it if necessary.
- *
- * FIXME: why is this needed? shouldn't vrefresh be set already?
- *
- * RETURNS:
- * Vertical refresh rate: the actual value rounded to the nearest integer.
- * If it is 70.288, it will return 70Hz.
- * If it is 59.6, it will return 60Hz.
- */
-int drm_mode_vrefresh(struct drm_display_mode *mode)
-{
- int refresh = 0;
- unsigned int calc_val;
-
- if (mode->vrefresh > 0)
- refresh = mode->vrefresh;
- else if (mode->htotal > 0 && mode->vtotal > 0) {
- int vtotal;
- vtotal = mode->vtotal;
-		/* work out vrefresh; the value will be x1000 */
- calc_val = (mode->clock * 1000);
- calc_val /= mode->htotal;
- refresh = (calc_val + vtotal / 2) / vtotal;
-
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- refresh *= 2;
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
- refresh /= 2;
- if (mode->vscan > 1)
- refresh /= mode->vscan;
- }
- return refresh;
-}
-EXPORT_SYMBOL(drm_mode_vrefresh);
-
-/**
- * drm_mode_set_crtcinfo - set CRTC modesetting parameters
- * @p: mode
- * @adjust_flags: adjustment flags (e.g. CRTC_INTERLACE_HALVE_V)
- *
- * LOCKING:
- * None.
- *
- * Setup the CRTC modesetting parameters for @p, adjusting if necessary.
- */
-void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
-{
- if ((p == NULL) || ((p->type & DRM_MODE_TYPE_CRTC_C) == DRM_MODE_TYPE_BUILTIN))
- return;
-
- p->crtc_hdisplay = p->hdisplay;
- p->crtc_hsync_start = p->hsync_start;
- p->crtc_hsync_end = p->hsync_end;
- p->crtc_htotal = p->htotal;
- p->crtc_hskew = p->hskew;
- p->crtc_vdisplay = p->vdisplay;
- p->crtc_vsync_start = p->vsync_start;
- p->crtc_vsync_end = p->vsync_end;
- p->crtc_vtotal = p->vtotal;
-
- if (p->flags & DRM_MODE_FLAG_INTERLACE) {
- if (adjust_flags & CRTC_INTERLACE_HALVE_V) {
- p->crtc_vdisplay /= 2;
- p->crtc_vsync_start /= 2;
- p->crtc_vsync_end /= 2;
- p->crtc_vtotal /= 2;
- }
-
- p->crtc_vtotal |= 1;
- }
-
- if (p->flags & DRM_MODE_FLAG_DBLSCAN) {
- p->crtc_vdisplay *= 2;
- p->crtc_vsync_start *= 2;
- p->crtc_vsync_end *= 2;
- p->crtc_vtotal *= 2;
- }
-
- if (p->vscan > 1) {
- p->crtc_vdisplay *= p->vscan;
- p->crtc_vsync_start *= p->vscan;
- p->crtc_vsync_end *= p->vscan;
- p->crtc_vtotal *= p->vscan;
- }
-
- p->crtc_vblank_start = min(p->crtc_vsync_start, p->crtc_vdisplay);
- p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal);
- p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay);
- p->crtc_hblank_end = max(p->crtc_hsync_end, p->crtc_htotal);
-
- p->crtc_hadjusted = false;
- p->crtc_vadjusted = false;
-}
-EXPORT_SYMBOL(drm_mode_set_crtcinfo);
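
To make the interlace adjustment concrete, a worked example using the standard 1080i vertical timing (vdisplay 1080, vsync_start 1084, vsync_end 1094, vtotal 1125): with CRTC_INTERLACE_HALVE_V set, the vertical CRTC values are halved to 540, 542, 547 and 562, and crtc_vtotal is then forced odd to 563; the blanking range becomes crtc_vblank_start = min(542, 540) = 540 and crtc_vblank_end = max(547, 563) = 563.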
-
-
-/**
- * drm_mode_duplicate - allocate and duplicate an existing mode
- * @dev: device the new mode is allocated on
- * @mode: mode to duplicate
- *
- * LOCKING:
- * None.
- *
- * Just allocate a new mode, copy the existing mode into it, and return
- * a pointer to it. Used to create new instances of established modes.
- */
-struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
- struct drm_display_mode *mode)
-{
- struct drm_display_mode *nmode;
- int new_id;
-
- nmode = drm_mode_create(dev);
- if (!nmode)
- return NULL;
-
- new_id = nmode->base.id;
- *nmode = *mode;
- nmode->base.id = new_id;
- INIT_LIST_HEAD(&nmode->head);
- return nmode;
-}
-EXPORT_SYMBOL(drm_mode_duplicate);
-
-/**
- * drm_mode_equal - test modes for equality
- * @mode1: first mode
- * @mode2: second mode
- *
- * LOCKING:
- * None.
- *
- * Check to see if @mode1 and @mode2 are equivalent.
- *
- * RETURNS:
- * True if the modes are equal, false otherwise.
- */
-bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2)
-{
-	/* Do the clock check by converting to PICOS so that fb modes get
-	 * matched the same way. */
- if (mode1->clock && mode2->clock) {
- if (KHZ2PICOS(mode1->clock) != KHZ2PICOS(mode2->clock))
- return false;
- } else if (mode1->clock != mode2->clock)
- return false;
-
- if (mode1->hdisplay == mode2->hdisplay &&
- mode1->hsync_start == mode2->hsync_start &&
- mode1->hsync_end == mode2->hsync_end &&
- mode1->htotal == mode2->htotal &&
- mode1->hskew == mode2->hskew &&
- mode1->vdisplay == mode2->vdisplay &&
- mode1->vsync_start == mode2->vsync_start &&
- mode1->vsync_end == mode2->vsync_end &&
- mode1->vtotal == mode2->vtotal &&
- mode1->vscan == mode2->vscan &&
- mode1->flags == mode2->flags)
- return true;
-
- return false;
-}
-EXPORT_SYMBOL(drm_mode_equal);
-
-/**
- * drm_mode_validate_size - make sure modes adhere to size constraints
- * @dev: DRM device
- * @mode_list: list of modes to check
- * @maxX: maximum width
- * @maxY: maximum height
- * @maxPitch: max pitch
- *
- * LOCKING:
- * Caller must hold a lock protecting @mode_list.
- *
- * The DRM device (@dev) has size and pitch limits. Here we validate the
- * modes we probed for @dev against those limits and set their status as
- * necessary.
- */
-void drm_mode_validate_size(struct drm_device *dev,
- struct list_head *mode_list,
- int maxX, int maxY, int maxPitch)
-{
- struct drm_display_mode *mode;
-
- list_for_each_entry(mode, mode_list, head) {
- if (maxPitch > 0 && mode->hdisplay > maxPitch)
- mode->status = MODE_BAD_WIDTH;
-
- if (maxX > 0 && mode->hdisplay > maxX)
- mode->status = MODE_VIRTUAL_X;
-
- if (maxY > 0 && mode->vdisplay > maxY)
- mode->status = MODE_VIRTUAL_Y;
- }
-}
-EXPORT_SYMBOL(drm_mode_validate_size);
-
-/**
- * drm_mode_validate_clocks - validate modes against clock limits
- * @dev: DRM device
- * @mode_list: list of modes to check
- * @min: minimum clock rate array
- * @max: maximum clock rate array
- * @n_ranges: number of clock ranges (size of arrays)
- *
- * LOCKING:
- * Caller must hold a lock protecting @mode_list.
- *
- * Some code may need to check a mode list against the clock limits of the
- * device in question. This function walks the mode list, testing to make
- * sure each mode falls within a given range (defined by @min and @max
- * arrays) and sets @mode->status as needed.
- */
-void drm_mode_validate_clocks(struct drm_device *dev,
- struct list_head *mode_list,
- int *min, int *max, int n_ranges)
-{
- struct drm_display_mode *mode;
- int i;
-
- list_for_each_entry(mode, mode_list, head) {
- bool good = false;
- for (i = 0; i < n_ranges; i++) {
- if (mode->clock >= min[i] && mode->clock <= max[i]) {
- good = true;
- break;
- }
- }
- if (!good)
- mode->status = MODE_CLOCK_RANGE;
- }
-}
-EXPORT_SYMBOL(drm_mode_validate_clocks);
-
-/**
- * drm_mode_prune_invalid - remove invalid modes from mode list
- * @dev: DRM device
- * @mode_list: list of modes to check
- * @verbose: be verbose about it
- *
- * LOCKING:
- * Caller must hold a lock protecting @mode_list.
- *
- * Once mode list generation is complete, a caller can use this routine to
- * remove invalid modes from a mode list. If any of the modes have a
- * status other than %MODE_OK, they are removed from @mode_list and freed.
- */
-void drm_mode_prune_invalid(struct drm_device *dev,
- struct list_head *mode_list, bool verbose)
-{
- struct drm_display_mode *mode, *t;
-
- list_for_each_entry_safe(mode, t, mode_list, head) {
- if (mode->status != MODE_OK) {
- list_del(&mode->head);
- if (verbose) {
- drm_mode_debug_printmodeline(mode);
- DRM_DEBUG_KMS("Not using %s mode %d\n",
- mode->name, mode->status);
- }
- drm_mode_destroy(dev, mode);
- }
- }
-}
-EXPORT_SYMBOL(drm_mode_prune_invalid);
-
-/**
- * drm_mode_compare - compare modes for favorability
- * @priv: unused
- * @lh_a: list_head for first mode
- * @lh_b: list_head for second mode
- *
- * LOCKING:
- * None.
- *
- * Compare two modes, given by @lh_a and @lh_b, returning a value indicating
- * which is better.
- *
- * RETURNS:
- * Negative if @lh_a is better than @lh_b, zero if they're equivalent, or
- * positive if @lh_b is better than @lh_a.
- */
-static int drm_mode_compare(void *priv, struct list_head *lh_a, struct list_head *lh_b)
-{
- struct drm_display_mode *a = list_entry(lh_a, struct drm_display_mode, head);
- struct drm_display_mode *b = list_entry(lh_b, struct drm_display_mode, head);
- int diff;
-
- diff = ((b->type & DRM_MODE_TYPE_PREFERRED) != 0) -
- ((a->type & DRM_MODE_TYPE_PREFERRED) != 0);
- if (diff)
- return diff;
- diff = b->hdisplay * b->vdisplay - a->hdisplay * a->vdisplay;
- if (diff)
- return diff;
- diff = b->clock - a->clock;
- return diff;
-}
-
-/**
- * drm_mode_sort - sort mode list
- * @mode_list: list to sort
- *
- * LOCKING:
- * Caller must hold a lock protecting @mode_list.
- *
- * Sort @mode_list by favorability, putting good modes first.
- */
-void drm_mode_sort(struct list_head *mode_list)
-{
- list_sort(NULL, mode_list, drm_mode_compare);
-}
-EXPORT_SYMBOL(drm_mode_sort);
-
-/**
- * drm_mode_connector_list_update - update the mode list for the connector
- * @connector: the connector to update
- *
- * LOCKING:
- * Caller must hold a lock protecting @mode_list.
- *
- * This moves the modes from the @connector probed_modes list
- * to the actual mode list. It compares the probed mode against the current
- * list and only adds different modes. Any mode left unverified after this
- * point will be removed by a later call to drm_mode_prune_invalid().
- */
-void drm_mode_connector_list_update(struct drm_connector *connector)
-{
- struct drm_display_mode *mode;
- struct drm_display_mode *pmode, *pt;
- int found_it;
-
- list_for_each_entry_safe(pmode, pt, &connector->probed_modes,
- head) {
- found_it = 0;
- /* go through current modes checking for the new probed mode */
- list_for_each_entry(mode, &connector->modes, head) {
- if (drm_mode_equal(pmode, mode)) {
- found_it = 1;
- /* if equal delete the probed mode */
- mode->status = pmode->status;
- /* Merge type bits together */
- mode->type |= pmode->type;
- list_del(&pmode->head);
- drm_mode_destroy(connector->dev, pmode);
- break;
- }
- }
-
- if (!found_it) {
- list_move_tail(&pmode->head, &connector->modes);
- }
- }
-}
-EXPORT_SYMBOL(drm_mode_connector_list_update);
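
The helpers above are typically strung together by a connector probe path. The following is only an illustrative sketch (the function name and the max_x/max_y parameters are invented for the example), showing the intended ordering: merge probed modes, validate, prune, then sort.

static void example_probe_connector_modes(struct drm_device *dev,
					  struct drm_connector *connector,
					  int max_x, int max_y)
{
	/* Freshly probed modes are expected on connector->probed_modes. */
	drm_mode_connector_list_update(connector);
	drm_mode_validate_size(dev, &connector->modes, max_x, max_y, 0);
	drm_mode_prune_invalid(dev, &connector->modes, true);
	drm_mode_sort(&connector->modes);
}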
+++ /dev/null
-/* drm_pci.c -- PCI DMA memory management wrappers for DRM -*- linux-c -*- */
-/**
- * \file drm_pci.c
- * \brief Functions and ioctls to manage PCI memory
- *
- * \warning These interfaces aren't stable yet.
- *
- * \todo Implement the remaining ioctls for the PCI pools.
- * \todo The wrappers here are so thin that they would be better off inlined.
- *
- * \author José Fonseca <jrfonseca@tungstengraphics.com>
- * \author Leif Delgass <ldelgass@retinalburn.net>
- */
-
-/*
- * Copyright 2003 José Fonseca.
- * Copyright 2003 Leif Delgass.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/pci.h>
-#include <linux/slab.h>
-#include <linux/dma-mapping.h>
-#include "drmP.h"
-
-/**********************************************************************/
-/** \name PCI memory */
-/*@{*/
-
-/**
- * \brief Allocate a PCI consistent memory block, for DMA.
- */
-drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align)
-{
- drm_dma_handle_t *dmah;
-#if 1
- unsigned long addr;
- size_t sz;
-#endif
-
- /* pci_alloc_consistent only guarantees alignment to the smallest
- * PAGE_SIZE order which is greater than or equal to the requested size.
- * Return NULL here for now to make sure nobody tries for larger alignment
- */
- if (align > size)
- return NULL;
-
- dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
- if (!dmah)
- return NULL;
-
- dmah->size = size;
- dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, &dmah->busaddr, GFP_KERNEL | __GFP_COMP);
-
- if (dmah->vaddr == NULL) {
- kfree(dmah);
- return NULL;
- }
-
- memset(dmah->vaddr, 0, size);
-
- /* XXX - Is virt_to_page() legal for consistent mem? */
- /* Reserve */
- for (addr = (unsigned long)dmah->vaddr, sz = size;
- sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
- SetPageReserved(virt_to_page(addr));
- }
-
- return dmah;
-}
-
-EXPORT_SYMBOL(drm_pci_alloc);
-
-/**
- * \brief Free a PCI consistent memory block without freeing its descriptor.
- *
- * This function is for internal use in the Linux-specific DRM core code.
- */
-void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
-{
-#if 1
- unsigned long addr;
- size_t sz;
-#endif
-
- if (dmah->vaddr) {
- /* XXX - Is virt_to_page() legal for consistent mem? */
- /* Unreserve */
- for (addr = (unsigned long)dmah->vaddr, sz = dmah->size;
- sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
- ClearPageReserved(virt_to_page(addr));
- }
- dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
- dmah->busaddr);
- }
-}
-
-/**
- * \brief Free a PCI consistent memory block
- */
-void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
-{
- __drm_pci_free(dev, dmah);
- kfree(dmah);
-}
-
-EXPORT_SYMBOL(drm_pci_free);
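
A hedged usage sketch for the pair above: allocate a page of DMA-coherent memory (for instance for a ring buffer) and release it again. The function name is illustrative and error handling is trimmed.

static int example_alloc_dma_page(struct drm_device *dev)
{
	drm_dma_handle_t *dmah;

	dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
	if (!dmah)
		return -ENOMEM;

	/* ... program dmah->busaddr into the device, access it via dmah->vaddr ... */

	drm_pci_free(dev, dmah);
	return 0;
}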
-
-#ifdef CONFIG_PCI
-/**
- * Register.
- *
- * \param pdev - PCI device structure
- * \param ent entry from the PCI ID table with device type flags
- * \return zero on success or a negative number on failure.
- *
- * Attempts to get inter-module "drm" information. If we are first,
- * registers the character device and inter-module information.
- * If registration fails, backs out the previous work.
- */
-int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
- struct drm_driver *driver)
-{
- struct drm_device *dev;
- int ret;
-
- DRM_DEBUG("\n");
-
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev)
- return -ENOMEM;
-
- ret = pci_enable_device(pdev);
- if (ret)
- goto err_g1;
-
- pci_set_master(pdev);
-
- dev->pdev = pdev;
- dev->dev = &pdev->dev;
-
- dev->pci_device = pdev->device;
- dev->pci_vendor = pdev->vendor;
-
-#ifdef __alpha__
- dev->hose = pdev->sysdata;
-#endif
-
- mutex_lock(&drm_global_mutex);
-
- if ((ret = drm_fill_in_dev(dev, ent, driver))) {
- printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
- goto err_g2;
- }
-
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- pci_set_drvdata(pdev, dev);
- ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
- if (ret)
- goto err_g2;
- }
-
- if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY)))
- goto err_g3;
-
- if (dev->driver->load) {
- ret = dev->driver->load(dev, ent->driver_data);
- if (ret)
- goto err_g4;
- }
-
- /* setup the grouping for the legacy output */
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = drm_mode_group_init_legacy_group(dev,
- &dev->primary->mode_group);
- if (ret)
- goto err_g4;
- }
-
- list_add_tail(&dev->driver_item, &driver->device_list);
-
- DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
- driver->name, driver->major, driver->minor, driver->patchlevel,
- driver->date, pci_name(pdev), dev->primary->index);
-
- mutex_unlock(&drm_global_mutex);
- return 0;
-
-err_g4:
- drm_put_minor(&dev->primary);
-err_g3:
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- drm_put_minor(&dev->control);
-err_g2:
- pci_disable_device(pdev);
-err_g1:
- kfree(dev);
- mutex_unlock(&drm_global_mutex);
- return ret;
-}
-EXPORT_SYMBOL(drm_get_pci_dev);
-
-/**
- * PCI device initialization. Called via drm_init at module load time.
- *
- * \return zero on success or a negative number on failure.
- *
- * Initializes a drm_device structure, registering the
- * stubs and initializing the AGP device.
- *
- * Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
- * after the initialization for driver customization.
- */
-int drm_pci_init(struct drm_driver *driver)
-{
- struct pci_dev *pdev = NULL;
- const struct pci_device_id *pid;
- int i;
-
- if (driver->driver_features & DRIVER_MODESET)
- return pci_register_driver(&driver->pci_driver);
-
- /* If not using KMS, fall back to stealth mode manual scanning. */
- for (i = 0; driver->pci_driver.id_table[i].vendor != 0; i++) {
- pid = &driver->pci_driver.id_table[i];
-
- /* Loop around setting up a DRM device for each PCI device
- * matching our ID and device class. If we had the internal
- * function that pci_get_subsys and pci_get_class used, we'd
- * be able to just pass pid in instead of doing a two-stage
- * thing.
- */
- pdev = NULL;
- while ((pdev =
- pci_get_subsys(pid->vendor, pid->device, pid->subvendor,
- pid->subdevice, pdev)) != NULL) {
- if ((pdev->class & pid->class_mask) != pid->class)
- continue;
-
- /* stealth mode requires a manual probe */
- pci_dev_get(pdev);
- drm_get_pci_dev(pdev, pid, driver);
- }
- }
- return 0;
-}
-
-#else
-
-int drm_pci_init(struct drm_driver *driver)
-{
- return -1;
-}
-
-#endif
-/*@}*/
+++ /dev/null
-/*
- * Derived from drm_pci.c
- *
- * Copyright 2003 José Fonseca.
- * Copyright 2003 Leif Delgass.
- * Copyright (c) 2009, Code Aurora Forum.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "drmP.h"
-
-/**
- * Register.
- *
- * \param platdev - Platform device structure
- * \return zero on success or a negative number on failure.
- *
- * Attempts to get inter-module "drm" information. If we are first,
- * registers the character device and inter-module information.
- * If registration fails, backs out the previous work.
- */
-
-int drm_get_platform_dev(struct platform_device *platdev,
- struct drm_driver *driver)
-{
- struct drm_device *dev;
- int ret;
-
- DRM_DEBUG("\n");
-
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev)
- return -ENOMEM;
-
- dev->platformdev = platdev;
- dev->dev = &platdev->dev;
-
- mutex_lock(&drm_global_mutex);
-
- ret = drm_fill_in_dev(dev, NULL, driver);
-
- if (ret) {
- printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
- goto err_g1;
- }
-
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- dev_set_drvdata(&platdev->dev, dev);
- ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
- if (ret)
- goto err_g1;
- }
-
- ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
- if (ret)
- goto err_g2;
-
- if (dev->driver->load) {
- ret = dev->driver->load(dev, 0);
- if (ret)
- goto err_g3;
- }
-
- /* setup the grouping for the legacy output */
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = drm_mode_group_init_legacy_group(dev,
- &dev->primary->mode_group);
- if (ret)
- goto err_g3;
- }
-
- list_add_tail(&dev->driver_item, &driver->device_list);
-
- mutex_unlock(&drm_global_mutex);
-
- DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
- driver->name, driver->major, driver->minor, driver->patchlevel,
- driver->date, dev->primary->index);
-
- return 0;
-
-err_g3:
- drm_put_minor(&dev->primary);
-err_g2:
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- drm_put_minor(&dev->control);
-err_g1:
- kfree(dev);
- mutex_unlock(&drm_global_mutex);
- return ret;
-}
-EXPORT_SYMBOL(drm_get_platform_dev);
-
-/**
- * Platform device initialization. Called via drm_init at module load time.
- *
- * \return zero on success or a negative number on failure.
- *
- * Initializes a drm_device structure, registering the
- * stubs.
- *
- * Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
- * after the initialization for driver customization.
- */
-
-int drm_platform_init(struct drm_driver *driver)
-{
- return drm_get_platform_dev(driver->platform_device, driver);
-}
+++ /dev/null
-/**
- * \file drm_proc.c
- * /proc support for DRM
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- *
- * \par Acknowledgements:
- * Matthew J Sottek <matthew.j.sottek@intel.com> sent in a patch to fix
- * the problem with the proc files not outputting all their information.
- */
-
-/*
- * Created: Mon Jan 11 09:48:47 1999 by faith@valinux.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/seq_file.h>
-#include <linux/slab.h>
-#include "drmP.h"
-
-/***************************************************
- * Initialization, etc.
- **************************************************/
-
-/**
- * Proc file list.
- */
-static struct drm_info_list drm_proc_list[] = {
- {"name", drm_name_info, 0},
- {"vm", drm_vm_info, 0},
- {"clients", drm_clients_info, 0},
- {"queues", drm_queues_info, 0},
- {"bufs", drm_bufs_info, 0},
- {"gem_names", drm_gem_name_info, DRIVER_GEM},
-#if DRM_DEBUG_CODE
- {"vma", drm_vma_info, 0},
-#endif
-};
-#define DRM_PROC_ENTRIES ARRAY_SIZE(drm_proc_list)
-
-static int drm_proc_open(struct inode *inode, struct file *file)
-{
- struct drm_info_node* node = PDE(inode)->data;
-
- return single_open(file, node->info_ent->show, node);
-}
-
-static const struct file_operations drm_proc_fops = {
- .owner = THIS_MODULE,
- .open = drm_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-
-/**
- * Initialize a given set of proc files for a device
- *
- * \param files The array of files to create
- * \param count The number of files given
- * \param root DRI proc dir entry.
- * \param minor device minor number
- * \return Zero on success, non-zero on failure
- *
- * Create a given set of proc files represented by an array of
- * drm_info_list entries in the given root directory.
- */
-int drm_proc_create_files(struct drm_info_list *files, int count,
- struct proc_dir_entry *root, struct drm_minor *minor)
-{
- struct drm_device *dev = minor->dev;
- struct proc_dir_entry *ent;
- struct drm_info_node *tmp;
- char name[64];
- int i, ret;
-
- for (i = 0; i < count; i++) {
- u32 features = files[i].driver_features;
-
- if (features != 0 &&
- (dev->driver->driver_features & features) != features)
- continue;
-
- tmp = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
- if (tmp == NULL) {
- ret = -1;
- goto fail;
- }
- tmp->minor = minor;
- tmp->info_ent = &files[i];
- list_add(&tmp->list, &minor->proc_nodes.list);
-
- ent = proc_create_data(files[i].name, S_IRUGO, root,
- &drm_proc_fops, tmp);
- if (!ent) {
- DRM_ERROR("Cannot create /proc/dri/%s/%s\n",
- name, files[i].name);
- list_del(&tmp->list);
- kfree(tmp);
- ret = -1;
- goto fail;
- }
-
- }
- return 0;
-
-fail:
- for (i = 0; i < count; i++)
- remove_proc_entry(drm_proc_list[i].name, minor->proc_root);
- return ret;
-}
-
-/**
- * Initialize the DRI proc filesystem for a device
- *
- * \param minor device minor being initialized
- * \param minor_id minor number of the device
- * \param root DRI proc dir entry.
- * \return zero on success, or a negative number on failure.
- *
- * Create the DRI proc root entry "/proc/dri", the device proc root entry
- * "/proc/dri/%minor%/", and each entry in proc_list as
- * "/proc/dri/%minor%/%name%".
- */
-int drm_proc_init(struct drm_minor *minor, int minor_id,
- struct proc_dir_entry *root)
-{
- char name[64];
- int ret;
-
- INIT_LIST_HEAD(&minor->proc_nodes.list);
- sprintf(name, "%d", minor_id);
- minor->proc_root = proc_mkdir(name, root);
- if (!minor->proc_root) {
- DRM_ERROR("Cannot create /proc/dri/%s\n", name);
- return -1;
- }
-
- ret = drm_proc_create_files(drm_proc_list, DRM_PROC_ENTRIES,
- minor->proc_root, minor);
- if (ret) {
- remove_proc_entry(name, root);
- minor->proc_root = NULL;
- DRM_ERROR("Failed to create core drm proc files\n");
- return ret;
- }
-
- return 0;
-}
-
-int drm_proc_remove_files(struct drm_info_list *files, int count,
- struct drm_minor *minor)
-{
- struct list_head *pos, *q;
- struct drm_info_node *tmp;
- int i;
-
- for (i = 0; i < count; i++) {
- list_for_each_safe(pos, q, &minor->proc_nodes.list) {
- tmp = list_entry(pos, struct drm_info_node, list);
- if (tmp->info_ent == &files[i]) {
- remove_proc_entry(files[i].name,
- minor->proc_root);
- list_del(pos);
- kfree(tmp);
- }
- }
- }
- return 0;
-}
-
-/**
- * Cleanup the proc filesystem resources.
- *
- * \param minor device minor being cleaned up.
- * \param root DRI proc dir entry.
- * \return always zero.
- *
- * Remove all proc entries created by proc_init().
- */
-int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root)
-{
- char name[64];
-
- if (!root || !minor->proc_root)
- return 0;
-
- drm_proc_remove_files(drm_proc_list, DRM_PROC_ENTRIES, minor);
-
- sprintf(name, "%d", minor->index);
- remove_proc_entry(name, root);
-
- return 0;
-}
-
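
For reference, a driver can layer its own entries on top of the core list through the same interface. This is only a sketch; example_info, example_proc_list and the register/unregister wrappers are invented names.

static int example_info(struct seq_file *m, void *data);	/* driver-provided show() callback */

static struct drm_info_list example_proc_list[] = {
	{"example", example_info, 0},
};

static int example_proc_register(struct drm_minor *minor)
{
	return drm_proc_create_files(example_proc_list,
				     ARRAY_SIZE(example_proc_list),
				     minor->proc_root, minor);
}

static void example_proc_unregister(struct drm_minor *minor)
{
	drm_proc_remove_files(example_proc_list,
			      ARRAY_SIZE(example_proc_list), minor);
}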
+++ /dev/null
-/**
- * \file drm_scatter.c
- * IOCTLs to manage scatter/gather memory
- *
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Mon Dec 18 23:20:54 2000 by gareth@valinux.com
- *
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
-#include "drmP.h"
-
-#define DEBUG_SCATTER 0
-
-static inline void *drm_vmalloc_dma(unsigned long size)
-{
-#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
- return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL | _PAGE_NO_CACHE);
-#else
- return vmalloc_32(size);
-#endif
-}
-
-void drm_sg_cleanup(struct drm_sg_mem * entry)
-{
- struct page *page;
- int i;
-
- for (i = 0; i < entry->pages; i++) {
- page = entry->pagelist[i];
- if (page)
- ClearPageReserved(page);
- }
-
- vfree(entry->virtual);
-
- kfree(entry->busaddr);
- kfree(entry->pagelist);
- kfree(entry);
-}
-
-#ifdef _LP64
-# define ScatterHandle(x) (unsigned int)((x >> 32) + (x & ((1L << 32) - 1)))
-#else
-# define ScatterHandle(x) (unsigned int)(x)
-#endif
-
-int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
-{
- struct drm_sg_mem *entry;
- unsigned long pages, i, j;
-
- DRM_DEBUG("\n");
-
- if (!drm_core_check_feature(dev, DRIVER_SG))
- return -EINVAL;
-
- if (dev->sg)
- return -EINVAL;
-
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- return -ENOMEM;
-
- memset(entry, 0, sizeof(*entry));
- pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
- DRM_DEBUG("size=%ld pages=%ld\n", request->size, pages);
-
- entry->pages = pages;
- entry->pagelist = kmalloc(pages * sizeof(*entry->pagelist), GFP_KERNEL);
- if (!entry->pagelist) {
- kfree(entry);
- return -ENOMEM;
- }
-
- memset(entry->pagelist, 0, pages * sizeof(*entry->pagelist));
-
- entry->busaddr = kmalloc(pages * sizeof(*entry->busaddr), GFP_KERNEL);
- if (!entry->busaddr) {
- kfree(entry->pagelist);
- kfree(entry);
- return -ENOMEM;
- }
- memset((void *)entry->busaddr, 0, pages * sizeof(*entry->busaddr));
-
- entry->virtual = drm_vmalloc_dma(pages << PAGE_SHIFT);
- if (!entry->virtual) {
- kfree(entry->busaddr);
- kfree(entry->pagelist);
- kfree(entry);
- return -ENOMEM;
- }
-
- /* This also forces the mapping of COW pages, so our page list
- * will be valid. Please don't remove it...
- */
- memset(entry->virtual, 0, pages << PAGE_SHIFT);
-
- entry->handle = ScatterHandle((unsigned long)entry->virtual);
-
- DRM_DEBUG("handle = %08lx\n", entry->handle);
- DRM_DEBUG("virtual = %p\n", entry->virtual);
-
- for (i = (unsigned long)entry->virtual, j = 0; j < pages;
- i += PAGE_SIZE, j++) {
- entry->pagelist[j] = vmalloc_to_page((void *)i);
- if (!entry->pagelist[j])
- goto failed;
- SetPageReserved(entry->pagelist[j]);
- }
-
- request->handle = entry->handle;
-
- dev->sg = entry;
-
-#if DEBUG_SCATTER
- /* Verify that each page points to its virtual address, and vice
- * versa.
- */
- {
- int error = 0;
-
- for (i = 0; i < pages; i++) {
- unsigned long *tmp;
-
- tmp = page_address(entry->pagelist[i]);
- for (j = 0;
- j < PAGE_SIZE / sizeof(unsigned long);
- j++, tmp++) {
- *tmp = 0xcafebabe;
- }
- tmp = (unsigned long *)((u8 *) entry->virtual +
- (PAGE_SIZE * i));
- for (j = 0;
- j < PAGE_SIZE / sizeof(unsigned long);
- j++, tmp++) {
- if (*tmp != 0xcafebabe && error == 0) {
- error = 1;
- DRM_ERROR("Scatter allocation error, "
- "pagelist does not match "
- "virtual mapping\n");
- }
- }
- tmp = page_address(entry->pagelist[i]);
- for (j = 0;
- j < PAGE_SIZE / sizeof(unsigned long);
- j++, tmp++) {
- *tmp = 0;
- }
- }
- if (error == 0)
- DRM_ERROR("Scatter allocation matches pagelist\n");
- }
-#endif
-
- return 0;
-
- failed:
- drm_sg_cleanup(entry);
- return -ENOMEM;
-}
-
-int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_scatter_gather *request = data;
-
- return drm_sg_alloc(dev, request);
-
-}
-
-int drm_sg_free(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_scatter_gather *request = data;
- struct drm_sg_mem *entry;
-
- if (!drm_core_check_feature(dev, DRIVER_SG))
- return -EINVAL;
-
- entry = dev->sg;
- dev->sg = NULL;
-
- if (!entry || entry->handle != request->handle)
- return -EINVAL;
-
- DRM_DEBUG("virtual = %p\n", entry->virtual);
-
- drm_sg_cleanup(entry);
-
- return 0;
-}
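
A hedged sketch of how the allocation path above might be driven from driver code rather than through the ioctl; the wrapper name is invented.

static int example_sg_setup(struct drm_device *dev, unsigned long bytes)
{
	struct drm_scatter_gather request = { .size = bytes };
	int ret;

	ret = drm_sg_alloc(dev, &request);
	if (ret)
		return ret;

	/* dev->sg->virtual and dev->sg->busaddr[] are now available;
	 * request.handle identifies the area for a later drm_sg_free(). */
	return 0;
}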
+++ /dev/null
-/**************************************************************************
- *
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck., ND., USA.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- *
- **************************************************************************/
-/*
- * Simple memory manager interface that keeps track of allocated regions on a
- * per-"owner" basis. All regions associated with an "owner" can be released
- * with a single call, typically when the "owner" goes away. The owner is any
- * "unsigned long" identifier, typically a pointer to a file-private
- * struct or a context identifier.
- *
- * Authors:
- * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- */
-
-#include "drm_sman.h"
-
-struct drm_owner_item {
- struct drm_hash_item owner_hash;
- struct list_head sman_list;
- struct list_head mem_blocks;
-};
-
-void drm_sman_takedown(struct drm_sman * sman)
-{
- drm_ht_remove(&sman->user_hash_tab);
- drm_ht_remove(&sman->owner_hash_tab);
- kfree(sman->mm);
-}
-
-EXPORT_SYMBOL(drm_sman_takedown);
-
-int
-drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
- unsigned int user_order, unsigned int owner_order)
-{
- int ret = 0;
-
- sman->mm = (struct drm_sman_mm *) kcalloc(num_managers,
- sizeof(*sman->mm),
- GFP_KERNEL);
- if (!sman->mm) {
- ret = -ENOMEM;
- goto out;
- }
- sman->num_managers = num_managers;
- INIT_LIST_HEAD(&sman->owner_items);
- ret = drm_ht_create(&sman->owner_hash_tab, owner_order);
- if (ret)
- goto out1;
- ret = drm_ht_create(&sman->user_hash_tab, user_order);
- if (!ret)
- goto out;
-
- drm_ht_remove(&sman->owner_hash_tab);
-out1:
- kfree(sman->mm);
-out:
- return ret;
-}
-
-EXPORT_SYMBOL(drm_sman_init);
-
-static void *drm_sman_mm_allocate(void *private, unsigned long size,
- unsigned alignment)
-{
- struct drm_mm *mm = (struct drm_mm *) private;
- struct drm_mm_node *tmp;
-
- tmp = drm_mm_search_free(mm, size, alignment, 1);
- if (!tmp) {
- return NULL;
- }
- tmp = drm_mm_get_block(tmp, size, alignment);
- return tmp;
-}
-
-static void drm_sman_mm_free(void *private, void *ref)
-{
- struct drm_mm_node *node = (struct drm_mm_node *) ref;
-
- drm_mm_put_block(node);
-}
-
-static void drm_sman_mm_destroy(void *private)
-{
- struct drm_mm *mm = (struct drm_mm *) private;
- drm_mm_takedown(mm);
- kfree(mm);
-}
-
-static unsigned long drm_sman_mm_offset(void *private, void *ref)
-{
- struct drm_mm_node *node = (struct drm_mm_node *) ref;
- return node->start;
-}
-
-int
-drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
- unsigned long start, unsigned long size)
-{
- struct drm_sman_mm *sman_mm;
- struct drm_mm *mm;
- int ret;
-
- BUG_ON(manager >= sman->num_managers);
-
- sman_mm = &sman->mm[manager];
- mm = kzalloc(sizeof(*mm), GFP_KERNEL);
- if (!mm) {
- return -ENOMEM;
- }
- sman_mm->private = mm;
- ret = drm_mm_init(mm, start, size);
-
- if (ret) {
- kfree(mm);
- return ret;
- }
-
- sman_mm->allocate = drm_sman_mm_allocate;
- sman_mm->free = drm_sman_mm_free;
- sman_mm->destroy = drm_sman_mm_destroy;
- sman_mm->offset = drm_sman_mm_offset;
-
- return 0;
-}
-
-EXPORT_SYMBOL(drm_sman_set_range);
-
-int
-drm_sman_set_manager(struct drm_sman * sman, unsigned int manager,
- struct drm_sman_mm * allocator)
-{
- BUG_ON(manager >= sman->num_managers);
- sman->mm[manager] = *allocator;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_sman_set_manager);
-
-static struct drm_owner_item *drm_sman_get_owner_item(struct drm_sman * sman,
- unsigned long owner)
-{
- int ret;
- struct drm_hash_item *owner_hash_item;
- struct drm_owner_item *owner_item;
-
- ret = drm_ht_find_item(&sman->owner_hash_tab, owner, &owner_hash_item);
- if (!ret) {
- return drm_hash_entry(owner_hash_item, struct drm_owner_item,
- owner_hash);
- }
-
- owner_item = kzalloc(sizeof(*owner_item), GFP_KERNEL);
- if (!owner_item)
- goto out;
-
- INIT_LIST_HEAD(&owner_item->mem_blocks);
- owner_item->owner_hash.key = owner;
- if (drm_ht_insert_item(&sman->owner_hash_tab, &owner_item->owner_hash))
- goto out1;
-
- list_add_tail(&owner_item->sman_list, &sman->owner_items);
- return owner_item;
-
-out1:
- kfree(owner_item);
-out:
- return NULL;
-}
-
-struct drm_memblock_item *drm_sman_alloc(struct drm_sman *sman, unsigned int manager,
- unsigned long size, unsigned alignment,
- unsigned long owner)
-{
- void *tmp;
- struct drm_sman_mm *sman_mm;
- struct drm_owner_item *owner_item;
- struct drm_memblock_item *memblock;
-
- BUG_ON(manager >= sman->num_managers);
-
- sman_mm = &sman->mm[manager];
- tmp = sman_mm->allocate(sman_mm->private, size, alignment);
-
- if (!tmp) {
- return NULL;
- }
-
- memblock = kzalloc(sizeof(*memblock), GFP_KERNEL);
-
- if (!memblock)
- goto out;
-
- memblock->mm_info = tmp;
- memblock->mm = sman_mm;
- memblock->sman = sman;
-
- if (drm_ht_just_insert_please
- (&sman->user_hash_tab, &memblock->user_hash,
- (unsigned long)memblock, 32, 0, 0))
- goto out1;
-
- owner_item = drm_sman_get_owner_item(sman, owner);
- if (!owner_item)
- goto out2;
-
- list_add_tail(&memblock->owner_list, &owner_item->mem_blocks);
-
- return memblock;
-
-out2:
- drm_ht_remove_item(&sman->user_hash_tab, &memblock->user_hash);
-out1:
- kfree(memblock);
-out:
- sman_mm->free(sman_mm->private, tmp);
-
- return NULL;
-}
-
-EXPORT_SYMBOL(drm_sman_alloc);
-
-static void drm_sman_free(struct drm_memblock_item *item)
-{
- struct drm_sman *sman = item->sman;
-
- list_del(&item->owner_list);
- drm_ht_remove_item(&sman->user_hash_tab, &item->user_hash);
- item->mm->free(item->mm->private, item->mm_info);
- kfree(item);
-}
-
-int drm_sman_free_key(struct drm_sman *sman, unsigned int key)
-{
- struct drm_hash_item *hash_item;
- struct drm_memblock_item *memblock_item;
-
- if (drm_ht_find_item(&sman->user_hash_tab, key, &hash_item))
- return -EINVAL;
-
- memblock_item = drm_hash_entry(hash_item, struct drm_memblock_item,
- user_hash);
- drm_sman_free(memblock_item);
- return 0;
-}
-
-EXPORT_SYMBOL(drm_sman_free_key);
-
-static void drm_sman_remove_owner(struct drm_sman *sman,
- struct drm_owner_item *owner_item)
-{
- list_del(&owner_item->sman_list);
- drm_ht_remove_item(&sman->owner_hash_tab, &owner_item->owner_hash);
- kfree(owner_item);
-}
-
-int drm_sman_owner_clean(struct drm_sman *sman, unsigned long owner)
-{
-
- struct drm_hash_item *hash_item;
- struct drm_owner_item *owner_item;
-
- if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) {
- return -1;
- }
-
- owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash);
- if (owner_item->mem_blocks.next == &owner_item->mem_blocks) {
- drm_sman_remove_owner(sman, owner_item);
- return -1;
- }
-
- return 0;
-}
-
-EXPORT_SYMBOL(drm_sman_owner_clean);
-
-static void drm_sman_do_owner_cleanup(struct drm_sman *sman,
- struct drm_owner_item *owner_item)
-{
- struct drm_memblock_item *entry, *next;
-
- list_for_each_entry_safe(entry, next, &owner_item->mem_blocks,
- owner_list) {
- drm_sman_free(entry);
- }
- drm_sman_remove_owner(sman, owner_item);
-}
-
-void drm_sman_owner_cleanup(struct drm_sman *sman, unsigned long owner)
-{
-
- struct drm_hash_item *hash_item;
- struct drm_owner_item *owner_item;
-
- if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) {
-
- return;
- }
-
- owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash);
- drm_sman_do_owner_cleanup(sman, owner_item);
-}
-
-EXPORT_SYMBOL(drm_sman_owner_cleanup);
-
-void drm_sman_cleanup(struct drm_sman *sman)
-{
- struct drm_owner_item *entry, *next;
- unsigned int i;
- struct drm_sman_mm *sman_mm;
-
- list_for_each_entry_safe(entry, next, &sman->owner_items, sman_list) {
- drm_sman_do_owner_cleanup(sman, entry);
- }
- if (sman->mm) {
- for (i = 0; i < sman->num_managers; ++i) {
- sman_mm = &sman->mm[i];
- if (sman_mm->private) {
- sman_mm->destroy(sman_mm->private);
- sman_mm->private = NULL;
- }
- }
- }
-}
-
-EXPORT_SYMBOL(drm_sman_cleanup);
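
The expected lifecycle of the manager is init, set_range (or set_manager), alloc, then the cleanup calls. A hedged sketch follows; the 16 MiB aperture, the hash orders and the function name are illustrative only.

static int example_sman_lifecycle(struct drm_sman *sman, unsigned long owner)
{
	struct drm_memblock_item *block;
	int ret;

	ret = drm_sman_init(sman, 1, 12, 8);		/* one manager, 2^12/2^8 hash orders */
	if (ret)
		return ret;

	ret = drm_sman_set_range(sman, 0, 0, 16 * 1024 * 1024);
	if (ret) {
		drm_sman_takedown(sman);
		return ret;
	}

	block = drm_sman_alloc(sman, 0, 4096, 0, owner);	/* 4 KiB block for 'owner' */
	if (!block)
		DRM_DEBUG("allocation failed\n");

	drm_sman_owner_cleanup(sman, owner);	/* releases everything 'owner' holds */
	drm_sman_cleanup(sman);			/* frees remaining blocks and managers */
	drm_sman_takedown(sman);		/* removes hash tables and manager array */
	return 0;
}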
+++ /dev/null
-/**
- * \file drm_stub.h
- * Stub support
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- */
-
-/*
- * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
- *
- * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/slab.h>
-#include "drmP.h"
-#include "drm_core.h"
-
-unsigned int drm_debug = 0; /* 1 to enable debug output */
-EXPORT_SYMBOL(drm_debug);
-
-MODULE_AUTHOR(CORE_AUTHOR);
-MODULE_DESCRIPTION(CORE_DESC);
-MODULE_LICENSE("GPL and additional rights");
-MODULE_PARM_DESC(debug, "Enable debug output");
-
-module_param_named(debug, drm_debug, int, 0600);
-
-struct idr drm_minors_idr;
-
-struct class *drm_class;
-struct proc_dir_entry *drm_proc_root;
-struct dentry *drm_debugfs_root;
-void drm_ut_debug_printk(unsigned int request_level,
- const char *prefix,
- const char *function_name,
- const char *format, ...)
-{
- va_list args;
-
- if (drm_debug & request_level) {
- if (function_name)
- printk(KERN_DEBUG "[%s:%s], ", prefix, function_name);
- va_start(args, format);
- vprintk(format, args);
- va_end(args);
- }
-}
-EXPORT_SYMBOL(drm_ut_debug_printk);
-static int drm_minor_get_id(struct drm_device *dev, int type)
-{
- int new_id;
- int ret;
- int base = 0, limit = 63;
-
- if (type == DRM_MINOR_CONTROL) {
- base += 64;
- limit = base + 127;
- } else if (type == DRM_MINOR_RENDER) {
- base += 128;
- limit = base + 255;
- }
-
-again:
- if (idr_pre_get(&drm_minors_idr, GFP_KERNEL) == 0) {
- DRM_ERROR("Out of memory expanding drawable idr\n");
- return -ENOMEM;
- }
- mutex_lock(&dev->struct_mutex);
- ret = idr_get_new_above(&drm_minors_idr, NULL,
- base, &new_id);
- mutex_unlock(&dev->struct_mutex);
- if (ret == -EAGAIN) {
- goto again;
- } else if (ret) {
- return ret;
- }
-
- if (new_id >= limit) {
- idr_remove(&drm_minors_idr, new_id);
- return -EINVAL;
- }
- return new_id;
-}
-
-struct drm_master *drm_master_create(struct drm_minor *minor)
-{
- struct drm_master *master;
-
- master = kzalloc(sizeof(*master), GFP_KERNEL);
- if (!master)
- return NULL;
-
- kref_init(&master->refcount);
- spin_lock_init(&master->lock.spinlock);
- init_waitqueue_head(&master->lock.lock_queue);
- drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER);
- INIT_LIST_HEAD(&master->magicfree);
- master->minor = minor;
-
- list_add_tail(&master->head, &minor->master_list);
-
- return master;
-}
-
-struct drm_master *drm_master_get(struct drm_master *master)
-{
- kref_get(&master->refcount);
- return master;
-}
-EXPORT_SYMBOL(drm_master_get);
-
-static void drm_master_destroy(struct kref *kref)
-{
- struct drm_master *master = container_of(kref, struct drm_master, refcount);
- struct drm_magic_entry *pt, *next;
- struct drm_device *dev = master->minor->dev;
- struct drm_map_list *r_list, *list_temp;
-
- list_del(&master->head);
-
- if (dev->driver->master_destroy)
- dev->driver->master_destroy(dev, master);
-
- list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
- if (r_list->master == master) {
- drm_rmmap_locked(dev, r_list->map);
- r_list = NULL;
- }
- }
-
- if (master->unique) {
- kfree(master->unique);
- master->unique = NULL;
- master->unique_len = 0;
- }
-
- kfree(dev->devname);
- dev->devname = NULL;
-
- list_for_each_entry_safe(pt, next, &master->magicfree, head) {
- list_del(&pt->head);
- drm_ht_remove_item(&master->magiclist, &pt->hash_item);
- kfree(pt);
- }
-
- drm_ht_remove(&master->magiclist);
-
- kfree(master);
-}
-
-void drm_master_put(struct drm_master **master)
-{
- kref_put(&(*master)->refcount, drm_master_destroy);
- *master = NULL;
-}
-EXPORT_SYMBOL(drm_master_put);
-
-int drm_setmaster_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- int ret = 0;
-
- if (file_priv->is_master)
- return 0;
-
- if (file_priv->minor->master && file_priv->minor->master != file_priv->master)
- return -EINVAL;
-
- if (!file_priv->master)
- return -EINVAL;
-
- if (!file_priv->minor->master &&
- file_priv->minor->master != file_priv->master) {
- mutex_lock(&dev->struct_mutex);
- file_priv->minor->master = drm_master_get(file_priv->master);
- file_priv->is_master = 1;
- if (dev->driver->master_set) {
- ret = dev->driver->master_set(dev, file_priv, false);
- if (unlikely(ret != 0)) {
- file_priv->is_master = 0;
- drm_master_put(&file_priv->minor->master);
- }
- }
- mutex_unlock(&dev->struct_mutex);
- }
-
- return 0;
-}
-
-int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- if (!file_priv->is_master)
- return -EINVAL;
-
- if (!file_priv->minor->master)
- return -EINVAL;
-
- mutex_lock(&dev->struct_mutex);
- if (dev->driver->master_drop)
- dev->driver->master_drop(dev, file_priv, false);
- drm_master_put(&file_priv->minor->master);
- file_priv->is_master = 0;
- mutex_unlock(&dev->struct_mutex);
- return 0;
-}
-
-int drm_fill_in_dev(struct drm_device *dev,
- const struct pci_device_id *ent,
- struct drm_driver *driver)
-{
- int retcode;
-
- INIT_LIST_HEAD(&dev->filelist);
- INIT_LIST_HEAD(&dev->ctxlist);
- INIT_LIST_HEAD(&dev->vmalist);
- INIT_LIST_HEAD(&dev->maplist);
- INIT_LIST_HEAD(&dev->vblank_event_list);
-
- spin_lock_init(&dev->count_lock);
- spin_lock_init(&dev->event_lock);
- mutex_init(&dev->struct_mutex);
- mutex_init(&dev->ctxlist_mutex);
-
- if (drm_ht_create(&dev->map_hash, 12)) {
- return -ENOMEM;
- }
-
- /* the DRM has 6 basic counters */
- dev->counters = 6;
- dev->types[0] = _DRM_STAT_LOCK;
- dev->types[1] = _DRM_STAT_OPENS;
- dev->types[2] = _DRM_STAT_CLOSES;
- dev->types[3] = _DRM_STAT_IOCTLS;
- dev->types[4] = _DRM_STAT_LOCKS;
- dev->types[5] = _DRM_STAT_UNLOCKS;
-
- dev->driver = driver;
-
- if (drm_core_has_AGP(dev)) {
- if (drm_device_is_agp(dev))
- dev->agp = drm_agp_init(dev);
- if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP)
- && (dev->agp == NULL)) {
- DRM_ERROR("Cannot initialize the agpgart module.\n");
- retcode = -EINVAL;
- goto error_out_unreg;
- }
- if (drm_core_has_MTRR(dev)) {
- if (dev->agp)
- dev->agp->agp_mtrr =
- mtrr_add(dev->agp->agp_info.aper_base,
- dev->agp->agp_info.aper_size *
- 1024 * 1024, MTRR_TYPE_WRCOMB, 1);
- }
- }
-
-
- retcode = drm_ctxbitmap_init(dev);
- if (retcode) {
- DRM_ERROR("Cannot allocate memory for context bitmap.\n");
- goto error_out_unreg;
- }
-
- if (driver->driver_features & DRIVER_GEM) {
- retcode = drm_gem_init(dev);
- if (retcode) {
- DRM_ERROR("Cannot initialize graphics execution "
- "manager (GEM)\n");
- goto error_out_unreg;
- }
- }
-
- return 0;
-
- error_out_unreg:
- drm_lastclose(dev);
- return retcode;
-}
-
-
-/**
- * Get a secondary minor number.
- *
- * \param dev device data structure
- * \param minor structure to hold the assigned minor
- * \return negative number on failure.
- *
- * Search for an empty entry and initialize it to the given parameters, then
- * create the proc entries via drm_proc_init(). This routine assigns
- * minor numbers to the secondary heads of multi-headed cards.
- */
-int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
-{
- struct drm_minor *new_minor;
- int ret;
- int minor_id;
-
- DRM_DEBUG("\n");
-
- minor_id = drm_minor_get_id(dev, type);
- if (minor_id < 0)
- return minor_id;
-
- new_minor = kzalloc(sizeof(struct drm_minor), GFP_KERNEL);
- if (!new_minor) {
- ret = -ENOMEM;
- goto err_idr;
- }
-
- new_minor->type = type;
- new_minor->device = MKDEV(DRM_MAJOR, minor_id);
- new_minor->dev = dev;
- new_minor->index = minor_id;
- INIT_LIST_HEAD(&new_minor->master_list);
-
- idr_replace(&drm_minors_idr, new_minor, minor_id);
-
- if (type == DRM_MINOR_LEGACY) {
- ret = drm_proc_init(new_minor, minor_id, drm_proc_root);
- if (ret) {
- DRM_ERROR("DRM: Failed to initialize /proc/dri.\n");
- goto err_mem;
- }
- } else
- new_minor->proc_root = NULL;
-
-#if defined(CONFIG_DEBUG_FS)
- ret = drm_debugfs_init(new_minor, minor_id, drm_debugfs_root);
- if (ret) {
- DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
- goto err_g2;
- }
-#endif
-
- ret = drm_sysfs_device_add(new_minor);
- if (ret) {
- printk(KERN_ERR
- "DRM: Error sysfs_device_add.\n");
- goto err_g2;
- }
- *minor = new_minor;
-
- DRM_DEBUG("new minor assigned %d\n", minor_id);
- return 0;
-
-
-err_g2:
- if (new_minor->type == DRM_MINOR_LEGACY)
- drm_proc_cleanup(new_minor, drm_proc_root);
-err_mem:
- kfree(new_minor);
-err_idr:
- idr_remove(&drm_minors_idr, minor_id);
- *minor = NULL;
- return ret;
-}
-
-/**
- * Put a secondary minor number.
- *
- * \param minor_p pointer to the minor to be released
- * \return always zero
- *
- * Cleans up the proc resources. Not legal for this to be the
- * last minor released.
- *
- */
-int drm_put_minor(struct drm_minor **minor_p)
-{
- struct drm_minor *minor = *minor_p;
-
- DRM_DEBUG("release secondary minor %d\n", minor->index);
-
- if (minor->type == DRM_MINOR_LEGACY)
- drm_proc_cleanup(minor, drm_proc_root);
-#if defined(CONFIG_DEBUG_FS)
- drm_debugfs_cleanup(minor);
-#endif
-
- drm_sysfs_device_remove(minor);
-
- idr_remove(&drm_minors_idr, minor->index);
-
- kfree(minor);
- *minor_p = NULL;
- return 0;
-}
-
-/**
- * Called via drm_exit() at module unload time or when a PCI device is
- * unplugged.
- *
- * Cleans up the DRM device, calling drm_lastclose().
- *
- * \sa drm_init
- */
-void drm_put_dev(struct drm_device *dev)
-{
- struct drm_driver *driver;
- struct drm_map_list *r_list, *list_temp;
-
- DRM_DEBUG("\n");
-
- if (!dev) {
- DRM_ERROR("cleanup called no dev\n");
- return;
- }
- driver = dev->driver;
-
- drm_lastclose(dev);
-
- if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) &&
- dev->agp && dev->agp->agp_mtrr >= 0) {
- int retval;
- retval = mtrr_del(dev->agp->agp_mtrr,
- dev->agp->agp_info.aper_base,
- dev->agp->agp_info.aper_size * 1024 * 1024);
- DRM_DEBUG("mtrr_del=%d\n", retval);
- }
-
- if (dev->driver->unload)
- dev->driver->unload(dev);
-
- if (drm_core_has_AGP(dev) && dev->agp) {
- kfree(dev->agp);
- dev->agp = NULL;
- }
-
- drm_vblank_cleanup(dev);
-
- list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
- drm_rmmap(dev, r_list->map);
- drm_ht_remove(&dev->map_hash);
-
- drm_ctxbitmap_cleanup(dev);
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- drm_put_minor(&dev->control);
-
- if (driver->driver_features & DRIVER_GEM)
- drm_gem_destroy(dev);
-
- drm_put_minor(&dev->primary);
-
- if (dev->devname) {
- kfree(dev->devname);
- dev->devname = NULL;
- }
- kfree(dev);
-}
-EXPORT_SYMBOL(drm_put_dev);
+++ /dev/null
-
-/*
- * drm_sysfs.c - Modifications to drm_sysfs_class.c to support
- * extra sysfs attributes from DRM. The normal drm_sysfs_class
- * does not allow adding attributes.
- *
- * Copyright (c) 2004 Jon Smirl <jonsmirl@gmail.com>
- * Copyright (c) 2003-2004 Greg Kroah-Hartman <greg@kroah.com>
- * Copyright (c) 2003-2004 IBM Corp.
- *
- * This file is released under the GPLv2
- *
- */
-
-#include <linux/device.h>
-#include <linux/kdev_t.h>
-#include <linux/gfp.h>
-#include <linux/err.h>
-
-#include "drm_sysfs.h"
-#include "drm_core.h"
-#include "drmP.h"
-
-#define to_drm_minor(d) container_of(d, struct drm_minor, kdev)
-#define to_drm_connector(d) container_of(d, struct drm_connector, kdev)
-
-static struct device_type drm_sysfs_device_minor = {
- .name = "drm_minor"
-};
-
-/**
- * drm_class_suspend - DRM class suspend hook
- * @dev: Linux device to suspend
- * @state: power state to enter
- *
- * Just figures out what the actual struct drm_device associated with
- * @dev is and calls its suspend hook, if present.
- */
-static int drm_class_suspend(struct device *dev, pm_message_t state)
-{
- if (dev->type == &drm_sysfs_device_minor) {
- struct drm_minor *drm_minor = to_drm_minor(dev);
- struct drm_device *drm_dev = drm_minor->dev;
-
- if (drm_minor->type == DRM_MINOR_LEGACY &&
- !drm_core_check_feature(drm_dev, DRIVER_MODESET) &&
- drm_dev->driver->suspend)
- return drm_dev->driver->suspend(drm_dev, state);
- }
- return 0;
-}
-
-/**
- * drm_class_resume - DRM class resume hook
- * @dev: Linux device to resume
- *
- * Just figures out what the actual struct drm_device associated with
- * @dev is and calls its resume hook, if present.
- */
-static int drm_class_resume(struct device *dev)
-{
- if (dev->type == &drm_sysfs_device_minor) {
- struct drm_minor *drm_minor = to_drm_minor(dev);
- struct drm_device *drm_dev = drm_minor->dev;
-
- if (drm_minor->type == DRM_MINOR_LEGACY &&
- !drm_core_check_feature(drm_dev, DRIVER_MODESET) &&
- drm_dev->driver->resume)
- return drm_dev->driver->resume(drm_dev);
- }
- return 0;
-}
-
-static char *drm_devnode(struct device *dev, mode_t *mode)
-{
- return kasprintf(GFP_KERNEL, "dri/%s", dev_name(dev));
-}
-
-static CLASS_ATTR_STRING(version, S_IRUGO,
- CORE_NAME " "
- __stringify(CORE_MAJOR) "."
- __stringify(CORE_MINOR) "."
- __stringify(CORE_PATCHLEVEL) " "
- CORE_DATE);
-
-/**
- * drm_sysfs_create - create the DRM device class
- * @owner: pointer to the module that is to "own" this class
- * @name: pointer to a string for the name of this class.
- *
- * This is used to create the DRM class pointer that can then be used
- * in calls to drm_sysfs_device_add().
- *
- * Note, the pointer created here is to be destroyed when finished by making a
- * call to drm_sysfs_destroy().
- */
-struct class *drm_sysfs_create(struct module *owner, char *name)
-{
- struct class *class;
- int err;
-
- class = class_create(owner, name);
- if (IS_ERR(class)) {
- err = PTR_ERR(class);
- goto err_out;
- }
-
- class->suspend = drm_class_suspend;
- class->resume = drm_class_resume;
-
- err = class_create_file(class, &class_attr_version.attr);
- if (err)
- goto err_out_class;
-
- class->devnode = drm_devnode;
-
- return class;
-
-err_out_class:
- class_destroy(class);
-err_out:
- return ERR_PTR(err);
-}
-
-/**
- * drm_sysfs_destroy - destroys DRM class
- *
- * Destroy the DRM device class.
- */
-void drm_sysfs_destroy(void)
-{
- if ((drm_class == NULL) || (IS_ERR(drm_class)))
- return;
- class_remove_file(drm_class, &class_attr_version.attr);
- class_destroy(drm_class);
-}
-
-/**
- * drm_sysfs_device_release - do nothing
- * @dev: Linux device
- *
- * Normally, this would free the DRM device associated with @dev, along
- * with cleaning up any other stuff. But we do that in the DRM core, so
- * this function can just return and hope that the core does its job.
- */
-static void drm_sysfs_device_release(struct device *dev)
-{
- memset(dev, 0, sizeof(struct device));
- return;
-}
-
-/*
- * Connector properties
- */
-static ssize_t status_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_connector *connector = to_drm_connector(device);
- enum drm_connector_status status;
-
- status = connector->funcs->detect(connector, true);
- return snprintf(buf, PAGE_SIZE, "%s\n",
- drm_get_connector_status_name(status));
-}
-
-static ssize_t dpms_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_connector *connector = to_drm_connector(device);
- struct drm_device *dev = connector->dev;
- uint64_t dpms_status;
- int ret;
-
- ret = drm_connector_property_get_value(connector,
- dev->mode_config.dpms_property,
- &dpms_status);
- if (ret)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%s\n",
- drm_get_dpms_name((int)dpms_status));
-}
-
-static ssize_t enabled_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_connector *connector = to_drm_connector(device);
-
- return snprintf(buf, PAGE_SIZE, "%s\n", connector->encoder ? "enabled" :
- "disabled");
-}
-
-static ssize_t edid_show(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf, loff_t off,
- size_t count)
-{
- struct device *connector_dev = container_of(kobj, struct device, kobj);
- struct drm_connector *connector = to_drm_connector(connector_dev);
- unsigned char *edid;
- size_t size;
-
- if (!connector->edid_blob_ptr)
- return 0;
-
- edid = connector->edid_blob_ptr->data;
- size = connector->edid_blob_ptr->length;
- if (!edid)
- return 0;
-
- if (off >= size)
- return 0;
-
- if (off + count > size)
- count = size - off;
- memcpy(buf, edid + off, count);
-
- return count;
-}
-
-static ssize_t modes_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_connector *connector = to_drm_connector(device);
- struct drm_display_mode *mode;
- int written = 0;
-
- list_for_each_entry(mode, &connector->modes, head) {
- written += snprintf(buf + written, PAGE_SIZE - written, "%s\n",
- mode->name);
- }
-
- return written;
-}
-
-static ssize_t subconnector_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_connector *connector = to_drm_connector(device);
- struct drm_device *dev = connector->dev;
- struct drm_property *prop = NULL;
- uint64_t subconnector;
- int is_tv = 0;
- int ret;
-
- switch (connector->connector_type) {
- case DRM_MODE_CONNECTOR_DVII:
- prop = dev->mode_config.dvi_i_subconnector_property;
- break;
- case DRM_MODE_CONNECTOR_Composite:
- case DRM_MODE_CONNECTOR_SVIDEO:
- case DRM_MODE_CONNECTOR_Component:
- case DRM_MODE_CONNECTOR_TV:
- prop = dev->mode_config.tv_subconnector_property;
- is_tv = 1;
- break;
- default:
- DRM_ERROR("Wrong connector type for this property\n");
- return 0;
- }
-
- if (!prop) {
- DRM_ERROR("Unable to find subconnector property\n");
- return 0;
- }
-
- ret = drm_connector_property_get_value(connector, prop, &subconnector);
- if (ret)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%s", is_tv ?
- drm_get_tv_subconnector_name((int)subconnector) :
- drm_get_dvi_i_subconnector_name((int)subconnector));
-}
-
-static ssize_t select_subconnector_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_connector *connector = to_drm_connector(device);
- struct drm_device *dev = connector->dev;
- struct drm_property *prop = NULL;
- uint64_t subconnector;
- int is_tv = 0;
- int ret;
-
- switch (connector->connector_type) {
- case DRM_MODE_CONNECTOR_DVII:
- prop = dev->mode_config.dvi_i_select_subconnector_property;
- break;
- case DRM_MODE_CONNECTOR_Composite:
- case DRM_MODE_CONNECTOR_SVIDEO:
- case DRM_MODE_CONNECTOR_Component:
- case DRM_MODE_CONNECTOR_TV:
- prop = dev->mode_config.tv_select_subconnector_property;
- is_tv = 1;
- break;
- default:
- DRM_ERROR("Wrong connector type for this property\n");
- return 0;
- }
-
- if (!prop) {
- DRM_ERROR("Unable to find select subconnector property\n");
- return 0;
- }
-
- ret = drm_connector_property_get_value(connector, prop, &subconnector);
- if (ret)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%s", is_tv ?
- drm_get_tv_select_name((int)subconnector) :
- drm_get_dvi_i_select_name((int)subconnector));
-}
-
-static struct device_attribute connector_attrs[] = {
- __ATTR_RO(status),
- __ATTR_RO(enabled),
- __ATTR_RO(dpms),
- __ATTR_RO(modes),
-};
-
-/* These attributes are for both DVI-I connectors and all types of tv-out. */
-static struct device_attribute connector_attrs_opt1[] = {
- __ATTR_RO(subconnector),
- __ATTR_RO(select_subconnector),
-};
-
-static struct bin_attribute edid_attr = {
- .attr.name = "edid",
- .attr.mode = 0444,
- .size = 0,
- .read = edid_show,
-};
-
-/**
- * drm_sysfs_connector_add - add a connector to sysfs
- * @connector: connector to add
- *
- * Create a connector device in sysfs, along with its associated connector
- * properties (so far, connection status, dpms, mode list & edid) and
- * generate a hotplug event so userspace knows there's a new connector
- * available.
- *
- * Note:
- * This routine should only be called *once* for each registered connector.
- * A second call for an already registered connector will trigger the BUG_ON
- * below.
- */
-int drm_sysfs_connector_add(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- int attr_cnt = 0;
- int opt_cnt = 0;
- int i;
- int ret = 0;
-
- /* We shouldn't get called more than once for the same connector */
- BUG_ON(device_is_registered(&connector->kdev));
-
- connector->kdev.parent = &dev->primary->kdev;
- connector->kdev.class = drm_class;
- connector->kdev.release = drm_sysfs_device_release;
-
- DRM_DEBUG("adding \"%s\" to sysfs\n",
- drm_get_connector_name(connector));
-
- dev_set_name(&connector->kdev, "card%d-%s",
- dev->primary->index, drm_get_connector_name(connector));
- ret = device_register(&connector->kdev);
-
- if (ret) {
- DRM_ERROR("failed to register connector device: %d\n", ret);
- goto out;
- }
-
- /* Standard attributes */
-
- for (attr_cnt = 0; attr_cnt < ARRAY_SIZE(connector_attrs); attr_cnt++) {
- ret = device_create_file(&connector->kdev, &connector_attrs[attr_cnt]);
- if (ret)
- goto err_out_files;
- }
-
- /* Optional attributes */
- /*
- * In the long run it may be a good idea to make one set of
- * optional attributes per connector type.
- */
- switch (connector->connector_type) {
- case DRM_MODE_CONNECTOR_DVII:
- case DRM_MODE_CONNECTOR_Composite:
- case DRM_MODE_CONNECTOR_SVIDEO:
- case DRM_MODE_CONNECTOR_Component:
- case DRM_MODE_CONNECTOR_TV:
- for (opt_cnt = 0; opt_cnt < ARRAY_SIZE(connector_attrs_opt1); opt_cnt++) {
- ret = device_create_file(&connector->kdev, &connector_attrs_opt1[opt_cnt]);
- if (ret)
- goto err_out_files;
- }
- break;
- default:
- break;
- }
-
- ret = sysfs_create_bin_file(&connector->kdev.kobj, &edid_attr);
- if (ret)
- goto err_out_files;
-
- /* Let userspace know we have a new connector */
- drm_sysfs_hotplug_event(dev);
-
- return 0;
-
-err_out_files:
- for (i = 0; i < opt_cnt; i++)
- device_remove_file(&connector->kdev, &connector_attrs_opt1[i]);
- for (i = 0; i < attr_cnt; i++)
- device_remove_file(&connector->kdev, &connector_attrs[i]);
- device_unregister(&connector->kdev);
-
-out:
- return ret;
-}
-EXPORT_SYMBOL(drm_sysfs_connector_add);
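/*
 * Illustrative sketch only, not part of the patch above: typical KMS-driver
 * usage of drm_sysfs_connector_add() and of drm_sysfs_connector_remove()
 * defined below.  The example_* names are assumptions; the connector is
 * expected to be fully initialised before it is exposed to userspace.
 */
static int example_register_connector(struct drm_connector *connector)
{
	/* Creates the cardN-<name> device, its attributes and the EDID
	 * binary file, then emits the hotplug uevent. Call exactly once
	 * per connector. */
	return drm_sysfs_connector_add(connector);
}

static void example_unregister_connector(struct drm_connector *connector)
{
	/* Mirror of the add: removes the attributes and unregisters the
	 * device; the core sends the "remove" uevent for us. */
	drm_sysfs_connector_remove(connector);
}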
-
-/**
- * drm_sysfs_connector_remove - remove a connector device from sysfs
- * @connector: connector to remove
- *
- * Remove @connector and its associated attributes from sysfs. Note that
- * the device model core will take care of sending the "remove" uevent
- * at this time, so we don't need to do it.
- *
- * Note:
- * This routine should only be called if the connector was previously
- * successfully registered. If @connector hasn't been registered yet,
- * you'll likely see a panic somewhere deep in sysfs code when called.
- */
-void drm_sysfs_connector_remove(struct drm_connector *connector)
-{
- int i;
-
- DRM_DEBUG("removing \"%s\" from sysfs\n",
- drm_get_connector_name(connector));
-
- for (i = 0; i < ARRAY_SIZE(connector_attrs); i++)
- device_remove_file(&connector->kdev, &connector_attrs[i]);
- sysfs_remove_bin_file(&connector->kdev.kobj, &edid_attr);
- device_unregister(&connector->kdev);
-}
-EXPORT_SYMBOL(drm_sysfs_connector_remove);
-
-/**
- * drm_sysfs_hotplug_event - generate a DRM uevent
- * @dev: DRM device
- *
- * Send a uevent for the DRM device specified by @dev. Currently we only
- * set HOTPLUG=1 in the uevent environment, but this could be expanded to
- * deal with other types of events.
- */
-void drm_sysfs_hotplug_event(struct drm_device *dev)
-{
- char *event_string = "HOTPLUG=1";
- char *envp[] = { event_string, NULL };
-
- DRM_DEBUG("generating hotplug event\n");
-
- kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, envp);
-}
-EXPORT_SYMBOL(drm_sysfs_hotplug_event);
-
-/**
- * drm_sysfs_device_add - adds a class device to sysfs for a character driver
- * @minor: DRM minor to be added
- *
- * Add a DRM minor to the DRM device model class. The minor's parent device
- * (typically the underlying PCI or platform device) is used as the parent for
- * the Linux device, so userspace can find the bus device through sysfs.
- */
-int drm_sysfs_device_add(struct drm_minor *minor)
-{
- int err;
- char *minor_str;
-
- minor->kdev.parent = minor->dev->dev;
-
- minor->kdev.class = drm_class;
- minor->kdev.release = drm_sysfs_device_release;
- minor->kdev.devt = minor->device;
- minor->kdev.type = &drm_sysfs_device_minor;
- if (minor->type == DRM_MINOR_CONTROL)
- minor_str = "controlD%d";
- else if (minor->type == DRM_MINOR_RENDER)
- minor_str = "renderD%d";
- else
- minor_str = "card%d";
-
- dev_set_name(&minor->kdev, minor_str, minor->index);
-
- err = device_register(&minor->kdev);
- if (err) {
- DRM_ERROR("device add failed: %d\n", err);
- goto err_out;
- }
-
- return 0;
-
-err_out:
- return err;
-}
-
-/**
- * drm_sysfs_device_remove - remove a DRM minor from sysfs
- * @minor: DRM minor to remove
- *
- * This call unregisters and cleans up a class device that was created with a
- * call to drm_sysfs_device_add()
- */
-void drm_sysfs_device_remove(struct drm_minor *minor)
-{
- device_unregister(&minor->kdev);
-}
-
-
-/**
- * drm_class_device_register - Register a struct device in the drm class.
- *
- * @dev: pointer to struct device to register.
- *
- * @dev should have all relevant members pre-filled with the exception
- * of the class member. In particular, the device_type member must
- * be set.
- */
-
-int drm_class_device_register(struct device *dev)
-{
- dev->class = drm_class;
- return device_register(dev);
-}
-EXPORT_SYMBOL_GPL(drm_class_device_register);
-
-void drm_class_device_unregister(struct device *dev)
-{
- return device_unregister(dev);
-}
-EXPORT_SYMBOL_GPL(drm_class_device_unregister);
+++ /dev/null
-#include "drmP.h"
-
-#define CREATE_TRACE_POINTS
-#include "drm_trace.h"
+++ /dev/null
-#include "drmP.h"
-#include <linux/usb.h>
-
-#ifdef CONFIG_USB
-int drm_get_usb_dev(struct usb_interface *interface,
- const struct usb_device_id *id,
- struct drm_driver *driver)
-{
- struct drm_device *dev;
- struct usb_device *usbdev;
- int ret;
-
- DRM_DEBUG("\n");
-
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev)
- return -ENOMEM;
-
- usbdev = interface_to_usbdev(interface);
- dev->usbdev = usbdev;
- dev->dev = &usbdev->dev;
-
- mutex_lock(&drm_global_mutex);
-
- ret = drm_fill_in_dev(dev, NULL, driver);
- if (ret) {
- printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
- goto err_g1;
- }
-
- usb_set_intfdata(interface, dev);
- ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
- if (ret)
- goto err_g1;
-
- ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
- if (ret)
- goto err_g2;
-
- if (dev->driver->load) {
- ret = dev->driver->load(dev, 0);
- if (ret)
- goto err_g3;
- }
-
- /* setup the grouping for the legacy output */
- ret = drm_mode_group_init_legacy_group(dev,
- &dev->primary->mode_group);
- if (ret)
- goto err_g3;
-
- list_add_tail(&dev->driver_item, &driver->device_list);
-
- mutex_unlock(&drm_global_mutex);
-
- DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
- driver->name, driver->major, driver->minor, driver->patchlevel,
- driver->date, dev->primary->index);
-
- return 0;
-
-err_g3:
- drm_put_minor(&dev->primary);
-err_g2:
- drm_put_minor(&dev->control);
-err_g1:
- kfree(dev);
- mutex_unlock(&drm_global_mutex);
- return ret;
-
-}
-EXPORT_SYMBOL(drm_get_usb_dev);
-
-static int drm_usb_get_irq(struct drm_device *dev)
-{
- return 0;
-}
-
-static const char *drm_usb_get_name(struct drm_device *dev)
-{
- return "USB";
-}
-
-static int drm_usb_set_busid(struct drm_device *dev,
- struct drm_master *master)
-{
- return 0;
-}
-
-static struct drm_bus drm_usb_bus = {
- .bus_type = DRIVER_BUS_USB,
- .get_irq = drm_usb_get_irq,
- .get_name = drm_usb_get_name,
- .set_busid = drm_usb_set_busid,
-};
-
-int drm_usb_init(struct drm_driver *driver, struct usb_driver *udriver)
-{
- int res;
- DRM_DEBUG("\n");
-
- INIT_LIST_HEAD(&driver->device_list);
- driver->kdriver.usb = udriver;
- driver->bus = &drm_usb_bus;
-
- res = usb_register(udriver);
- return res;
-}
-EXPORT_SYMBOL(drm_usb_init);
-
-void drm_usb_exit(struct drm_driver *driver,
- struct usb_driver *udriver)
-{
- usb_deregister(udriver);
-}
-EXPORT_SYMBOL(drm_usb_exit);
-#endif
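/*
 * Illustrative sketch only, not part of the patch above: how a USB display
 * driver might plug into the helpers in this file.  The example_* names and
 * the empty drm_driver are assumptions; drm_get_usb_dev(), drm_usb_init()
 * and drm_usb_exit() are the functions defined above.
 */
static struct drm_driver example_drm_driver;	/* ops filled in by a real driver */

static int example_usb_probe(struct usb_interface *interface,
			     const struct usb_device_id *id)
{
	/* Allocates the drm_device, registers its minors and calls ->load(). */
	return drm_get_usb_dev(interface, id, &example_drm_driver);
}

static struct usb_driver example_usb_driver = {
	.name  = "example-drm-usb",
	.probe = example_usb_probe,
	/* .disconnect and .id_table omitted for brevity */
};

static int __init example_usb_module_init(void)
{
	/* Hooks the usb_driver into the drm_driver, then registers it. */
	return drm_usb_init(&example_drm_driver, &example_usb_driver);
}

static void __exit example_usb_module_exit(void)
{
	drm_usb_exit(&example_drm_driver, &example_usb_driver);
}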
+++ /dev/null
-/**
- * \file drm_vm.c
- * Memory mapping for DRM
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "drmP.h"
-#if defined(__ia64__)
-#include <linux/efi.h>
-#include <linux/slab.h>
-#endif
-
-static void drm_vm_open(struct vm_area_struct *vma);
-static void drm_vm_close(struct vm_area_struct *vma);
-
-static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
-{
- pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
-
-#if defined(__i386__) || defined(__x86_64__)
- if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
- pgprot_val(tmp) |= _PAGE_PCD;
- pgprot_val(tmp) &= ~_PAGE_PWT;
- }
-#elif defined(__powerpc__)
- pgprot_val(tmp) |= _PAGE_NO_CACHE;
- if (map_type == _DRM_REGISTERS)
- pgprot_val(tmp) |= _PAGE_GUARDED;
-#elif defined(__ia64__)
- if (efi_range_is_wc(vma->vm_start, vma->vm_end -
- vma->vm_start))
- tmp = pgprot_writecombine(tmp);
- else
- tmp = pgprot_noncached(tmp);
-#elif defined(__sparc__) || defined(__arm__)
- tmp = pgprot_noncached(tmp);
-#endif
- return tmp;
-}
-
-static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
-{
- pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
-
-#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
- tmp |= _PAGE_NO_CACHE;
-#endif
- return tmp;
-}
-
-/**
- * \c fault method for AGP virtual memory.
- *
- * \param vma virtual memory area.
- * \param vmf fault information.
- * \return zero on success, or VM_FAULT_SIGBUS on failure; the resolved page
- * is stored in \p vmf->page.
- *
- * Find the right map and, if it's AGP memory, find the real physical page to
- * map, get the page, increment the use count and store it in the fault info.
- */
-#if __OS_HAS_AGP
-static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- struct drm_file *priv = vma->vm_file->private_data;
- struct drm_device *dev = priv->minor->dev;
- struct drm_local_map *map = NULL;
- struct drm_map_list *r_list;
- struct drm_hash_item *hash;
-
- /*
- * Find the right map
- */
- if (!drm_core_has_AGP(dev))
- goto vm_fault_error;
-
- if (!dev->agp || !dev->agp->cant_use_aperture)
- goto vm_fault_error;
-
- if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
- goto vm_fault_error;
-
- r_list = drm_hash_entry(hash, struct drm_map_list, hash);
- map = r_list->map;
-
- if (map && map->type == _DRM_AGP) {
- /*
- * Using vm_pgoff as a selector forces us to use this unusual
- * addressing scheme.
- */
- resource_size_t offset = (unsigned long)vmf->virtual_address -
- vma->vm_start;
- resource_size_t baddr = map->offset + offset;
- struct drm_agp_mem *agpmem;
- struct page *page;
-
-#ifdef __alpha__
- /*
- * Adjust to a bus-relative address
- */
- baddr -= dev->hose->mem_space->start;
-#endif
-
- /*
- * It's AGP memory - find the real physical page to map
- */
- list_for_each_entry(agpmem, &dev->agp->memory, head) {
- if (agpmem->bound <= baddr &&
- agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
- break;
- }
-
- if (&agpmem->head == &dev->agp->memory)
- goto vm_fault_error;
-
- /*
- * Get the page, inc the use count, and return it
- */
- offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
- page = agpmem->memory->pages[offset];
- get_page(page);
- vmf->page = page;
-
- DRM_DEBUG
- ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
- (unsigned long long)baddr,
- agpmem->memory->pages[offset],
- (unsigned long long)offset,
- page_count(page));
- return 0;
- }
-vm_fault_error:
- return VM_FAULT_SIGBUS; /* Disallow mremap */
-}
-#else /* __OS_HAS_AGP */
-static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- return VM_FAULT_SIGBUS;
-}
-#endif /* __OS_HAS_AGP */
-
-/**
- * \c fault method for shared virtual memory.
- *
- * \param vma virtual memory area.
- * \param vmf fault information.
- * \return zero on success, or VM_FAULT_SIGBUS on failure; the resolved page
- * is stored in \p vmf->page.
- *
- * Get the mapping, find the real physical page to map, get the page, and
- * store it in the fault info.
- */
-static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- struct drm_local_map *map = vma->vm_private_data;
- unsigned long offset;
- unsigned long i;
- struct page *page;
-
- if (!map)
- return VM_FAULT_SIGBUS; /* Nothing allocated */
-
- offset = (unsigned long)vmf->virtual_address - vma->vm_start;
- i = (unsigned long)map->handle + offset;
- page = vmalloc_to_page((void *)i);
- if (!page)
- return VM_FAULT_SIGBUS;
- get_page(page);
- vmf->page = page;
-
- DRM_DEBUG("shm_fault 0x%lx\n", offset);
- return 0;
-}
-
-/**
- * \c close method for shared virtual memory.
- *
- * \param vma virtual memory area.
- *
- * Deletes map information if we are the last
- * person to close a mapping and it's not in the global maplist.
- */
-static void drm_vm_shm_close(struct vm_area_struct *vma)
-{
- struct drm_file *priv = vma->vm_file->private_data;
- struct drm_device *dev = priv->minor->dev;
- struct drm_vma_entry *pt, *temp;
- struct drm_local_map *map;
- struct drm_map_list *r_list;
- int found_maps = 0;
-
- DRM_DEBUG("0x%08lx,0x%08lx\n",
- vma->vm_start, vma->vm_end - vma->vm_start);
- atomic_dec(&dev->vma_count);
-
- map = vma->vm_private_data;
-
- mutex_lock(&dev->struct_mutex);
- list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
- if (pt->vma->vm_private_data == map)
- found_maps++;
- if (pt->vma == vma) {
- list_del(&pt->head);
- kfree(pt);
- }
- }
-
- /* We were the only map that was found */
- if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
- /* Check to see if we are in the maplist; if we are not, then
- * we delete this mapping's information.
- */
- found_maps = 0;
- list_for_each_entry(r_list, &dev->maplist, head) {
- if (r_list->map == map)
- found_maps++;
- }
-
- if (!found_maps) {
- drm_dma_handle_t dmah;
-
- switch (map->type) {
- case _DRM_REGISTERS:
- case _DRM_FRAME_BUFFER:
- if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
- int retcode;
- retcode = mtrr_del(map->mtrr,
- map->offset,
- map->size);
- DRM_DEBUG("mtrr_del = %d\n", retcode);
- }
- iounmap(map->handle);
- break;
- case _DRM_SHM:
- vfree(map->handle);
- break;
- case _DRM_AGP:
- case _DRM_SCATTER_GATHER:
- break;
- case _DRM_CONSISTENT:
- dmah.vaddr = map->handle;
- dmah.busaddr = map->offset;
- dmah.size = map->size;
- __drm_pci_free(dev, &dmah);
- break;
- case _DRM_GEM:
- DRM_ERROR("tried to rmmap GEM object\n");
- break;
- }
- kfree(map);
- }
- }
- mutex_unlock(&dev->struct_mutex);
-}
-
-/**
- * \c fault method for DMA virtual memory.
- *
- * \param vma virtual memory area.
- * \param vmf fault information.
- * \return zero on success, or VM_FAULT_SIGBUS on failure; the resolved page
- * is stored in \p vmf->page.
- *
- * Determine the page number from the page offset and get it from
- * drm_device_dma::pagelist.
- */
-static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- struct drm_file *priv = vma->vm_file->private_data;
- struct drm_device *dev = priv->minor->dev;
- struct drm_device_dma *dma = dev->dma;
- unsigned long offset;
- unsigned long page_nr;
- struct page *page;
-
- if (!dma)
- return VM_FAULT_SIGBUS; /* Error */
- if (!dma->pagelist)
- return VM_FAULT_SIGBUS; /* Nothing allocated */
-
- offset = (unsigned long)vmf->virtual_address - vma->vm_start; /* vm_[pg]off[set] should be 0 */
- page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
- page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));
-
- get_page(page);
- vmf->page = page;
-
- DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
- return 0;
-}
-
-/**
- * \c fault method for scatter-gather virtual memory.
- *
- * \param vma virtual memory area.
- * \param vmf fault information.
- * \return zero on success, or VM_FAULT_SIGBUS on failure; the resolved page
- * is stored in \p vmf->page.
- *
- * Determine the map offset from the page offset and get it from
- * drm_sg_mem::pagelist.
- */
-static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- struct drm_local_map *map = vma->vm_private_data;
- struct drm_file *priv = vma->vm_file->private_data;
- struct drm_device *dev = priv->minor->dev;
- struct drm_sg_mem *entry = dev->sg;
- unsigned long offset;
- unsigned long map_offset;
- unsigned long page_offset;
- struct page *page;
-
- if (!entry)
- return VM_FAULT_SIGBUS; /* Error */
- if (!entry->pagelist)
- return VM_FAULT_SIGBUS; /* Nothing allocated */
-
- offset = (unsigned long)vmf->virtual_address - vma->vm_start;
- map_offset = map->offset - (unsigned long)dev->sg->virtual;
- page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
- page = entry->pagelist[page_offset];
- get_page(page);
- vmf->page = page;
-
- return 0;
-}
-
-static int drm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- return drm_do_vm_fault(vma, vmf);
-}
-
-static int drm_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- return drm_do_vm_shm_fault(vma, vmf);
-}
-
-static int drm_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- return drm_do_vm_dma_fault(vma, vmf);
-}
-
-static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- return drm_do_vm_sg_fault(vma, vmf);
-}
-
-/** AGP virtual memory operations */
-static const struct vm_operations_struct drm_vm_ops = {
- .fault = drm_vm_fault,
- .open = drm_vm_open,
- .close = drm_vm_close,
-};
-
-/** Shared virtual memory operations */
-static const struct vm_operations_struct drm_vm_shm_ops = {
- .fault = drm_vm_shm_fault,
- .open = drm_vm_open,
- .close = drm_vm_shm_close,
-};
-
-/** DMA virtual memory operations */
-static const struct vm_operations_struct drm_vm_dma_ops = {
- .fault = drm_vm_dma_fault,
- .open = drm_vm_open,
- .close = drm_vm_close,
-};
-
-/** Scatter-gather virtual memory operations */
-static const struct vm_operations_struct drm_vm_sg_ops = {
- .fault = drm_vm_sg_fault,
- .open = drm_vm_open,
- .close = drm_vm_close,
-};
-
-/**
- * \c open method for shared virtual memory.
- *
- * \param vma virtual memory area.
- *
- * Create a new drm_vma_entry structure as the \p vma private data entry and
- * add it to drm_device::vmalist.
- */
-void drm_vm_open_locked(struct vm_area_struct *vma)
-{
- struct drm_file *priv = vma->vm_file->private_data;
- struct drm_device *dev = priv->minor->dev;
- struct drm_vma_entry *vma_entry;
-
- DRM_DEBUG("0x%08lx,0x%08lx\n",
- vma->vm_start, vma->vm_end - vma->vm_start);
- atomic_inc(&dev->vma_count);
-
- vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
- if (vma_entry) {
- vma_entry->vma = vma;
- vma_entry->pid = current->pid;
- list_add(&vma_entry->head, &dev->vmalist);
- }
-}
-
-static void drm_vm_open(struct vm_area_struct *vma)
-{
- struct drm_file *priv = vma->vm_file->private_data;
- struct drm_device *dev = priv->minor->dev;
-
- mutex_lock(&dev->struct_mutex);
- drm_vm_open_locked(vma);
- mutex_unlock(&dev->struct_mutex);
-}
-
-void drm_vm_close_locked(struct vm_area_struct *vma)
-{
- struct drm_file *priv = vma->vm_file->private_data;
- struct drm_device *dev = priv->minor->dev;
- struct drm_vma_entry *pt, *temp;
-
- DRM_DEBUG("0x%08lx,0x%08lx\n",
- vma->vm_start, vma->vm_end - vma->vm_start);
- atomic_dec(&dev->vma_count);
-
- list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
- if (pt->vma == vma) {
- list_del(&pt->head);
- kfree(pt);
- break;
- }
- }
-}
-
-/**
- * \c close method for all virtual memory types.
- *
- * \param vma virtual memory area.
- *
- * Search for the \p vma private data entry in drm_device::vmalist, unlink it,
- * and free it.
- */
-static void drm_vm_close(struct vm_area_struct *vma)
-{
- struct drm_file *priv = vma->vm_file->private_data;
- struct drm_device *dev = priv->minor->dev;
-
- mutex_lock(&dev->struct_mutex);
- drm_vm_close_locked(vma);
- mutex_unlock(&dev->struct_mutex);
-}
-
-/**
- * mmap DMA memory.
- *
- * \param filp file pointer.
- * \param vma virtual memory area.
- * \return zero on success or a negative number on failure.
- *
- * Sets the virtual memory area operations structure to drm_vm_dma_ops, sets
- * the file pointer, and calls drm_vm_open_locked().
- */
-static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
-{
- struct drm_file *priv = filp->private_data;
- struct drm_device *dev;
- struct drm_device_dma *dma;
- unsigned long length = vma->vm_end - vma->vm_start;
-
- dev = priv->minor->dev;
- dma = dev->dma;
- DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
- vma->vm_start, vma->vm_end, vma->vm_pgoff);
-
- /* Length must match exact page count */
- if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
- return -EINVAL;
- }
-
- if (!capable(CAP_SYS_ADMIN) &&
- (dma->flags & _DRM_DMA_USE_PCI_RO)) {
- vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
-#if defined(__i386__) || defined(__x86_64__)
- pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
-#else
- /* Ye gads this is ugly. With more thought
- we could move this up higher and use
- `protection_map' instead. */
- vma->vm_page_prot =
- __pgprot(pte_val
- (pte_wrprotect
- (__pte(pgprot_val(vma->vm_page_prot)))));
-#endif
- }
-
- vma->vm_ops = &drm_vm_dma_ops;
-
- vma->vm_flags |= VM_RESERVED; /* Don't swap */
- vma->vm_flags |= VM_DONTEXPAND;
-
- vma->vm_file = filp; /* Needed for drm_vm_open() */
- drm_vm_open_locked(vma);
- return 0;
-}
-
-static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
-{
-#ifdef __alpha__
- return dev->hose->dense_mem_base - dev->hose->mem_space->start;
-#else
- return 0;
-#endif
-}
-
-/**
- * mmap a DRM memory map.
- *
- * \param filp file pointer.
- * \param vma virtual memory area.
- * \return zero on success or a negative number on failure.
- *
- * If the virtual memory area has no offset associated with it then it's a DMA
- * area, so this calls drm_mmap_dma(). Otherwise it looks the map up in
- * drm_device::maplist, checks that the restricted flag is not set, sets the
- * virtual memory operations according to the mapping type and remaps the
- * pages. Finally it sets the file pointer and calls drm_vm_open_locked().
- */
-int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
-{
- struct drm_file *priv = filp->private_data;
- struct drm_device *dev = priv->minor->dev;
- struct drm_local_map *map = NULL;
- resource_size_t offset = 0;
- struct drm_hash_item *hash;
-
- DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
- vma->vm_start, vma->vm_end, vma->vm_pgoff);
-
- if (!priv->authenticated)
- return -EACCES;
-
- /* We check for "dma". On Apple's UniNorth, it's valid to have
- * the AGP mapped at physical address 0
- * --BenH.
- */
- if (!vma->vm_pgoff
-#if __OS_HAS_AGP
- && (!dev->agp
- || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
-#endif
- )
- return drm_mmap_dma(filp, vma);
-
- if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
- DRM_ERROR("Could not find map\n");
- return -EINVAL;
- }
-
- map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
- if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
- return -EPERM;
-
- /* Check for valid size. */
- if (map->size < vma->vm_end - vma->vm_start)
- return -EINVAL;
-
- if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
- vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
-#if defined(__i386__) || defined(__x86_64__)
- pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
-#else
- /* Ye gads this is ugly. With more thought
- we could move this up higher and use
- `protection_map' instead. */
- vma->vm_page_prot =
- __pgprot(pte_val
- (pte_wrprotect
- (__pte(pgprot_val(vma->vm_page_prot)))));
-#endif
- }
-
- switch (map->type) {
-#if !defined(__arm__)
- case _DRM_AGP:
- if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
- /*
- * On some platforms we can't talk to bus dma address from the CPU, so for
- * memory of type DRM_AGP, we'll deal with sorting out the real physical
- * pages and mappings in fault()
- */
-#if defined(__powerpc__)
- pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
-#endif
- vma->vm_ops = &drm_vm_ops;
- break;
- }
- /* fall through to _DRM_FRAME_BUFFER... */
-#endif
- case _DRM_FRAME_BUFFER:
- case _DRM_REGISTERS:
- offset = drm_core_get_reg_ofs(dev);
- vma->vm_flags |= VM_IO; /* not in core dump */
- vma->vm_page_prot = drm_io_prot(map->type, vma);
-#if !defined(__arm__)
- if (io_remap_pfn_range(vma, vma->vm_start,
- (map->offset + offset) >> PAGE_SHIFT,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot))
- return -EAGAIN;
-#else
- if (remap_pfn_range(vma, vma->vm_start,
- (map->offset + offset) >> PAGE_SHIFT,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot))
- return -EAGAIN;
-#endif
-
- DRM_DEBUG(" Type = %d; start = 0x%lx, end = 0x%lx,"
- " offset = 0x%llx\n",
- map->type,
- vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset));
-
- vma->vm_ops = &drm_vm_ops;
- break;
- case _DRM_CONSISTENT:
- /* Consistent memory is really like shared memory. But
- * it's allocated in a different way, so avoid fault */
- if (remap_pfn_range(vma, vma->vm_start,
- page_to_pfn(virt_to_page(map->handle)),
- vma->vm_end - vma->vm_start, vma->vm_page_prot))
- return -EAGAIN;
- vma->vm_page_prot = drm_dma_prot(map->type, vma);
- /* fall through to _DRM_SHM */
- case _DRM_SHM:
- vma->vm_ops = &drm_vm_shm_ops;
- vma->vm_private_data = (void *)map;
- /* Don't let this area swap. Change when
- DRM_KERNEL advisory is supported. */
- vma->vm_flags |= VM_RESERVED;
- break;
- case _DRM_SCATTER_GATHER:
- vma->vm_ops = &drm_vm_sg_ops;
- vma->vm_private_data = (void *)map;
- vma->vm_flags |= VM_RESERVED;
- vma->vm_page_prot = drm_dma_prot(map->type, vma);
- break;
- default:
- return -EINVAL; /* This should never happen. */
- }
- vma->vm_flags |= VM_RESERVED; /* Don't swap */
- vma->vm_flags |= VM_DONTEXPAND;
-
- vma->vm_file = filp; /* Needed for drm_vm_open() */
- drm_vm_open_locked(vma);
- return 0;
-}
-
-int drm_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- struct drm_file *priv = filp->private_data;
- struct drm_device *dev = priv->minor->dev;
- int ret;
-
- mutex_lock(&dev->struct_mutex);
- ret = drm_mmap_locked(filp, vma);
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
-}
-EXPORT_SYMBOL(drm_mmap);
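/*
 * Illustrative sketch only, not part of the patch above: drm_mmap() is
 * normally installed as the .mmap hook in a legacy driver's file_operations.
 * example_fops is an assumed name; the other handlers shown are the usual
 * DRM helpers and are assumed here, only drm_mmap() is exported above.
 */
static const struct file_operations example_fops = {
	.owner		= THIS_MODULE,
	.open		= drm_open,
	.release	= drm_release,
	.unlocked_ioctl	= drm_ioctl,
	.mmap		= drm_mmap,	/* routes all maps through drm_mmap_locked() */
	.poll		= drm_poll,
};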
+++ /dev/null
-#
-# Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
-#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful but, except
-# as otherwise stated in writing, without any warranty; without even the
-# implied warranty of merchantability or fitness for a particular purpose.
-# See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# The full GNU General Public License is included in this distribution in
-# the file called "COPYING".
-#
-# Contact Information:
-# Imagination Technologies Ltd. <gpl-support@imgtec.com>
-# Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
-#
-#
-
-MODULE = drm
-
-INCLUDES =
-
-SOURCES =
-
-ifneq ($(SUPPORT_DRI_DRM),1)
-$(error "SUPPORT_DRI_DRM must be set")
-endif
-
-SOURCES_ROOT = ..
-
-INT_SOURCE_LIST += \
- pvr_drm_stubs.c
-
-INT_SOURCES := $(addprefix $(SOURCES_ROOT)/,$(INT_SOURCE_LIST))
-SOURCES += $(INT_SOURCES)
-
-EXT_SOURCE_DIR := $(KERNELDIR)/drivers/gpu/drm
-
-EXT_BUILD_DIR := $(SOURCES_ROOT)/kbuild/tmp_$(PVR_BUILD_DIR)_$(BUILD)_$(MODULE)
-
-$(src)/$(EXT_BUILD_DIR)/%.c: $(EXT_SOURCE_DIR)/%.c
- $(SILENT)$(MKDIR) $(dir $@)
- $(SILENT)$(CP) $< $@
-
-clean-dirs += $(EXT_BUILD_DIR)
-
-EXT_SOURCE_LIST := \
- drm_auth.c drm_bufs.c drm_cache.c \
- drm_context.c drm_dma.c drm_drawable.c \
- drm_drv.c drm_fops.c drm_gem.c drm_ioctl.c drm_irq.c \
- drm_lock.c drm_memory.c drm_proc.c drm_stub.c drm_vm.c \
- drm_agpsupport.c drm_scatter.c ati_pcigart.c drm_pci.c \
- drm_sysfs.c drm_hashtab.c drm_sman.c drm_mm.c \
- drm_crtc.c drm_modes.c drm_edid.c \
- drm_info.c drm_debugfs.c drm_encoder_slave.c
-
-EXT_SOURCES := $(addprefix $(EXT_BUILD_DIR)/,$(EXT_SOURCE_LIST))
-SOURCES += $(EXT_SOURCES)
-
-INCLUDES += \
- -I$(KERNELDIR)/include/drm \
- -I$(EXT_SOURCE_DIR)
-
-MODULE_CFLAGS += -DCONFIG_PCI -Wno-error
-
-include $(EURASIAROOT)/eurasiacon/build/linux/kbuild/Makefile.kbuild_subdir_common
+++ /dev/null
-/**
- * \file ati_pcigart.c
- * ATI PCI GART support
- *
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Wed Dec 13 21:52:19 2000 by gareth@valinux.com
- *
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include "drmP.h"
-
-# define ATI_PCIGART_PAGE_SIZE 4096 /**< PCI GART page size */
-
-static int drm_ati_alloc_pcigart_table(struct drm_device *dev,
- struct drm_ati_pcigart_info *gart_info)
-{
- gart_info->table_handle = drm_pci_alloc(dev, gart_info->table_size,
- PAGE_SIZE,
- gart_info->table_mask);
- if (gart_info->table_handle == NULL)
- return -ENOMEM;
-
- return 0;
-}
-
-static void drm_ati_free_pcigart_table(struct drm_device *dev,
- struct drm_ati_pcigart_info *gart_info)
-{
- drm_pci_free(dev, gart_info->table_handle);
- gart_info->table_handle = NULL;
-}
-
-int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
-{
- struct drm_sg_mem *entry = dev->sg;
- unsigned long pages;
- int i;
- int max_pages;
-
- /* we need to support large memory configurations */
- if (!entry) {
- DRM_ERROR("no scatter/gather memory!\n");
- return 0;
- }
-
- if (gart_info->bus_addr) {
-
- max_pages = (gart_info->table_size / sizeof(u32));
- pages = (entry->pages <= max_pages)
- ? entry->pages : max_pages;
-
- for (i = 0; i < pages; i++) {
- if (!entry->busaddr[i])
- break;
- pci_unmap_page(dev->pdev, entry->busaddr[i],
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- }
-
- if (gart_info->gart_table_location == DRM_ATI_GART_MAIN)
- gart_info->bus_addr = 0;
- }
-
- if (gart_info->gart_table_location == DRM_ATI_GART_MAIN &&
- gart_info->table_handle) {
- drm_ati_free_pcigart_table(dev, gart_info);
- }
-
- return 1;
-}
-EXPORT_SYMBOL(drm_ati_pcigart_cleanup);
-
-int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
-{
- struct drm_local_map *map = &gart_info->mapping;
- struct drm_sg_mem *entry = dev->sg;
- void *address = NULL;
- unsigned long pages;
- u32 *pci_gart = NULL, page_base, gart_idx;
- dma_addr_t bus_address = 0;
- int i, j, ret = 0;
- int max_ati_pages, max_real_pages;
-
- if (!entry) {
- DRM_ERROR("no scatter/gather memory!\n");
- goto done;
- }
-
- if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
- DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n");
-
- ret = drm_ati_alloc_pcigart_table(dev, gart_info);
- if (ret) {
- DRM_ERROR("cannot allocate PCI GART page!\n");
- goto done;
- }
-
- pci_gart = gart_info->table_handle->vaddr;
- address = gart_info->table_handle->vaddr;
- bus_address = gart_info->table_handle->busaddr;
- } else {
- address = gart_info->addr;
- bus_address = gart_info->bus_addr;
- DRM_DEBUG("PCI: Gart Table: VRAM %08LX mapped at %08lX\n",
- (unsigned long long)bus_address,
- (unsigned long)address);
- }
-
-
- max_ati_pages = (gart_info->table_size / sizeof(u32));
- max_real_pages = max_ati_pages / (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE);
- pages = (entry->pages <= max_real_pages)
- ? entry->pages : max_real_pages;
-
- if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
- memset(pci_gart, 0, max_ati_pages * sizeof(u32));
- } else {
- memset_io((void __iomem *)map->handle, 0, max_ati_pages * sizeof(u32));
- }
-
- gart_idx = 0;
- for (i = 0; i < pages; i++) {
- /* we need to support large memory configurations */
- entry->busaddr[i] = pci_map_page(dev->pdev, entry->pagelist[i],
- 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- if (entry->busaddr[i] == 0) {
- DRM_ERROR("unable to map PCIGART pages!\n");
- drm_ati_pcigart_cleanup(dev, gart_info);
- address = NULL;
- bus_address = 0;
- goto done;
- }
- page_base = (u32) entry->busaddr[i];
-
- for (j = 0; j < (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); j++) {
- u32 val;
-
- switch(gart_info->gart_reg_if) {
- case DRM_ATI_GART_IGP:
- val = page_base | 0xc;
- break;
- case DRM_ATI_GART_PCIE:
- val = (page_base >> 8) | 0xc;
- break;
- default:
- case DRM_ATI_GART_PCI:
- val = page_base;
- break;
- }
- if (gart_info->gart_table_location ==
- DRM_ATI_GART_MAIN)
- pci_gart[gart_idx] = cpu_to_le32(val);
- else
- DRM_WRITE32(map, gart_idx * sizeof(u32), val);
- gart_idx++;
- page_base += ATI_PCIGART_PAGE_SIZE;
- }
- }
- ret = 1;
-
-#if defined(__i386__) || defined(__x86_64__)
- wbinvd();
-#else
- mb();
-#endif
-
- done:
- gart_info->addr = address;
- gart_info->bus_addr = bus_address;
- return ret;
-}
-EXPORT_SYMBOL(drm_ati_pcigart_init);
+++ /dev/null
-/**
- * \file drm_agpsupport.c
- * DRM support for AGP/GART backend
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "drmP.h"
-#include <linux/module.h>
-
-#if __OS_HAS_AGP
-
-#include <asm/agp.h>
-
-/**
- * Get AGP information.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a (output) drm_agp_info structure.
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device has been initialized and acquired and fills in the
- * drm_agp_info structure with the information in drm_agp_head::agp_info.
- */
-int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info)
-{
- DRM_AGP_KERN *kern;
-
- if (!dev->agp || !dev->agp->acquired)
- return -EINVAL;
-
- kern = &dev->agp->agp_info;
- info->agp_version_major = kern->version.major;
- info->agp_version_minor = kern->version.minor;
- info->mode = kern->mode;
- info->aperture_base = kern->aper_base;
- info->aperture_size = kern->aper_size * 1024 * 1024;
- info->memory_allowed = kern->max_memory << PAGE_SHIFT;
- info->memory_used = kern->current_memory << PAGE_SHIFT;
- info->id_vendor = kern->device->vendor;
- info->id_device = kern->device->device;
-
- return 0;
-}
-
-EXPORT_SYMBOL(drm_agp_info);
-
-int drm_agp_info_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_agp_info *info = data;
- int err;
-
- err = drm_agp_info(dev, info);
- if (err)
- return err;
-
- return 0;
-}
-
-/**
- * Acquire the AGP device.
- *
- * \param dev DRM device that is to acquire AGP.
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device hasn't been acquired before and calls
- * \c agp_backend_acquire.
- */
-int drm_agp_acquire(struct drm_device * dev)
-{
- if (!dev->agp)
- return -ENODEV;
- if (dev->agp->acquired)
- return -EBUSY;
- if (!(dev->agp->bridge = agp_backend_acquire(dev->pdev)))
- return -ENODEV;
- dev->agp->acquired = 1;
- return 0;
-}
-
-EXPORT_SYMBOL(drm_agp_acquire);
-
-/**
- * Acquire the AGP device (ioctl).
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument.
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device hasn't been acquired before and calls
- * \c agp_backend_acquire.
- */
-int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- return drm_agp_acquire((struct drm_device *) file_priv->minor->dev);
-}
-
-/**
- * Release the AGP device.
- *
- * \param dev DRM device that is to release AGP.
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device has been acquired and calls \c agp_backend_release.
- */
-int drm_agp_release(struct drm_device * dev)
-{
- if (!dev->agp || !dev->agp->acquired)
- return -EINVAL;
- agp_backend_release(dev->agp->bridge);
- dev->agp->acquired = 0;
- return 0;
-}
-EXPORT_SYMBOL(drm_agp_release);
-
-int drm_agp_release_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- return drm_agp_release(dev);
-}
-
-/**
- * Enable the AGP bus.
- *
- * \param dev DRM device that has previously acquired AGP.
- * \param mode Requested AGP mode.
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device has been acquired but not enabled, and calls
- * \c agp_enable.
- */
-int drm_agp_enable(struct drm_device * dev, struct drm_agp_mode mode)
-{
- if (!dev->agp || !dev->agp->acquired)
- return -EINVAL;
-
- dev->agp->mode = mode.mode;
- agp_enable(dev->agp->bridge, mode.mode);
- dev->agp->enabled = 1;
- return 0;
-}
-
-EXPORT_SYMBOL(drm_agp_enable);
-
-int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_agp_mode *mode = data;
-
- return drm_agp_enable(dev, *mode);
-}
-
-/**
- * Allocate AGP memory.
- *
- * \param inode device inode.
- * \param file_priv file private pointer.
- * \param cmd command.
- * \param arg pointer to a drm_agp_buffer structure.
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device is present and has been acquired, allocates the
- * memory via alloc_agp() and creates a drm_agp_mem entry for it.
- */
-int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
-{
- struct drm_agp_mem *entry;
- DRM_AGP_MEM *memory;
- unsigned long pages;
- u32 type;
-
- if (!dev->agp || !dev->agp->acquired)
- return -EINVAL;
- if (!(entry = kmalloc(sizeof(*entry), GFP_KERNEL)))
- return -ENOMEM;
-
- memset(entry, 0, sizeof(*entry));
-
- pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
- type = (u32) request->type;
- if (!(memory = drm_alloc_agp(dev, pages, type))) {
- kfree(entry);
- return -ENOMEM;
- }
-
- entry->handle = (unsigned long)memory->key + 1;
- entry->memory = memory;
- entry->bound = 0;
- entry->pages = pages;
- list_add(&entry->head, &dev->agp->memory);
-
- request->handle = entry->handle;
- request->physical = memory->physical;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_agp_alloc);
-
-
-int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_agp_buffer *request = data;
-
- return drm_agp_alloc(dev, request);
-}
-
-/**
- * Search for the AGP memory entry associated with a handle.
- *
- * \param dev DRM device structure.
- * \param handle AGP memory handle.
- * \return pointer to the drm_agp_mem structure associated with \p handle.
- *
- * Walks through drm_agp_head::memory until finding a matching handle.
- */
-static struct drm_agp_mem *drm_agp_lookup_entry(struct drm_device * dev,
- unsigned long handle)
-{
- struct drm_agp_mem *entry;
-
- list_for_each_entry(entry, &dev->agp->memory, head) {
- if (entry->handle == handle)
- return entry;
- }
- return NULL;
-}
-
-/**
- * Unbind AGP memory from the GATT (ioctl).
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a drm_agp_binding structure.
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device is present and acquired, looks up the AGP memory
- * entry and passes it to the unbind_agp() function.
- */
-int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request)
-{
- struct drm_agp_mem *entry;
- int ret;
-
- if (!dev->agp || !dev->agp->acquired)
- return -EINVAL;
- if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
- return -EINVAL;
- if (!entry->bound)
- return -EINVAL;
- ret = drm_unbind_agp(entry->memory);
- if (ret == 0)
- entry->bound = 0;
- return ret;
-}
-EXPORT_SYMBOL(drm_agp_unbind);
-
-
-int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_agp_binding *request = data;
-
- return drm_agp_unbind(dev, request);
-}
-
-/**
- * Bind AGP memory into the GATT (ioctl)
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a drm_agp_binding structure.
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device is present and has been acquired and that no memory
- * is currently bound into the GATT. Looks up the AGP memory entry and passes
- * it to the bind_agp() function.
- */
-int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request)
-{
- struct drm_agp_mem *entry;
- int retcode;
- int page;
-
- if (!dev->agp || !dev->agp->acquired)
- return -EINVAL;
- if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
- return -EINVAL;
- if (entry->bound)
- return -EINVAL;
- page = (request->offset + PAGE_SIZE - 1) / PAGE_SIZE;
- if ((retcode = drm_bind_agp(entry->memory, page)))
- return retcode;
- entry->bound = dev->agp->base + (page << PAGE_SHIFT);
- DRM_DEBUG("base = 0x%lx entry->bound = 0x%lx\n",
- dev->agp->base, entry->bound);
- return 0;
-}
-EXPORT_SYMBOL(drm_agp_bind);
-
-
-int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_agp_binding *request = data;
-
- return drm_agp_bind(dev, request);
-}
-
-/**
- * Free AGP memory (ioctl).
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a drm_agp_buffer structure.
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device is present and has been acquired and looks up the
- * AGP memory entry. If the memory is currently bound, unbinds it via
- * unbind_agp(). Frees the memory via free_agp() as well as the entry itself,
- * and unlinks it from the doubly linked list it's inserted in.
- */
-int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request)
-{
- struct drm_agp_mem *entry;
-
- if (!dev->agp || !dev->agp->acquired)
- return -EINVAL;
- if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
- return -EINVAL;
- if (entry->bound)
- drm_unbind_agp(entry->memory);
-
- list_del(&entry->head);
-
- drm_free_agp(entry->memory, entry->pages);
- kfree(entry);
- return 0;
-}
-EXPORT_SYMBOL(drm_agp_free);
-
-
-
-int drm_agp_free_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_agp_buffer *request = data;
-
- return drm_agp_free(dev, request);
-}
-
-/**
- * Initialize the AGP resources.
- *
- * \return pointer to a drm_agp_head structure.
- *
- * Gets the AGP bridge information made available by the agpgart module, then
- * creates and initializes a drm_agp_head structure.
- */
-struct drm_agp_head *drm_agp_init(struct drm_device *dev)
-{
- struct drm_agp_head *head = NULL;
-
- if (!(head = kmalloc(sizeof(*head), GFP_KERNEL)))
- return NULL;
- memset((void *)head, 0, sizeof(*head));
- head->bridge = agp_find_bridge(dev->pdev);
- if (!head->bridge) {
- if (!(head->bridge = agp_backend_acquire(dev->pdev))) {
- kfree(head);
- return NULL;
- }
- agp_copy_info(head->bridge, &head->agp_info);
- agp_backend_release(head->bridge);
- } else {
- agp_copy_info(head->bridge, &head->agp_info);
- }
- if (head->agp_info.chipset == NOT_SUPPORTED) {
- kfree(head);
- return NULL;
- }
- INIT_LIST_HEAD(&head->memory);
- head->cant_use_aperture = head->agp_info.cant_use_aperture;
- head->page_mask = head->agp_info.page_mask;
- head->base = head->agp_info.aper_base;
- return head;
-}
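/*
 * Illustrative sketch only, not part of the patch above: a plausible AGP
 * bring-up sequence using the helpers in this file.  example_agp_setup() is
 * an assumed name; drm_agp_init(), drm_agp_acquire() and drm_agp_enable()
 * are the functions defined above.
 */
static int example_agp_setup(struct drm_device *dev)
{
	struct drm_agp_mode mode;

	dev->agp = drm_agp_init(dev);
	if (!dev->agp)
		return -ENODEV;		/* no usable AGP bridge found */

	if (drm_agp_acquire(dev))
		return -ENODEV;		/* unwinding of dev->agp elided for brevity */

	/* Re-use whatever mode the bridge reported; a real driver would
	 * mask in the transfer rates and features it actually supports. */
	mode.mode = dev->agp->agp_info.mode;
	return drm_agp_enable(dev, mode);
}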
-
-/** Calls agp_allocate_memory() */
-DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data * bridge,
- size_t pages, u32 type)
-{
- return agp_allocate_memory(bridge, pages, type);
-}
-
-/** Calls agp_free_memory() */
-int drm_agp_free_memory(DRM_AGP_MEM * handle)
-{
- if (!handle)
- return 0;
- agp_free_memory(handle);
- return 1;
-}
-
-/** Calls agp_bind_memory() */
-int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start)
-{
- if (!handle)
- return -EINVAL;
- return agp_bind_memory(handle, start);
-}
-
-/** Calls agp_unbind_memory() */
-int drm_agp_unbind_memory(DRM_AGP_MEM * handle)
-{
- if (!handle)
- return -EINVAL;
- return agp_unbind_memory(handle);
-}
-
-/**
- * Binds a collection of pages into AGP memory at the given offset, returning
- * the AGP memory structure containing them.
- *
- * No reference is held on the pages during this time -- it is up to the
- * caller to handle that.
- */
-DRM_AGP_MEM *
-drm_agp_bind_pages(struct drm_device *dev,
- struct page **pages,
- unsigned long num_pages,
- uint32_t gtt_offset,
- u32 type)
-{
- DRM_AGP_MEM *mem;
- int ret, i;
-
- DRM_DEBUG("\n");
-
- mem = drm_agp_allocate_memory(dev->agp->bridge, num_pages,
- type);
- if (mem == NULL) {
- DRM_ERROR("Failed to allocate memory for %ld pages\n",
- num_pages);
- return NULL;
- }
-
- for (i = 0; i < num_pages; i++)
- mem->pages[i] = pages[i];
- mem->page_count = num_pages;
-
- mem->is_flushed = true;
- ret = drm_agp_bind_memory(mem, gtt_offset / PAGE_SIZE);
- if (ret != 0) {
- DRM_ERROR("Failed to bind AGP memory: %d\n", ret);
- agp_free_memory(mem);
- return NULL;
- }
-
- return mem;
-}
-EXPORT_SYMBOL(drm_agp_bind_pages);
-
-void drm_agp_chipset_flush(struct drm_device *dev)
-{
- agp_flush_chipset(dev->agp->bridge);
-}
-EXPORT_SYMBOL(drm_agp_chipset_flush);
-
-#endif /* __OS_HAS_AGP */
+++ /dev/null
-/**
- * \file drm_auth.c
- * IOCTLs for authentication
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "drmP.h"
-
-/**
- * Find the file with the given magic number.
- *
- * \param dev DRM device.
- * \param magic magic number.
- *
- * Searches drm_device::magiclist, among all files with the same hash key, for
- * the one with a matching magic number, while holding the
- * drm_device::struct_mutex lock.
- */
-static struct drm_file *drm_find_file(struct drm_master *master, drm_magic_t magic)
-{
- struct drm_file *retval = NULL;
- struct drm_magic_entry *pt;
- struct drm_hash_item *hash;
- struct drm_device *dev = master->minor->dev;
-
- mutex_lock(&dev->struct_mutex);
- if (!drm_ht_find_item(&master->magiclist, (unsigned long)magic, &hash)) {
- pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item);
- retval = pt->priv;
- }
- mutex_unlock(&dev->struct_mutex);
- return retval;
-}
-
-/**
- * Adds a magic number.
- *
- * \param dev DRM device.
- * \param priv file private data.
- * \param magic magic number.
- *
- * Creates a drm_magic_entry structure and appends it to the linked list
- * associated with the magic number hash key in drm_device::magiclist, while
- * holding the drm_device::struct_mutex lock.
- */
-static int drm_add_magic(struct drm_master *master, struct drm_file *priv,
- drm_magic_t magic)
-{
- struct drm_magic_entry *entry;
- struct drm_device *dev = master->minor->dev;
- DRM_DEBUG("%d\n", magic);
-
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- return -ENOMEM;
- memset(entry, 0, sizeof(*entry));
- entry->priv = priv;
- entry->hash_item.key = (unsigned long)magic;
- mutex_lock(&dev->struct_mutex);
- drm_ht_insert_item(&master->magiclist, &entry->hash_item);
- list_add_tail(&entry->head, &master->magicfree);
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
-/**
- * Remove a magic number.
- *
- * \param dev DRM device.
- * \param magic magic number.
- *
- * Searches and unlinks the entry in drm_device::magiclist with the magic
- * number hash key, while holding the drm_device::struct_mutex lock.
- */
-static int drm_remove_magic(struct drm_master *master, drm_magic_t magic)
-{
- struct drm_magic_entry *pt;
- struct drm_hash_item *hash;
- struct drm_device *dev = master->minor->dev;
-
- DRM_DEBUG("%d\n", magic);
-
- mutex_lock(&dev->struct_mutex);
- if (drm_ht_find_item(&master->magiclist, (unsigned long)magic, &hash)) {
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
- }
- pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item);
- drm_ht_remove_item(&master->magiclist, hash);
- list_del(&pt->head);
- mutex_unlock(&dev->struct_mutex);
-
- kfree(pt);
-
- return 0;
-}
-
-/**
- * Get a unique magic number (ioctl).
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a resulting drm_auth structure.
- * \return zero on success, or a negative number on failure.
- *
- * If there is a magic number in drm_file::magic then use it, otherwise
- * search for a unique non-zero magic number and add it, associating it with
- * \p file_priv.
- */
-int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- static drm_magic_t sequence = 0;
- static DEFINE_SPINLOCK(lock);
- struct drm_auth *auth = data;
-
- /* Find unique magic */
- if (file_priv->magic) {
- auth->magic = file_priv->magic;
- } else {
- do {
- spin_lock(&lock);
- if (!sequence)
- ++sequence; /* reserve 0 */
- auth->magic = sequence++;
- spin_unlock(&lock);
- } while (drm_find_file(file_priv->master, auth->magic));
- file_priv->magic = auth->magic;
- drm_add_magic(file_priv->master, file_priv, auth->magic);
- }
-
- DRM_DEBUG("%u\n", auth->magic);
-
- return 0;
-}
-
-/**
- * Authenticate with a magic.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a drm_auth structure.
- * \return zero if authentication succeeded, or a negative number otherwise.
- *
- * Checks if \p file_priv is associated with the magic number passed in \p arg.
- */
-int drm_authmagic(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_auth *auth = data;
- struct drm_file *file;
-
- DRM_DEBUG("%u\n", auth->magic);
- if ((file = drm_find_file(file_priv->master, auth->magic))) {
- file->authenticated = 1;
- drm_remove_magic(file_priv->master, auth->magic);
- return 0;
- }
- return -EINVAL;
-}
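For context (not part of the deleted file): the two ioctls above implement the classic DRM magic-token authentication handshake. A minimal userspace sketch using the libdrm wrappers drmGetMagic() and drmAuthMagic() might look as follows; the device path and the idea that both file descriptors live in one process (rather than the token travelling from client to master over a socket) are assumptions for illustration only.

#include <fcntl.h>
#include <stdio.h>
#include <xf86drm.h>

int main(void)
{
	/* Two independent opens of the same node: one unauthenticated client,
	 * and one fd that is assumed to already hold DRM master status. */
	int client_fd = open("/dev/dri/card0", O_RDWR);
	int master_fd = open("/dev/dri/card0", O_RDWR);
	drm_magic_t magic;

	if (client_fd < 0 || master_fd < 0)
		return 1;

	/* Client side: DRM_IOCTL_GET_MAGIC, handled by drm_getmagic() above. */
	if (drmGetMagic(client_fd, &magic))
		return 1;

	/* Master side: DRM_IOCTL_AUTH_MAGIC, handled by drm_authmagic() above. */
	if (drmAuthMagic(master_fd, magic))
		return 1;

	printf("client authenticated with magic %u\n", magic);
	return 0;
}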
+++ /dev/null
-/**
- * \file drm_bufs.c
- * Generic buffer template
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
- *
- * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/vmalloc.h>
-#include <linux/log2.h>
-#include <asm/shmparam.h>
-#include "drmP.h"
-
-resource_size_t drm_get_resource_start(struct drm_device *dev, unsigned int resource)
-{
- return pci_resource_start(dev->pdev, resource);
-}
-EXPORT_SYMBOL(drm_get_resource_start);
-
-resource_size_t drm_get_resource_len(struct drm_device *dev, unsigned int resource)
-{
- return pci_resource_len(dev->pdev, resource);
-}
-
-EXPORT_SYMBOL(drm_get_resource_len);
-
-static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
- struct drm_local_map *map)
-{
- struct drm_map_list *entry;
- list_for_each_entry(entry, &dev->maplist, head) {
- /*
- * Because the kernel-userspace ABI is fixed at a 32-bit offset
- * while PCI resources may live above that, we ignore the map
- * offset for maps of type _DRM_FRAMEBUFFER or _DRM_REGISTERS.
- * It is assumed that each driver will have only one resource of
- * each type.
- */
- if (!entry->map ||
- map->type != entry->map->type ||
- entry->master != dev->primary->master)
- continue;
- switch (map->type) {
- case _DRM_SHM:
- if (map->flags != _DRM_CONTAINS_LOCK)
- break;
- case _DRM_REGISTERS:
- case _DRM_FRAME_BUFFER:
- return entry;
- default: /* Make gcc happy */
- ;
- }
- if (entry->map->offset == map->offset)
- return entry;
- }
-
- return NULL;
-}
-
-static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
- unsigned long user_token, int hashed_handle, int shm)
-{
- int use_hashed_handle, shift;
- unsigned long add;
-
-#if (BITS_PER_LONG == 64)
- use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
-#elif (BITS_PER_LONG == 32)
- use_hashed_handle = hashed_handle;
-#else
-#error Unsupported long size. Neither 64 nor 32 bits.
-#endif
-
- if (!use_hashed_handle) {
- int ret;
- hash->key = user_token >> PAGE_SHIFT;
- ret = drm_ht_insert_item(&dev->map_hash, hash);
- if (ret != -EINVAL)
- return ret;
- }
-
- shift = 0;
- add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
- if (shm && (SHMLBA > PAGE_SIZE)) {
- int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;
-
- /* For shared memory, we have to preserve the SHMLBA
- * bits of the eventual vma->vm_pgoff value during
- * mmap(). Otherwise we run into cache aliasing problems
- * on some platforms. On these platforms, the pgoff of
- * a mmap() request is used to pick a suitable virtual
- * address for the mmap() region such that it will not
- * cause cache aliasing problems.
- *
- * Therefore, make sure the SHMLBA relevant bits of the
- * hash value we use are equal to those in the original
- * kernel virtual address.
- */
- shift = bits;
- add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
- }
-
- return drm_ht_just_insert_please(&dev->map_hash, hash,
- user_token, 32 - PAGE_SHIFT - 3,
- shift, add);
-}
-
-/**
- * Core function to create a range of memory available for mapping by a
- * non-root process.
- *
- * Adjusts the memory offset to its absolute value according to the mapping
- * type. Adds the map to the map list drm_device::maplist. Adds MTRR's where
- * applicable and if supported by the kernel.
- */
-static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
- unsigned int size, enum drm_map_type type,
- enum drm_map_flags flags,
- struct drm_map_list ** maplist)
-{
- struct drm_local_map *map;
- struct drm_map_list *list;
- drm_dma_handle_t *dmah;
- unsigned long user_token;
- int ret;
-
- map = kmalloc(sizeof(*map), GFP_KERNEL);
- if (!map)
- return -ENOMEM;
-
- map->offset = offset;
- map->size = size;
- map->flags = flags;
- map->type = type;
-
- /* Only allow shared memory to be removable since we only keep enough
- * book keeping information about shared memory to allow for removal
- * when processes fork.
- */
- if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
- kfree(map);
- return -EINVAL;
- }
- DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
- (unsigned long long)map->offset, map->size, map->type);
-
- /* page-align _DRM_SHM maps. They are allocated here so there is no security
- * hole created by that and it works around various broken drivers that use
- * a non-aligned quantity to map the SAREA. --BenH
- */
- if (map->type == _DRM_SHM)
- map->size = PAGE_ALIGN(map->size);
-
- if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
- kfree(map);
- return -EINVAL;
- }
- map->mtrr = -1;
- map->handle = NULL;
-
- switch (map->type) {
- case _DRM_REGISTERS:
- case _DRM_FRAME_BUFFER:
-#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__)
- if (map->offset + (map->size-1) < map->offset ||
- map->offset < virt_to_phys(high_memory)) {
- kfree(map);
- return -EINVAL;
- }
-#endif
-#ifdef __alpha__
- map->offset += dev->hose->mem_space->start;
-#endif
- /* Some drivers preinitialize some maps, without the X Server
- * needing to be aware of it. Therefore, we just return success
- * when the server tries to create a duplicate map.
- */
- list = drm_find_matching_map(dev, map);
- if (list != NULL) {
- if (list->map->size != map->size) {
- DRM_DEBUG("Matching maps of type %d with "
- "mismatched sizes, (%ld vs %ld)\n",
- map->type, map->size,
- list->map->size);
- list->map->size = map->size;
- }
-
- kfree(map);
- *maplist = list;
- return 0;
- }
-
- if (drm_core_has_MTRR(dev)) {
- if (map->type == _DRM_FRAME_BUFFER ||
- (map->flags & _DRM_WRITE_COMBINING)) {
- map->mtrr = mtrr_add(map->offset, map->size,
- MTRR_TYPE_WRCOMB, 1);
- }
- }
- if (map->type == _DRM_REGISTERS) {
- map->handle = ioremap(map->offset, map->size);
- if (!map->handle) {
- kfree(map);
- return -ENOMEM;
- }
- }
-
- break;
- case _DRM_SHM:
- list = drm_find_matching_map(dev, map);
- if (list != NULL) {
- if(list->map->size != map->size) {
- DRM_DEBUG("Matching maps of type %d with "
- "mismatched sizes, (%ld vs %ld)\n",
- map->type, map->size, list->map->size);
- list->map->size = map->size;
- }
-
- kfree(map);
- *maplist = list;
- return 0;
- }
- map->handle = vmalloc_user(map->size);
- DRM_DEBUG("%lu %d %p\n",
- map->size, drm_order(map->size), map->handle);
- if (!map->handle) {
- kfree(map);
- return -ENOMEM;
- }
- map->offset = (unsigned long)map->handle;
- if (map->flags & _DRM_CONTAINS_LOCK) {
- /* Prevent a 2nd X Server from creating a 2nd lock */
- if (dev->primary->master->lock.hw_lock != NULL) {
- vfree(map->handle);
- kfree(map);
- return -EBUSY;
- }
- dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle; /* Pointer to lock */
- }
- break;
- case _DRM_AGP: {
- struct drm_agp_mem *entry;
- int valid = 0;
-
- if (!drm_core_has_AGP(dev)) {
- kfree(map);
- return -EINVAL;
- }
-#ifdef __alpha__
- map->offset += dev->hose->mem_space->start;
-#endif
- /* In some cases (i810 driver), user space may have already
- * added the AGP base itself, because dev->agp->base previously
- * only got set during AGP enable. So, only add the base
- * address if the map's offset isn't already within the
- * aperture.
- */
- if (map->offset < dev->agp->base ||
- map->offset > dev->agp->base +
- dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
- map->offset += dev->agp->base;
- }
- map->mtrr = dev->agp->agp_mtrr; /* for getmap */
-
- /* This assumes the DRM is in total control of AGP space.
- * It's not always the case as AGP can be in the control
- * of user space (i.e. i810 driver). So this loop will get
- * skipped and we double check that dev->agp->memory is
- * actually set as well as being invalid before EPERM'ing
- */
- list_for_each_entry(entry, &dev->agp->memory, head) {
- if ((map->offset >= entry->bound) &&
- (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
- valid = 1;
- break;
- }
- }
- if (!list_empty(&dev->agp->memory) && !valid) {
- kfree(map);
- return -EPERM;
- }
- DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
- (unsigned long long)map->offset, map->size);
-
- break;
- }
- case _DRM_GEM:
- DRM_ERROR("tried to addmap GEM object\n");
- break;
- case _DRM_SCATTER_GATHER:
- if (!dev->sg) {
- kfree(map);
- return -EINVAL;
- }
- map->offset += (unsigned long)dev->sg->virtual;
- break;
- case _DRM_CONSISTENT:
- /* dma_addr_t is 64-bit on i386 with CONFIG_HIGHMEM64G.
- * As we're limiting the address to 2^32-1 (or less),
- * casting it down to 32 bits is no problem, but we
- * need to point to a 64-bit variable first. */
- dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
- if (!dmah) {
- kfree(map);
- return -ENOMEM;
- }
- map->handle = dmah->vaddr;
- map->offset = (unsigned long)dmah->busaddr;
- kfree(dmah);
- break;
- default:
- kfree(map);
- return -EINVAL;
- }
-
- list = kmalloc(sizeof(*list), GFP_KERNEL);
- if (!list) {
- if (map->type == _DRM_REGISTERS)
- iounmap(map->handle);
- kfree(map);
- return -EINVAL;
- }
- memset(list, 0, sizeof(*list));
- list->map = map;
-
- mutex_lock(&dev->struct_mutex);
- list_add(&list->head, &dev->maplist);
-
- /* Assign a 32-bit handle */
- /* We do it here so that dev->struct_mutex protects the increment */
- user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
- map->offset;
- ret = drm_map_handle(dev, &list->hash, user_token, 0,
- (map->type == _DRM_SHM));
- if (ret) {
- if (map->type == _DRM_REGISTERS)
- iounmap(map->handle);
- kfree(map);
- kfree(list);
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
-
- list->user_token = list->hash.key << PAGE_SHIFT;
- mutex_unlock(&dev->struct_mutex);
-
- if (!(map->flags & _DRM_DRIVER))
- list->master = dev->primary->master;
- *maplist = list;
- return 0;
- }
-
-int drm_addmap(struct drm_device * dev, resource_size_t offset,
- unsigned int size, enum drm_map_type type,
- enum drm_map_flags flags, struct drm_local_map ** map_ptr)
-{
- struct drm_map_list *list;
- int rc;
-
- rc = drm_addmap_core(dev, offset, size, type, flags, &list);
- if (!rc)
- *map_ptr = list->map;
- return rc;
-}
-
-EXPORT_SYMBOL(drm_addmap);
-
-/**
- * Ioctl to specify a range of memory that is available for mapping by a
- * non-root process.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a drm_map structure.
- * \return zero on success or a negative value on error.
- *
- */
-int drm_addmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_map *map = data;
- struct drm_map_list *maplist;
- int err;
-
- if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
- return -EPERM;
-
- err = drm_addmap_core(dev, map->offset, map->size, map->type,
- map->flags, &maplist);
-
- if (err)
- return err;
-
- /* Avoid a warning on 64-bit; this cast isn't very nice, but the API is already set, so it is too late to change. */
- map->handle = (void *)(unsigned long)maplist->user_token;
- return 0;
-}
-
-/**
- * Remove a map private from list and deallocate resources if the mapping
- * isn't in use.
- *
- * Searches for the map on drm_device::maplist, removes it from the list, sees
- * if it is being used, and frees any associated resources (such as MTRRs) if
- * it is not in use.
- *
- * \sa drm_addmap
- */
-int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
-{
- struct drm_map_list *r_list = NULL, *list_t;
- drm_dma_handle_t dmah;
- int found = 0;
- struct drm_master *master;
-
- /* Find the list entry for the map and remove it */
- list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
- if (r_list->map == map) {
- master = r_list->master;
- list_del(&r_list->head);
- drm_ht_remove_key(&dev->map_hash,
- r_list->user_token >> PAGE_SHIFT);
- kfree(r_list);
- found = 1;
- break;
- }
- }
-
- if (!found)
- return -EINVAL;
-
- switch (map->type) {
- case _DRM_REGISTERS:
- iounmap(map->handle);
- /* FALLTHROUGH */
- case _DRM_FRAME_BUFFER:
- if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
- int retcode;
- retcode = mtrr_del(map->mtrr, map->offset, map->size);
- DRM_DEBUG("mtrr_del=%d\n", retcode);
- }
- break;
- case _DRM_SHM:
- vfree(map->handle);
- if (master) {
- if (dev->sigdata.lock == master->lock.hw_lock)
- dev->sigdata.lock = NULL;
- master->lock.hw_lock = NULL; /* SHM removed */
- master->lock.file_priv = NULL;
- wake_up_interruptible_all(&master->lock.lock_queue);
- }
- break;
- case _DRM_AGP:
- case _DRM_SCATTER_GATHER:
- break;
- case _DRM_CONSISTENT:
- dmah.vaddr = map->handle;
- dmah.busaddr = map->offset;
- dmah.size = map->size;
- __drm_pci_free(dev, &dmah);
- break;
- case _DRM_GEM:
- DRM_ERROR("tried to rmmap GEM object\n");
- break;
- }
- kfree(map);
-
- return 0;
-}
-EXPORT_SYMBOL(drm_rmmap_locked);
-
-int drm_rmmap(struct drm_device *dev, struct drm_local_map *map)
-{
- int ret;
-
- mutex_lock(&dev->struct_mutex);
- ret = drm_rmmap_locked(dev, map);
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
-}
-EXPORT_SYMBOL(drm_rmmap);
-
-/* The rmmap ioctl appears to be unnecessary. All mappings are torn down on
- * the last close of the device, and this is necessary for cleanup when things
- * exit uncleanly. Therefore, having userland manually remove mappings seems
- * like a pointless exercise since they're going away anyway.
- *
- * One use case might be after addmap is allowed for normal users for SHM and
- * gets used by drivers that the server doesn't need to care about. This seems
- * unlikely.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a struct drm_map structure.
- * \return zero on success or a negative value on error.
- */
-int drm_rmmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_map *request = data;
- struct drm_local_map *map = NULL;
- struct drm_map_list *r_list;
- int ret;
-
- mutex_lock(&dev->struct_mutex);
- list_for_each_entry(r_list, &dev->maplist, head) {
- if (r_list->map &&
- r_list->user_token == (unsigned long)request->handle &&
- r_list->map->flags & _DRM_REMOVABLE) {
- map = r_list->map;
- break;
- }
- }
-
- /* List has wrapped around to the head pointer, or it's empty and we
- * didn't find anything.
- */
- if (list_empty(&dev->maplist) || !map) {
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
- }
-
- /* Register and framebuffer maps are permanent */
- if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
- mutex_unlock(&dev->struct_mutex);
- return 0;
- }
-
- ret = drm_rmmap_locked(dev, map);
-
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
-}
-
-/**
- * Cleanup after an error on one of the addbufs() functions.
- *
- * \param dev DRM device.
- * \param entry buffer entry where the error occurred.
- *
- * Frees any pages and buffers associated with the given entry.
- */
-static void drm_cleanup_buf_error(struct drm_device * dev,
- struct drm_buf_entry * entry)
-{
- int i;
-
- if (entry->seg_count) {
- for (i = 0; i < entry->seg_count; i++) {
- if (entry->seglist[i]) {
- drm_pci_free(dev, entry->seglist[i]);
- }
- }
- kfree(entry->seglist);
-
- entry->seg_count = 0;
- }
-
- if (entry->buf_count) {
- for (i = 0; i < entry->buf_count; i++) {
- kfree(entry->buflist[i].dev_private);
- }
- kfree(entry->buflist);
-
- entry->buf_count = 0;
- }
-}
-
-#if __OS_HAS_AGP
-/**
- * Add AGP buffers for DMA transfers.
- *
- * \param dev struct drm_device to which the buffers are to be added.
- * \param request pointer to a struct drm_buf_desc describing the request.
- * \return zero on success or a negative number on failure.
- *
- * After some sanity checks creates a drm_buf structure for each buffer and
- * reallocates the buffer list of the same size order to accommodate the new
- * buffers.
- */
-int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
-{
- struct drm_device_dma *dma = dev->dma;
- struct drm_buf_entry *entry;
- struct drm_agp_mem *agp_entry;
- struct drm_buf *buf;
- unsigned long offset;
- unsigned long agp_offset;
- int count;
- int order;
- int size;
- int alignment;
- int page_order;
- int total;
- int byte_count;
- int i, valid;
- struct drm_buf **temp_buflist;
-
- if (!dma)
- return -EINVAL;
-
- count = request->count;
- order = drm_order(request->size);
- size = 1 << order;
-
- alignment = (request->flags & _DRM_PAGE_ALIGN)
- ? PAGE_ALIGN(size) : size;
- page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
- total = PAGE_SIZE << page_order;
-
- byte_count = 0;
- agp_offset = dev->agp->base + request->agp_start;
-
- DRM_DEBUG("count: %d\n", count);
- DRM_DEBUG("order: %d\n", order);
- DRM_DEBUG("size: %d\n", size);
- DRM_DEBUG("agp_offset: %lx\n", agp_offset);
- DRM_DEBUG("alignment: %d\n", alignment);
- DRM_DEBUG("page_order: %d\n", page_order);
- DRM_DEBUG("total: %d\n", total);
-
- if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
- return -EINVAL;
- if (dev->queue_count)
- return -EBUSY; /* Not while in use */
-
- /* Make sure buffers are located in AGP memory that we own */
- valid = 0;
- list_for_each_entry(agp_entry, &dev->agp->memory, head) {
- if ((agp_offset >= agp_entry->bound) &&
- (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
- valid = 1;
- break;
- }
- }
- if (!list_empty(&dev->agp->memory) && !valid) {
- DRM_DEBUG("zone invalid\n");
- return -EINVAL;
- }
- spin_lock(&dev->count_lock);
- if (dev->buf_use) {
- spin_unlock(&dev->count_lock);
- return -EBUSY;
- }
- atomic_inc(&dev->buf_alloc);
- spin_unlock(&dev->count_lock);
-
- mutex_lock(&dev->struct_mutex);
- entry = &dma->bufs[order];
- if (entry->buf_count) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM; /* May only call once for each order */
- }
-
- if (count < 0 || count > 4096) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -EINVAL;
- }
-
- entry->buflist = kmalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
- if (!entry->buflist) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
- memset(entry->buflist, 0, count * sizeof(*entry->buflist));
-
- entry->buf_size = size;
- entry->page_order = page_order;
-
- offset = 0;
-
- while (entry->buf_count < count) {
- buf = &entry->buflist[entry->buf_count];
- buf->idx = dma->buf_count + entry->buf_count;
- buf->total = alignment;
- buf->order = order;
- buf->used = 0;
-
- buf->offset = (dma->byte_count + offset);
- buf->bus_address = agp_offset + offset;
- buf->address = (void *)(agp_offset + offset);
- buf->next = NULL;
- buf->waiting = 0;
- buf->pending = 0;
- init_waitqueue_head(&buf->dma_wait);
- buf->file_priv = NULL;
-
- buf->dev_priv_size = dev->driver->dev_priv_size;
- buf->dev_private = kmalloc(buf->dev_priv_size, GFP_KERNEL);
- if (!buf->dev_private) {
- /* Set count correctly so we free the proper amount. */
- entry->buf_count = count;
- drm_cleanup_buf_error(dev, entry);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
- memset(buf->dev_private, 0, buf->dev_priv_size);
-
- DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
-
- offset += alignment;
- entry->buf_count++;
- byte_count += PAGE_SIZE << page_order;
- }
-
- DRM_DEBUG("byte_count: %d\n", byte_count);
-
- temp_buflist = krealloc(dma->buflist,
- (dma->buf_count + entry->buf_count) *
- sizeof(*dma->buflist), GFP_KERNEL);
- if (!temp_buflist) {
- /* Free the entry because it isn't valid */
- drm_cleanup_buf_error(dev, entry);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
- dma->buflist = temp_buflist;
-
- for (i = 0; i < entry->buf_count; i++) {
- dma->buflist[i + dma->buf_count] = &entry->buflist[i];
- }
-
- dma->buf_count += entry->buf_count;
- dma->seg_count += entry->seg_count;
- dma->page_count += byte_count >> PAGE_SHIFT;
- dma->byte_count += byte_count;
-
- DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
- DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
-
- mutex_unlock(&dev->struct_mutex);
-
- request->count = entry->buf_count;
- request->size = size;
-
- dma->flags = _DRM_DMA_USE_AGP;
-
- atomic_dec(&dev->buf_alloc);
- return 0;
-}
-EXPORT_SYMBOL(drm_addbufs_agp);
-#endif /* __OS_HAS_AGP */
-
-int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
-{
- struct drm_device_dma *dma = dev->dma;
- int count;
- int order;
- int size;
- int total;
- int page_order;
- struct drm_buf_entry *entry;
- drm_dma_handle_t *dmah;
- struct drm_buf *buf;
- int alignment;
- unsigned long offset;
- int i;
- int byte_count;
- int page_count;
- unsigned long *temp_pagelist;
- struct drm_buf **temp_buflist;
-
- if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
- return -EINVAL;
-
- if (!dma)
- return -EINVAL;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- count = request->count;
- order = drm_order(request->size);
- size = 1 << order;
-
- DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n",
- request->count, request->size, size, order, dev->queue_count);
-
- if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
- return -EINVAL;
- if (dev->queue_count)
- return -EBUSY; /* Not while in use */
-
- alignment = (request->flags & _DRM_PAGE_ALIGN)
- ? PAGE_ALIGN(size) : size;
- page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
- total = PAGE_SIZE << page_order;
-
- spin_lock(&dev->count_lock);
- if (dev->buf_use) {
- spin_unlock(&dev->count_lock);
- return -EBUSY;
- }
- atomic_inc(&dev->buf_alloc);
- spin_unlock(&dev->count_lock);
-
- mutex_lock(&dev->struct_mutex);
- entry = &dma->bufs[order];
- if (entry->buf_count) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM; /* May only call once for each order */
- }
-
- if (count < 0 || count > 4096) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -EINVAL;
- }
-
- entry->buflist = kmalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
- if (!entry->buflist) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
- memset(entry->buflist, 0, count * sizeof(*entry->buflist));
-
- entry->seglist = kmalloc(count * sizeof(*entry->seglist), GFP_KERNEL);
- if (!entry->seglist) {
- kfree(entry->buflist);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
- memset(entry->seglist, 0, count * sizeof(*entry->seglist));
-
- /* Keep the original pagelist until we know all the allocations
- * have succeeded
- */
- temp_pagelist = kmalloc((dma->page_count + (count << page_order)) *
- sizeof(*dma->pagelist), GFP_KERNEL);
- if (!temp_pagelist) {
- kfree(entry->buflist);
- kfree(entry->seglist);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
- memcpy(temp_pagelist,
- dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
- DRM_DEBUG("pagelist: %d entries\n",
- dma->page_count + (count << page_order));
-
- entry->buf_size = size;
- entry->page_order = page_order;
- byte_count = 0;
- page_count = 0;
-
- while (entry->buf_count < count) {
-
- dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful);
-
- if (!dmah) {
- /* Set count correctly so we free the proper amount. */
- entry->buf_count = count;
- entry->seg_count = count;
- drm_cleanup_buf_error(dev, entry);
- kfree(temp_pagelist);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
- entry->seglist[entry->seg_count++] = dmah;
- for (i = 0; i < (1 << page_order); i++) {
- DRM_DEBUG("page %d @ 0x%08lx\n",
- dma->page_count + page_count,
- (unsigned long)dmah->vaddr + PAGE_SIZE * i);
- temp_pagelist[dma->page_count + page_count++]
- = (unsigned long)dmah->vaddr + PAGE_SIZE * i;
- }
- for (offset = 0;
- offset + size <= total && entry->buf_count < count;
- offset += alignment, ++entry->buf_count) {
- buf = &entry->buflist[entry->buf_count];
- buf->idx = dma->buf_count + entry->buf_count;
- buf->total = alignment;
- buf->order = order;
- buf->used = 0;
- buf->offset = (dma->byte_count + byte_count + offset);
- buf->address = (void *)(dmah->vaddr + offset);
- buf->bus_address = dmah->busaddr + offset;
- buf->next = NULL;
- buf->waiting = 0;
- buf->pending = 0;
- init_waitqueue_head(&buf->dma_wait);
- buf->file_priv = NULL;
-
- buf->dev_priv_size = dev->driver->dev_priv_size;
- buf->dev_private = kmalloc(buf->dev_priv_size,
- GFP_KERNEL);
- if (!buf->dev_private) {
- /* Set count correctly so we free the proper amount. */
- entry->buf_count = count;
- entry->seg_count = count;
- drm_cleanup_buf_error(dev, entry);
- kfree(temp_pagelist);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
- memset(buf->dev_private, 0, buf->dev_priv_size);
-
- DRM_DEBUG("buffer %d @ %p\n",
- entry->buf_count, buf->address);
- }
- byte_count += PAGE_SIZE << page_order;
- }
-
- temp_buflist = krealloc(dma->buflist,
- (dma->buf_count + entry->buf_count) *
- sizeof(*dma->buflist), GFP_KERNEL);
- if (!temp_buflist) {
- /* Free the entry because it isn't valid */
- drm_cleanup_buf_error(dev, entry);
- kfree(temp_pagelist);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
- dma->buflist = temp_buflist;
-
- for (i = 0; i < entry->buf_count; i++) {
- dma->buflist[i + dma->buf_count] = &entry->buflist[i];
- }
-
- /* No allocations failed, so now we can replace the original pagelist
- * with the new one.
- */
- if (dma->page_count) {
- kfree(dma->pagelist);
- }
- dma->pagelist = temp_pagelist;
-
- dma->buf_count += entry->buf_count;
- dma->seg_count += entry->seg_count;
- dma->page_count += entry->seg_count << page_order;
- dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
-
- mutex_unlock(&dev->struct_mutex);
-
- request->count = entry->buf_count;
- request->size = size;
-
- if (request->flags & _DRM_PCI_BUFFER_RO)
- dma->flags = _DRM_DMA_USE_PCI_RO;
-
- atomic_dec(&dev->buf_alloc);
- return 0;
-
-}
-EXPORT_SYMBOL(drm_addbufs_pci);
-
-static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request)
-{
- struct drm_device_dma *dma = dev->dma;
- struct drm_buf_entry *entry;
- struct drm_buf *buf;
- unsigned long offset;
- unsigned long agp_offset;
- int count;
- int order;
- int size;
- int alignment;
- int page_order;
- int total;
- int byte_count;
- int i;
- struct drm_buf **temp_buflist;
-
- if (!drm_core_check_feature(dev, DRIVER_SG))
- return -EINVAL;
-
- if (!dma)
- return -EINVAL;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- count = request->count;
- order = drm_order(request->size);
- size = 1 << order;
-
- alignment = (request->flags & _DRM_PAGE_ALIGN)
- ? PAGE_ALIGN(size) : size;
- page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
- total = PAGE_SIZE << page_order;
-
- byte_count = 0;
- agp_offset = request->agp_start;
-
- DRM_DEBUG("count: %d\n", count);
- DRM_DEBUG("order: %d\n", order);
- DRM_DEBUG("size: %d\n", size);
- DRM_DEBUG("agp_offset: %lu\n", agp_offset);
- DRM_DEBUG("alignment: %d\n", alignment);
- DRM_DEBUG("page_order: %d\n", page_order);
- DRM_DEBUG("total: %d\n", total);
-
- if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
- return -EINVAL;
- if (dev->queue_count)
- return -EBUSY; /* Not while in use */
-
- spin_lock(&dev->count_lock);
- if (dev->buf_use) {
- spin_unlock(&dev->count_lock);
- return -EBUSY;
- }
- atomic_inc(&dev->buf_alloc);
- spin_unlock(&dev->count_lock);
-
- mutex_lock(&dev->struct_mutex);
- entry = &dma->bufs[order];
- if (entry->buf_count) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM; /* May only call once for each order */
- }
-
- if (count < 0 || count > 4096) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -EINVAL;
- }
-
- entry->buflist = kmalloc(count * sizeof(*entry->buflist),
- GFP_KERNEL);
- if (!entry->buflist) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
- memset(entry->buflist, 0, count * sizeof(*entry->buflist));
-
- entry->buf_size = size;
- entry->page_order = page_order;
-
- offset = 0;
-
- while (entry->buf_count < count) {
- buf = &entry->buflist[entry->buf_count];
- buf->idx = dma->buf_count + entry->buf_count;
- buf->total = alignment;
- buf->order = order;
- buf->used = 0;
-
- buf->offset = (dma->byte_count + offset);
- buf->bus_address = agp_offset + offset;
- buf->address = (void *)(agp_offset + offset
- + (unsigned long)dev->sg->virtual);
- buf->next = NULL;
- buf->waiting = 0;
- buf->pending = 0;
- init_waitqueue_head(&buf->dma_wait);
- buf->file_priv = NULL;
-
- buf->dev_priv_size = dev->driver->dev_priv_size;
- buf->dev_private = kmalloc(buf->dev_priv_size, GFP_KERNEL);
- if (!buf->dev_private) {
- /* Set count correctly so we free the proper amount. */
- entry->buf_count = count;
- drm_cleanup_buf_error(dev, entry);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
-
- memset(buf->dev_private, 0, buf->dev_priv_size);
-
- DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
-
- offset += alignment;
- entry->buf_count++;
- byte_count += PAGE_SIZE << page_order;
- }
-
- DRM_DEBUG("byte_count: %d\n", byte_count);
-
- temp_buflist = krealloc(dma->buflist,
- (dma->buf_count + entry->buf_count) *
- sizeof(*dma->buflist), GFP_KERNEL);
- if (!temp_buflist) {
- /* Free the entry because it isn't valid */
- drm_cleanup_buf_error(dev, entry);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
- dma->buflist = temp_buflist;
-
- for (i = 0; i < entry->buf_count; i++) {
- dma->buflist[i + dma->buf_count] = &entry->buflist[i];
- }
-
- dma->buf_count += entry->buf_count;
- dma->seg_count += entry->seg_count;
- dma->page_count += byte_count >> PAGE_SHIFT;
- dma->byte_count += byte_count;
-
- DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
- DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
-
- mutex_unlock(&dev->struct_mutex);
-
- request->count = entry->buf_count;
- request->size = size;
-
- dma->flags = _DRM_DMA_USE_SG;
-
- atomic_dec(&dev->buf_alloc);
- return 0;
-}
-
-static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request)
-{
- struct drm_device_dma *dma = dev->dma;
- struct drm_buf_entry *entry;
- struct drm_buf *buf;
- unsigned long offset;
- unsigned long agp_offset;
- int count;
- int order;
- int size;
- int alignment;
- int page_order;
- int total;
- int byte_count;
- int i;
- struct drm_buf **temp_buflist;
-
- if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
- return -EINVAL;
-
- if (!dma)
- return -EINVAL;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- count = request->count;
- order = drm_order(request->size);
- size = 1 << order;
-
- alignment = (request->flags & _DRM_PAGE_ALIGN)
- ? PAGE_ALIGN(size) : size;
- page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
- total = PAGE_SIZE << page_order;
-
- byte_count = 0;
- agp_offset = request->agp_start;
-
- DRM_DEBUG("count: %d\n", count);
- DRM_DEBUG("order: %d\n", order);
- DRM_DEBUG("size: %d\n", size);
- DRM_DEBUG("agp_offset: %lu\n", agp_offset);
- DRM_DEBUG("alignment: %d\n", alignment);
- DRM_DEBUG("page_order: %d\n", page_order);
- DRM_DEBUG("total: %d\n", total);
-
- if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
- return -EINVAL;
- if (dev->queue_count)
- return -EBUSY; /* Not while in use */
-
- spin_lock(&dev->count_lock);
- if (dev->buf_use) {
- spin_unlock(&dev->count_lock);
- return -EBUSY;
- }
- atomic_inc(&dev->buf_alloc);
- spin_unlock(&dev->count_lock);
-
- mutex_lock(&dev->struct_mutex);
- entry = &dma->bufs[order];
- if (entry->buf_count) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM; /* May only call once for each order */
- }
-
- if (count < 0 || count > 4096) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -EINVAL;
- }
-
- entry->buflist = kmalloc(count * sizeof(*entry->buflist),
- GFP_KERNEL);
- if (!entry->buflist) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
- memset(entry->buflist, 0, count * sizeof(*entry->buflist));
-
- entry->buf_size = size;
- entry->page_order = page_order;
-
- offset = 0;
-
- while (entry->buf_count < count) {
- buf = &entry->buflist[entry->buf_count];
- buf->idx = dma->buf_count + entry->buf_count;
- buf->total = alignment;
- buf->order = order;
- buf->used = 0;
-
- buf->offset = (dma->byte_count + offset);
- buf->bus_address = agp_offset + offset;
- buf->address = (void *)(agp_offset + offset);
- buf->next = NULL;
- buf->waiting = 0;
- buf->pending = 0;
- init_waitqueue_head(&buf->dma_wait);
- buf->file_priv = NULL;
-
- buf->dev_priv_size = dev->driver->dev_priv_size;
- buf->dev_private = kmalloc(buf->dev_priv_size, GFP_KERNEL);
- if (!buf->dev_private) {
- /* Set count correctly so we free the proper amount. */
- entry->buf_count = count;
- drm_cleanup_buf_error(dev, entry);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
- memset(buf->dev_private, 0, buf->dev_priv_size);
-
- DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
-
- offset += alignment;
- entry->buf_count++;
- byte_count += PAGE_SIZE << page_order;
- }
-
- DRM_DEBUG("byte_count: %d\n", byte_count);
-
- temp_buflist = krealloc(dma->buflist,
- (dma->buf_count + entry->buf_count) *
- sizeof(*dma->buflist), GFP_KERNEL);
- if (!temp_buflist) {
- /* Free the entry because it isn't valid */
- drm_cleanup_buf_error(dev, entry);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
- dma->buflist = temp_buflist;
-
- for (i = 0; i < entry->buf_count; i++) {
- dma->buflist[i + dma->buf_count] = &entry->buflist[i];
- }
-
- dma->buf_count += entry->buf_count;
- dma->seg_count += entry->seg_count;
- dma->page_count += byte_count >> PAGE_SHIFT;
- dma->byte_count += byte_count;
-
- DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
- DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
-
- mutex_unlock(&dev->struct_mutex);
-
- request->count = entry->buf_count;
- request->size = size;
-
- dma->flags = _DRM_DMA_USE_FB;
-
- atomic_dec(&dev->buf_alloc);
- return 0;
-}
-
-
-/**
- * Add buffers for DMA transfers (ioctl).
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a struct drm_buf_desc request.
- * \return zero on success or a negative number on failure.
- *
- * According with the memory type specified in drm_buf_desc::flags and the
- * build options, it dispatches the call either to addbufs_agp(),
- * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
- * PCI memory respectively.
- */
-int drm_addbufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_buf_desc *request = data;
- int ret;
-
- if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
- return -EINVAL;
-
-#if __OS_HAS_AGP
- if (request->flags & _DRM_AGP_BUFFER)
- ret = drm_addbufs_agp(dev, request);
- else
-#endif
- if (request->flags & _DRM_SG_BUFFER)
- ret = drm_addbufs_sg(dev, request);
- else if (request->flags & _DRM_FB_BUFFER)
- ret = drm_addbufs_fb(dev, request);
- else
- ret = drm_addbufs_pci(dev, request);
-
- return ret;
-}
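As a rough illustration of how this dispatcher is driven from userspace (a sketch, not part of the deleted source): libdrm's drmAddBufs() fills in the drm_buf_desc and issues DRM_IOCTL_ADD_BUFS, so the flags passed there select drm_addbufs_agp(), drm_addbufs_sg(), drm_addbufs_fb() or, with none of them set, drm_addbufs_pci(). The buffer count and size below are arbitrary, and the fd is assumed to be a DRM master fd on a driver that advertises PCI DMA.

#include <xf86drm.h>

static int setup_dma_buffers(int fd)
{
	/* 32 page-aligned 64 KiB buffers from consistent PCI memory
	 * (no DRM_AGP_BUFFER/DRM_SG_BUFFER/DRM_FB_BUFFER flag set). */
	int count = drmAddBufs(fd, 32, 65536, DRM_PAGE_ALIGN, 0);
	if (count < 0)
		return count;

	/* DRM_IOCTL_MAP_BUFS: map every allocated buffer into this process. */
	drmBufMapPtr bufs = drmMapBufs(fd);
	if (!bufs)
		return -1;

	/* ... hand bufs->list[i].address / bufs->list[i].total to the
	 * driver-specific submission code here ... */

	return drmUnmapBufs(bufs);
}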
-
-/**
- * Get information about the buffer mappings.
- *
- * This was originally meant for debugging purposes, or for use by a
- * sophisticated client library to determine how best to use the available
- * buffers (e.g., large buffers can be used for image transfer).
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a drm_buf_info structure.
- * \return zero on success or a negative number on failure.
- *
- * Increments drm_device::buf_use while holding the drm_device::count_lock
- * lock, preventing the allocation of more buffers after this call. Information
- * about each requested buffer is then copied into user space.
- */
-int drm_infobufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_device_dma *dma = dev->dma;
- struct drm_buf_info *request = data;
- int i;
- int count;
-
- if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
- return -EINVAL;
-
- if (!dma)
- return -EINVAL;
-
- spin_lock(&dev->count_lock);
- if (atomic_read(&dev->buf_alloc)) {
- spin_unlock(&dev->count_lock);
- return -EBUSY;
- }
- ++dev->buf_use; /* Can't allocate more after this call */
- spin_unlock(&dev->count_lock);
-
- for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
- if (dma->bufs[i].buf_count)
- ++count;
- }
-
- DRM_DEBUG("count = %d\n", count);
-
- if (request->count >= count) {
- for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
- if (dma->bufs[i].buf_count) {
- struct drm_buf_desc __user *to =
- &request->list[count];
- struct drm_buf_entry *from = &dma->bufs[i];
- struct drm_freelist *list = &dma->bufs[i].freelist;
- if (copy_to_user(&to->count,
- &from->buf_count,
- sizeof(from->buf_count)) ||
- copy_to_user(&to->size,
- &from->buf_size,
- sizeof(from->buf_size)) ||
- copy_to_user(&to->low_mark,
- &list->low_mark,
- sizeof(list->low_mark)) ||
- copy_to_user(&to->high_mark,
- &list->high_mark,
- sizeof(list->high_mark)))
- return -EFAULT;
-
- DRM_DEBUG("%d %d %d %d %d\n",
- i,
- dma->bufs[i].buf_count,
- dma->bufs[i].buf_size,
- dma->bufs[i].freelist.low_mark,
- dma->bufs[i].freelist.high_mark);
- ++count;
- }
- }
- }
- request->count = count;
-
- return 0;
-}
-
-/**
- * Specifies a low and high water mark for buffer allocation
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg a pointer to a drm_buf_desc structure.
- * \return zero on success or a negative number on failure.
- *
- * Verifies that the size order is bounded between the admissible orders and
- * updates the respective drm_device_dma::bufs entry low and high water mark.
- *
- * \note This ioctl is deprecated and mostly never used.
- */
-int drm_markbufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_device_dma *dma = dev->dma;
- struct drm_buf_desc *request = data;
- int order;
- struct drm_buf_entry *entry;
-
- if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
- return -EINVAL;
-
- if (!dma)
- return -EINVAL;
-
- DRM_DEBUG("%d, %d, %d\n",
- request->size, request->low_mark, request->high_mark);
- order = drm_order(request->size);
- if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
- return -EINVAL;
- entry = &dma->bufs[order];
-
- if (request->low_mark < 0 || request->low_mark > entry->buf_count)
- return -EINVAL;
- if (request->high_mark < 0 || request->high_mark > entry->buf_count)
- return -EINVAL;
-
- entry->freelist.low_mark = request->low_mark;
- entry->freelist.high_mark = request->high_mark;
-
- return 0;
-}
-
-/**
- * Unreserve the buffers in list, previously reserved using drmDMA.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a drm_buf_free structure.
- * \return zero on success or a negative number on failure.
- *
- * Calls free_buffer() for each used buffer.
- * This function is primarily used for debugging.
- */
-int drm_freebufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_device_dma *dma = dev->dma;
- struct drm_buf_free *request = data;
- int i;
- int idx;
- struct drm_buf *buf;
-
- if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
- return -EINVAL;
-
- if (!dma)
- return -EINVAL;
-
- DRM_DEBUG("%d\n", request->count);
- for (i = 0; i < request->count; i++) {
- if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
- return -EFAULT;
- if (idx < 0 || idx >= dma->buf_count) {
- DRM_ERROR("Index %d (of %d max)\n",
- idx, dma->buf_count - 1);
- return -EINVAL;
- }
- buf = dma->buflist[idx];
- if (buf->file_priv != file_priv) {
- DRM_ERROR("Process %d freeing buffer not owned\n",
- task_pid_nr(current));
- return -EINVAL;
- }
- drm_free_buffer(dev, buf);
- }
-
- return 0;
-}
-
-/**
- * Maps all of the DMA buffers into client-virtual space (ioctl).
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a drm_buf_map structure.
- * \return zero on success or a negative number on failure.
- *
- * Maps the AGP, SG or PCI buffer region with do_mmap(), and copies information
- * about each buffer into user space. For PCI buffers, it calls do_mmap() with
- * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
- * drm_mmap_dma().
- */
-int drm_mapbufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_device_dma *dma = dev->dma;
- int retcode = 0;
- const int zero = 0;
- unsigned long virtual;
- unsigned long address;
- struct drm_buf_map *request = data;
- int i;
-
- if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
- return -EINVAL;
-
- if (!dma)
- return -EINVAL;
-
- spin_lock(&dev->count_lock);
- if (atomic_read(&dev->buf_alloc)) {
- spin_unlock(&dev->count_lock);
- return -EBUSY;
- }
- dev->buf_use++; /* Can't allocate more after this call */
- spin_unlock(&dev->count_lock);
-
- if (request->count >= dma->buf_count) {
- if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
- || (drm_core_check_feature(dev, DRIVER_SG)
- && (dma->flags & _DRM_DMA_USE_SG))
- || (drm_core_check_feature(dev, DRIVER_FB_DMA)
- && (dma->flags & _DRM_DMA_USE_FB))) {
- struct drm_local_map *map = dev->agp_buffer_map;
- unsigned long token = dev->agp_buffer_token;
-
- if (!map) {
- retcode = -EINVAL;
- goto done;
- }
- down_write(&current->mm->mmap_sem);
- virtual = do_mmap(file_priv->filp, 0, map->size,
- PROT_READ | PROT_WRITE,
- MAP_SHARED,
- token);
- up_write(&current->mm->mmap_sem);
- } else {
- down_write(&current->mm->mmap_sem);
- virtual = do_mmap(file_priv->filp, 0, dma->byte_count,
- PROT_READ | PROT_WRITE,
- MAP_SHARED, 0);
- up_write(&current->mm->mmap_sem);
- }
- if (virtual > -1024UL) {
- /* Real error */
- retcode = (signed long)virtual;
- goto done;
- }
- request->virtual = (void __user *)virtual;
-
- for (i = 0; i < dma->buf_count; i++) {
- if (copy_to_user(&request->list[i].idx,
- &dma->buflist[i]->idx,
- sizeof(request->list[0].idx))) {
- retcode = -EFAULT;
- goto done;
- }
- if (copy_to_user(&request->list[i].total,
- &dma->buflist[i]->total,
- sizeof(request->list[0].total))) {
- retcode = -EFAULT;
- goto done;
- }
- if (copy_to_user(&request->list[i].used,
- &zero, sizeof(zero))) {
- retcode = -EFAULT;
- goto done;
- }
- address = virtual + dma->buflist[i]->offset; /* *** */
- if (copy_to_user(&request->list[i].address,
- &address, sizeof(address))) {
- retcode = -EFAULT;
- goto done;
- }
- }
- }
- done:
- request->count = dma->buf_count;
- DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);
-
- return retcode;
-}
-
-/**
- * Compute size order. Returns the exponent of the smallest power of two which
- * is greater than or equal to the given number.
- *
- * \param size size.
- * \return order.
- *
- * \todo Can be made faster.
- */
-int drm_order(unsigned long size)
-{
- int order;
- unsigned long tmp;
-
- for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;
-
- if (size & (size - 1))
- ++order;
-
- return order;
-}
-EXPORT_SYMBOL(drm_order);
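A few sample values make the rounding behaviour of drm_order() concrete (an illustrative sketch; these assertions are not in the original file):

#include <assert.h>

static void drm_order_examples(void)
{
	assert(drm_order(1)     == 0);  /* 2^0 == 1                      */
	assert(drm_order(4096)  == 12); /* exact power of two            */
	assert(drm_order(4097)  == 13); /* rounded up to the next power  */
	assert(drm_order(65536) == 16); /* a typical request->size value */
}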
+++ /dev/null
-/**************************************************************************
- *
- * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- */
-
-#include "drmP.h"
-
-#if defined(CONFIG_X86)
-static void
-drm_clflush_page(struct page *page)
-{
- uint8_t *page_virtual;
- unsigned int i;
-
- if (unlikely(page == NULL))
- return;
-
- page_virtual = kmap_atomic(page, KM_USER0);
- for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
- clflush(page_virtual + i);
- kunmap_atomic(page_virtual, KM_USER0);
-}
-
-static void drm_cache_flush_clflush(struct page *pages[],
- unsigned long num_pages)
-{
- unsigned long i;
-
- mb();
- for (i = 0; i < num_pages; i++)
- drm_clflush_page(*pages++);
- mb();
-}
-
-static void
-drm_clflush_ipi_handler(void *null)
-{
- wbinvd();
-}
-#endif
-
-void
-drm_clflush_pages(struct page *pages[], unsigned long num_pages)
-{
-
-#if defined(CONFIG_X86)
- if (cpu_has_clflush) {
- drm_cache_flush_clflush(pages, num_pages);
- return;
- }
-
- if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
- printk(KERN_ERR "Timed out waiting for cache flush.\n");
-
-#elif defined(__powerpc__)
- unsigned long i;
- for (i = 0; i < num_pages; i++) {
- struct page *page = pages[i];
- void *page_virtual;
-
- if (unlikely(page == NULL))
- continue;
-
- page_virtual = kmap_atomic(page, KM_USER0);
- flush_dcache_range((unsigned long)page_virtual,
- (unsigned long)page_virtual + PAGE_SIZE);
- kunmap_atomic(page_virtual, KM_USER0);
- }
-#else
- printk(KERN_ERR "Architecture has no drm_cache.c support\n");
- WARN_ON_ONCE(1);
-#endif
-}
-EXPORT_SYMBOL(drm_clflush_pages);
+++ /dev/null
-/**
- * \file drm_context.c
- * IOCTLs for generic contexts
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Fri Nov 24 18:31:37 2000 by gareth@valinux.com
- *
- * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-/*
- * ChangeLog:
- * 2001-11-16 Torsten Duwe <duwe@caldera.de>
- * added context constructor/destructor hooks,
- * needed by SiS driver's memory management.
- */
-
-#include "drmP.h"
-
-/******************************************************************/
-/** \name Context bitmap support */
-/*@{*/
-
-/**
- * Free a handle from the context bitmap.
- *
- * \param dev DRM device.
- * \param ctx_handle context handle.
- *
- * Clears the bit specified by \p ctx_handle in drm_device::ctx_bitmap and the entry
- * in drm_device::ctx_idr, while holding the drm_device::struct_mutex
- * lock.
- */
-void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
-{
- mutex_lock(&dev->struct_mutex);
- idr_remove(&dev->ctx_idr, ctx_handle);
- mutex_unlock(&dev->struct_mutex);
-}
-
-/**
- * Context bitmap allocation.
- *
- * \param dev DRM device.
- * \return (non-negative) context handle on success or a negative number on failure.
- *
- * Allocate a new idr from drm_device::ctx_idr while holding the
- * drm_device::struct_mutex lock.
- */
-static int drm_ctxbitmap_next(struct drm_device * dev)
-{
- int new_id;
- int ret;
-
-again:
- if (idr_pre_get(&dev->ctx_idr, GFP_KERNEL) == 0) {
- DRM_ERROR("Out of memory expanding drawable idr\n");
- return -ENOMEM;
- }
- mutex_lock(&dev->struct_mutex);
- ret = idr_get_new_above(&dev->ctx_idr, NULL,
- DRM_RESERVED_CONTEXTS, &new_id);
- if (ret == -EAGAIN) {
- mutex_unlock(&dev->struct_mutex);
- goto again;
- }
- mutex_unlock(&dev->struct_mutex);
- return new_id;
-}
-
-/**
- * Context bitmap initialization.
- *
- * \param dev DRM device.
- *
- * Initialise the drm_device::ctx_idr
- */
-int drm_ctxbitmap_init(struct drm_device * dev)
-{
- idr_init(&dev->ctx_idr);
- return 0;
-}
-
-/**
- * Context bitmap cleanup.
- *
- * \param dev DRM device.
- *
- * Free all idr members using drm_ctx_sarea_free helper function
- * while holding the drm_device::struct_mutex lock.
- */
-void drm_ctxbitmap_cleanup(struct drm_device * dev)
-{
- mutex_lock(&dev->struct_mutex);
- idr_remove_all(&dev->ctx_idr);
- mutex_unlock(&dev->struct_mutex);
-}
-
-/*@}*/
-
-/******************************************************************/
-/** \name Per Context SAREA Support */
-/*@{*/
-
-/**
- * Get per-context SAREA.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument pointing to a drm_ctx_priv_map structure.
- * \return zero on success or a negative number on failure.
- *
- * Looks up the map in drm_device::ctx_idr for the given context id and
- * returns the map's user-space token as the handle.
- */
-int drm_getsareactx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_ctx_priv_map *request = data;
- struct drm_local_map *map;
- struct drm_map_list *_entry;
-
- mutex_lock(&dev->struct_mutex);
-
- map = idr_find(&dev->ctx_idr, request->ctx_id);
- if (!map) {
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
- }
-
- mutex_unlock(&dev->struct_mutex);
-
- request->handle = NULL;
- list_for_each_entry(_entry, &dev->maplist, head) {
- if (_entry->map == map) {
- request->handle =
- (void *)(unsigned long)_entry->user_token;
- break;
- }
- }
- if (request->handle == NULL)
- return -EINVAL;
-
- return 0;
-}
-
-/**
- * Set per-context SAREA.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument pointing to a drm_ctx_priv_map structure.
- * \return zero on success or a negative number on failure.
- *
- * Searches for the mapping specified in \p arg and updates the entry in
- * drm_device::ctx_idr with it.
- */
-int drm_setsareactx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_ctx_priv_map *request = data;
- struct drm_local_map *map = NULL;
- struct drm_map_list *r_list = NULL;
-
- mutex_lock(&dev->struct_mutex);
- list_for_each_entry(r_list, &dev->maplist, head) {
- if (r_list->map
- && r_list->user_token == (unsigned long) request->handle)
- goto found;
- }
- bad:
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
-
- found:
- map = r_list->map;
- if (!map)
- goto bad;
-
- if (IS_ERR(idr_replace(&dev->ctx_idr, map, request->ctx_id)))
- goto bad;
-
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
-/*@}*/
-
-/******************************************************************/
-/** \name The actual DRM context handling routines */
-/*@{*/
-
-/**
- * Switch context.
- *
- * \param dev DRM device.
- * \param old old context handle.
- * \param new new context handle.
- * \return zero on success or a negative number on failure.
- *
- * Attempt to set drm_device::context_flag.
- */
-static int drm_context_switch(struct drm_device * dev, int old, int new)
-{
- if (test_and_set_bit(0, &dev->context_flag)) {
- DRM_ERROR("Reentering -- FIXME\n");
- return -EBUSY;
- }
-
- DRM_DEBUG("Context switch from %d to %d\n", old, new);
-
- if (new == dev->last_context) {
- clear_bit(0, &dev->context_flag);
- return 0;
- }
-
- return 0;
-}
-
-/**
- * Complete context switch.
- *
- * \param dev DRM device.
- * \param new new context handle.
- * \return zero on success or a negative number on failure.
- *
- * Updates drm_device::last_context and drm_device::last_switch. Verifies the
- * hardware lock is held, clears the drm_device::context_flag and wakes up
- * drm_device::context_wait.
- */
-static int drm_context_switch_complete(struct drm_device *dev,
- struct drm_file *file_priv, int new)
-{
- dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
- dev->last_switch = jiffies;
-
- if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) {
- DRM_ERROR("Lock isn't held after context switch\n");
- }
-
- /* If a context switch is ever initiated
- when the kernel holds the lock, release
- that lock here. */
- clear_bit(0, &dev->context_flag);
- wake_up(&dev->context_wait);
-
- return 0;
-}
-
-/**
- * Reserve contexts.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument pointing to a drm_ctx_res structure.
- * \return zero on success or a negative number on failure.
- */
-int drm_resctx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_ctx_res *res = data;
- struct drm_ctx ctx;
- int i;
-
- if (res->count >= DRM_RESERVED_CONTEXTS) {
- memset(&ctx, 0, sizeof(ctx));
- for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
- ctx.handle = i;
- if (copy_to_user(&res->contexts[i], &ctx, sizeof(ctx)))
- return -EFAULT;
- }
- }
- res->count = DRM_RESERVED_CONTEXTS;
-
- return 0;
-}
-
-/**
- * Add context.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument pointing to a drm_ctx structure.
- * \return zero on success or a negative number on failure.
- *
- * Get a new handle for the context and copy to userspace.
- */
-int drm_addctx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_ctx_list *ctx_entry;
- struct drm_ctx *ctx = data;
-
- ctx->handle = drm_ctxbitmap_next(dev);
- if (ctx->handle == DRM_KERNEL_CONTEXT) {
- /* Skip kernel's context and get a new one. */
- ctx->handle = drm_ctxbitmap_next(dev);
- }
- DRM_DEBUG("%d\n", ctx->handle);
- if (ctx->handle == -1) {
- DRM_DEBUG("Not enough free contexts.\n");
- /* Should this return -EBUSY instead? */
- return -ENOMEM;
- }
-
- if (ctx->handle != DRM_KERNEL_CONTEXT) {
- if (dev->driver->context_ctor)
- if (!dev->driver->context_ctor(dev, ctx->handle)) {
- DRM_DEBUG("Running out of ctxs or memory.\n");
- return -ENOMEM;
- }
- }
-
- ctx_entry = kmalloc(sizeof(*ctx_entry), GFP_KERNEL);
- if (!ctx_entry) {
- DRM_DEBUG("out of memory\n");
- return -ENOMEM;
- }
-
- INIT_LIST_HEAD(&ctx_entry->head);
- ctx_entry->handle = ctx->handle;
- ctx_entry->tag = file_priv;
-
- mutex_lock(&dev->ctxlist_mutex);
- list_add(&ctx_entry->head, &dev->ctxlist);
- ++dev->ctx_count;
- mutex_unlock(&dev->ctxlist_mutex);
-
- return 0;
-}
-
-int drm_modctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- /* This does nothing */
- return 0;
-}
-
-/**
- * Get context.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument pointing to a drm_ctx structure.
- * \return zero on success or a negative number on failure.
- */
-int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- struct drm_ctx *ctx = data;
-
- /* This is 0, because we don't handle any context flags */
- ctx->flags = 0;
-
- return 0;
-}
-
-/**
- * Switch context.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument pointing to a drm_ctx structure.
- * \return zero on success or a negative number on failure.
- *
- * Calls context_switch().
- */
-int drm_switchctx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_ctx *ctx = data;
-
- DRM_DEBUG("%d\n", ctx->handle);
- return drm_context_switch(dev, dev->last_context, ctx->handle);
-}
-
-/**
- * New context.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument pointing to a drm_ctx structure.
- * \return zero on success or a negative number on failure.
- *
- * Calls context_switch_complete().
- */
-int drm_newctx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_ctx *ctx = data;
-
- DRM_DEBUG("%d\n", ctx->handle);
- drm_context_switch_complete(dev, file_priv, ctx->handle);
-
- return 0;
-}
-
-/**
- * Remove context.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument pointing to a drm_ctx structure.
- * \return zero on success or a negative number on failure.
- *
- * If not the special kernel context, calls ctxbitmap_free() to free the specified context.
- */
-int drm_rmctx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_ctx *ctx = data;
-
- DRM_DEBUG("%d\n", ctx->handle);
- if (ctx->handle != DRM_KERNEL_CONTEXT) {
- if (dev->driver->context_dtor)
- dev->driver->context_dtor(dev, ctx->handle);
- drm_ctxbitmap_free(dev, ctx->handle);
- }
-
- mutex_lock(&dev->ctxlist_mutex);
- if (!list_empty(&dev->ctxlist)) {
- struct drm_ctx_list *pos, *n;
-
- list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
- if (pos->handle == ctx->handle) {
- list_del(&pos->head);
- kfree(pos);
- --dev->ctx_count;
- }
- }
- }
- mutex_unlock(&dev->ctxlist_mutex);
-
- return 0;
-}
-
-/*@}*/
+++ /dev/null
-/*
- * Copyright (c) 2006-2008 Intel Corporation
- * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
- * Copyright (c) 2008 Red Hat Inc.
- *
- * DRM core CRTC related functions
- *
- * Permission to use, copy, modify, distribute, and sell this software and its
- * documentation for any purpose is hereby granted without fee, provided that
- * the above copyright notice appear in all copies and that both that copyright
- * notice and this permission notice appear in supporting documentation, and
- * that the name of the copyright holders not be used in advertising or
- * publicity pertaining to distribution of the software without specific,
- * written prior permission. The copyright holders make no representations
- * about the suitability of this software for any purpose. It is provided "as
- * is" without express or implied warranty.
- *
- * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
- * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
- * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
- * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
- * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
- * OF THIS SOFTWARE.
- *
- * Authors:
- * Keith Packard
- * Eric Anholt <eric@anholt.net>
- * Dave Airlie <airlied@linux.ie>
- * Jesse Barnes <jesse.barnes@intel.com>
- */
-#include <linux/list.h>
-#include "drm.h"
-#include "drmP.h"
-#include "drm_crtc.h"
-
-struct drm_prop_enum_list {
- int type;
- char *name;
-};
-
-/* Avoid boilerplate. I'm tired of typing. */
-#define DRM_ENUM_NAME_FN(fnname, list) \
- char *fnname(int val) \
- { \
- int i; \
- for (i = 0; i < ARRAY_SIZE(list); i++) { \
- if (list[i].type == val) \
- return list[i].name; \
- } \
- return "(unknown)"; \
- }
-
-/*
- * Global properties
- */
-static struct drm_prop_enum_list drm_dpms_enum_list[] =
-{ { DRM_MODE_DPMS_ON, "On" },
- { DRM_MODE_DPMS_STANDBY, "Standby" },
- { DRM_MODE_DPMS_SUSPEND, "Suspend" },
- { DRM_MODE_DPMS_OFF, "Off" }
-};
-
-DRM_ENUM_NAME_FN(drm_get_dpms_name, drm_dpms_enum_list)
-
-/*
- * Optional properties
- */
-static struct drm_prop_enum_list drm_scaling_mode_enum_list[] =
-{
- { DRM_MODE_SCALE_NONE, "None" },
- { DRM_MODE_SCALE_FULLSCREEN, "Full" },
- { DRM_MODE_SCALE_CENTER, "Center" },
- { DRM_MODE_SCALE_ASPECT, "Full aspect" },
-};
-
-static struct drm_prop_enum_list drm_dithering_mode_enum_list[] =
-{
- { DRM_MODE_DITHERING_OFF, "Off" },
- { DRM_MODE_DITHERING_ON, "On" },
-};
-
-/*
- * Non-global properties, but "required" for certain connectors.
- */
-static struct drm_prop_enum_list drm_dvi_i_select_enum_list[] =
-{
- { DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
- { DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */
- { DRM_MODE_SUBCONNECTOR_DVIA, "DVI-A" }, /* DVI-I */
-};
-
-DRM_ENUM_NAME_FN(drm_get_dvi_i_select_name, drm_dvi_i_select_enum_list)
-
-static struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] =
-{
- { DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */
- { DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */
- { DRM_MODE_SUBCONNECTOR_DVIA, "DVI-A" }, /* DVI-I */
-};
-
-DRM_ENUM_NAME_FN(drm_get_dvi_i_subconnector_name,
- drm_dvi_i_subconnector_enum_list)
-
-static struct drm_prop_enum_list drm_tv_select_enum_list[] =
-{
- { DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
- { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
- { DRM_MODE_SUBCONNECTOR_SVIDEO, "SVIDEO" }, /* TV-out */
- { DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
- { DRM_MODE_SUBCONNECTOR_SCART, "SCART" }, /* TV-out */
-};
-
-DRM_ENUM_NAME_FN(drm_get_tv_select_name, drm_tv_select_enum_list)
-
-static struct drm_prop_enum_list drm_tv_subconnector_enum_list[] =
-{
- { DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */
- { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
- { DRM_MODE_SUBCONNECTOR_SVIDEO, "SVIDEO" }, /* TV-out */
- { DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
- { DRM_MODE_SUBCONNECTOR_SCART, "SCART" }, /* TV-out */
-};
-
-DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
- drm_tv_subconnector_enum_list)
-
-static struct drm_prop_enum_list drm_dirty_info_enum_list[] = {
- { DRM_MODE_DIRTY_OFF, "Off" },
- { DRM_MODE_DIRTY_ON, "On" },
- { DRM_MODE_DIRTY_ANNOTATE, "Annotate" },
-};
-
-DRM_ENUM_NAME_FN(drm_get_dirty_info_name,
- drm_dirty_info_enum_list)
-
-struct drm_conn_prop_enum_list {
- int type;
- char *name;
- int count;
-};
-
-/*
- * Connector and encoder types.
- */
-static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
-{ { DRM_MODE_CONNECTOR_Unknown, "Unknown", 0 },
- { DRM_MODE_CONNECTOR_VGA, "VGA", 0 },
- { DRM_MODE_CONNECTOR_DVII, "DVI-I", 0 },
- { DRM_MODE_CONNECTOR_DVID, "DVI-D", 0 },
- { DRM_MODE_CONNECTOR_DVIA, "DVI-A", 0 },
- { DRM_MODE_CONNECTOR_Composite, "Composite", 0 },
- { DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO", 0 },
- { DRM_MODE_CONNECTOR_LVDS, "LVDS", 0 },
- { DRM_MODE_CONNECTOR_Component, "Component", 0 },
- { DRM_MODE_CONNECTOR_9PinDIN, "9-pin DIN", 0 },
- { DRM_MODE_CONNECTOR_DisplayPort, "DisplayPort", 0 },
- { DRM_MODE_CONNECTOR_HDMIA, "HDMI Type A", 0 },
- { DRM_MODE_CONNECTOR_HDMIB, "HDMI Type B", 0 },
- { DRM_MODE_CONNECTOR_TV, "TV", 0 },
-};
-
-static struct drm_prop_enum_list drm_encoder_enum_list[] =
-{ { DRM_MODE_ENCODER_NONE, "None" },
- { DRM_MODE_ENCODER_DAC, "DAC" },
- { DRM_MODE_ENCODER_TMDS, "TMDS" },
- { DRM_MODE_ENCODER_LVDS, "LVDS" },
- { DRM_MODE_ENCODER_TVDAC, "TV" },
-};
-
-char *drm_get_encoder_name(struct drm_encoder *encoder)
-{
- static char buf[32];
-
- snprintf(buf, 32, "%s-%d",
- drm_encoder_enum_list[encoder->encoder_type].name,
- encoder->base.id);
- return buf;
-}
-EXPORT_SYMBOL(drm_get_encoder_name);
-
-char *drm_get_connector_name(struct drm_connector *connector)
-{
- static char buf[32];
-
- snprintf(buf, 32, "%s-%d",
- drm_connector_enum_list[connector->connector_type].name,
- connector->connector_type_id);
- return buf;
-}
-EXPORT_SYMBOL(drm_get_connector_name);
-
-char *drm_get_connector_status_name(enum drm_connector_status status)
-{
- if (status == connector_status_connected)
- return "connected";
- else if (status == connector_status_disconnected)
- return "disconnected";
- else
- return "unknown";
-}
-
-/**
- * drm_mode_object_get - allocate a new identifier
- * @dev: DRM device
- * @obj: object pointer, used to generate unique ID
- * @type: object type
- *
- * LOCKING:
- *
- * Create a unique identifier based on @obj in @dev's identifier space. Used
- * for tracking modes, CRTCs and connectors.
- *
- * RETURNS:
- * New unique (relative to other objects in @dev) integer identifier for the
- * object.
- */
-static int drm_mode_object_get(struct drm_device *dev,
- struct drm_mode_object *obj, uint32_t obj_type)
-{
- int new_id = 0;
- int ret;
-
-again:
- if (idr_pre_get(&dev->mode_config.crtc_idr, GFP_KERNEL) == 0) {
- DRM_ERROR("Ran out of memory getting a mode number\n");
- return -EINVAL;
- }
-
- mutex_lock(&dev->mode_config.idr_mutex);
- ret = idr_get_new_above(&dev->mode_config.crtc_idr, obj, 1, &new_id);
- mutex_unlock(&dev->mode_config.idr_mutex);
- if (ret == -EAGAIN)
- goto again;
-
- obj->id = new_id;
- obj->type = obj_type;
- return 0;
-}
-
-/**
- * drm_mode_object_put - free an identifier
- * @dev: DRM device
- * @id: ID to free
- *
- * LOCKING:
- * Caller must hold DRM mode_config lock.
- *
- * Free @id from @dev's unique identifier pool.
- */
-static void drm_mode_object_put(struct drm_device *dev,
- struct drm_mode_object *object)
-{
- mutex_lock(&dev->mode_config.idr_mutex);
- idr_remove(&dev->mode_config.crtc_idr, object->id);
- mutex_unlock(&dev->mode_config.idr_mutex);
-}
-
-struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
- uint32_t id, uint32_t type)
-{
- struct drm_mode_object *obj = NULL;
-
- mutex_lock(&dev->mode_config.idr_mutex);
- obj = idr_find(&dev->mode_config.crtc_idr, id);
- if (!obj || (obj->type != type) || (obj->id != id))
- obj = NULL;
- mutex_unlock(&dev->mode_config.idr_mutex);
-
- return obj;
-}
-EXPORT_SYMBOL(drm_mode_object_find);
-
-/**
- * drm_framebuffer_init - initialize a framebuffer
- * @dev: DRM device
- *
- * LOCKING:
- * Caller must hold mode config lock.
- *
- * Allocates an ID for the framebuffer's parent mode object, sets its mode
- * functions & device file and adds it to the master fd list.
- *
- * RETURNS:
- * Zero on success, error code on failure.
- */
-int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
- const struct drm_framebuffer_funcs *funcs)
-{
- int ret;
-
- ret = drm_mode_object_get(dev, &fb->base, DRM_MODE_OBJECT_FB);
- if (ret) {
- return ret;
- }
-
- fb->dev = dev;
- fb->funcs = funcs;
- dev->mode_config.num_fb++;
- list_add(&fb->head, &dev->mode_config.fb_list);
-
- return 0;
-}
-EXPORT_SYMBOL(drm_framebuffer_init);
-
-/**
- * drm_framebuffer_cleanup - remove a framebuffer object
- * @fb: framebuffer to remove
- *
- * LOCKING:
- * Caller must hold mode config lock.
- *
- * Scans all the CRTCs in @fb's device. Any CRTC still using @fb is turned off
- * and its framebuffer pointer set to NULL.
- */
-void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
-{
- struct drm_device *dev = fb->dev;
- struct drm_crtc *crtc;
- struct drm_mode_set set;
- int ret;
-
- /* remove from any CRTC */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- if (crtc->fb == fb) {
- /* should turn off the crtc */
- memset(&set, 0, sizeof(struct drm_mode_set));
- set.crtc = crtc;
- set.fb = NULL;
- ret = crtc->funcs->set_config(&set);
- if (ret)
- DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc);
- }
- }
-
- drm_mode_object_put(dev, &fb->base);
- list_del(&fb->head);
- dev->mode_config.num_fb--;
-}
-EXPORT_SYMBOL(drm_framebuffer_cleanup);
-
-/**
- * drm_crtc_init - Initialise a new CRTC object
- * @dev: DRM device
- * @crtc: CRTC object to init
- * @funcs: callbacks for the new CRTC
- *
- * LOCKING:
- * Caller must hold mode config lock.
- *
- * Inits a new object created as the base part of a driver crtc object.
- */
-void drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
- const struct drm_crtc_funcs *funcs)
-{
- crtc->dev = dev;
- crtc->funcs = funcs;
-
- mutex_lock(&dev->mode_config.mutex);
- drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC);
-
- list_add_tail(&crtc->head, &dev->mode_config.crtc_list);
- dev->mode_config.num_crtc++;
- mutex_unlock(&dev->mode_config.mutex);
-}
-EXPORT_SYMBOL(drm_crtc_init);
-
-/**
- * drm_crtc_cleanup - Cleans up the core crtc usage.
- * @crtc: CRTC to cleanup
- *
- * LOCKING:
- * Caller must hold mode config lock.
- *
- * Cleans up @crtc, removing it from the DRM modesetting space. Does NOT free
- * the object; the caller does that.
- */
-void drm_crtc_cleanup(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
-
- if (crtc->gamma_store) {
- kfree(crtc->gamma_store);
- crtc->gamma_store = NULL;
- }
-
- drm_mode_object_put(dev, &crtc->base);
- list_del(&crtc->head);
- dev->mode_config.num_crtc--;
-}
-EXPORT_SYMBOL(drm_crtc_cleanup);
-
-/**
- * drm_mode_probed_add - add a mode to a connector's probed mode list
- * @connector: connector to add the new mode to
- * @mode: mode data
- *
- * LOCKING:
- * Caller must hold mode config lock.
- *
- * Add @mode to @connector's mode list for later use.
- */
-void drm_mode_probed_add(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- list_add(&mode->head, &connector->probed_modes);
-}
-EXPORT_SYMBOL(drm_mode_probed_add);
-
-/**
- * drm_mode_remove - remove and free a mode
- * @connector: connector list to modify
- * @mode: mode to remove
- *
- * LOCKING:
- * Caller must hold mode config lock.
- *
- * Remove @mode from @connector's mode list, then free it.
- */
-void drm_mode_remove(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- list_del(&mode->head);
- kfree(mode);
-}
-EXPORT_SYMBOL(drm_mode_remove);
-
-/**
- * drm_connector_init - Init a preallocated connector
- * @dev: DRM device
- * @connector: the connector to init
- * @funcs: callbacks for this connector
- * @name: user visible name of the connector
- *
- * LOCKING:
- * Caller must hold @dev's mode_config lock.
- *
- * Initialises a preallocated connector. Connectors should be
- * subclassed as part of driver connector objects.
- */
-void drm_connector_init(struct drm_device *dev,
- struct drm_connector *connector,
- const struct drm_connector_funcs *funcs,
- int connector_type)
-{
- mutex_lock(&dev->mode_config.mutex);
-
- connector->dev = dev;
- connector->funcs = funcs;
- drm_mode_object_get(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR);
- connector->connector_type = connector_type;
- connector->connector_type_id =
- ++drm_connector_enum_list[connector_type].count; /* TODO */
- INIT_LIST_HEAD(&connector->user_modes);
- INIT_LIST_HEAD(&connector->probed_modes);
- INIT_LIST_HEAD(&connector->modes);
- connector->edid_blob_ptr = NULL;
-
- list_add_tail(&connector->head, &dev->mode_config.connector_list);
- dev->mode_config.num_connector++;
-
- drm_connector_attach_property(connector,
- dev->mode_config.edid_property, 0);
-
- drm_connector_attach_property(connector,
- dev->mode_config.dpms_property, 0);
-
- mutex_unlock(&dev->mode_config.mutex);
-}
-EXPORT_SYMBOL(drm_connector_init);
-
-/**
- * drm_connector_cleanup - cleans up an initialised connector
- * @connector: connector to cleanup
- *
- * LOCKING:
- * Caller must hold @dev's mode_config lock.
- *
- * Cleans up the connector but doesn't free the object.
- */
-void drm_connector_cleanup(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct drm_display_mode *mode, *t;
-
- list_for_each_entry_safe(mode, t, &connector->probed_modes, head)
- drm_mode_remove(connector, mode);
-
- list_for_each_entry_safe(mode, t, &connector->modes, head)
- drm_mode_remove(connector, mode);
-
- list_for_each_entry_safe(mode, t, &connector->user_modes, head)
- drm_mode_remove(connector, mode);
-
- kfree(connector->fb_helper_private);
- mutex_lock(&dev->mode_config.mutex);
- drm_mode_object_put(dev, &connector->base);
- list_del(&connector->head);
- mutex_unlock(&dev->mode_config.mutex);
-}
-EXPORT_SYMBOL(drm_connector_cleanup);
-
-void drm_encoder_init(struct drm_device *dev,
- struct drm_encoder *encoder,
- const struct drm_encoder_funcs *funcs,
- int encoder_type)
-{
- mutex_lock(&dev->mode_config.mutex);
-
- encoder->dev = dev;
-
- drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER);
- encoder->encoder_type = encoder_type;
- encoder->funcs = funcs;
-
- list_add_tail(&encoder->head, &dev->mode_config.encoder_list);
- dev->mode_config.num_encoder++;
-
- mutex_unlock(&dev->mode_config.mutex);
-}
-EXPORT_SYMBOL(drm_encoder_init);
-
-void drm_encoder_cleanup(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- mutex_lock(&dev->mode_config.mutex);
- drm_mode_object_put(dev, &encoder->base);
- list_del(&encoder->head);
- mutex_unlock(&dev->mode_config.mutex);
-}
-EXPORT_SYMBOL(drm_encoder_cleanup);
-
-/**
- * drm_mode_create - create a new display mode
- * @dev: DRM device
- *
- * LOCKING:
- * Caller must hold DRM mode_config lock.
- *
- * Create a new drm_display_mode, give it an ID, and return it.
- *
- * RETURNS:
- * Pointer to new mode on success, NULL on error.
- */
-struct drm_display_mode *drm_mode_create(struct drm_device *dev)
-{
- struct drm_display_mode *nmode;
-
- nmode = kzalloc(sizeof(struct drm_display_mode), GFP_KERNEL);
- if (!nmode)
- return NULL;
-
- drm_mode_object_get(dev, &nmode->base, DRM_MODE_OBJECT_MODE);
- return nmode;
-}
-EXPORT_SYMBOL(drm_mode_create);
-
-/**
- * drm_mode_destroy - remove a mode
- * @dev: DRM device
- * @mode: mode to remove
- *
- * LOCKING:
- * Caller must hold mode config lock.
- *
- * Free @mode's unique identifier, then free it.
- */
-void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode)
-{
- drm_mode_object_put(dev, &mode->base);
-
- kfree(mode);
-}
-EXPORT_SYMBOL(drm_mode_destroy);
-
-static int drm_mode_create_standard_connector_properties(struct drm_device *dev)
-{
- struct drm_property *edid;
- struct drm_property *dpms;
- int i;
-
- /*
- * Standard properties (apply to all connectors)
- */
- edid = drm_property_create(dev, DRM_MODE_PROP_BLOB |
- DRM_MODE_PROP_IMMUTABLE,
- "EDID", 0);
- dev->mode_config.edid_property = edid;
-
- dpms = drm_property_create(dev, DRM_MODE_PROP_ENUM,
- "DPMS", ARRAY_SIZE(drm_dpms_enum_list));
- for (i = 0; i < ARRAY_SIZE(drm_dpms_enum_list); i++)
- drm_property_add_enum(dpms, i, drm_dpms_enum_list[i].type,
- drm_dpms_enum_list[i].name);
- dev->mode_config.dpms_property = dpms;
-
- return 0;
-}
-
-/**
- * drm_mode_create_dvi_i_properties - create DVI-I specific connector properties
- * @dev: DRM device
- *
- * Called by a driver the first time a DVI-I connector is made.
- */
-int drm_mode_create_dvi_i_properties(struct drm_device *dev)
-{
- struct drm_property *dvi_i_selector;
- struct drm_property *dvi_i_subconnector;
- int i;
-
- if (dev->mode_config.dvi_i_select_subconnector_property)
- return 0;
-
- dvi_i_selector =
- drm_property_create(dev, DRM_MODE_PROP_ENUM,
- "select subconnector",
- ARRAY_SIZE(drm_dvi_i_select_enum_list));
- for (i = 0; i < ARRAY_SIZE(drm_dvi_i_select_enum_list); i++)
- drm_property_add_enum(dvi_i_selector, i,
- drm_dvi_i_select_enum_list[i].type,
- drm_dvi_i_select_enum_list[i].name);
- dev->mode_config.dvi_i_select_subconnector_property = dvi_i_selector;
-
- dvi_i_subconnector =
- drm_property_create(dev, DRM_MODE_PROP_ENUM |
- DRM_MODE_PROP_IMMUTABLE,
- "subconnector",
- ARRAY_SIZE(drm_dvi_i_subconnector_enum_list));
- for (i = 0; i < ARRAY_SIZE(drm_dvi_i_subconnector_enum_list); i++)
- drm_property_add_enum(dvi_i_subconnector, i,
- drm_dvi_i_subconnector_enum_list[i].type,
- drm_dvi_i_subconnector_enum_list[i].name);
- dev->mode_config.dvi_i_subconnector_property = dvi_i_subconnector;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_mode_create_dvi_i_properties);
-
-/**
- * drm_create_tv_properties - create TV specific connector properties
- * @dev: DRM device
- * @num_modes: number of different TV formats (modes) supported
- * @modes: array of pointers to strings containing name of each format
- *
- * Called by a driver's TV initialization routine, this function creates
- * the TV specific connector properties for a given device. Caller is
- * responsible for allocating a list of format names and passing them to
- * this routine.
- */
-int drm_mode_create_tv_properties(struct drm_device *dev, int num_modes,
- char *modes[])
-{
- struct drm_property *tv_selector;
- struct drm_property *tv_subconnector;
- int i;
-
- if (dev->mode_config.tv_select_subconnector_property)
- return 0;
-
- /*
- * Basic connector properties
- */
- tv_selector = drm_property_create(dev, DRM_MODE_PROP_ENUM,
- "select subconnector",
- ARRAY_SIZE(drm_tv_select_enum_list));
- for (i = 0; i < ARRAY_SIZE(drm_tv_select_enum_list); i++)
- drm_property_add_enum(tv_selector, i,
- drm_tv_select_enum_list[i].type,
- drm_tv_select_enum_list[i].name);
- dev->mode_config.tv_select_subconnector_property = tv_selector;
-
- tv_subconnector =
- drm_property_create(dev, DRM_MODE_PROP_ENUM |
- DRM_MODE_PROP_IMMUTABLE, "subconnector",
- ARRAY_SIZE(drm_tv_subconnector_enum_list));
- for (i = 0; i < ARRAY_SIZE(drm_tv_subconnector_enum_list); i++)
- drm_property_add_enum(tv_subconnector, i,
- drm_tv_subconnector_enum_list[i].type,
- drm_tv_subconnector_enum_list[i].name);
- dev->mode_config.tv_subconnector_property = tv_subconnector;
-
- /*
- * Other, TV specific properties: margins & TV modes.
- */
- dev->mode_config.tv_left_margin_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "left margin", 2);
- dev->mode_config.tv_left_margin_property->values[0] = 0;
- dev->mode_config.tv_left_margin_property->values[1] = 100;
-
- dev->mode_config.tv_right_margin_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "right margin", 2);
- dev->mode_config.tv_right_margin_property->values[0] = 0;
- dev->mode_config.tv_right_margin_property->values[1] = 100;
-
- dev->mode_config.tv_top_margin_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "top margin", 2);
- dev->mode_config.tv_top_margin_property->values[0] = 0;
- dev->mode_config.tv_top_margin_property->values[1] = 100;
-
- dev->mode_config.tv_bottom_margin_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "bottom margin", 2);
- dev->mode_config.tv_bottom_margin_property->values[0] = 0;
- dev->mode_config.tv_bottom_margin_property->values[1] = 100;
-
- dev->mode_config.tv_mode_property =
- drm_property_create(dev, DRM_MODE_PROP_ENUM,
- "mode", num_modes);
- for (i = 0; i < num_modes; i++)
- drm_property_add_enum(dev->mode_config.tv_mode_property, i,
- i, modes[i]);
-
- dev->mode_config.tv_brightness_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "brightness", 2);
- dev->mode_config.tv_brightness_property->values[0] = 0;
- dev->mode_config.tv_brightness_property->values[1] = 100;
-
- dev->mode_config.tv_contrast_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "contrast", 2);
- dev->mode_config.tv_contrast_property->values[0] = 0;
- dev->mode_config.tv_contrast_property->values[1] = 100;
-
- dev->mode_config.tv_flicker_reduction_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "flicker reduction", 2);
- dev->mode_config.tv_flicker_reduction_property->values[0] = 0;
- dev->mode_config.tv_flicker_reduction_property->values[1] = 100;
-
- dev->mode_config.tv_overscan_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "overscan", 2);
- dev->mode_config.tv_overscan_property->values[0] = 0;
- dev->mode_config.tv_overscan_property->values[1] = 100;
-
- dev->mode_config.tv_saturation_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "saturation", 2);
- dev->mode_config.tv_saturation_property->values[0] = 0;
- dev->mode_config.tv_saturation_property->values[1] = 100;
-
- dev->mode_config.tv_hue_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "hue", 2);
- dev->mode_config.tv_hue_property->values[0] = 0;
- dev->mode_config.tv_hue_property->values[1] = 100;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_mode_create_tv_properties);
-
-/**
- * drm_mode_create_scaling_mode_property - create scaling mode property
- * @dev: DRM device
- *
- * Called by a driver the first time it's needed, must be attached to desired
- * connectors.
- */
-int drm_mode_create_scaling_mode_property(struct drm_device *dev)
-{
- struct drm_property *scaling_mode;
- int i;
-
- if (dev->mode_config.scaling_mode_property)
- return 0;
-
- scaling_mode =
- drm_property_create(dev, DRM_MODE_PROP_ENUM, "scaling mode",
- ARRAY_SIZE(drm_scaling_mode_enum_list));
- for (i = 0; i < ARRAY_SIZE(drm_scaling_mode_enum_list); i++)
- drm_property_add_enum(scaling_mode, i,
- drm_scaling_mode_enum_list[i].type,
- drm_scaling_mode_enum_list[i].name);
-
- dev->mode_config.scaling_mode_property = scaling_mode;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_mode_create_scaling_mode_property);
-
-/**
- * drm_mode_create_dithering_property - create dithering property
- * @dev: DRM device
- *
- * Called by a driver the first time it's needed, must be attached to desired
- * connectors.
- */
-int drm_mode_create_dithering_property(struct drm_device *dev)
-{
- struct drm_property *dithering_mode;
- int i;
-
- if (dev->mode_config.dithering_mode_property)
- return 0;
-
- dithering_mode =
- drm_property_create(dev, DRM_MODE_PROP_ENUM, "dithering",
- ARRAY_SIZE(drm_dithering_mode_enum_list));
- for (i = 0; i < ARRAY_SIZE(drm_dithering_mode_enum_list); i++)
- drm_property_add_enum(dithering_mode, i,
- drm_dithering_mode_enum_list[i].type,
- drm_dithering_mode_enum_list[i].name);
- dev->mode_config.dithering_mode_property = dithering_mode;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_mode_create_dithering_property);
-
-/**
- * drm_mode_create_dirty_info_property - create dirty info property
- * @dev: DRM device
- *
- * Called by a driver the first time it's needed, must be attached to desired
- * connectors.
- */
-int drm_mode_create_dirty_info_property(struct drm_device *dev)
-{
- struct drm_property *dirty_info;
- int i;
-
- if (dev->mode_config.dirty_info_property)
- return 0;
-
- dirty_info =
- drm_property_create(dev, DRM_MODE_PROP_ENUM |
- DRM_MODE_PROP_IMMUTABLE,
- "dirty",
- ARRAY_SIZE(drm_dirty_info_enum_list));
- for (i = 0; i < ARRAY_SIZE(drm_dirty_info_enum_list); i++)
- drm_property_add_enum(dirty_info, i,
- drm_dirty_info_enum_list[i].type,
- drm_dirty_info_enum_list[i].name);
- dev->mode_config.dirty_info_property = dirty_info;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_mode_create_dirty_info_property);
-
-/**
- * drm_mode_config_init - initialize DRM mode_configuration structure
- * @dev: DRM device
- *
- * LOCKING:
- * None, should happen single threaded at init time.
- *
- * Initialize @dev's mode_config structure, used for tracking the graphics
- * configuration of @dev.
- */
-void drm_mode_config_init(struct drm_device *dev)
-{
- mutex_init(&dev->mode_config.mutex);
- mutex_init(&dev->mode_config.idr_mutex);
- INIT_LIST_HEAD(&dev->mode_config.fb_list);
- INIT_LIST_HEAD(&dev->mode_config.fb_kernel_list);
- INIT_LIST_HEAD(&dev->mode_config.crtc_list);
- INIT_LIST_HEAD(&dev->mode_config.connector_list);
- INIT_LIST_HEAD(&dev->mode_config.encoder_list);
- INIT_LIST_HEAD(&dev->mode_config.property_list);
- INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
- idr_init(&dev->mode_config.crtc_idr);
-
- mutex_lock(&dev->mode_config.mutex);
- drm_mode_create_standard_connector_properties(dev);
- mutex_unlock(&dev->mode_config.mutex);
-
- /* Just to be sure */
- dev->mode_config.num_fb = 0;
- dev->mode_config.num_connector = 0;
- dev->mode_config.num_crtc = 0;
- dev->mode_config.num_encoder = 0;
-}
-EXPORT_SYMBOL(drm_mode_config_init);
-
-int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group)
-{
- uint32_t total_objects = 0;
-
- total_objects += dev->mode_config.num_crtc;
- total_objects += dev->mode_config.num_connector;
- total_objects += dev->mode_config.num_encoder;
-
- if (total_objects == 0)
- return -EINVAL;
-
- group->id_list = kzalloc(total_objects * sizeof(uint32_t), GFP_KERNEL);
- if (!group->id_list)
- return -ENOMEM;
-
- group->num_crtcs = 0;
- group->num_connectors = 0;
- group->num_encoders = 0;
- return 0;
-}
-
-int drm_mode_group_init_legacy_group(struct drm_device *dev,
- struct drm_mode_group *group)
-{
- struct drm_crtc *crtc;
- struct drm_encoder *encoder;
- struct drm_connector *connector;
- int ret;
-
- if ((ret = drm_mode_group_init(dev, group)))
- return ret;
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
- group->id_list[group->num_crtcs++] = crtc->base.id;
-
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
- group->id_list[group->num_crtcs + group->num_encoders++] =
- encoder->base.id;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head)
- group->id_list[group->num_crtcs + group->num_encoders +
- group->num_connectors++] = connector->base.id;
-
- return 0;
-}
-
-/**
- * drm_mode_config_cleanup - free up DRM mode_config info
- * @dev: DRM device
- *
- * LOCKING:
- * Caller must hold mode config lock.
- *
- * Free up all the connectors and CRTCs associated with this DRM device, then
- * free up the framebuffers and associated buffer objects.
- *
- * FIXME: cleanup any dangling user buffer objects too
- */
-void drm_mode_config_cleanup(struct drm_device *dev)
-{
- struct drm_connector *connector, *ot;
- struct drm_crtc *crtc, *ct;
- struct drm_encoder *encoder, *enct;
- struct drm_framebuffer *fb, *fbt;
- struct drm_property *property, *pt;
-
- list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list,
- head) {
- encoder->funcs->destroy(encoder);
- }
-
- list_for_each_entry_safe(connector, ot,
- &dev->mode_config.connector_list, head) {
- connector->funcs->destroy(connector);
- }
-
- list_for_each_entry_safe(property, pt, &dev->mode_config.property_list,
- head) {
- drm_property_destroy(dev, property);
- }
-
- list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
- fb->funcs->destroy(fb);
- }
-
- list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
- crtc->funcs->destroy(crtc);
- }
-
-}
-EXPORT_SYMBOL(drm_mode_config_cleanup);
-
-/**
- * drm_crtc_convert_to_umode - convert a drm_display_mode into a modeinfo
- * @out: drm_mode_modeinfo struct to return to the user
- * @in: drm_display_mode to use
- *
- * LOCKING:
- * None.
- *
- * Convert a drm_display_mode into a drm_mode_modeinfo structure to return to
- * the user.
- */
-void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out,
- struct drm_display_mode *in)
-{
- out->clock = in->clock;
- out->hdisplay = in->hdisplay;
- out->hsync_start = in->hsync_start;
- out->hsync_end = in->hsync_end;
- out->htotal = in->htotal;
- out->hskew = in->hskew;
- out->vdisplay = in->vdisplay;
- out->vsync_start = in->vsync_start;
- out->vsync_end = in->vsync_end;
- out->vtotal = in->vtotal;
- out->vscan = in->vscan;
- out->vrefresh = in->vrefresh;
- out->flags = in->flags;
- out->type = in->type;
- strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
- out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
-}
-
-/**
- * drm_crtc_convert_umode - convert a modeinfo into a drm_display_mode
- * @out: drm_display_mode to return to the user
- * @in: drm_mode_modeinfo to use
- *
- * LOCKING:
- * None.
- *
- * Convert a drm_mode_modeinfo into a drm_display_mode structure to return to
- * the caller.
- */
-void drm_crtc_convert_umode(struct drm_display_mode *out,
- struct drm_mode_modeinfo *in)
-{
- out->clock = in->clock;
- out->hdisplay = in->hdisplay;
- out->hsync_start = in->hsync_start;
- out->hsync_end = in->hsync_end;
- out->htotal = in->htotal;
- out->hskew = in->hskew;
- out->vdisplay = in->vdisplay;
- out->vsync_start = in->vsync_start;
- out->vsync_end = in->vsync_end;
- out->vtotal = in->vtotal;
- out->vscan = in->vscan;
- out->vrefresh = in->vrefresh;
- out->flags = in->flags;
- out->type = in->type;
- strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
- out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
-}
-
-/**
- * drm_mode_getresources - get graphics configuration
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
- *
- * LOCKING:
- * Takes mode config lock.
- *
- * Construct a set of configuration description structures and return
- * them to the user, including CRTC, connector and framebuffer configuration.
- *
- * Called by the user via ioctl.
- *
- * RETURNS:
- * Zero on success, errno on failure.
- */
-int drm_mode_getresources(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_mode_card_res *card_res = data;
- struct list_head *lh;
- struct drm_framebuffer *fb;
- struct drm_connector *connector;
- struct drm_crtc *crtc;
- struct drm_encoder *encoder;
- int ret = 0;
- int connector_count = 0;
- int crtc_count = 0;
- int fb_count = 0;
- int encoder_count = 0;
- int copied = 0, i;
- uint32_t __user *fb_id;
- uint32_t __user *crtc_id;
- uint32_t __user *connector_id;
- uint32_t __user *encoder_id;
- struct drm_mode_group *mode_group;
-
- mutex_lock(&dev->mode_config.mutex);
-
- /*
- * For the non-control nodes we need to limit the list of resources
- * by IDs in the group list for this node
- */
- list_for_each(lh, &file_priv->fbs)
- fb_count++;
-
- mode_group = &file_priv->master->minor->mode_group;
- if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
-
- list_for_each(lh, &dev->mode_config.crtc_list)
- crtc_count++;
-
- list_for_each(lh, &dev->mode_config.connector_list)
- connector_count++;
-
- list_for_each(lh, &dev->mode_config.encoder_list)
- encoder_count++;
- } else {
-
- crtc_count = mode_group->num_crtcs;
- connector_count = mode_group->num_connectors;
- encoder_count = mode_group->num_encoders;
- }
-
- card_res->max_height = dev->mode_config.max_height;
- card_res->min_height = dev->mode_config.min_height;
- card_res->max_width = dev->mode_config.max_width;
- card_res->min_width = dev->mode_config.min_width;
-
- /* handle this in 4 parts */
- /* FBs */
- if (card_res->count_fbs >= fb_count) {
- copied = 0;
- fb_id = (uint32_t __user *)(unsigned long)card_res->fb_id_ptr;
- list_for_each_entry(fb, &file_priv->fbs, head) {
- if (put_user(fb->base.id, fb_id + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
- }
- card_res->count_fbs = fb_count;
-
- /* CRTCs */
- if (card_res->count_crtcs >= crtc_count) {
- copied = 0;
- crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr;
- if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
- list_for_each_entry(crtc, &dev->mode_config.crtc_list,
- head) {
- DRM_DEBUG_KMS("CRTC ID is %d\n", crtc->base.id);
- if (put_user(crtc->base.id, crtc_id + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
- } else {
- for (i = 0; i < mode_group->num_crtcs; i++) {
- if (put_user(mode_group->id_list[i],
- crtc_id + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
- }
- }
- card_res->count_crtcs = crtc_count;
-
- /* Encoders */
- if (card_res->count_encoders >= encoder_count) {
- copied = 0;
- encoder_id = (uint32_t __user *)(unsigned long)card_res->encoder_id_ptr;
- if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
- list_for_each_entry(encoder,
- &dev->mode_config.encoder_list,
- head) {
- DRM_DEBUG_KMS("ENCODER ID is %d\n",
- encoder->base.id);
- if (put_user(encoder->base.id, encoder_id +
- copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
- } else {
- for (i = mode_group->num_crtcs; i < mode_group->num_crtcs + mode_group->num_encoders; i++) {
- if (put_user(mode_group->id_list[i],
- encoder_id + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
-
- }
- }
- card_res->count_encoders = encoder_count;
-
- /* Connectors */
- if (card_res->count_connectors >= connector_count) {
- copied = 0;
- connector_id = (uint32_t __user *)(unsigned long)card_res->connector_id_ptr;
- if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
- list_for_each_entry(connector,
- &dev->mode_config.connector_list,
- head) {
- DRM_DEBUG_KMS("CONNECTOR ID is %d\n",
- connector->base.id);
- if (put_user(connector->base.id,
- connector_id + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
- } else {
- int start = mode_group->num_crtcs +
- mode_group->num_encoders;
- for (i = start; i < start + mode_group->num_connectors; i++) {
- if (put_user(mode_group->id_list[i],
- connector_id + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
- }
- }
- card_res->count_connectors = connector_count;
-
- DRM_DEBUG_KMS("Counted %d %d %d\n", card_res->count_crtcs,
- card_res->count_connectors, card_res->count_encoders);
-
-out:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-}
-
-/**
- * drm_mode_getcrtc - get CRTC configuration
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
- *
- * LOCKING:
- * Caller? (FIXME)
- *
- * Construct a CRTC configuration structure to return to the user.
- *
- * Called by the user via ioctl.
- *
- * RETURNS:
- * Zero on success, errno on failure.
- */
-int drm_mode_getcrtc(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_crtc *crtc_resp = data;
- struct drm_crtc *crtc;
- struct drm_mode_object *obj;
- int ret = 0;
-
- mutex_lock(&dev->mode_config.mutex);
-
- obj = drm_mode_object_find(dev, crtc_resp->crtc_id,
- DRM_MODE_OBJECT_CRTC);
- if (!obj) {
- ret = -EINVAL;
- goto out;
- }
- crtc = obj_to_crtc(obj);
-
- crtc_resp->x = crtc->x;
- crtc_resp->y = crtc->y;
- crtc_resp->gamma_size = crtc->gamma_size;
- if (crtc->fb)
- crtc_resp->fb_id = crtc->fb->base.id;
- else
- crtc_resp->fb_id = 0;
-
- if (crtc->enabled) {
-
- drm_crtc_convert_to_umode(&crtc_resp->mode, &crtc->mode);
- crtc_resp->mode_valid = 1;
-
- } else {
- crtc_resp->mode_valid = 0;
- }
-
-out:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-}
-
-/**
- * drm_mode_getconnector - get connector configuration
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
- *
- * LOCKING:
- * Caller? (FIXME)
- *
- * Construct a connector configuration structure to return to the user.
- *
- * Called by the user via ioctl.
- *
- * RETURNS:
- * Zero on success, errno on failure.
- */
-int drm_mode_getconnector(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_mode_get_connector *out_resp = data;
- struct drm_mode_object *obj;
- struct drm_connector *connector;
- struct drm_display_mode *mode;
- int mode_count = 0;
- int props_count = 0;
- int encoders_count = 0;
- int ret = 0;
- int copied = 0;
- int i;
- struct drm_mode_modeinfo u_mode;
- struct drm_mode_modeinfo __user *mode_ptr;
- uint32_t __user *prop_ptr;
- uint64_t __user *prop_values;
- uint32_t __user *encoder_ptr;
-
- memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo));
-
- DRM_DEBUG_KMS("connector id %d:\n", out_resp->connector_id);
-
- mutex_lock(&dev->mode_config.mutex);
-
- obj = drm_mode_object_find(dev, out_resp->connector_id,
- DRM_MODE_OBJECT_CONNECTOR);
- if (!obj) {
- ret = -EINVAL;
- goto out;
- }
- connector = obj_to_connector(obj);
-
- for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
- if (connector->property_ids[i] != 0) {
- props_count++;
- }
- }
-
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] != 0) {
- encoders_count++;
- }
- }
-
- if (out_resp->count_modes == 0) {
- connector->funcs->fill_modes(connector,
- dev->mode_config.max_width,
- dev->mode_config.max_height);
- }
-
- /* delayed so we get modes regardless of pre-fill_modes state */
- list_for_each_entry(mode, &connector->modes, head)
- mode_count++;
-
- out_resp->connector_id = connector->base.id;
- out_resp->connector_type = connector->connector_type;
- out_resp->connector_type_id = connector->connector_type_id;
- out_resp->mm_width = connector->display_info.width_mm;
- out_resp->mm_height = connector->display_info.height_mm;
- out_resp->subpixel = connector->display_info.subpixel_order;
- out_resp->connection = connector->status;
- if (connector->encoder)
- out_resp->encoder_id = connector->encoder->base.id;
- else
- out_resp->encoder_id = 0;
-
- /*
- * This ioctl is called twice, once to determine how much space is
- * needed, and the 2nd time to fill it.
- */
- if ((out_resp->count_modes >= mode_count) && mode_count) {
- copied = 0;
- mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
- list_for_each_entry(mode, &connector->modes, head) {
- drm_crtc_convert_to_umode(&u_mode, mode);
- if (copy_to_user(mode_ptr + copied,
- &u_mode, sizeof(u_mode))) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
- }
- out_resp->count_modes = mode_count;
-
- if ((out_resp->count_props >= props_count) && props_count) {
- copied = 0;
- prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
- prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
- for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
- if (connector->property_ids[i] != 0) {
- if (put_user(connector->property_ids[i],
- prop_ptr + copied)) {
- ret = -EFAULT;
- goto out;
- }
-
- if (put_user(connector->property_values[i],
- prop_values + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
- }
- }
- out_resp->count_props = props_count;
-
- if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
- copied = 0;
- encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] != 0) {
- if (put_user(connector->encoder_ids[i],
- encoder_ptr + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
- }
- }
- out_resp->count_encoders = encoders_count;
-
-out:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-}
-
-int drm_mode_getencoder(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_mode_get_encoder *enc_resp = data;
- struct drm_mode_object *obj;
- struct drm_encoder *encoder;
- int ret = 0;
-
- mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, enc_resp->encoder_id,
- DRM_MODE_OBJECT_ENCODER);
- if (!obj) {
- ret = -EINVAL;
- goto out;
- }
- encoder = obj_to_encoder(obj);
-
- if (encoder->crtc)
- enc_resp->crtc_id = encoder->crtc->base.id;
- else
- enc_resp->crtc_id = 0;
- enc_resp->encoder_type = encoder->encoder_type;
- enc_resp->encoder_id = encoder->base.id;
- enc_resp->possible_crtcs = encoder->possible_crtcs;
- enc_resp->possible_clones = encoder->possible_clones;
-
-out:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-}
-
-/**
- * drm_mode_setcrtc - set CRTC configuration
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
- *
- * LOCKING:
- * Caller? (FIXME)
- *
- * Build a new CRTC configuration based on user request.
- *
- * Called by the user via ioctl.
- *
- * RETURNS:
- * Zero on success, errno on failure.
- */
-int drm_mode_setcrtc(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_mode_config *config = &dev->mode_config;
- struct drm_mode_crtc *crtc_req = data;
- struct drm_mode_object *obj;
- struct drm_crtc *crtc, *crtcfb;
- struct drm_connector **connector_set = NULL, *connector;
- struct drm_framebuffer *fb = NULL;
- struct drm_display_mode *mode = NULL;
- struct drm_mode_set set;
- uint32_t __user *set_connectors_ptr;
- int ret = 0;
- int i;
-
- mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, crtc_req->crtc_id,
- DRM_MODE_OBJECT_CRTC);
- if (!obj) {
- DRM_DEBUG_KMS("Unknown CRTC ID %d\n", crtc_req->crtc_id);
- ret = -EINVAL;
- goto out;
- }
- crtc = obj_to_crtc(obj);
-
- if (crtc_req->mode_valid) {
- /* If we have a mode we need a framebuffer. */
- /* If we pass -1, set the mode with the currently bound fb */
- if (crtc_req->fb_id == -1) {
- list_for_each_entry(crtcfb,
- &dev->mode_config.crtc_list, head) {
- if (crtcfb == crtc) {
- DRM_DEBUG_KMS("Using current fb for "
- "setmode\n");
- fb = crtc->fb;
- }
- }
- } else {
- obj = drm_mode_object_find(dev, crtc_req->fb_id,
- DRM_MODE_OBJECT_FB);
- if (!obj) {
- DRM_DEBUG_KMS("Unknown FB ID %d\n",
- crtc_req->fb_id);
- ret = -EINVAL;
- goto out;
- }
- fb = obj_to_fb(obj);
- }
-
- mode = drm_mode_create(dev);
- drm_crtc_convert_umode(mode, &crtc_req->mode);
- drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
- }
-
- if (crtc_req->count_connectors == 0 && mode) {
- DRM_DEBUG_KMS("Count connectors is 0 but mode set\n");
- ret = -EINVAL;
- goto out;
- }
-
- if (crtc_req->count_connectors > 0 && (!mode || !fb)) {
- DRM_DEBUG_KMS("Count connectors is %d but no mode or fb set\n",
- crtc_req->count_connectors);
- ret = -EINVAL;
- goto out;
- }
-
- if (crtc_req->count_connectors > 0) {
- u32 out_id;
-
- /* Avoid unbounded kernel memory allocation */
- if (crtc_req->count_connectors > config->num_connector) {
- ret = -EINVAL;
- goto out;
- }
-
- connector_set = kmalloc(crtc_req->count_connectors *
- sizeof(struct drm_connector *),
- GFP_KERNEL);
- if (!connector_set) {
- ret = -ENOMEM;
- goto out;
- }
-
- for (i = 0; i < crtc_req->count_connectors; i++) {
- set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
- if (get_user(out_id, &set_connectors_ptr[i])) {
- ret = -EFAULT;
- goto out;
- }
-
- obj = drm_mode_object_find(dev, out_id,
- DRM_MODE_OBJECT_CONNECTOR);
- if (!obj) {
- DRM_DEBUG_KMS("Connector id %d unknown\n",
- out_id);
- ret = -EINVAL;
- goto out;
- }
- connector = obj_to_connector(obj);
-
- connector_set[i] = connector;
- }
- }
-
- set.crtc = crtc;
- set.x = crtc_req->x;
- set.y = crtc_req->y;
- set.mode = mode;
- set.connectors = connector_set;
- set.num_connectors = crtc_req->count_connectors;
- set.fb = fb;
- ret = crtc->funcs->set_config(&set);
-
-out:
- kfree(connector_set);
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-}
-
-int drm_mode_cursor_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_cursor *req = data;
- struct drm_mode_object *obj;
- struct drm_crtc *crtc;
- int ret = 0;
-
- if (!req->flags) {
- DRM_ERROR("no operation set\n");
- return -EINVAL;
- }
-
- mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC);
- if (!obj) {
- DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id);
- ret = -EINVAL;
- goto out;
- }
- crtc = obj_to_crtc(obj);
-
- if (req->flags & DRM_MODE_CURSOR_BO) {
- if (!crtc->funcs->cursor_set) {
- DRM_ERROR("crtc does not support cursor\n");
- ret = -ENXIO;
- goto out;
- }
- /* Turns off the cursor if handle is 0 */
- ret = crtc->funcs->cursor_set(crtc, file_priv, req->handle,
- req->width, req->height);
- }
-
- if (req->flags & DRM_MODE_CURSOR_MOVE) {
- if (crtc->funcs->cursor_move) {
- ret = crtc->funcs->cursor_move(crtc, req->x, req->y);
- } else {
- DRM_ERROR("crtc does not support cursor\n");
- ret = -EFAULT;
- goto out;
- }
- }
-out:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-}
-
-/**
- * drm_mode_addfb - add an FB to the graphics configuration
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
- *
- * LOCKING:
- * Takes mode config lock.
- *
- * Add a new FB to the specified CRTC, given a user request.
- *
- * Called by the user via ioctl.
- *
- * RETURNS:
- * Zero on success, errno on failure.
- */
-int drm_mode_addfb(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_fb_cmd *r = data;
- struct drm_mode_config *config = &dev->mode_config;
- struct drm_framebuffer *fb;
- int ret = 0;
-
- if ((config->min_width > r->width) || (r->width > config->max_width)) {
- DRM_ERROR("mode new framebuffer width not within limits\n");
- return -EINVAL;
- }
- if ((config->min_height > r->height) || (r->height > config->max_height)) {
- DRM_ERROR("mode new framebuffer height not within limits\n");
- return -EINVAL;
- }
-
- mutex_lock(&dev->mode_config.mutex);
-
- /* TODO check buffer is sufficiently large */
- /* TODO setup destructor callback */
-
- fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
- if (!fb) {
- DRM_ERROR("could not create framebuffer\n");
- ret = -EINVAL;
- goto out;
- }
-
- r->fb_id = fb->base.id;
- list_add(&fb->filp_head, &file_priv->fbs);
-
-out:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-}
-
-/**
- * drm_mode_rmfb - remove an FB from the configuration
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
- *
- * LOCKING:
- * Takes mode config lock.
- *
- * Remove the FB specified by the user.
- *
- * Called by the user via ioctl.
- *
- * RETURNS:
- * Zero on success, errno on failure.
- */
-int drm_mode_rmfb(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_object *obj;
- struct drm_framebuffer *fb = NULL;
- struct drm_framebuffer *fbl = NULL;
- uint32_t *id = data;
- int ret = 0;
- int found = 0;
-
- mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, *id, DRM_MODE_OBJECT_FB);
- /* TODO check that we really get a framebuffer back. */
- if (!obj) {
- DRM_ERROR("mode invalid framebuffer id\n");
- ret = -EINVAL;
- goto out;
- }
- fb = obj_to_fb(obj);
-
- list_for_each_entry(fbl, &file_priv->fbs, filp_head)
- if (fb == fbl)
- found = 1;
-
- if (!found) {
- DRM_ERROR("tried to remove a fb that we didn't own\n");
- ret = -EINVAL;
- goto out;
- }
-
- /* TODO release all crtcs connected to the framebuffer */
- /* TODO unhook the destructor from the buffer object */
-
- list_del(&fb->filp_head);
- fb->funcs->destroy(fb);
-
-out:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-}
-
-/**
- * drm_mode_getfb - get FB info
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
- *
- * LOCKING:
- * Caller? (FIXME)
- *
- * Lookup the FB given its ID and return info about it.
- *
- * Called by the user via ioctl.
- *
- * RETURNS:
- * Zero on success, errno on failure.
- */
-int drm_mode_getfb(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_fb_cmd *r = data;
- struct drm_mode_object *obj;
- struct drm_framebuffer *fb;
- int ret = 0;
-
- mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
- if (!obj) {
- DRM_ERROR("invalid framebuffer id\n");
- ret = -EINVAL;
- goto out;
- }
- fb = obj_to_fb(obj);
-
- r->height = fb->height;
- r->width = fb->width;
- r->depth = fb->depth;
- r->bpp = fb->bits_per_pixel;
- r->pitch = fb->pitch;
- fb->funcs->create_handle(fb, file_priv, &r->handle);
-
-out:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-}
-
-int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_clip_rect __user *clips_ptr;
- struct drm_clip_rect *clips = NULL;
- struct drm_mode_fb_dirty_cmd *r = data;
- struct drm_mode_object *obj;
- struct drm_framebuffer *fb;
- unsigned flags;
- int num_clips;
- int ret = 0;
-
- mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
- if (!obj) {
- DRM_ERROR("invalid framebuffer id\n");
- ret = -EINVAL;
- goto out_err1;
- }
- fb = obj_to_fb(obj);
-
- num_clips = r->num_clips;
- clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
-
- if (!num_clips != !clips_ptr) {
- ret = -EINVAL;
- goto out_err1;
- }
-
- flags = DRM_MODE_FB_DIRTY_FLAGS & r->flags;
-
- /* If userspace annotates copy, clips must come in pairs */
- if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY && (num_clips % 2)) {
- ret = -EINVAL;
- goto out_err1;
- }
-
- if (num_clips && clips_ptr) {
- clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
- if (!clips) {
- ret = -ENOMEM;
- goto out_err1;
- }
-
- ret = copy_from_user(clips, clips_ptr,
- num_clips * sizeof(*clips));
- if (ret)
- goto out_err2;
- }
-
- if (fb->funcs->dirty) {
- ret = fb->funcs->dirty(fb, flags, r->color, clips, num_clips);
- } else {
- ret = -ENOSYS;
- goto out_err2;
- }
-
-out_err2:
- kfree(clips);
-out_err1:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-}
-
-
-/**
- * drm_fb_release - remove and free the FBs on this file
- * @priv: DRM file private
- *
- * LOCKING:
- * Takes mode config lock.
- *
- * Destroy all the FBs associated with @priv.
- */
-void drm_fb_release(struct drm_file *priv)
-{
- struct drm_device *dev = priv->minor->dev;
- struct drm_framebuffer *fb, *tfb;
-
- mutex_lock(&dev->mode_config.mutex);
- list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
- list_del(&fb->filp_head);
- fb->funcs->destroy(fb);
- }
- mutex_unlock(&dev->mode_config.mutex);
-}
-
-/**
- * drm_mode_attachmode - add a mode to the user mode list
- * @dev: DRM device
- * @connector: connector to add the mode to
- * @mode: mode to add
- *
- * Add @mode to @connector's user mode list.
- */
-static int drm_mode_attachmode(struct drm_device *dev,
- struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- int ret = 0;
-
- list_add_tail(&mode->head, &connector->user_modes);
- return ret;
-}
-
-int drm_mode_attachmode_crtc(struct drm_device *dev, struct drm_crtc *crtc,
- struct drm_display_mode *mode)
-{
- struct drm_connector *connector;
- int ret = 0;
- struct drm_display_mode *dup_mode;
- int need_dup = 0;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (!connector->encoder)
- break;
- if (connector->encoder->crtc == crtc) {
- if (need_dup)
- dup_mode = drm_mode_duplicate(dev, mode);
- else
- dup_mode = mode;
- ret = drm_mode_attachmode(dev, connector, dup_mode);
- if (ret)
- return ret;
- need_dup = 1;
- }
- }
- return 0;
-}
-EXPORT_SYMBOL(drm_mode_attachmode_crtc);
-
-static int drm_mode_detachmode(struct drm_device *dev,
- struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- int found = 0;
- int ret = 0;
- struct drm_display_mode *match_mode, *t;
-
- list_for_each_entry_safe(match_mode, t, &connector->user_modes, head) {
- if (drm_mode_equal(match_mode, mode)) {
- list_del(&match_mode->head);
- drm_mode_destroy(dev, match_mode);
- found = 1;
- break;
- }
- }
-
- if (!found)
- ret = -EINVAL;
-
- return ret;
-}
-
-int drm_mode_detachmode_crtc(struct drm_device *dev, struct drm_display_mode *mode)
-{
- struct drm_connector *connector;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- drm_mode_detachmode(dev, connector, mode);
- }
- return 0;
-}
-EXPORT_SYMBOL(drm_mode_detachmode_crtc);
-
-/**
- * drm_mode_attachmode_ioctl - Attach a user mode to a connector
- * @dev: DRM device
- * @data: ioctl argument (struct drm_mode_mode_cmd)
- * @file_priv: DRM file private
- *
- * This attaches a user specified mode to a connector.
- * Called by the user via ioctl.
- *
- * RETURNS:
- * Zero on success, errno on failure.
- */
-int drm_mode_attachmode_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_mode_cmd *mode_cmd = data;
- struct drm_connector *connector;
- struct drm_display_mode *mode;
- struct drm_mode_object *obj;
- struct drm_mode_modeinfo *umode = &mode_cmd->mode;
- int ret = 0;
-
- mutex_lock(&dev->mode_config.mutex);
-
- obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR);
- if (!obj) {
- ret = -EINVAL;
- goto out;
- }
- connector = obj_to_connector(obj);
-
- mode = drm_mode_create(dev);
- if (!mode) {
- ret = -ENOMEM;
- goto out;
- }
-
- drm_crtc_convert_umode(mode, umode);
-
- ret = drm_mode_attachmode(dev, connector, mode);
-out:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-}
-
-
-/**
- * drm_mode_detachmode_ioctl - Detach a user specified mode from a connector
- * @dev: DRM device
- * @data: ioctl argument (struct drm_mode_mode_cmd)
- * @file_priv: DRM file private
- *
- * Called by the user via ioctl.
- *
- * RETURNS:
- * Zero on success, errno on failure.
- */
-int drm_mode_detachmode_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_object *obj;
- struct drm_mode_mode_cmd *mode_cmd = data;
- struct drm_connector *connector;
- struct drm_display_mode mode;
- struct drm_mode_modeinfo *umode = &mode_cmd->mode;
- int ret = 0;
-
- mutex_lock(&dev->mode_config.mutex);
-
- obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR);
- if (!obj) {
- ret = -EINVAL;
- goto out;
- }
- connector = obj_to_connector(obj);
-
- drm_crtc_convert_umode(&mode, umode);
- ret = drm_mode_detachmode(dev, connector, &mode);
-out:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-}
-
-struct drm_property *drm_property_create(struct drm_device *dev, int flags,
- const char *name, int num_values)
-{
- struct drm_property *property = NULL;
-
- property = kzalloc(sizeof(struct drm_property), GFP_KERNEL);
- if (!property)
- return NULL;
-
- if (num_values) {
- property->values = kzalloc(sizeof(uint64_t)*num_values, GFP_KERNEL);
- if (!property->values)
- goto fail;
- }
-
- drm_mode_object_get(dev, &property->base, DRM_MODE_OBJECT_PROPERTY);
- property->flags = flags;
- property->num_values = num_values;
- INIT_LIST_HEAD(&property->enum_blob_list);
-
- if (name)
- strncpy(property->name, name, DRM_PROP_NAME_LEN);
-
- list_add_tail(&property->head, &dev->mode_config.property_list);
- return property;
-fail:
- kfree(property);
- return NULL;
-}
-EXPORT_SYMBOL(drm_property_create);
-
-int drm_property_add_enum(struct drm_property *property, int index,
- uint64_t value, const char *name)
-{
- struct drm_property_enum *prop_enum;
-
- if (!(property->flags & DRM_MODE_PROP_ENUM))
- return -EINVAL;
-
- if (!list_empty(&property->enum_blob_list)) {
- list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
- if (prop_enum->value == value) {
- strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
- prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
- return 0;
- }
- }
- }
-
- prop_enum = kzalloc(sizeof(struct drm_property_enum), GFP_KERNEL);
- if (!prop_enum)
- return -ENOMEM;
-
- strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
- prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
- prop_enum->value = value;
-
- property->values[index] = value;
- list_add_tail(&prop_enum->head, &property->enum_blob_list);
- return 0;
-}
-EXPORT_SYMBOL(drm_property_add_enum);
-
-void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
-{
- struct drm_property_enum *prop_enum, *pt;
-
- list_for_each_entry_safe(prop_enum, pt, &property->enum_blob_list, head) {
- list_del(&prop_enum->head);
- kfree(prop_enum);
- }
-
- if (property->num_values)
- kfree(property->values);
- drm_mode_object_put(dev, &property->base);
- list_del(&property->head);
- kfree(property);
-}
-EXPORT_SYMBOL(drm_property_destroy);
-
-int drm_connector_attach_property(struct drm_connector *connector,
- struct drm_property *property, uint64_t init_val)
-{
- int i;
-
- for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
- if (connector->property_ids[i] == 0) {
- connector->property_ids[i] = property->base.id;
- connector->property_values[i] = init_val;
- break;
- }
- }
-
- if (i == DRM_CONNECTOR_MAX_PROPERTY)
- return -EINVAL;
- return 0;
-}
-EXPORT_SYMBOL(drm_connector_attach_property);
-
-int drm_connector_property_set_value(struct drm_connector *connector,
- struct drm_property *property, uint64_t value)
-{
- int i;
-
- for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
- if (connector->property_ids[i] == property->base.id) {
- connector->property_values[i] = value;
- break;
- }
- }
-
- if (i == DRM_CONNECTOR_MAX_PROPERTY)
- return -EINVAL;
- return 0;
-}
-EXPORT_SYMBOL(drm_connector_property_set_value);
-
-int drm_connector_property_get_value(struct drm_connector *connector,
- struct drm_property *property, uint64_t *val)
-{
- int i;
-
- for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
- if (connector->property_ids[i] == property->base.id) {
- *val = connector->property_values[i];
- break;
- }
- }
-
- if (i == DRM_CONNECTOR_MAX_PROPERTY)
- return -EINVAL;
- return 0;
-}
-EXPORT_SYMBOL(drm_connector_property_get_value);
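/*
 * Putting the property helpers above together: a sketch of how a driver
 * might expose a two-value enum property on a connector.  The "scaling"
 * property name and its values are made up for illustration.
 */
static int example_create_scaling_prop(struct drm_device *dev,
                                       struct drm_connector *connector)
{
	struct drm_property *prop;

	prop = drm_property_create(dev, DRM_MODE_PROP_ENUM, "scaling", 2);
	if (!prop)
		return -ENOMEM;

	/* index into prop->values[], the 64-bit value, then its name */
	drm_property_add_enum(prop, 0, 0, "off");
	drm_property_add_enum(prop, 1, 1, "fullscreen");

	/* Advertise it on the connector with an initial value of "off". */
	return drm_connector_attach_property(connector, prop, 0);
}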
-
-int drm_mode_getproperty_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_object *obj;
- struct drm_mode_get_property *out_resp = data;
- struct drm_property *property;
- int enum_count = 0;
- int blob_count = 0;
- int value_count = 0;
- int ret = 0, i;
- int copied;
- struct drm_property_enum *prop_enum;
- struct drm_mode_property_enum __user *enum_ptr;
- struct drm_property_blob *prop_blob;
- uint32_t *blob_id_ptr;
- uint64_t __user *values_ptr;
- uint32_t __user *blob_length_ptr;
-
- mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
- if (!obj) {
- ret = -EINVAL;
- goto done;
- }
- property = obj_to_property(obj);
-
- if (property->flags & DRM_MODE_PROP_ENUM) {
- list_for_each_entry(prop_enum, &property->enum_blob_list, head)
- enum_count++;
- } else if (property->flags & DRM_MODE_PROP_BLOB) {
- list_for_each_entry(prop_blob, &property->enum_blob_list, head)
- blob_count++;
- }
-
- value_count = property->num_values;
-
- strncpy(out_resp->name, property->name, DRM_PROP_NAME_LEN);
- out_resp->name[DRM_PROP_NAME_LEN-1] = 0;
- out_resp->flags = property->flags;
-
- if ((out_resp->count_values >= value_count) && value_count) {
- values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
- for (i = 0; i < value_count; i++) {
- if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
- ret = -EFAULT;
- goto done;
- }
- }
- }
- out_resp->count_values = value_count;
-
- if (property->flags & DRM_MODE_PROP_ENUM) {
- if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
- copied = 0;
- enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
- list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
-
- if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
- ret = -EFAULT;
- goto done;
- }
-
- if (copy_to_user(&enum_ptr[copied].name,
- &prop_enum->name, DRM_PROP_NAME_LEN)) {
- ret = -EFAULT;
- goto done;
- }
- copied++;
- }
- }
- out_resp->count_enum_blobs = enum_count;
- }
-
- if (property->flags & DRM_MODE_PROP_BLOB) {
- if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
- copied = 0;
- blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
- blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
-
- list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
- if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
- ret = -EFAULT;
- goto done;
- }
-
- if (put_user(prop_blob->length, blob_length_ptr + copied)) {
- ret = -EFAULT;
- goto done;
- }
-
- copied++;
- }
- }
- out_resp->count_enum_blobs = blob_count;
- }
-done:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-}
-
-static struct drm_property_blob *drm_property_create_blob(struct drm_device *dev, int length,
- void *data)
-{
- struct drm_property_blob *blob;
-
- if (!length || !data)
- return NULL;
-
- blob = kzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL);
- if (!blob)
- return NULL;
-
- blob->data = (void *)((char *)blob + sizeof(struct drm_property_blob));
- blob->length = length;
-
- memcpy(blob->data, data, length);
-
- drm_mode_object_get(dev, &blob->base, DRM_MODE_OBJECT_BLOB);
-
- list_add_tail(&blob->head, &dev->mode_config.property_blob_list);
- return blob;
-}
-
-static void drm_property_destroy_blob(struct drm_device *dev,
- struct drm_property_blob *blob)
-{
- drm_mode_object_put(dev, &blob->base);
- list_del(&blob->head);
- kfree(blob);
-}
-
-int drm_mode_getblob_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_object *obj;
- struct drm_mode_get_blob *out_resp = data;
- struct drm_property_blob *blob;
- int ret = 0;
- void *blob_ptr;
-
- mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, out_resp->blob_id, DRM_MODE_OBJECT_BLOB);
- if (!obj) {
- ret = -EINVAL;
- goto done;
- }
- blob = obj_to_blob(obj);
-
- if (out_resp->length == blob->length) {
- blob_ptr = (void *)(unsigned long)out_resp->data;
- if (copy_to_user(blob_ptr, blob->data, blob->length)){
- ret = -EFAULT;
- goto done;
- }
- }
- out_resp->length = blob->length;
-
-done:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-}
-
-int drm_mode_connector_update_edid_property(struct drm_connector *connector,
- struct edid *edid)
-{
- struct drm_device *dev = connector->dev;
- int ret = 0;
-
- if (connector->edid_blob_ptr)
- drm_property_destroy_blob(dev, connector->edid_blob_ptr);
-
- /* Delete the EDID property when there is no EDID. */
- if (!edid) {
- connector->edid_blob_ptr = NULL;
- ret = drm_connector_property_set_value(connector, dev->mode_config.edid_property, 0);
- return ret;
- }
-
- connector->edid_blob_ptr = drm_property_create_blob(connector->dev, 128, edid);
-
- ret = drm_connector_property_set_value(connector,
- dev->mode_config.edid_property,
- connector->edid_blob_ptr->base.id);
-
- return ret;
-}
-EXPORT_SYMBOL(drm_mode_connector_update_edid_property);
-
-int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_connector_set_property *out_resp = data;
- struct drm_mode_object *obj;
- struct drm_property *property;
- struct drm_connector *connector;
- int ret = -EINVAL;
- int i;
-
- mutex_lock(&dev->mode_config.mutex);
-
- obj = drm_mode_object_find(dev, out_resp->connector_id, DRM_MODE_OBJECT_CONNECTOR);
- if (!obj) {
- goto out;
- }
- connector = obj_to_connector(obj);
-
- for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
- if (connector->property_ids[i] == out_resp->prop_id)
- break;
- }
-
- if (i == DRM_CONNECTOR_MAX_PROPERTY) {
- goto out;
- }
-
- obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
- if (!obj) {
- goto out;
- }
- property = obj_to_property(obj);
-
- if (property->flags & DRM_MODE_PROP_IMMUTABLE)
- goto out;
-
- if (property->flags & DRM_MODE_PROP_RANGE) {
- if (out_resp->value < property->values[0])
- goto out;
-
- if (out_resp->value > property->values[1])
- goto out;
- } else {
- int found = 0;
- for (i = 0; i < property->num_values; i++) {
- if (property->values[i] == out_resp->value) {
- found = 1;
- break;
- }
- }
- if (!found) {
- goto out;
- }
- }
-
- /* Do DPMS ourselves */
- if (property == connector->dev->mode_config.dpms_property) {
- if (connector->funcs->dpms)
- (*connector->funcs->dpms)(connector, (int) out_resp->value);
- ret = 0;
- } else if (connector->funcs->set_property)
- ret = connector->funcs->set_property(connector, property, out_resp->value);
-
- /* store the property value if successful */
- if (!ret)
- drm_connector_property_set_value(connector, property, out_resp->value);
-out:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-}
-
-int drm_mode_connector_attach_encoder(struct drm_connector *connector,
- struct drm_encoder *encoder)
-{
- int i;
-
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == 0) {
- connector->encoder_ids[i] = encoder->base.id;
- return 0;
- }
- }
- return -ENOMEM;
-}
-EXPORT_SYMBOL(drm_mode_connector_attach_encoder);
-
-void drm_mode_connector_detach_encoder(struct drm_connector *connector,
- struct drm_encoder *encoder)
-{
- int i;
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == encoder->base.id) {
- connector->encoder_ids[i] = 0;
- if (connector->encoder == encoder)
- connector->encoder = NULL;
- break;
- }
- }
-}
-EXPORT_SYMBOL(drm_mode_connector_detach_encoder);
-
-bool drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
- int gamma_size)
-{
- crtc->gamma_size = gamma_size;
-
- crtc->gamma_store = kzalloc(gamma_size * sizeof(uint16_t) * 3, GFP_KERNEL);
- if (!crtc->gamma_store) {
- crtc->gamma_size = 0;
- return false;
- }
-
- return true;
-}
-EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size);
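/*
 * The gamma store allocated above is laid out as three consecutive arrays
 * of gamma_size u16 entries: red, then green, then blue.  That is why the
 * gamma ioctls below derive g_base and b_base by offsetting from r_base.
 * A small sketch of filling it with a linear ramp (helper name is
 * illustrative only):
 */
static void example_init_linear_gamma(struct drm_crtc *crtc)
{
	uint16_t *r = crtc->gamma_store;
	uint16_t *g = r + crtc->gamma_size;
	uint16_t *b = g + crtc->gamma_size;
	int i;

	if (!r || crtc->gamma_size < 2)
		return;

	for (i = 0; i < crtc->gamma_size; i++)
		r[i] = g[i] = b[i] = (i * 0xffff) / (crtc->gamma_size - 1);
}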
-
-int drm_mode_gamma_set_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_crtc_lut *crtc_lut = data;
- struct drm_mode_object *obj;
- struct drm_crtc *crtc;
- void *r_base, *g_base, *b_base;
- int size;
- int ret = 0;
-
- mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
- if (!obj) {
- ret = -EINVAL;
- goto out;
- }
- crtc = obj_to_crtc(obj);
-
- /* memcpy into gamma store */
- if (crtc_lut->gamma_size != crtc->gamma_size) {
- ret = -EINVAL;
- goto out;
- }
-
- size = crtc_lut->gamma_size * (sizeof(uint16_t));
- r_base = crtc->gamma_store;
- if (copy_from_user(r_base, (void __user *)(unsigned long)crtc_lut->red, size)) {
- ret = -EFAULT;
- goto out;
- }
-
- g_base = r_base + size;
- if (copy_from_user(g_base, (void __user *)(unsigned long)crtc_lut->green, size)) {
- ret = -EFAULT;
- goto out;
- }
-
- b_base = g_base + size;
- if (copy_from_user(b_base, (void __user *)(unsigned long)crtc_lut->blue, size)) {
- ret = -EFAULT;
- goto out;
- }
-
- crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, crtc->gamma_size);
-
-out:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-
-}
-
-int drm_mode_gamma_get_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_crtc_lut *crtc_lut = data;
- struct drm_mode_object *obj;
- struct drm_crtc *crtc;
- void *r_base, *g_base, *b_base;
- int size;
- int ret = 0;
-
- mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
- if (!obj) {
- ret = -EINVAL;
- goto out;
- }
- crtc = obj_to_crtc(obj);
-
- /* memcpy into gamma store */
- if (crtc_lut->gamma_size != crtc->gamma_size) {
- ret = -EINVAL;
- goto out;
- }
-
- size = crtc_lut->gamma_size * (sizeof(uint16_t));
- r_base = crtc->gamma_store;
- if (copy_to_user((void __user *)(unsigned long)crtc_lut->red, r_base, size)) {
- ret = -EFAULT;
- goto out;
- }
-
- g_base = r_base + size;
- if (copy_to_user((void __user *)(unsigned long)crtc_lut->green, g_base, size)) {
- ret = -EFAULT;
- goto out;
- }
-
- b_base = g_base + size;
- if (copy_to_user((void __user *)(unsigned long)crtc_lut->blue, b_base, size)) {
- ret = -EFAULT;
- goto out;
- }
-out:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-}
-
-int drm_mode_page_flip_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_crtc_page_flip *page_flip = data;
- struct drm_mode_object *obj;
- struct drm_crtc *crtc;
- struct drm_framebuffer *fb;
- struct drm_pending_vblank_event *e = NULL;
- unsigned long flags;
- int ret = -EINVAL;
-
- if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
- page_flip->reserved != 0)
- return -EINVAL;
-
- mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC);
- if (!obj)
- goto out;
- crtc = obj_to_crtc(obj);
-
- if (crtc->funcs->page_flip == NULL)
- goto out;
-
- obj = drm_mode_object_find(dev, page_flip->fb_id, DRM_MODE_OBJECT_FB);
- if (!obj)
- goto out;
- fb = obj_to_fb(obj);
-
- if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
- ret = -ENOMEM;
- spin_lock_irqsave(&dev->event_lock, flags);
- if (file_priv->event_space < sizeof e->event) {
- spin_unlock_irqrestore(&dev->event_lock, flags);
- goto out;
- }
- file_priv->event_space -= sizeof e->event;
- spin_unlock_irqrestore(&dev->event_lock, flags);
-
- e = kzalloc(sizeof *e, GFP_KERNEL);
- if (e == NULL) {
- spin_lock_irqsave(&dev->event_lock, flags);
- file_priv->event_space += sizeof e->event;
- spin_unlock_irqrestore(&dev->event_lock, flags);
- goto out;
- }
-
- e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
- e->event.base.length = sizeof e->event;
- e->event.user_data = page_flip->user_data;
- e->base.event = &e->event.base;
- e->base.file_priv = file_priv;
- e->base.destroy =
- (void (*) (struct drm_pending_event *)) kfree;
- }
-
- ret = crtc->funcs->page_flip(crtc, fb, e);
- if (ret) {
- spin_lock_irqsave(&dev->event_lock, flags);
- file_priv->event_space += sizeof e->event;
- spin_unlock_irqrestore(&dev->event_lock, flags);
- kfree(e);
- }
-
-out:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-}
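/*
 * The DRM_MODE_PAGE_FLIP_EVENT path above only reserves event space and
 * allocates the event; the driver's ->page_flip() implementation is
 * expected to queue it once the flip completes, typically from its vblank
 * interrupt handler.  A simplified sketch under those assumptions (the
 * example_* name is hypothetical, and the sequence number is left to the
 * driver):
 */
static void example_finish_page_flip(struct drm_device *dev,
                                     struct drm_pending_vblank_event *e)
{
	struct timeval now;
	unsigned long flags;

	do_gettimeofday(&now);

	spin_lock_irqsave(&dev->event_lock, flags);
	e->event.sequence = 0;          /* or the current vblank count */
	e->event.tv_sec = now.tv_sec;
	e->event.tv_usec = now.tv_usec;
	list_add_tail(&e->base.link, &e->base.file_priv->event_list);
	wake_up_interruptible(&e->base.file_priv->event_wait);
	spin_unlock_irqrestore(&dev->event_lock, flags);
}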
+++ /dev/null
-/**
- * \file drm_debugfs.c
- * debugfs support for DRM
- *
- * \author Ben Gamari <bgamari@gmail.com>
- */
-
-/*
- * Created: Sun Dec 21 13:08:50 2008 by bgamari@gmail.com
- *
- * Copyright 2008 Ben Gamari <bgamari@gmail.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include "drmP.h"
-
-#if defined(CONFIG_DEBUG_FS)
-
-/***************************************************
- * Initialization, etc.
- **************************************************/
-
-static struct drm_info_list drm_debugfs_list[] = {
- {"name", drm_name_info, 0},
- {"vm", drm_vm_info, 0},
- {"clients", drm_clients_info, 0},
- {"queues", drm_queues_info, 0},
- {"bufs", drm_bufs_info, 0},
- {"gem_names", drm_gem_name_info, DRIVER_GEM},
- {"gem_objects", drm_gem_object_info, DRIVER_GEM},
-#if DRM_DEBUG_CODE
- {"vma", drm_vma_info, 0},
-#endif
-};
-#define DRM_DEBUGFS_ENTRIES ARRAY_SIZE(drm_debugfs_list)
-
-
-static int drm_debugfs_open(struct inode *inode, struct file *file)
-{
- struct drm_info_node *node = inode->i_private;
-
- return single_open(file, node->info_ent->show, node);
-}
-
-
-static const struct file_operations drm_debugfs_fops = {
- .owner = THIS_MODULE,
- .open = drm_debugfs_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-
-/**
- * Initialize a given set of debugfs files for a device
- *
- * \param files The array of files to create
- * \param count The number of files given
- * \param root DRI debugfs dir entry.
- * \param minor DRM minor to register the files for
- * \return Zero on success, non-zero on failure
- *
- * Create a given set of debugfs files represented by an array of
- * drm_info_list entries in the given root directory.
- */
-int drm_debugfs_create_files(struct drm_info_list *files, int count,
- struct dentry *root, struct drm_minor *minor)
-{
- struct drm_device *dev = minor->dev;
- struct dentry *ent;
- struct drm_info_node *tmp;
- int i, ret;
-
- for (i = 0; i < count; i++) {
- u32 features = files[i].driver_features;
-
- if (features != 0 &&
- (dev->driver->driver_features & features) != features)
- continue;
-
- tmp = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
- if (tmp == NULL) {
- ret = -1;
- goto fail;
- }
- ent = debugfs_create_file(files[i].name, S_IFREG | S_IRUGO,
- root, tmp, &drm_debugfs_fops);
- if (!ent) {
- DRM_ERROR("Cannot create /sys/kernel/debug/dri/%s/%s\n",
- name, files[i].name);
- kfree(tmp);
- ret = -1;
- goto fail;
- }
-
- tmp->minor = minor;
- tmp->dent = ent;
- tmp->info_ent = &files[i];
- list_add(&(tmp->list), &(minor->debugfs_nodes.list));
- }
- return 0;
-
-fail:
- drm_debugfs_remove_files(files, count, minor);
- return ret;
-}
-EXPORT_SYMBOL(drm_debugfs_create_files);
-
-/**
- * Initialize the DRI debugfs filesystem for a device
- *
- * \param minor DRM minor to initialize
- * \param minor_id device minor number
- * \param root DRI debugfs dir entry.
- *
- * Create the DRI debugfs root entry "/sys/kernel/debug/dri", the device debugfs root entry
- * "/sys/kernel/debug/dri/%minor%/", and each entry in debugfs_list as
- * "/sys/kernel/debug/dri/%minor%/%name%".
- */
-int drm_debugfs_init(struct drm_minor *minor, int minor_id,
- struct dentry *root)
-{
- struct drm_device *dev = minor->dev;
- char name[64];
- int ret;
-
- INIT_LIST_HEAD(&minor->debugfs_nodes.list);
- sprintf(name, "%d", minor_id);
- minor->debugfs_root = debugfs_create_dir(name, root);
- if (!minor->debugfs_root) {
- DRM_ERROR("Cannot create /sys/kernel/debug/dri/%s\n", name);
- return -1;
- }
-
- ret = drm_debugfs_create_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES,
- minor->debugfs_root, minor);
- if (ret) {
- debugfs_remove(minor->debugfs_root);
- minor->debugfs_root = NULL;
- DRM_ERROR("Failed to create core drm debugfs files\n");
- return ret;
- }
-
- if (dev->driver->debugfs_init) {
- ret = dev->driver->debugfs_init(minor);
- if (ret) {
- DRM_ERROR("DRM: Driver failed to initialize "
- "/sys/kernel/debug/dri.\n");
- return ret;
- }
- }
- return 0;
-}
-
-
-/**
- * Remove a list of debugfs files
- *
- * \param files The list of files
- * \param count The number of files
- * \param minor The DRM minor from which to remove the files
- * \return always zero.
- *
- * Remove the debugfs entries in \p files from \p minor.
- */
-int drm_debugfs_remove_files(struct drm_info_list *files, int count,
- struct drm_minor *minor)
-{
- struct list_head *pos, *q;
- struct drm_info_node *tmp;
- int i;
-
- for (i = 0; i < count; i++) {
- list_for_each_safe(pos, q, &minor->debugfs_nodes.list) {
- tmp = list_entry(pos, struct drm_info_node, list);
- if (tmp->info_ent == &files[i]) {
- debugfs_remove(tmp->dent);
- list_del(pos);
- kfree(tmp);
- }
- }
- }
- return 0;
-}
-EXPORT_SYMBOL(drm_debugfs_remove_files);
-
-/**
- * Cleanup the debugfs filesystem resources.
- *
- * \param minor device minor number.
- * \return always zero.
- *
- * Remove all debugfs entries created by debugfs_init().
- */
-int drm_debugfs_cleanup(struct drm_minor *minor)
-{
- struct drm_device *dev = minor->dev;
-
- if (!minor->debugfs_root)
- return 0;
-
- if (dev->driver->debugfs_cleanup)
- dev->driver->debugfs_cleanup(minor);
-
- drm_debugfs_remove_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES, minor);
-
- debugfs_remove(minor->debugfs_root);
- minor->debugfs_root = NULL;
-
- return 0;
-}
-
-#endif /* CONFIG_DEBUG_FS */
-
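/*
 * A sketch of how a driver hooks its own files in via the helpers above,
 * from its drm_driver.debugfs_init callback.  The example_* names are
 * illustrative; the includes match those used by the file above
 * (<linux/seq_file.h> and "drmP.h").
 */
static int example_status_show(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;

	seq_printf(m, "device %s\n", dev->driver->name);
	return 0;
}

static struct drm_info_list example_debugfs_list[] = {
	{ "example_status", example_status_show, 0 },
};

static int example_debugfs_init(struct drm_minor *minor)
{
	return drm_debugfs_create_files(example_debugfs_list,
					ARRAY_SIZE(example_debugfs_list),
					minor->debugfs_root, minor);
}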
+++ /dev/null
-/**
- * \file drm_dma.c
- * DMA IOCTL and function support
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
- *
- * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "drmP.h"
-
-/**
- * Initialize the DMA data.
- *
- * \param dev DRM device.
- * \return zero on success or a negative value on failure.
- *
- * Allocate and initialize a drm_device_dma structure.
- */
-int drm_dma_setup(struct drm_device *dev)
-{
- int i;
-
- dev->dma = kmalloc(sizeof(*dev->dma), GFP_KERNEL);
- if (!dev->dma)
- return -ENOMEM;
-
- memset(dev->dma, 0, sizeof(*dev->dma));
-
- for (i = 0; i <= DRM_MAX_ORDER; i++)
- memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0]));
-
- return 0;
-}
-
-/**
- * Cleanup the DMA resources.
- *
- * \param dev DRM device.
- *
- * Free all pages associated with DMA buffers, the buffers and pages lists, and
- * finally the drm_device::dma structure itself.
- */
-void drm_dma_takedown(struct drm_device *dev)
-{
- struct drm_device_dma *dma = dev->dma;
- int i, j;
-
- if (!dma)
- return;
-
- /* Clear dma buffers */
- for (i = 0; i <= DRM_MAX_ORDER; i++) {
- if (dma->bufs[i].seg_count) {
- DRM_DEBUG("order %d: buf_count = %d,"
- " seg_count = %d\n",
- i,
- dma->bufs[i].buf_count,
- dma->bufs[i].seg_count);
- for (j = 0; j < dma->bufs[i].seg_count; j++) {
- if (dma->bufs[i].seglist[j]) {
- drm_pci_free(dev, dma->bufs[i].seglist[j]);
- }
- }
- kfree(dma->bufs[i].seglist);
- }
- if (dma->bufs[i].buf_count) {
- for (j = 0; j < dma->bufs[i].buf_count; j++) {
- kfree(dma->bufs[i].buflist[j].dev_private);
- }
- kfree(dma->bufs[i].buflist);
- }
- }
-
- kfree(dma->buflist);
- kfree(dma->pagelist);
- kfree(dev->dma);
- dev->dma = NULL;
-}
-
-/**
- * Free a buffer.
- *
- * \param dev DRM device.
- * \param buf buffer to free.
- *
- * Resets the fields of \p buf.
- */
-void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf)
-{
- if (!buf)
- return;
-
- buf->waiting = 0;
- buf->pending = 0;
- buf->file_priv = NULL;
- buf->used = 0;
-
- if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE)
- && waitqueue_active(&buf->dma_wait)) {
- wake_up_interruptible(&buf->dma_wait);
- }
-}
-
-/**
- * Reclaim the buffers.
- *
- * \param file_priv DRM file private.
- *
- * Frees each buffer associated with \p file_priv not already on the hardware.
- */
-void drm_core_reclaim_buffers(struct drm_device *dev,
- struct drm_file *file_priv)
-{
- struct drm_device_dma *dma = dev->dma;
- int i;
-
- if (!dma)
- return;
- for (i = 0; i < dma->buf_count; i++) {
- if (dma->buflist[i]->file_priv == file_priv) {
- switch (dma->buflist[i]->list) {
- case DRM_LIST_NONE:
- drm_free_buffer(dev, dma->buflist[i]);
- break;
- case DRM_LIST_WAIT:
- dma->buflist[i]->list = DRM_LIST_RECLAIM;
- break;
- default:
- /* Buffer already on hardware. */
- break;
- }
- }
- }
-}
-
-EXPORT_SYMBOL(drm_core_reclaim_buffers);
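/*
 * Legacy DMA drivers normally do not call drm_core_reclaim_buffers()
 * directly; they wire it up as their reclaim hook so the core runs it when
 * a file descriptor is closed.  Sketch (all other drm_driver fields
 * elided, names illustrative):
 */
static struct drm_driver example_legacy_driver = {
	.driver_features = DRIVER_HAVE_DMA,
	.reclaim_buffers = drm_core_reclaim_buffers,
	/* ... remaining drm_driver fields omitted ... */
};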
+++ /dev/null
-/**
- * \file drm_drawable.c
- * IOCTLs for drawables
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- * \author Michel Dänzer <michel@tungstengraphics.com>
- */
-
-/*
- * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, North Dakota.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "drmP.h"
-
-/**
- * Allocate drawable ID and memory to store information about it.
- */
-int drm_adddraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- unsigned long irqflags;
- struct drm_draw *draw = data;
- int new_id = 0;
- int ret;
-
-again:
- if (idr_pre_get(&dev->drw_idr, GFP_KERNEL) == 0) {
- DRM_ERROR("Out of memory expanding drawable idr\n");
- return -ENOMEM;
- }
-
- spin_lock_irqsave(&dev->drw_lock, irqflags);
- ret = idr_get_new_above(&dev->drw_idr, NULL, 1, &new_id);
- if (ret == -EAGAIN) {
- spin_unlock_irqrestore(&dev->drw_lock, irqflags);
- goto again;
- }
-
- spin_unlock_irqrestore(&dev->drw_lock, irqflags);
-
- draw->handle = new_id;
-
- DRM_DEBUG("%d\n", draw->handle);
-
- return 0;
-}
-
-/**
- * Free drawable ID and memory to store information about it.
- */
-int drm_rmdraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- struct drm_draw *draw = data;
- unsigned long irqflags;
- struct drm_drawable_info *info;
-
- spin_lock_irqsave(&dev->drw_lock, irqflags);
-
- info = drm_get_drawable_info(dev, draw->handle);
- if (info == NULL) {
- spin_unlock_irqrestore(&dev->drw_lock, irqflags);
- return -EINVAL;
- }
- kfree(info->rects);
- kfree(info);
-
- idr_remove(&dev->drw_idr, draw->handle);
-
- spin_unlock_irqrestore(&dev->drw_lock, irqflags);
- DRM_DEBUG("%d\n", draw->handle);
- return 0;
-}
-
-int drm_update_drawable_info(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- struct drm_update_draw *update = data;
- unsigned long irqflags;
- struct drm_clip_rect *rects;
- struct drm_drawable_info *info;
- int err;
-
- info = idr_find(&dev->drw_idr, update->handle);
- if (!info) {
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
- return -ENOMEM;
- if (IS_ERR(idr_replace(&dev->drw_idr, info, update->handle))) {
- DRM_ERROR("No such drawable %d\n", update->handle);
- kfree(info);
- return -EINVAL;
- }
- }
-
- switch (update->type) {
- case DRM_DRAWABLE_CLIPRECTS:
- if (update->num == 0)
- rects = NULL;
- else if (update->num != info->num_rects) {
- rects = kmalloc(update->num *
- sizeof(struct drm_clip_rect),
- GFP_KERNEL);
- } else
- rects = info->rects;
-
- if (update->num && !rects) {
- DRM_ERROR("Failed to allocate cliprect memory\n");
- err = -ENOMEM;
- goto error;
- }
-
- if (update->num && DRM_COPY_FROM_USER(rects,
- (struct drm_clip_rect __user *)
- (unsigned long)update->data,
- update->num *
- sizeof(*rects))) {
- DRM_ERROR("Failed to copy cliprects from userspace\n");
- err = -EFAULT;
- goto error;
- }
-
- spin_lock_irqsave(&dev->drw_lock, irqflags);
-
- if (rects != info->rects) {
- kfree(info->rects);
- }
-
- info->rects = rects;
- info->num_rects = update->num;
-
- spin_unlock_irqrestore(&dev->drw_lock, irqflags);
-
- DRM_DEBUG("Updated %d cliprects for drawable %d\n",
- info->num_rects, update->handle);
- break;
- default:
- DRM_ERROR("Invalid update type %d\n", update->type);
- return -EINVAL;
- }
-
- return 0;
-
-error:
- if (rects != info->rects)
- kfree(rects);
-
- return err;
-}
-
-/**
- * Caller must hold the drawable spinlock!
- */
-struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev, drm_drawable_t id)
-{
- return idr_find(&dev->drw_idr, id);
-}
-EXPORT_SYMBOL(drm_get_drawable_info);
-
-static int drm_drawable_free(int idr, void *p, void *data)
-{
- struct drm_drawable_info *info = p;
-
- if (info) {
- kfree(info->rects);
- kfree(info);
- }
-
- return 0;
-}
-
-void drm_drawable_free_all(struct drm_device *dev)
-{
- idr_for_each(&dev->drw_idr, drm_drawable_free, NULL);
- idr_remove_all(&dev->drw_idr);
-}
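/*
 * As noted above, drm_get_drawable_info() must be called with dev->drw_lock
 * held.  A sketch of how a driver might snapshot a drawable's cliprects
 * under that lock (helper name and buffer handling are illustrative):
 */
static int example_copy_cliprects(struct drm_device *dev, drm_drawable_t id,
                                  struct drm_clip_rect *out, int max_rects)
{
	struct drm_drawable_info *info;
	unsigned long irqflags;
	int n = 0;

	spin_lock_irqsave(&dev->drw_lock, irqflags);
	info = drm_get_drawable_info(dev, id);
	if (info) {
		n = min_t(int, info->num_rects, max_rects);
		memcpy(out, info->rects, n * sizeof(*out));
	}
	spin_unlock_irqrestore(&dev->drw_lock, irqflags);

	return n;
}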
+++ /dev/null
-/**
- * \file drm_drv.c
- * Generic driver template
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- *
- * To use this template, you must at least define the following (samples
- * given for the MGA driver):
- *
- * \code
- * #define DRIVER_AUTHOR "VA Linux Systems, Inc."
- *
- * #define DRIVER_NAME "mga"
- * #define DRIVER_DESC "Matrox G200/G400"
- * #define DRIVER_DATE "20001127"
- *
- * #define drm_x mga_##x
- * \endcode
- */
-
-/*
- * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
- *
- * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/debugfs.h>
-#include "drmP.h"
-#include "drm_core.h"
-
-
-static int drm_version(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-
-/** Ioctl table */
-static struct drm_ioctl_desc drm_ioctls[] = {
- DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
-
- DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER),
-
- DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),
-
- DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH),
-
- DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY),
-
- DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH),
-
- DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-
- DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),
-
- DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),
-
- DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
- /* The DRM_IOCTL_DMA ioctl should be defined by the driver. */
- DRM_IOCTL_DEF(DRM_IOCTL_DMA, NULL, DRM_AUTH),
-
- DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-
-#if __OS_HAS_AGP
- DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-#endif
-
- DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-
- DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0),
-
- DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
-
- DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-
- DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH),
-
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW)
-};
-
-#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
-
-/**
- * Take down the DRM device.
- *
- * \param dev DRM device structure.
- *
- * Frees every resource in \p dev.
- *
- * \sa drm_device
- */
-int drm_lastclose(struct drm_device * dev)
-{
- struct drm_vma_entry *vma, *vma_temp;
- int i;
-
- DRM_DEBUG("\n");
-
- if (dev->driver->lastclose)
- dev->driver->lastclose(dev);
- DRM_DEBUG("driver lastclose completed\n");
-
- if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET))
- drm_irq_uninstall(dev);
-
- mutex_lock(&dev->struct_mutex);
-
- /* Free drawable information memory */
- drm_drawable_free_all(dev);
- del_timer(&dev->timer);
-
- /* Clear AGP information */
- if (drm_core_has_AGP(dev) && dev->agp &&
- !drm_core_check_feature(dev, DRIVER_MODESET)) {
- struct drm_agp_mem *entry, *tempe;
-
- /* Remove AGP resources, but leave dev->agp
- intact until drv_cleanup is called. */
- list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
- if (entry->bound)
- drm_unbind_agp(entry->memory);
- drm_free_agp(entry->memory, entry->pages);
- kfree(entry);
- }
- INIT_LIST_HEAD(&dev->agp->memory);
-
- if (dev->agp->acquired)
- drm_agp_release(dev);
-
- dev->agp->acquired = 0;
- dev->agp->enabled = 0;
- }
- if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg &&
- !drm_core_check_feature(dev, DRIVER_MODESET)) {
- drm_sg_cleanup(dev->sg);
- dev->sg = NULL;
- }
-
- /* Clear vma list (only built for debugging) */
- list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
- list_del(&vma->head);
- kfree(vma);
- }
-
- if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist) {
- for (i = 0; i < dev->queue_count; i++) {
- kfree(dev->queuelist[i]);
- dev->queuelist[i] = NULL;
- }
- kfree(dev->queuelist);
- dev->queuelist = NULL;
- }
- dev->queue_count = 0;
-
- if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
- !drm_core_check_feature(dev, DRIVER_MODESET))
- drm_dma_takedown(dev);
-
- dev->dev_mapping = NULL;
- mutex_unlock(&dev->struct_mutex);
-
- DRM_DEBUG("lastclose completed\n");
- return 0;
-}
-
-/**
- * Module initialization. Called via init_module at module load time, or via
- * linux/init/main.c (this is not currently supported).
- *
- * \return zero on success or a negative number on failure.
- *
- * Initializes an array of drm_device structures, and attempts to
- * initialize all available devices, using consecutive minors, registering the
- * stubs and initializing the AGP device.
- *
- * Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
- * after the initialization for driver customization.
- */
-int drm_init(struct drm_driver *driver)
-{
- struct pci_dev *pdev = NULL;
- const struct pci_device_id *pid;
- int i;
-
- DRM_DEBUG("\n");
-
- INIT_LIST_HEAD(&driver->device_list);
-
- if (driver->driver_features & DRIVER_MODESET)
- return pci_register_driver(&driver->pci_driver);
-
- /* If not using KMS, fall back to stealth mode manual scanning. */
- for (i = 0; driver->pci_driver.id_table[i].vendor != 0; i++) {
- pid = &driver->pci_driver.id_table[i];
-
- /* Loop around setting up a DRM device for each PCI device
- * matching our ID and device class. If we had the internal
- * function that pci_get_subsys and pci_get_class used, we'd
- * be able to just pass pid in instead of doing a two-stage
- * thing.
- */
- pdev = NULL;
- while ((pdev =
- pci_get_subsys(pid->vendor, pid->device, pid->subvendor,
- pid->subdevice, pdev)) != NULL) {
- if ((pdev->class & pid->class_mask) != pid->class)
- continue;
-
- /* stealth mode requires a manual probe */
- pci_dev_get(pdev);
- drm_get_dev(pdev, pid, driver);
- }
- }
- return 0;
-}
-
-EXPORT_SYMBOL(drm_init);
-
-void drm_exit(struct drm_driver *driver)
-{
- struct drm_device *dev, *tmp;
- DRM_DEBUG("\n");
-
- if (driver->driver_features & DRIVER_MODESET) {
- pci_unregister_driver(&driver->pci_driver);
- } else {
- list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
- drm_put_dev(dev);
- }
-
- DRM_INFO("Module unloaded\n");
-}
-
-EXPORT_SYMBOL(drm_exit);
-
-/** File operations structure */
-static const struct file_operations drm_stub_fops = {
- .owner = THIS_MODULE,
- .open = drm_stub_open
-};
-
-static int __init drm_core_init(void)
-{
- int ret = -ENOMEM;
-
- idr_init(&drm_minors_idr);
-
- if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
- goto err_p1;
-
- drm_class = drm_sysfs_create(THIS_MODULE, "drm");
- if (IS_ERR(drm_class)) {
- printk(KERN_ERR "DRM: Error creating drm class.\n");
- ret = PTR_ERR(drm_class);
- goto err_p2;
- }
-
- drm_proc_root = proc_mkdir("dri", NULL);
- if (!drm_proc_root) {
- DRM_ERROR("Cannot create /proc/dri\n");
- ret = -1;
- goto err_p3;
- }
-
- drm_debugfs_root = debugfs_create_dir("dri", NULL);
- if (!drm_debugfs_root) {
- DRM_ERROR("Cannot create /sys/kernel/debug/dri\n");
- ret = -1;
- goto err_p3;
- }
-
- DRM_INFO("Initialized %s %d.%d.%d %s\n",
- CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
- return 0;
-err_p3:
- drm_sysfs_destroy();
-err_p2:
- unregister_chrdev(DRM_MAJOR, "drm");
-
- idr_destroy(&drm_minors_idr);
-err_p1:
- return ret;
-}
-
-static void __exit drm_core_exit(void)
-{
- remove_proc_entry("dri", NULL);
- debugfs_remove(drm_debugfs_root);
- drm_sysfs_destroy();
-
- unregister_chrdev(DRM_MAJOR, "drm");
-
- idr_destroy(&drm_minors_idr);
-}
-
-module_init(drm_core_init);
-module_exit(drm_core_exit);
-
-/**
- * Copy an IOCTL return string to user space
- */
-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
-{
- int len;
-
- /* don't overflow userbuf */
- len = strlen(value);
- if (len > *buf_len)
- len = *buf_len;
-
- /* let userspace know exact length of driver value (which could be
- * larger than the userspace-supplied buffer) */
- *buf_len = strlen(value);
-
- /* finally, try filling in the userbuf */
- if (len && buf)
- if (copy_to_user(buf, value, len))
- return -EFAULT;
- return 0;
-}
-
-/**
- * Get version information
- *
- * \param inode device inode.
- * \param filp file pointer.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_version structure.
- * \return zero on success or negative number on failure.
- *
- * Fills in the version information in \p arg.
- */
-static int drm_version(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_version *version = data;
- int err;
-
- version->version_major = dev->driver->major;
- version->version_minor = dev->driver->minor;
- version->version_patchlevel = dev->driver->patchlevel;
- err = drm_copy_field(version->name, &version->name_len,
- dev->driver->name);
- if (!err)
- err = drm_copy_field(version->date, &version->date_len,
- dev->driver->date);
- if (!err)
- err = drm_copy_field(version->desc, &version->desc_len,
- dev->driver->desc);
-
- return err;
-}
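/*
 * Seen from userspace, the name_len/name pairs above imply the usual
 * two-pass pattern: issue DRM_IOCTL_VERSION once with NULL buffers to
 * learn the lengths, allocate, then issue it again.  A userspace-side
 * sketch (error handling trimmed; fd is assumed to be an open DRM node,
 * and the drm.h include path may differ depending on the installation):
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

static void example_print_version(int fd)
{
	struct drm_version v;

	memset(&v, 0, sizeof(v));
	ioctl(fd, DRM_IOCTL_VERSION, &v);      /* first pass: lengths only */

	v.name = malloc(v.name_len + 1);
	v.date = malloc(v.date_len + 1);
	v.desc = malloc(v.desc_len + 1);
	ioctl(fd, DRM_IOCTL_VERSION, &v);      /* second pass: fill buffers */
	v.name[v.name_len] = '\0';             /* kernel does not terminate */

	printf("%s %d.%d.%d\n", v.name,
	       v.version_major, v.version_minor, v.version_patchlevel);
	free(v.name);
	free(v.date);
	free(v.desc);
}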
-
-/**
- * Called whenever a process performs an ioctl on /dev/drm.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument.
- * \return zero on success or negative number on failure.
- *
- * Looks up the ioctl function in the ::ioctls table, checking for root
- * privileges if so required, and dispatches to the respective function.
- */
-int drm_ioctl(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
-{
- struct drm_file *file_priv = filp->private_data;
- struct drm_device *dev = file_priv->minor->dev;
- struct drm_ioctl_desc *ioctl;
- drm_ioctl_t *func;
- unsigned int nr = DRM_IOCTL_NR(cmd);
- int retcode = -EINVAL;
- char stack_kdata[128];
- char *kdata = NULL;
-
- atomic_inc(&dev->ioctl_count);
- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
- ++file_priv->ioctl_count;
-
- DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
- task_pid_nr(current), cmd, nr,
- (long)old_encode_dev(file_priv->minor->device),
- file_priv->authenticated);
-
- if ((nr >= DRM_CORE_IOCTL_COUNT) &&
- ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
- goto err_i1;
- if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) &&
- (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls))
- ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
- else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
- ioctl = &drm_ioctls[nr];
- cmd = ioctl->cmd;
- } else
- goto err_i1;
-
- /* Do not trust userspace, use our own definition */
- func = ioctl->func;
- /* is there a local override? */
- if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl)
- func = dev->driver->dma_ioctl;
-
- if (!func) {
- DRM_DEBUG("no function\n");
- retcode = -EINVAL;
- } else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) ||
- ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) ||
- ((ioctl->flags & DRM_MASTER) && !file_priv->is_master) ||
- (!(ioctl->flags & DRM_CONTROL_ALLOW) && (file_priv->minor->type == DRM_MINOR_CONTROL))) {
- retcode = -EACCES;
- } else {
- if (cmd & (IOC_IN | IOC_OUT)) {
- if (_IOC_SIZE(cmd) <= sizeof(stack_kdata)) {
- kdata = stack_kdata;
- } else {
- kdata = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL);
- if (!kdata) {
- retcode = -ENOMEM;
- goto err_i1;
- }
- }
- }
-
- if (cmd & IOC_IN) {
- if (copy_from_user(kdata, (void __user *)arg,
- _IOC_SIZE(cmd)) != 0) {
- retcode = -EFAULT;
- goto err_i1;
- }
- }
- retcode = func(dev, kdata, file_priv);
-
- if (cmd & IOC_OUT) {
- if (copy_to_user((void __user *)arg, kdata,
- _IOC_SIZE(cmd)) != 0)
- retcode = -EFAULT;
- }
- }
-
- err_i1:
- if (kdata != stack_kdata)
- kfree(kdata);
- atomic_dec(&dev->ioctl_count);
- if (retcode)
- DRM_DEBUG("ret = %x\n", retcode);
- return retcode;
-}
-
-EXPORT_SYMBOL(drm_ioctl);
-
-struct drm_local_map *drm_getsarea(struct drm_device *dev)
-{
- struct drm_map_list *entry;
-
- list_for_each_entry(entry, &dev->maplist, head) {
- if (entry->map && entry->map->type == _DRM_SHM &&
- (entry->map->flags & _DRM_CONTAINS_LOCK)) {
- return entry->map;
- }
- }
- return NULL;
-}
-EXPORT_SYMBOL(drm_getsarea);
+++ /dev/null
-/*
- * Copyright (c) 2006 Luc Verhaegen (quirks list)
- * Copyright (c) 2007-2008 Intel Corporation
- * Jesse Barnes <jesse.barnes@intel.com>
- *
- * DDC probing routines (drm_ddc_read & drm_do_probe_ddc_edid) originally from
- * FB layer.
- * Copyright (C) 2006 Dennis Munsie <dmunsie@cecropia.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-#include <linux/kernel.h>
-#include <linux/i2c.h>
-#include <linux/i2c-algo-bit.h>
-#include "drmP.h"
-#include "drm_edid.h"
-
-/*
- * TODO:
- * - support EDID 1.4 (incl. CE blocks)
- */
-
-/*
- * EDID blocks out in the wild have a variety of bugs, try to collect
- * them here (note that userspace may work around broken monitors first,
- * but fixes should make their way here so that the kernel "just works"
- * on as many displays as possible).
- */
-
-/* First detailed mode wrong, use largest 60Hz mode */
-#define EDID_QUIRK_PREFER_LARGE_60 (1 << 0)
-/* Reported 135MHz pixel clock is too high, needs adjustment */
-#define EDID_QUIRK_135_CLOCK_TOO_HIGH (1 << 1)
-/* Prefer the largest mode at 75 Hz */
-#define EDID_QUIRK_PREFER_LARGE_75 (1 << 2)
-/* Detail timing is in cm not mm */
-#define EDID_QUIRK_DETAILED_IN_CM (1 << 3)
-/* Detailed timing descriptors have bogus size values, so just take the
- * maximum size and use that.
- */
-#define EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE (1 << 4)
-/* Monitor forgot to set the first detailed is preferred bit. */
-#define EDID_QUIRK_FIRST_DETAILED_PREFERRED (1 << 5)
-/* use +hsync +vsync for detailed mode */
-#define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6)
-/* define the number of Extension EDID block */
-#define MAX_EDID_EXT_NUM 4
-
-#define LEVEL_DMT 0
-#define LEVEL_GTF 1
-#define LEVEL_CVT 2
-
-static struct edid_quirk {
- char *vendor;
- int product_id;
- u32 quirks;
-} edid_quirk_list[] = {
- /* Acer AL1706 */
- { "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 },
- /* Acer F51 */
- { "API", 0x7602, EDID_QUIRK_PREFER_LARGE_60 },
- /* Unknown Acer */
- { "ACR", 2423, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
-
- /* Belinea 10 15 55 */
- { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
- { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },
-
- /* Envision Peripherals, Inc. EN-7100e */
- { "EPI", 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH },
-
- /* Funai Electronics PM36B */
- { "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 |
- EDID_QUIRK_DETAILED_IN_CM },
-
- /* LG Philips LCD LP154W01-A5 */
- { "LPL", 0, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
- { "LPL", 0x2a00, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
-
- /* Philips 107p5 CRT */
- { "PHL", 57364, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
-
- /* Proview AY765C */
- { "PTS", 765, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
-
- /* Samsung SyncMaster 205BW. Note: irony */
- { "SAM", 541, EDID_QUIRK_DETAILED_SYNC_PP },
- /* Samsung SyncMaster 22[5-6]BW */
- { "SAM", 596, EDID_QUIRK_PREFER_LARGE_60 },
- { "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 },
-};
-
-
-/* Valid EDID header has these bytes */
-static const u8 edid_header[] = {
- 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
-};
-
-/**
- * edid_is_valid - sanity check EDID data
- * @edid: EDID data
- *
- * Sanity check the EDID block by looking at the header, the version number
- * and the checksum. Return 0 if the EDID doesn't check out, or 1 if it's
- * valid.
- */
-static bool edid_is_valid(struct edid *edid)
-{
- int i, score = 0;
- u8 csum = 0;
- u8 *raw_edid = (u8 *)edid;
-
- for (i = 0; i < sizeof(edid_header); i++)
- if (raw_edid[i] == edid_header[i])
- score++;
-
- if (score == 8) {
- /* header is intact, nothing to fix */
- } else if (score >= 6) {
- DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
- memcpy(raw_edid, edid_header, sizeof(edid_header));
- } else {
- goto bad;
- }
-
- for (i = 0; i < EDID_LENGTH; i++)
- csum += raw_edid[i];
- if (csum) {
- DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum);
- goto bad;
- }
-
- if (edid->version != 1) {
- DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
- goto bad;
- }
-
- if (edid->revision > 4)
- DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n");
-
- return 1;
-
-bad:
- if (raw_edid) {
- DRM_ERROR("Raw EDID:\n");
- print_hex_dump_bytes(KERN_ERR, DUMP_PREFIX_NONE, raw_edid, EDID_LENGTH);
- printk("\n");
- }
- return 0;
-}
-
-/**
- * edid_vendor - match a string against EDID's obfuscated vendor field
- * @edid: EDID to match
- * @vendor: vendor string
- *
- * Returns true if @vendor is in @edid, false otherwise
- */
-static bool edid_vendor(struct edid *edid, char *vendor)
-{
- char edid_vendor[3];
-
- edid_vendor[0] = ((edid->mfg_id[0] & 0x7c) >> 2) + '@';
- edid_vendor[1] = (((edid->mfg_id[0] & 0x3) << 3) |
- ((edid->mfg_id[1] & 0xe0) >> 5)) + '@';
- edid_vendor[2] = (edid->mfg_id[1] & 0x1f) + '@';
-
- return !strncmp(edid_vendor, vendor, 3);
-}
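
A worked example of the bit-unpacking above (byte values chosen for illustration): the three-letter PNP vendor ID packs three 5-bit letters ('A' = 1) into the two mfg_id bytes, so { 0x4c, 0x2d } decodes to "SAM".

	/* mfg_id[0] = 0x4c, mfg_id[1] = 0x2d:
	 *   letter 0: (0x4c & 0x7c) >> 2                          = 19 -> '@' + 19 = 'S'
	 *   letter 1: ((0x4c & 0x03) << 3) | ((0x2d & 0xe0) >> 5)  =  1 -> '@' +  1 = 'A'
	 *   letter 2: 0x2d & 0x1f                                  = 13 -> '@' + 13 = 'M'
	 */
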
-
-/**
- * edid_get_quirks - return quirk flags for a given EDID
- * @edid: EDID to process
- *
- * This tells subsequent routines what fixes they need to apply.
- */
-static u32 edid_get_quirks(struct edid *edid)
-{
- struct edid_quirk *quirk;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(edid_quirk_list); i++) {
- quirk = &edid_quirk_list[i];
-
- if (edid_vendor(edid, quirk->vendor) &&
- (EDID_PRODUCT_ID(edid) == quirk->product_id))
- return quirk->quirks;
- }
-
- return 0;
-}
-
-#define MODE_SIZE(m) ((m)->hdisplay * (m)->vdisplay)
-#define MODE_REFRESH_DIFF(m, r) (abs((m)->vrefresh - (r)))
-
-
-/**
- * edid_fixup_preferred - set preferred modes based on quirk list
- * @connector: has mode list to fix up
- * @quirks: quirks list
- *
- * Walk the mode list for @connector, clearing the preferred status
- * on existing modes and setting it anew for the right mode ala @quirks.
- */
-static void edid_fixup_preferred(struct drm_connector *connector,
- u32 quirks)
-{
- struct drm_display_mode *t, *cur_mode, *preferred_mode;
- int target_refresh = 0;
-
- if (list_empty(&connector->probed_modes))
- return;
-
- if (quirks & EDID_QUIRK_PREFER_LARGE_60)
- target_refresh = 60;
- if (quirks & EDID_QUIRK_PREFER_LARGE_75)
- target_refresh = 75;
-
- preferred_mode = list_first_entry(&connector->probed_modes,
- struct drm_display_mode, head);
-
- list_for_each_entry_safe(cur_mode, t, &connector->probed_modes, head) {
- cur_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
-
- if (cur_mode == preferred_mode)
- continue;
-
- /* Largest mode is preferred */
- if (MODE_SIZE(cur_mode) > MODE_SIZE(preferred_mode))
- preferred_mode = cur_mode;
-
- /* At a given size, try to get closest to target refresh */
- if ((MODE_SIZE(cur_mode) == MODE_SIZE(preferred_mode)) &&
- MODE_REFRESH_DIFF(cur_mode, target_refresh) <
- MODE_REFRESH_DIFF(preferred_mode, target_refresh)) {
- preferred_mode = cur_mode;
- }
- }
-
- preferred_mode->type |= DRM_MODE_TYPE_PREFERRED;
-}
-
-/*
- * Autogenerated from the DMT spec.
- * This table is copied from xfree86/modes/xf86EdidModes.c,
- * with the reduced-blanking modes removed.
- */
-static struct drm_display_mode drm_dmt_modes[] = {
- /* 640x350@85Hz */
- { DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
- 736, 832, 0, 350, 382, 385, 445, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 640x400@85Hz */
- { DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
- 736, 832, 0, 400, 401, 404, 445, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 720x400@85Hz */
- { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 756,
- 828, 936, 0, 400, 401, 404, 446, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 640x480@60Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
- 752, 800, 0, 480, 489, 492, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 640x480@72Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
- 704, 832, 0, 480, 489, 492, 520, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 640x480@75Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
- 720, 840, 0, 480, 481, 484, 500, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 640x480@85Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 36000, 640, 696,
- 752, 832, 0, 480, 481, 484, 509, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 800x600@56Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
- 896, 1024, 0, 600, 601, 603, 625, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 800x600@60Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
- 968, 1056, 0, 600, 601, 605, 628, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 800x600@72Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
- 976, 1040, 0, 600, 637, 643, 666, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 800x600@75Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
- 896, 1056, 0, 600, 601, 604, 625, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 800x600@85Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 56250, 800, 832,
- 896, 1048, 0, 600, 601, 604, 631, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 848x480@60Hz */
- { DRM_MODE("848x480", DRM_MODE_TYPE_DRIVER, 33750, 848, 864,
- 976, 1088, 0, 480, 486, 494, 517, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1024x768@43Hz, interlace */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
- 1208, 1264, 0, 768, 768, 772, 817, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 1024x768@60Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
- 1184, 1344, 0, 768, 771, 777, 806, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1024x768@70Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
- 1184, 1328, 0, 768, 771, 777, 806, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1024x768@75Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78750, 1024, 1040,
- 1136, 1312, 0, 768, 769, 772, 800, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1024x768@85Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072,
-		   1168, 1376, 0, 768, 769, 772, 808, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1152x864@75Hz */
- { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
- 1344, 1600, 0, 864, 865, 868, 900, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x768@60Hz */
- { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
- 1472, 1664, 0, 768, 771, 778, 798, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x768@75Hz */
- { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 102250, 1280, 1360,
- 1488, 1696, 0, 768, 771, 778, 805, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1280x768@85Hz */
- { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 117500, 1280, 1360,
- 1496, 1712, 0, 768, 771, 778, 809, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x800@60Hz */
- { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
- 1480, 1680, 0, 800, 803, 809, 831, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1280x800@75Hz */
- { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 106500, 1280, 1360,
- 1488, 1696, 0, 800, 803, 809, 838, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x800@85Hz */
- { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 122500, 1280, 1360,
- 1496, 1712, 0, 800, 803, 809, 843, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x960@60Hz */
- { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
- 1488, 1800, 0, 960, 961, 964, 1000, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x960@85Hz */
- { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1344,
- 1504, 1728, 0, 960, 961, 964, 1011, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x1024@60Hz */
- { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
- 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x1024@75Hz */
- { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
- 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x1024@85Hz */
- { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 157500, 1280, 1344,
- 1504, 1728, 0, 1024, 1025, 1028, 1072, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1360x768@60Hz */
- { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
- 1536, 1792, 0, 768, 771, 777, 795, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
-	/* 1400x1050@60Hz */
- { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
- 1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
-	/* 1400x1050@75Hz */
- { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 156000, 1400, 1504,
- 1648, 1896, 0, 1050, 1053, 1057, 1099, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
-	/* 1400x1050@85Hz */
- { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 179500, 1400, 1504,
- 1656, 1912, 0, 1050, 1053, 1057, 1105, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1440x900@60Hz */
- { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
- 1672, 1904, 0, 900, 903, 909, 934, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1440x900@75Hz */
- { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 136750, 1440, 1536,
- 1688, 1936, 0, 900, 903, 909, 942, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1440x900@85Hz */
- { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 157000, 1440, 1544,
- 1696, 1952, 0, 900, 903, 909, 948, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1600x1200@60Hz */
- { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
- 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1600x1200@65Hz */
- { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 175500, 1600, 1664,
- 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1600x1200@70Hz */
- { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 189000, 1600, 1664,
- 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1600x1200@75Hz */
- { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 2025000, 1600, 1664,
- 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1600x1200@85Hz */
- { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 229500, 1600, 1664,
- 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1680x1050@60Hz */
- { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
- 1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1680x1050@75Hz */
- { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 187000, 1680, 1800,
- 1976, 2272, 0, 1050, 1053, 1059, 1099, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1680x1050@85Hz */
- { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 214750, 1680, 1808,
- 1984, 2288, 0, 1050, 1053, 1059, 1105, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1792x1344@60Hz */
- { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
- 2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
-	/* 1792x1344@75Hz */
- { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 261000, 1792, 1888,
- 2104, 2456, 0, 1344, 1345, 1348, 1417, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
-	/* 1856x1392@60Hz */
- { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
- 2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1856x1392@75Hz */
- { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 288000, 1856, 1984,
- 2208, 2560, 0, 1392, 1395, 1399, 1500, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1200@60Hz */
- { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
- 2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1200@75Hz */
- { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 245250, 1920, 2056,
- 2264, 2608, 0, 1200, 1203, 1209, 1255, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1200@85Hz */
- { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 281250, 1920, 2064,
- 2272, 2624, 0, 1200, 1203, 1209, 1262, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1440@60Hz */
- { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
- 2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1440@75Hz */
- { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2064,
- 2288, 2640, 0, 1440, 1441, 1444, 1500, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 2560x1600@60Hz */
- { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
- 3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 2560x1600@75HZ */
- { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 443250, 2560, 2768,
- 3048, 3536, 0, 1600, 1603, 1609, 1672, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 2560x1600@85HZ */
- { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 505250, 2560, 2768,
- 3048, 3536, 0, 1600, 1603, 1609, 1682, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
-};
-static const int drm_num_dmt_modes =
- sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
-
-static struct drm_display_mode *drm_find_dmt(struct drm_device *dev,
- int hsize, int vsize, int fresh)
-{
- int i;
- struct drm_display_mode *ptr, *mode;
-
- mode = NULL;
- for (i = 0; i < drm_num_dmt_modes; i++) {
- ptr = &drm_dmt_modes[i];
- if (hsize == ptr->hdisplay &&
- vsize == ptr->vdisplay &&
- fresh == drm_mode_vrefresh(ptr)) {
- /* get the expected default mode */
- mode = drm_mode_duplicate(dev, ptr);
- break;
- }
- }
- return mode;
-}
-
-/*
- * 0 is reserved. The spec says 0x01 fill for unused timings. Some old
- * monitors fill with ascii space (0x20) instead.
- */
-static int
-bad_std_timing(u8 a, u8 b)
-{
- return (a == 0x00 && b == 0x00) ||
- (a == 0x01 && b == 0x01) ||
- (a == 0x20 && b == 0x20);
-}
-
-/**
- * drm_mode_std - convert standard mode info (width, height, refresh) into mode
- * @t: standard timing params
- * @timing_level: standard timing level
- *
- * Take the standard timing params (in this case width, aspect, and refresh)
- * and convert them into a real mode using CVT/GTF/DMT.
- *
- * Punts for now, but should eventually use the FB layer's CVT based mode
- * generation code.
- */
-struct drm_display_mode *drm_mode_std(struct drm_device *dev,
- struct std_timing *t,
- int revision,
- int timing_level)
-{
- struct drm_display_mode *mode;
- int hsize, vsize;
- int vrefresh_rate;
- unsigned aspect_ratio = (t->vfreq_aspect & EDID_TIMING_ASPECT_MASK)
- >> EDID_TIMING_ASPECT_SHIFT;
- unsigned vfreq = (t->vfreq_aspect & EDID_TIMING_VFREQ_MASK)
- >> EDID_TIMING_VFREQ_SHIFT;
-
- if (bad_std_timing(t->hsize, t->vfreq_aspect))
- return NULL;
-
- /* According to the EDID spec, the hdisplay = hsize * 8 + 248 */
- hsize = t->hsize * 8 + 248;
- /* vrefresh_rate = vfreq + 60 */
- vrefresh_rate = vfreq + 60;
- /* the vdisplay is calculated based on the aspect ratio */
- if (aspect_ratio == 0) {
- if (revision < 3)
- vsize = hsize;
- else
- vsize = (hsize * 10) / 16;
- } else if (aspect_ratio == 1)
- vsize = (hsize * 3) / 4;
- else if (aspect_ratio == 2)
- vsize = (hsize * 4) / 5;
- else
- vsize = (hsize * 9) / 16;
- /* HDTV hack */
- if (hsize == 1360 && vsize == 765 && vrefresh_rate == 60) {
- mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0,
- false);
- mode->hdisplay = 1366;
- mode->vsync_start = mode->vsync_start - 1;
- mode->vsync_end = mode->vsync_end - 1;
- return mode;
- }
- mode = NULL;
- /* check whether it can be found in default mode table */
- mode = drm_find_dmt(dev, hsize, vsize, vrefresh_rate);
- if (mode)
- return mode;
-
- switch (timing_level) {
- case LEVEL_DMT:
- break;
- case LEVEL_GTF:
- mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
- break;
- case LEVEL_CVT:
- mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0,
- false);
- break;
- }
- return mode;
-}
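
A worked example of the decode above (field values assumed for illustration, using the usual EDID standard-timing layout): { hsize = 0x61, vfreq_aspect = 0x4f } expands to a 1024x768 mode at 75 Hz.

	/* hsize byte 0x61:   0x61 * 8 + 248              = 1024  (hdisplay)      */
	/* vfreq_aspect 0x4f: vfreq  = 0x4f & 0x3f        = 15 -> 15 + 60 = 75 Hz */
	/*                    aspect = (0x4f & 0xc0) >> 6 = 1  -> 4:3             */
	/* vsize:             1024 * 3 / 4                = 768   (vdisplay)      */
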
-
-/**
- * drm_mode_detailed - create a new mode from an EDID detailed timing section
- * @dev: DRM device (needed to create new mode)
- * @edid: EDID block
- * @timing: EDID detailed timing info
- * @quirks: quirks to apply
- *
- * An EDID detailed timing block contains enough info for us to create and
- * return a new struct drm_display_mode.
- */
-static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
- struct edid *edid,
- struct detailed_timing *timing,
- u32 quirks)
-{
- struct drm_display_mode *mode;
- struct detailed_pixel_timing *pt = &timing->data.pixel_data;
- unsigned hactive = (pt->hactive_hblank_hi & 0xf0) << 4 | pt->hactive_lo;
- unsigned vactive = (pt->vactive_vblank_hi & 0xf0) << 4 | pt->vactive_lo;
- unsigned hblank = (pt->hactive_hblank_hi & 0xf) << 8 | pt->hblank_lo;
- unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo;
- unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) << 2 | pt->hsync_offset_lo;
- unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) << 4 | pt->hsync_pulse_width_lo;
- unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) >> 2 | pt->vsync_offset_pulse_width_lo >> 4;
- unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 4 | (pt->vsync_offset_pulse_width_lo & 0xf);
-
- /* ignore tiny modes */
- if (hactive < 64 || vactive < 64)
- return NULL;
-
- if (pt->misc & DRM_EDID_PT_STEREO) {
- printk(KERN_WARNING "stereo mode not supported\n");
- return NULL;
- }
- if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) {
- printk(KERN_WARNING "integrated sync not supported\n");
- return NULL;
- }
-
- /* it is incorrect if hsync/vsync width is zero */
- if (!hsync_pulse_width || !vsync_pulse_width) {
- DRM_DEBUG_KMS("Incorrect Detailed timing. "
- "Wrong Hsync/Vsync pulse width\n");
- return NULL;
- }
- mode = drm_mode_create(dev);
- if (!mode)
- return NULL;
-
- mode->type = DRM_MODE_TYPE_DRIVER;
-
- if (quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH)
- timing->pixel_clock = cpu_to_le16(1088);
-
- mode->clock = le16_to_cpu(timing->pixel_clock) * 10;
-
- mode->hdisplay = hactive;
- mode->hsync_start = mode->hdisplay + hsync_offset;
- mode->hsync_end = mode->hsync_start + hsync_pulse_width;
- mode->htotal = mode->hdisplay + hblank;
-
- mode->vdisplay = vactive;
- mode->vsync_start = mode->vdisplay + vsync_offset;
- mode->vsync_end = mode->vsync_start + vsync_pulse_width;
- mode->vtotal = mode->vdisplay + vblank;
-
- /* perform the basic check for the detailed timing */
- if (mode->hsync_end > mode->htotal ||
- mode->vsync_end > mode->vtotal) {
- drm_mode_destroy(dev, mode);
- DRM_DEBUG_KMS("Incorrect detailed timing. "
- "Sync is beyond the blank.\n");
- return NULL;
- }
-
- /* Some EDIDs have bogus h/vtotal values */
- if (mode->hsync_end > mode->htotal)
- mode->htotal = mode->hsync_end + 1;
- if (mode->vsync_end > mode->vtotal)
- mode->vtotal = mode->vsync_end + 1;
-
- drm_mode_set_name(mode);
-
- if (pt->misc & DRM_EDID_PT_INTERLACED)
- mode->flags |= DRM_MODE_FLAG_INTERLACE;
-
- if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
- pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
- }
-
- mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ?
- DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
- mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
- DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
-
- mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4;
- mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf) << 8;
-
- if (quirks & EDID_QUIRK_DETAILED_IN_CM) {
- mode->width_mm *= 10;
- mode->height_mm *= 10;
- }
-
- if (quirks & EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE) {
- mode->width_mm = edid->width_cm * 10;
- mode->height_mm = edid->height_cm * 10;
- }
-
- return mode;
-}
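
A worked example of the 12-bit field assembly above (bytes assumed for illustration): a 1920-pixel-wide detailed timing stores the low 8 bits of hactive in hactive_lo and the top 4 bits in the high nibble of hactive_hblank_hi.

	/* hactive_lo        = 0x80                                    */
	/* hactive_hblank_hi = 0x7b  (high nibble 0x7 = hactive 11:8,
	 *                            low nibble 0xb = hblank 11:8)    */
	/* hactive = ((0x7b & 0xf0) << 4) | 0x80 = 0x780 = 1920        */
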
-
-/*
- * Detailed mode info for the EDID "established modes" data to use.
- */
-static struct drm_display_mode edid_est_modes[] = {
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
- 968, 1056, 0, 600, 601, 605, 628, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@60Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
- 896, 1024, 0, 600, 601, 603, 625, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@56Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
- 720, 840, 0, 480, 481, 484, 500, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
- 704, 832, 0, 480, 489, 491, 520, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704,
- 768, 864, 0, 480, 483, 486, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@67Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25200, 640, 656,
- 752, 800, 0, 480, 490, 492, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@60Hz */
- { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738,
- 846, 900, 0, 400, 421, 423, 449, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 720x400@88Hz */
- { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 28320, 720, 738,
- 846, 900, 0, 400, 412, 414, 449, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 720x400@70Hz */
- { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
- 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@75Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78800, 1024, 1040,
- 1136, 1312, 0, 768, 769, 772, 800, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@75Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
- 1184, 1328, 0, 768, 771, 777, 806, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@70Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
- 1184, 1344, 0, 768, 771, 777, 806, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@60Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032,
- 1208, 1264, 0, 768, 768, 776, 817, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 1024x768@43Hz */
- { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 57284, 832, 864,
- 928, 1152, 0, 624, 625, 628, 667, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 832x624@75Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
- 896, 1056, 0, 600, 601, 604, 625, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@75Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
- 976, 1040, 0, 600, 637, 643, 666, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@72Hz */
- { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
- 1344, 1600, 0, 864, 865, 868, 900, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */
-};
-
-#define EDID_EST_TIMINGS 16
-#define EDID_STD_TIMINGS 8
-#define EDID_DETAILED_TIMINGS 4
-
-/**
- * add_established_modes - get est. modes from EDID and add them
- * @edid: EDID block to scan
- *
- * Each EDID block contains a bitmap of the supported "established modes" list
- * (defined above). Tease them out and add them to the global modes list.
- */
-static int add_established_modes(struct drm_connector *connector, struct edid *edid)
-{
- struct drm_device *dev = connector->dev;
- unsigned long est_bits = edid->established_timings.t1 |
- (edid->established_timings.t2 << 8) |
- ((edid->established_timings.mfg_rsvd & 0x80) << 9);
- int i, modes = 0;
-
- for (i = 0; i <= EDID_EST_TIMINGS; i++)
- if (est_bits & (1<<i)) {
- struct drm_display_mode *newmode;
- newmode = drm_mode_duplicate(dev, &edid_est_modes[i]);
- if (newmode) {
- drm_mode_probed_add(connector, newmode);
- modes++;
- }
- }
-
- return modes;
-}
-/**
- * standard_timing_level - get std. timing level (CVT/GTF/DMT)
- * @edid: EDID block to scan
- */
-static int standard_timing_level(struct edid *edid)
-{
- if (edid->revision >= 2) {
- if (edid->revision >= 4 && (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF))
- return LEVEL_CVT;
- return LEVEL_GTF;
- }
- return LEVEL_DMT;
-}
-
-/**
- * add_standard_modes - get std. modes from EDID and add them
- * @edid: EDID block to scan
- *
- * Standard modes can be calculated using the CVT standard. Grab them from
- * @edid, calculate them, and add them to the list.
- */
-static int add_standard_modes(struct drm_connector *connector, struct edid *edid)
-{
- struct drm_device *dev = connector->dev;
- int i, modes = 0;
- int timing_level;
-
- timing_level = standard_timing_level(edid);
-
- for (i = 0; i < EDID_STD_TIMINGS; i++) {
- struct std_timing *t = &edid->standard_timings[i];
- struct drm_display_mode *newmode;
-
-		/* If both std timing bytes are 0x01, the entry is unused */
- if (t->hsize == 1 && t->vfreq_aspect == 1)
- continue;
-
- newmode = drm_mode_std(dev, &edid->standard_timings[i],
- edid->revision, timing_level);
- if (newmode) {
- drm_mode_probed_add(connector, newmode);
- modes++;
- }
- }
-
- return modes;
-}
-
-/*
- * XXX fix this for:
- * - GTF secondary curve formula
- * - EDID 1.4 range offsets
- * - CVT extended bits
- */
-static bool
-mode_in_range(struct drm_display_mode *mode, struct detailed_timing *timing)
-{
- struct detailed_data_monitor_range *range;
- int hsync, vrefresh;
-
- range = &timing->data.other_data.data.range;
-
- hsync = drm_mode_hsync(mode);
- vrefresh = drm_mode_vrefresh(mode);
-
- if (hsync < range->min_hfreq_khz || hsync > range->max_hfreq_khz)
- return false;
-
- if (vrefresh < range->min_vfreq || vrefresh > range->max_vfreq)
- return false;
-
- if (range->pixel_clock_mhz && range->pixel_clock_mhz != 0xff) {
- /* be forgiving since it's in units of 10MHz */
- int max_clock = range->pixel_clock_mhz * 10 + 9;
- max_clock *= 1000;
- if (mode->clock > max_clock)
- return false;
- }
-
- return true;
-}
-
-/*
- * XXX If drm_dmt_modes ever regrows the CVT-R modes (and it will) this will
- * need to account for them.
- */
-static int drm_gtf_modes_for_range(struct drm_connector *connector,
- struct detailed_timing *timing)
-{
- int i, modes = 0;
- struct drm_display_mode *newmode;
- struct drm_device *dev = connector->dev;
-
- for (i = 0; i < drm_num_dmt_modes; i++) {
- if (mode_in_range(drm_dmt_modes + i, timing)) {
- newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
- if (newmode) {
- drm_mode_probed_add(connector, newmode);
- modes++;
- }
- }
- }
-
- return modes;
-}
-
-static int drm_cvt_modes(struct drm_connector *connector,
- struct detailed_timing *timing)
-{
- int i, j, modes = 0;
- struct drm_display_mode *newmode;
- struct drm_device *dev = connector->dev;
- struct cvt_timing *cvt;
- const int rates[] = { 60, 85, 75, 60, 50 };
-
- for (i = 0; i < 4; i++) {
- int width, height;
- cvt = &(timing->data.other_data.data.cvt[i]);
-
-		height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 4) + 1) * 2;
- switch (cvt->code[1] & 0xc0) {
- case 0x00:
- width = height * 4 / 3;
- break;
- case 0x40:
- width = height * 16 / 9;
- break;
- case 0x80:
- width = height * 16 / 10;
- break;
- case 0xc0:
- width = height * 15 / 9;
- break;
- }
-
- for (j = 1; j < 5; j++) {
- if (cvt->code[2] & (1 << j)) {
- newmode = drm_cvt_mode(dev, width, height,
- rates[j], j == 0,
- false, false);
- if (newmode) {
- drm_mode_probed_add(connector, newmode);
- modes++;
- }
- }
- }
- }
-
- return modes;
-}
-
-static int add_detailed_modes(struct drm_connector *connector,
- struct detailed_timing *timing,
- struct edid *edid, u32 quirks, int preferred)
-{
- int i, modes = 0;
- struct detailed_non_pixel *data = &timing->data.other_data;
- int timing_level = standard_timing_level(edid);
- int gtf = (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF);
- struct drm_display_mode *newmode;
- struct drm_device *dev = connector->dev;
-
- if (timing->pixel_clock) {
- newmode = drm_mode_detailed(dev, edid, timing, quirks);
- if (!newmode)
- return 0;
-
- if (preferred)
- newmode->type |= DRM_MODE_TYPE_PREFERRED;
-
- drm_mode_probed_add(connector, newmode);
- return 1;
- }
-
- /* other timing types */
- switch (data->type) {
- case EDID_DETAIL_MONITOR_RANGE:
- if (gtf)
- modes += drm_gtf_modes_for_range(connector, timing);
- break;
- case EDID_DETAIL_STD_MODES:
- /* Six modes per detailed section */
- for (i = 0; i < 6; i++) {
- struct std_timing *std;
- struct drm_display_mode *newmode;
-
- std = &data->data.timings[i];
- newmode = drm_mode_std(dev, std, edid->revision,
- timing_level);
- if (newmode) {
- drm_mode_probed_add(connector, newmode);
- modes++;
- }
- }
- break;
- case EDID_DETAIL_CVT_3BYTE:
- modes += drm_cvt_modes(connector, timing);
- break;
- default:
- break;
- }
-
- return modes;
-}
-
-/**
- * add_detailed_info - get detailed mode info from EDID data
- * @connector: attached connector
- * @edid: EDID block to scan
- * @quirks: quirks to apply
- *
- * Some of the detailed timing sections may contain mode information. Grab
- * it and add it to the list.
- */
-static int add_detailed_info(struct drm_connector *connector,
- struct edid *edid, u32 quirks)
-{
- int i, modes = 0;
-
- for (i = 0; i < EDID_DETAILED_TIMINGS; i++) {
- struct detailed_timing *timing = &edid->detailed_timings[i];
- int preferred = (i == 0) && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);
-
- /* In 1.0, only timings are allowed */
- if (!timing->pixel_clock && edid->version == 1 &&
- edid->revision == 0)
- continue;
-
- modes += add_detailed_modes(connector, timing, edid, quirks,
- preferred);
- }
-
- return modes;
-}
-
-/**
- * add_detailed_info_eedid - get detailed mode info from additional timing
- * EDID block
- * @connector: attached connector
- * @edid: EDID block to scan (only used to locate the additional timing EDID block)
- * @quirks: quirks to apply
- *
- * Some of the detailed timing sections may contain mode information. Grab
- * it and add it to the list.
- */
-static int add_detailed_info_eedid(struct drm_connector *connector,
- struct edid *edid, u32 quirks)
-{
- int i, modes = 0;
- char *edid_ext = NULL;
- struct detailed_timing *timing;
- int edid_ext_num;
- int start_offset, end_offset;
- int timing_level;
-
- if (edid->version == 1 && edid->revision < 3) {
- /* If the EDID version is less than 1.3, there is no
- * extension EDID.
- */
- return 0;
- }
- if (!edid->extensions) {
- /* if there is no extension EDID, it is unnecessary to
- * parse the E-EDID to get detailed info
- */
- return 0;
- }
-
-	/* Choose the real EDID extension number */
- edid_ext_num = edid->extensions > MAX_EDID_EXT_NUM ?
- MAX_EDID_EXT_NUM : edid->extensions;
-
- /* Find CEA extension */
- for (i = 0; i < edid_ext_num; i++) {
- edid_ext = (char *)edid + EDID_LENGTH * (i + 1);
- /* This block is CEA extension */
- if (edid_ext[0] == 0x02)
- break;
- }
-
- if (i == edid_ext_num) {
- /* if there is no additional timing EDID block, return */
- return 0;
- }
-
- /* Get the start offset of detailed timing block */
- start_offset = edid_ext[2];
- if (start_offset == 0) {
- /* If the start_offset is zero, it means that neither detailed
-	 * info nor data blocks exist. In such a case it is also
- * unnecessary to parse the detailed timing info.
- */
- return 0;
- }
-
- timing_level = standard_timing_level(edid);
- end_offset = EDID_LENGTH;
- end_offset -= sizeof(struct detailed_timing);
- for (i = start_offset; i < end_offset;
- i += sizeof(struct detailed_timing)) {
- timing = (struct detailed_timing *)(edid_ext + i);
- modes += add_detailed_modes(connector, timing, edid, quirks, 0);
- }
-
- return modes;
-}
-
-#define DDC_ADDR 0x50
-/**
- * Get EDID information via I2C.
- *
- * \param adapter : i2c device adapter
- * \param buf : EDID data buffer to be filled
- * \param len : EDID data buffer length
- * \return 0 on success or -1 on failure.
- *
- * Try to fetch EDID information by calling i2c driver function.
- */
-int drm_do_probe_ddc_edid(struct i2c_adapter *adapter,
- unsigned char *buf, int len)
-{
- unsigned char start = 0x0;
- struct i2c_msg msgs[] = {
- {
- .addr = DDC_ADDR,
- .flags = 0,
- .len = 1,
- .buf = &start,
- }, {
- .addr = DDC_ADDR,
- .flags = I2C_M_RD,
- .len = len,
- .buf = buf,
- }
- };
-
- if (i2c_transfer(adapter, msgs, 2) == 2)
- return 0;
-
- return -1;
-}
-EXPORT_SYMBOL(drm_do_probe_ddc_edid);
-
-static int drm_ddc_read_edid(struct drm_connector *connector,
- struct i2c_adapter *adapter,
- char *buf, int len)
-{
- int i;
-
- for (i = 0; i < 4; i++) {
- if (drm_do_probe_ddc_edid(adapter, buf, len))
- return -1;
- if (edid_is_valid((struct edid *)buf))
- return 0;
- }
-
- /* repeated checksum failures; warn, but carry on */
- dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
- drm_get_connector_name(connector));
- return -1;
-}
-
-/**
- * drm_get_edid - get EDID data, if available
- * @connector: connector we're probing
- * @adapter: i2c adapter to use for DDC
- *
- * Poke the given connector's i2c channel to grab EDID data if possible.
- *
- * Return edid data or NULL if we couldn't find any.
- */
-struct edid *drm_get_edid(struct drm_connector *connector,
- struct i2c_adapter *adapter)
-{
- int ret;
- struct edid *edid;
-
- edid = kmalloc(EDID_LENGTH * (MAX_EDID_EXT_NUM + 1),
- GFP_KERNEL);
- if (edid == NULL) {
- dev_warn(&connector->dev->pdev->dev,
- "Failed to allocate EDID\n");
- goto end;
- }
-
- /* Read first EDID block */
- ret = drm_ddc_read_edid(connector, adapter,
- (unsigned char *)edid, EDID_LENGTH);
- if (ret != 0)
- goto clean_up;
-
- /* There are EDID extensions to be read */
- if (edid->extensions != 0) {
- int edid_ext_num = edid->extensions;
-
- if (edid_ext_num > MAX_EDID_EXT_NUM) {
- dev_warn(&connector->dev->pdev->dev,
- "The number of extension(%d) is "
- "over max (%d), actually read number (%d)\n",
- edid_ext_num, MAX_EDID_EXT_NUM,
- MAX_EDID_EXT_NUM);
- /* Reset EDID extension number to be read */
- edid_ext_num = MAX_EDID_EXT_NUM;
- }
- /* Read EDID including extensions too */
- ret = drm_ddc_read_edid(connector, adapter, (char *)edid,
- EDID_LENGTH * (edid_ext_num + 1));
- if (ret != 0)
- goto clean_up;
-
- }
-
- connector->display_info.raw_edid = (char *)edid;
- goto end;
-
-clean_up:
- kfree(edid);
- edid = NULL;
-end:
- return edid;
-
-}
-EXPORT_SYMBOL(drm_get_edid);
-
-#define HDMI_IDENTIFIER 0x000C03
-#define VENDOR_BLOCK 0x03
-/**
- * drm_detect_hdmi_monitor - detect whether monitor is hdmi.
- * @edid: monitor EDID information
- *
- * Parse the CEA extension according to CEA-861-B.
- * Return true if HDMI, false if not or unknown.
- */
-bool drm_detect_hdmi_monitor(struct edid *edid)
-{
- char *edid_ext = NULL;
- int i, hdmi_id, edid_ext_num;
- int start_offset, end_offset;
- bool is_hdmi = false;
-
- /* No EDID or EDID extensions */
- if (edid == NULL || edid->extensions == 0)
- goto end;
-
-	/* Choose the real EDID extension number */
- edid_ext_num = edid->extensions > MAX_EDID_EXT_NUM ?
- MAX_EDID_EXT_NUM : edid->extensions;
-
- /* Find CEA extension */
- for (i = 0; i < edid_ext_num; i++) {
- edid_ext = (char *)edid + EDID_LENGTH * (i + 1);
- /* This block is CEA extension */
- if (edid_ext[0] == 0x02)
- break;
- }
-
- if (i == edid_ext_num)
- goto end;
-
- /* Data block offset in CEA extension block */
- start_offset = 4;
- end_offset = edid_ext[2];
-
- /*
-	 * Because the HDMI identifier is in the Vendor Specific Block,
-	 * search for it in all data blocks of the CEA extension.
- */
- for (i = start_offset; i < end_offset;
- /* Increased by data block len */
- i += ((edid_ext[i] & 0x1f) + 1)) {
- /* Find vendor specific block */
- if ((edid_ext[i] >> 5) == VENDOR_BLOCK) {
- hdmi_id = edid_ext[i + 1] | (edid_ext[i + 2] << 8) |
- edid_ext[i + 3] << 16;
- /* Find HDMI identifier */
- if (hdmi_id == HDMI_IDENTIFIER)
- is_hdmi = true;
- break;
- }
- }
-
-end:
- return is_hdmi;
-}
-EXPORT_SYMBOL(drm_detect_hdmi_monitor);
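
A hedged usage sketch (the encoder_priv field is a placeholder, not part of this file): a KMS driver would typically run this on the EDID it has just fetched and cache the result for later infoframe/audio decisions.

	/* edid obtained earlier, e.g. from drm_get_edid() */
	encoder_priv->hdmi_sink = drm_detect_hdmi_monitor(edid);
	if (encoder_priv->hdmi_sink)
		DRM_DEBUG_KMS("sink is HDMI, enabling HDMI-only features\n");
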
-
-/**
- * drm_add_edid_modes - add modes from EDID data, if available
- * @connector: connector we're probing
- * @edid: edid data
- *
- * Add the specified modes to the connector's mode list.
- *
- * Return number of modes added or 0 if we couldn't find any.
- */
-int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
-{
- int num_modes = 0;
- u32 quirks;
-
- if (edid == NULL) {
- return 0;
- }
- if (!edid_is_valid(edid)) {
- dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
- drm_get_connector_name(connector));
- return 0;
- }
-
- quirks = edid_get_quirks(edid);
-
- num_modes += add_established_modes(connector, edid);
- num_modes += add_standard_modes(connector, edid);
- num_modes += add_detailed_info(connector, edid, quirks);
- num_modes += add_detailed_info_eedid(connector, edid, quirks);
-
- if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
- edid_fixup_preferred(connector, quirks);
-
- connector->display_info.serration_vsync = (edid->input & DRM_EDID_INPUT_SERRATION_VSYNC) ? 1 : 0;
- connector->display_info.sync_on_green = (edid->input & DRM_EDID_INPUT_SYNC_ON_GREEN) ? 1 : 0;
- connector->display_info.composite_sync = (edid->input & DRM_EDID_INPUT_COMPOSITE_SYNC) ? 1 : 0;
- connector->display_info.separate_syncs = (edid->input & DRM_EDID_INPUT_SEPARATE_SYNCS) ? 1 : 0;
- connector->display_info.blank_to_black = (edid->input & DRM_EDID_INPUT_BLANK_TO_BLACK) ? 1 : 0;
- connector->display_info.video_level = (edid->input & DRM_EDID_INPUT_VIDEO_LEVEL) >> 5;
- connector->display_info.digital = (edid->input & DRM_EDID_INPUT_DIGITAL) ? 1 : 0;
- connector->display_info.width_mm = edid->width_cm * 10;
- connector->display_info.height_mm = edid->height_cm * 10;
- connector->display_info.gamma = edid->gamma;
- connector->display_info.gtf_supported = (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF) ? 1 : 0;
- connector->display_info.standard_color = (edid->features & DRM_EDID_FEATURE_STANDARD_COLOR) ? 1 : 0;
- connector->display_info.display_type = (edid->features & DRM_EDID_FEATURE_DISPLAY_TYPE) >> 3;
- connector->display_info.active_off_supported = (edid->features & DRM_EDID_FEATURE_PM_ACTIVE_OFF) ? 1 : 0;
- connector->display_info.suspend_supported = (edid->features & DRM_EDID_FEATURE_PM_SUSPEND) ? 1 : 0;
- connector->display_info.standby_supported = (edid->features & DRM_EDID_FEATURE_PM_STANDBY) ? 1 : 0;
- connector->display_info.gamma = edid->gamma;
-
- return num_modes;
-}
-EXPORT_SYMBOL(drm_add_edid_modes);
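
A hedged sketch of how a driver's connector ->get_modes() hook would combine drm_get_edid() and drm_add_edid_modes(); the xxx names and the ddc adapter field are placeholders, not part of this file.

	static int xxx_connector_get_modes(struct drm_connector *connector)
	{
		struct xxx_connector *xconn = to_xxx_connector(connector); /* hypothetical */
		struct edid *edid;
		int count = 0;

		edid = drm_get_edid(connector, xconn->ddc);	/* i2c_adapter used for DDC */
		if (edid) {
			count = drm_add_edid_modes(connector, edid);
			connector->display_info.raw_edid = NULL;
			kfree(edid);
		}

		return count;
	}
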
-
-/**
- * drm_add_modes_noedid - add modes for the connectors without EDID
- * @connector: connector we're probing
- * @hdisplay: the horizontal display limit
- * @vdisplay: the vertical display limit
- *
- * Add the specified modes to the connector's mode list. A mode is added
- * only when its hdisplay/vdisplay does not exceed the given limits.
- *
- * Return number of modes added or 0 if we couldn't find any.
- */
-int drm_add_modes_noedid(struct drm_connector *connector,
- int hdisplay, int vdisplay)
-{
- int i, count, num_modes = 0;
- struct drm_display_mode *mode, *ptr;
- struct drm_device *dev = connector->dev;
-
- count = sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
- if (hdisplay < 0)
- hdisplay = 0;
- if (vdisplay < 0)
- vdisplay = 0;
-
- for (i = 0; i < count; i++) {
- ptr = &drm_dmt_modes[i];
- if (hdisplay && vdisplay) {
- /*
-			 * Only when both limits are valid are they used to
-			 * check whether the mode should be added to the
-			 * connector's mode list.
- */
- if (ptr->hdisplay > hdisplay ||
- ptr->vdisplay > vdisplay)
- continue;
- }
- if (drm_mode_vrefresh(ptr) > 61)
- continue;
- mode = drm_mode_duplicate(dev, ptr);
- if (mode) {
- drm_mode_probed_add(connector, mode);
- num_modes++;
- }
- }
- return num_modes;
-}
-EXPORT_SYMBOL(drm_add_modes_noedid);
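
A hedged usage sketch (the 1024x768 limit is an arbitrary example): for an output with no usable DDC, a driver can still populate the probed-mode list from the DMT table.

	/* no EDID available: offer ~60 Hz DMT modes up to 1024x768 */
	count = drm_add_modes_noedid(connector, 1024, 768);
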
+++ /dev/null
-/*
- * Copyright (C) 2009 Francisco Jerez.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "drm_encoder_slave.h"
-
-/**
- * drm_i2c_encoder_init - Initialize an I2C slave encoder
- * @dev: DRM device.
- * @encoder: Encoder to be attached to the I2C device. You aren't
- * required to have called drm_encoder_init() before.
- * @adap: I2C adapter that will be used to communicate with
- * the device.
- * @info: Information that will be used to create the I2C device.
- * Required fields are @addr and @type.
- *
- * Create an I2C device on the specified bus (the module containing its
- * driver is transparently loaded) and attach it to the specified
- * &drm_encoder_slave. The @slave_funcs field will be initialized with
- * the hooks provided by the slave driver.
- *
- * Returns 0 on success or a negative errno on failure, in particular,
- * -ENODEV is returned when no matching driver is found.
- */
-int drm_i2c_encoder_init(struct drm_device *dev,
- struct drm_encoder_slave *encoder,
- struct i2c_adapter *adap,
- const struct i2c_board_info *info)
-{
- char modalias[sizeof(I2C_MODULE_PREFIX)
- + I2C_NAME_SIZE];
- struct module *module = NULL;
- struct i2c_client *client;
- struct drm_i2c_encoder_driver *encoder_drv;
- int err = 0;
-
- snprintf(modalias, sizeof(modalias),
- "%s%s", I2C_MODULE_PREFIX, info->type);
- request_module(modalias);
-
- client = i2c_new_device(adap, info);
- if (!client) {
- err = -ENOMEM;
- goto fail;
- }
-
- if (!client->driver) {
- err = -ENODEV;
- goto fail_unregister;
- }
-
- module = client->driver->driver.owner;
- if (!try_module_get(module)) {
- err = -ENODEV;
- goto fail_unregister;
- }
-
- encoder->bus_priv = client;
-
- encoder_drv = to_drm_i2c_encoder_driver(client->driver);
-
- err = encoder_drv->encoder_init(client, dev, encoder);
- if (err)
- goto fail_unregister;
-
- return 0;
-
-fail_unregister:
- i2c_unregister_device(client);
- module_put(module);
-fail:
- return err;
-}
-EXPORT_SYMBOL(drm_i2c_encoder_init);
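
A hedged usage sketch (driver name, I2C address and adapter variable are placeholders): the caller describes the slave chip with an i2c_board_info and lets this helper bind the encoder to it.

	struct i2c_board_info info = {
		I2C_BOARD_INFO("xxx-encoder", 0x38),	/* placeholder name/address */
	};
	int ret;

	ret = drm_i2c_encoder_init(dev, &xxx->slave_encoder, ddc_adapter, &info);
	if (ret < 0)
		DRM_ERROR("failed to initialise slave encoder: %d\n", ret);
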
-
-/**
- * drm_i2c_encoder_destroy - Unregister the I2C device backing an encoder
- * @drm_encoder: Encoder to be unregistered.
- *
- * This should be called from the @destroy method of an I2C slave
- * encoder driver once I2C access is no longer needed.
- */
-void drm_i2c_encoder_destroy(struct drm_encoder *drm_encoder)
-{
- struct drm_encoder_slave *encoder = to_encoder_slave(drm_encoder);
- struct i2c_client *client = drm_i2c_encoder_get_client(drm_encoder);
- struct module *module = client->driver->driver.owner;
-
- i2c_unregister_device(client);
- encoder->bus_priv = NULL;
-
- module_put(module);
-}
-EXPORT_SYMBOL(drm_i2c_encoder_destroy);
+++ /dev/null
-/**
- * \file drm_fops.c
- * File operations for DRM
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Daryll Strauss <daryll@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "drmP.h"
-#include <linux/poll.h>
-#include <linux/smp_lock.h>
-
-static int drm_open_helper(struct inode *inode, struct file *filp,
- struct drm_device * dev);
-
-static int drm_setup(struct drm_device * dev)
-{
- int i;
- int ret;
-
- if (dev->driver->firstopen) {
- ret = dev->driver->firstopen(dev);
- if (ret != 0)
- return ret;
- }
-
- atomic_set(&dev->ioctl_count, 0);
- atomic_set(&dev->vma_count, 0);
-
- if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
- !drm_core_check_feature(dev, DRIVER_MODESET)) {
- dev->buf_use = 0;
- atomic_set(&dev->buf_alloc, 0);
-
- i = drm_dma_setup(dev);
- if (i < 0)
- return i;
- }
-
- for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
- atomic_set(&dev->counts[i], 0);
-
- dev->sigdata.lock = NULL;
-
- dev->queue_count = 0;
- dev->queue_reserved = 0;
- dev->queue_slots = 0;
- dev->queuelist = NULL;
- dev->context_flag = 0;
- dev->interrupt_flag = 0;
- dev->dma_flag = 0;
- dev->last_context = 0;
- dev->last_switch = 0;
- dev->last_checked = 0;
- init_waitqueue_head(&dev->context_wait);
- dev->if_version = 0;
-
- dev->ctx_start = 0;
- dev->lck_start = 0;
-
- dev->buf_async = NULL;
- init_waitqueue_head(&dev->buf_readers);
- init_waitqueue_head(&dev->buf_writers);
-
- DRM_DEBUG("\n");
-
- /*
- * The kernel's context could be created here, but is now created
- * in drm_dma_enqueue. This is more resource-efficient for
- * hardware that does not do DMA, but may mean that
- * drm_select_queue fails between the time the interrupt is
- * initialized and the time the queues are initialized.
- */
-
- return 0;
-}
-
-/**
- * Open file.
- *
- * \param inode device inode
- * \param filp file pointer.
- * \return zero on success or a negative number on failure.
- *
- * Searches the DRM device with the same minor number, calls open_helper(), and
- * increments the device open count. If the open count was previously zero,
- * i.e., this is the first time the device has been opened, then calls setup().
- */
-int drm_open(struct inode *inode, struct file *filp)
-{
- struct drm_device *dev = NULL;
- int minor_id = iminor(inode);
- struct drm_minor *minor;
- int retcode = 0;
-
- minor = idr_find(&drm_minors_idr, minor_id);
- if (!minor)
- return -ENODEV;
-
- if (!(dev = minor->dev))
- return -ENODEV;
-
- retcode = drm_open_helper(inode, filp, dev);
- if (!retcode) {
- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
- spin_lock(&dev->count_lock);
- if (!dev->open_count++) {
- spin_unlock(&dev->count_lock);
- retcode = drm_setup(dev);
- goto out;
- }
- spin_unlock(&dev->count_lock);
- }
-out:
- mutex_lock(&dev->struct_mutex);
- if (minor->type == DRM_MINOR_LEGACY) {
- BUG_ON((dev->dev_mapping != NULL) &&
- (dev->dev_mapping != inode->i_mapping));
- if (dev->dev_mapping == NULL)
- dev->dev_mapping = inode->i_mapping;
- }
- mutex_unlock(&dev->struct_mutex);
-
- return retcode;
-}
-EXPORT_SYMBOL(drm_open);
-
-/**
- * File \c open operation.
- *
- * \param inode device inode.
- * \param filp file pointer.
- *
- * Puts the dev->fops corresponding to the device minor number into
- * \p filp, calls the \c open method, and restores the file operations.
- */
-int drm_stub_open(struct inode *inode, struct file *filp)
-{
- struct drm_device *dev = NULL;
- struct drm_minor *minor;
- int minor_id = iminor(inode);
- int err = -ENODEV;
- const struct file_operations *old_fops;
-
- DRM_DEBUG("\n");
-
- /* BKL pushdown: note that nothing else serializes idr_find() */
- lock_kernel();
- minor = idr_find(&drm_minors_idr, minor_id);
- if (!minor)
- goto out;
-
- if (!(dev = minor->dev))
- goto out;
-
- old_fops = filp->f_op;
- filp->f_op = fops_get(&dev->driver->fops);
- if (filp->f_op == NULL) {
- filp->f_op = old_fops;
- goto out;
- }
- if (filp->f_op->open && (err = filp->f_op->open(inode, filp))) {
- fops_put(filp->f_op);
- filp->f_op = fops_get(old_fops);
- }
- fops_put(old_fops);
-
-out:
- unlock_kernel();
- return err;
-}
-
-/**
- * Check whether DRI will run on this CPU.
- *
- * \return non-zero if the DRI will run on this CPU, or zero otherwise.
- */
-static int drm_cpu_valid(void)
-{
-#if defined(__i386__)
- if (boot_cpu_data.x86 == 3)
- return 0; /* No cmpxchg on a 386 */
-#endif
-#if defined(__sparc__) && !defined(__sparc_v9__)
- return 0; /* No cmpxchg before v9 sparc. */
-#endif
- return 1;
-}
-
-/**
- * Called whenever a process opens /dev/drm.
- *
- * \param inode device inode.
- * \param filp file pointer.
- * \param dev device.
- * \return zero on success or a negative number on failure.
- *
- * Creates and initializes a drm_file structure for the file private data in \p
- * filp and adds it into the doubly linked list in \p dev.
- */
-static int drm_open_helper(struct inode *inode, struct file *filp,
- struct drm_device * dev)
-{
- int minor_id = iminor(inode);
- struct drm_file *priv;
- int ret;
-
- if (filp->f_flags & O_EXCL)
- return -EBUSY; /* No exclusive opens */
- if (!drm_cpu_valid())
- return -EINVAL;
-
- DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id);
-
- priv = kmalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- memset(priv, 0, sizeof(*priv));
- filp->private_data = priv;
- priv->filp = filp;
- priv->uid = current_euid();
- priv->pid = task_pid_nr(current);
- priv->minor = idr_find(&drm_minors_idr, minor_id);
- priv->ioctl_count = 0;
- /* for compatibility root is always authenticated */
- priv->authenticated = capable(CAP_SYS_ADMIN);
- priv->lock_count = 0;
-
- INIT_LIST_HEAD(&priv->lhead);
- INIT_LIST_HEAD(&priv->fbs);
- INIT_LIST_HEAD(&priv->event_list);
- init_waitqueue_head(&priv->event_wait);
- priv->event_space = 4096; /* set aside 4k for event buffer */
-
- if (dev->driver->driver_features & DRIVER_GEM)
- drm_gem_open(dev, priv);
-
- if (dev->driver->open) {
- ret = dev->driver->open(dev, priv);
- if (ret < 0)
- goto out_free;
- }
-
-
- /* if there is no current master make this fd it */
- mutex_lock(&dev->struct_mutex);
- if (!priv->minor->master) {
- /* create a new master */
- priv->minor->master = drm_master_create(priv->minor);
- if (!priv->minor->master) {
- mutex_unlock(&dev->struct_mutex);
- ret = -ENOMEM;
- goto out_free;
- }
-
- priv->is_master = 1;
- /* take another reference for the copy in the local file priv */
- priv->master = drm_master_get(priv->minor->master);
-
- priv->authenticated = 1;
-
- mutex_unlock(&dev->struct_mutex);
- if (dev->driver->master_create) {
- ret = dev->driver->master_create(dev, priv->master);
- if (ret) {
- mutex_lock(&dev->struct_mutex);
- /* drop both references if this fails */
- drm_master_put(&priv->minor->master);
- drm_master_put(&priv->master);
- mutex_unlock(&dev->struct_mutex);
- goto out_free;
- }
- }
- mutex_lock(&dev->struct_mutex);
- if (dev->driver->master_set) {
- ret = dev->driver->master_set(dev, priv, true);
- if (ret) {
- /* drop both references if this fails */
- drm_master_put(&priv->minor->master);
- drm_master_put(&priv->master);
- mutex_unlock(&dev->struct_mutex);
- goto out_free;
- }
- }
- mutex_unlock(&dev->struct_mutex);
- } else {
- /* get a reference to the master */
- priv->master = drm_master_get(priv->minor->master);
- mutex_unlock(&dev->struct_mutex);
- }
-
- mutex_lock(&dev->struct_mutex);
- list_add(&priv->lhead, &dev->filelist);
- mutex_unlock(&dev->struct_mutex);
-
-#ifdef __alpha__
- /*
- * Default the hose
- */
- if (!dev->hose) {
- struct pci_dev *pci_dev;
- pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
- if (pci_dev) {
- dev->hose = pci_dev->sysdata;
- pci_dev_put(pci_dev);
- }
- if (!dev->hose) {
- struct pci_bus *b = pci_bus_b(pci_root_buses.next);
- if (b)
- dev->hose = b->sysdata;
- }
- }
-#endif
-
- return 0;
- out_free:
- kfree(priv);
- filp->private_data = NULL;
- return ret;
-}
-
-/** No-op. */
-int drm_fasync(int fd, struct file *filp, int on)
-{
- struct drm_file *priv = filp->private_data;
- struct drm_device *dev = priv->minor->dev;
-
- DRM_DEBUG("fd = %d, device = 0x%lx\n", fd,
- (long)old_encode_dev(priv->minor->device));
- return fasync_helper(fd, filp, on, &dev->buf_async);
-}
-EXPORT_SYMBOL(drm_fasync);
-
-/*
- * Reclaim locked buffers; note that this may be a bad idea if the current
- * context doesn't have the hw lock...
- */
-static void drm_reclaim_locked_buffers(struct drm_device *dev, struct file *f)
-{
- struct drm_file *file_priv = f->private_data;
-
- if (drm_i_have_hw_lock(dev, file_priv)) {
- dev->driver->reclaim_buffers_locked(dev, file_priv);
- } else {
- unsigned long _end = jiffies + 3 * DRM_HZ;
- int locked = 0;
-
- drm_idlelock_take(&file_priv->master->lock);
-
- /*
- * Wait for a while.
- */
- do {
- spin_lock_bh(&file_priv->master->lock.spinlock);
- locked = file_priv->master->lock.idle_has_lock;
- spin_unlock_bh(&file_priv->master->lock.spinlock);
- if (locked)
- break;
- schedule();
- } while (!time_after_eq(jiffies, _end));
-
- if (!locked) {
- DRM_ERROR("reclaim_buffers_locked() deadlock. Please rework this\n"
- "\tdriver to use reclaim_buffers_idlelocked() instead.\n"
- "\tI will go on reclaiming the buffers anyway.\n");
- }
-
- dev->driver->reclaim_buffers_locked(dev, file_priv);
- drm_idlelock_release(&file_priv->master->lock);
- }
-}
-
-static void drm_master_release(struct drm_device *dev, struct file *filp)
-{
- struct drm_file *file_priv = filp->private_data;
-
- if (dev->driver->reclaim_buffers_locked &&
- file_priv->master->lock.hw_lock)
- drm_reclaim_locked_buffers(dev, filp);
-
- if (dev->driver->reclaim_buffers_idlelocked &&
- file_priv->master->lock.hw_lock) {
- drm_idlelock_take(&file_priv->master->lock);
- dev->driver->reclaim_buffers_idlelocked(dev, file_priv);
- drm_idlelock_release(&file_priv->master->lock);
- }
-
-
- if (drm_i_have_hw_lock(dev, file_priv)) {
- DRM_DEBUG("File %p released, freeing lock for context %d\n",
- filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
- drm_lock_free(&file_priv->master->lock,
- _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
- }
-
- if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
- !dev->driver->reclaim_buffers_locked) {
- dev->driver->reclaim_buffers(dev, file_priv);
- }
-}
-
-static void drm_events_release(struct drm_file *file_priv)
-{
- struct drm_device *dev = file_priv->minor->dev;
- struct drm_pending_event *e, *et;
- struct drm_pending_vblank_event *v, *vt;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->event_lock, flags);
-
- /* Remove pending flips */
- list_for_each_entry_safe(v, vt, &dev->vblank_event_list, base.link)
- if (v->base.file_priv == file_priv) {
- list_del(&v->base.link);
- drm_vblank_put(dev, v->pipe);
- v->base.destroy(&v->base);
- }
-
- /* Remove unconsumed events */
- list_for_each_entry_safe(e, et, &file_priv->event_list, link)
- e->destroy(e);
-
- spin_unlock_irqrestore(&dev->event_lock, flags);
-}
-
-/**
- * Release file.
- *
- * \param inode device inode
- * \param file_priv DRM file private.
- * \return zero on success or a negative number on failure.
- *
- * If the hardware lock is held then free it, and take it again for the kernel
- * context since it's necessary to reclaim buffers. Unlink the file private
- * data from its list and free it. Decreases the open count and if it reaches
- * zero calls drm_lastclose().
- */
-int drm_release(struct inode *inode, struct file *filp)
-{
- struct drm_file *file_priv = filp->private_data;
- struct drm_device *dev = file_priv->minor->dev;
- int retcode = 0;
-
- lock_kernel();
-
- DRM_DEBUG("open_count = %d\n", dev->open_count);
-
- if (dev->driver->preclose)
- dev->driver->preclose(dev, file_priv);
-
- /* ========================================================
- * Begin inline drm_release
- */
-
- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
- task_pid_nr(current),
- (long)old_encode_dev(file_priv->minor->device),
- dev->open_count);
-
- /* if the master has gone away we can't do anything with the lock */
- if (file_priv->minor->master)
- drm_master_release(dev, filp);
-
- drm_events_release(file_priv);
-
- if (dev->driver->driver_features & DRIVER_GEM)
- drm_gem_release(dev, file_priv);
-
- if (dev->driver->driver_features & DRIVER_MODESET)
- drm_fb_release(file_priv);
-
- mutex_lock(&dev->ctxlist_mutex);
- if (!list_empty(&dev->ctxlist)) {
- struct drm_ctx_list *pos, *n;
-
- list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
- if (pos->tag == file_priv &&
- pos->handle != DRM_KERNEL_CONTEXT) {
- if (dev->driver->context_dtor)
- dev->driver->context_dtor(dev,
- pos->handle);
-
- drm_ctxbitmap_free(dev, pos->handle);
-
- list_del(&pos->head);
- kfree(pos);
- --dev->ctx_count;
- }
- }
- }
- mutex_unlock(&dev->ctxlist_mutex);
-
- mutex_lock(&dev->struct_mutex);
-
- if (file_priv->is_master) {
- struct drm_master *master = file_priv->master;
- struct drm_file *temp;
- list_for_each_entry(temp, &dev->filelist, lhead) {
- if ((temp->master == file_priv->master) &&
- (temp != file_priv))
- temp->authenticated = 0;
- }
-
- /**
- * Since the master is disappearing, so is the
- * possibility to lock.
- */
-
- if (master->lock.hw_lock) {
- if (dev->sigdata.lock == master->lock.hw_lock)
- dev->sigdata.lock = NULL;
- master->lock.hw_lock = NULL;
- master->lock.file_priv = NULL;
- wake_up_interruptible_all(&master->lock.lock_queue);
- }
-
- if (file_priv->minor->master == file_priv->master) {
- /* drop the reference held by the minor */
- if (dev->driver->master_drop)
- dev->driver->master_drop(dev, file_priv, true);
- drm_master_put(&file_priv->minor->master);
- }
- }
-
- /* drop the reference held by the file priv */
- drm_master_put(&file_priv->master);
- file_priv->is_master = 0;
- list_del(&file_priv->lhead);
- mutex_unlock(&dev->struct_mutex);
-
- if (dev->driver->postclose)
- dev->driver->postclose(dev, file_priv);
- kfree(file_priv);
-
- /* ========================================================
- * End inline drm_release
- */
-
- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
- spin_lock(&dev->count_lock);
- if (!--dev->open_count) {
- if (atomic_read(&dev->ioctl_count)) {
- DRM_ERROR("Device busy: %d\n",
- atomic_read(&dev->ioctl_count));
- spin_unlock(&dev->count_lock);
- unlock_kernel();
- return -EBUSY;
- }
- spin_unlock(&dev->count_lock);
- unlock_kernel();
- return drm_lastclose(dev);
- }
- spin_unlock(&dev->count_lock);
-
- unlock_kernel();
-
- return retcode;
-}
-EXPORT_SYMBOL(drm_release);
-
-static bool
-drm_dequeue_event(struct drm_file *file_priv,
- size_t total, size_t max, struct drm_pending_event **out)
-{
- struct drm_device *dev = file_priv->minor->dev;
- struct drm_pending_event *e;
- unsigned long flags;
- bool ret = false;
-
- spin_lock_irqsave(&dev->event_lock, flags);
-
- *out = NULL;
- if (list_empty(&file_priv->event_list))
- goto out;
- e = list_first_entry(&file_priv->event_list,
- struct drm_pending_event, link);
- if (e->event->length + total > max)
- goto out;
-
- file_priv->event_space += e->event->length;
- list_del(&e->link);
- *out = e;
- ret = true;
-
-out:
- spin_unlock_irqrestore(&dev->event_lock, flags);
- return ret;
-}
-
-ssize_t drm_read(struct file *filp, char __user *buffer,
- size_t count, loff_t *offset)
-{
- struct drm_file *file_priv = filp->private_data;
- struct drm_pending_event *e;
- size_t total;
- ssize_t ret;
-
- ret = wait_event_interruptible(file_priv->event_wait,
- !list_empty(&file_priv->event_list));
- if (ret < 0)
- return ret;
-
- total = 0;
- while (drm_dequeue_event(file_priv, total, count, &e)) {
- if (copy_to_user(buffer + total,
- e->event, e->event->length)) {
- total = -EFAULT;
- break;
- }
-
- total += e->event->length;
- e->destroy(e);
- }
-
- return total;
-}
-EXPORT_SYMBOL(drm_read);
-
-unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)
-{
- struct drm_file *file_priv = filp->private_data;
- unsigned int mask = 0;
-
- poll_wait(filp, &file_priv->event_wait, wait);
-
- if (!list_empty(&file_priv->event_list))
- mask |= POLLIN | POLLRDNORM;
-
- return mask;
-}
-EXPORT_SYMBOL(drm_poll);
+++ /dev/null
-/*
- * Copyright © 2008 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- * Eric Anholt <eric@anholt.net>
- *
- */
-
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/uaccess.h>
-#include <linux/fs.h>
-#include <linux/file.h>
-#include <linux/module.h>
-#include <linux/mman.h>
-#include <linux/pagemap.h>
-#include "drmP.h"
-
-/** @file drm_gem.c
- *
- * This file provides some of the base ioctls and library routines for
- * the graphics memory manager implemented by each device driver.
- *
- * Because various devices have different requirements in terms of
- * synchronization and migration strategies, implementing that is left up to
- * the driver, and all that the general API provides should be generic --
- * allocating objects, reading/writing data with the cpu, freeing objects.
- * Even there, platform-dependent optimizations for reading/writing data with
- * the CPU mean we'll likely hook those out to driver-specific calls. However,
- * the DRI2 implementation wants to have at least allocate/mmap be generic.
- *
- * The goal was to have swap-backed object allocation managed through
- * struct file. However, file descriptors as handles to a struct file have
- * two major failings:
- * - Process limits prevent more than 1024 or so being used at a time by
- * default.
- * - Inability to allocate high fds will aggravate the X Server's select()
- * handling, and likely that of many GL client applications as well.
- *
- * This led to a plan of using our own integer IDs (called handles, following
- * DRM terminology) to mimic fds, and implement the fd syscalls we need as
- * ioctls. The objects themselves will still include the struct file so
- * that we can transition to fds if the required kernel infrastructure shows
- * up at a later date, and as our interface with shmfs for memory allocation.
- */
-
-/*
- * We make up offsets for buffer objects so we can recognize them at
- * mmap time.
- */
-#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
-#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
-
-/**
- * Initialize the GEM device fields
- */
-
-int
-drm_gem_init(struct drm_device *dev)
-{
- struct drm_gem_mm *mm;
-
- spin_lock_init(&dev->object_name_lock);
- idr_init(&dev->object_name_idr);
- atomic_set(&dev->object_count, 0);
- atomic_set(&dev->object_memory, 0);
- atomic_set(&dev->pin_count, 0);
- atomic_set(&dev->pin_memory, 0);
- atomic_set(&dev->gtt_count, 0);
- atomic_set(&dev->gtt_memory, 0);
-
- mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
- if (!mm) {
- DRM_ERROR("out of memory\n");
- return -ENOMEM;
- }
-
- dev->mm_private = mm;
-
- if (drm_ht_create(&mm->offset_hash, 19)) {
- kfree(mm);
- return -ENOMEM;
- }
-
- if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
- DRM_FILE_PAGE_OFFSET_SIZE)) {
- drm_ht_remove(&mm->offset_hash);
- kfree(mm);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-void
-drm_gem_destroy(struct drm_device *dev)
-{
- struct drm_gem_mm *mm = dev->mm_private;
-
- drm_mm_takedown(&mm->offset_manager);
- drm_ht_remove(&mm->offset_hash);
- kfree(mm);
- dev->mm_private = NULL;
-}
-
-/**
- * Allocate a GEM object of the specified size with shmfs backing store
- */
-struct drm_gem_object *
-drm_gem_object_alloc(struct drm_device *dev, size_t size)
-{
- struct drm_gem_object *obj;
-
- BUG_ON((size & (PAGE_SIZE - 1)) != 0);
-
- obj = kzalloc(sizeof(*obj), GFP_KERNEL);
- if (!obj)
- goto free;
-
- obj->dev = dev;
- obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
- if (IS_ERR(obj->filp))
- goto free;
-
- /* Basically we want to disable the OOM killer and handle ENOMEM
- * ourselves by sacrificing pages from cached buffers.
- * XXX shmem_file_[gs]et_gfp_mask()
- */
- mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping,
- GFP_HIGHUSER |
- __GFP_COLD |
- __GFP_FS |
- __GFP_RECLAIMABLE |
- __GFP_NORETRY |
- __GFP_NOWARN |
- __GFP_NOMEMALLOC);
-
- kref_init(&obj->refcount);
- kref_init(&obj->handlecount);
- obj->size = size;
- if (dev->driver->gem_init_object != NULL &&
- dev->driver->gem_init_object(obj) != 0) {
- goto fput;
- }
- atomic_inc(&dev->object_count);
- atomic_add(obj->size, &dev->object_memory);
- return obj;
-fput:
- fput(obj->filp);
-free:
- kfree(obj);
- return NULL;
-}
-EXPORT_SYMBOL(drm_gem_object_alloc);
-
-/**
- * Removes the mapping from handle to filp for this object.
- */
-static int
-drm_gem_handle_delete(struct drm_file *filp, u32 handle)
-{
- struct drm_device *dev;
- struct drm_gem_object *obj;
-
- /* This is gross. The idr system doesn't let us try a delete and
- * return an error code. It just spews if you fail at deleting.
- * So, we have to grab a lock around finding the object and then
- * doing the delete on it and dropping the refcount, or the user
- * could race us to double-decrement the refcount and cause a
- * use-after-free later. Given the frequency of our handle lookups,
- * we may want to use ida for number allocation and a hash table
- * for the pointers, anyway.
- */
- spin_lock(&filp->table_lock);
-
- /* Check if we currently have a reference on the object */
- obj = idr_find(&filp->object_idr, handle);
- if (obj == NULL) {
- spin_unlock(&filp->table_lock);
- return -EINVAL;
- }
- dev = obj->dev;
-
- /* Release reference and decrement refcount. */
- idr_remove(&filp->object_idr, handle);
- spin_unlock(&filp->table_lock);
-
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_handle_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
-/**
- * Create a handle for this object. This adds a handle reference
- * to the object, which includes a regular reference count. Callers
- * will likely want to dereference the object afterwards.
- */
-int
-drm_gem_handle_create(struct drm_file *file_priv,
- struct drm_gem_object *obj,
- u32 *handlep)
-{
- int ret;
-
- /*
- * Get the user-visible handle using idr.
- */
-again:
- /* ensure there is space available to allocate a handle */
- if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
- return -ENOMEM;
-
- /* do the allocation under our spinlock */
- spin_lock(&file_priv->table_lock);
- ret = idr_get_new_above(&file_priv->object_idr, obj, 1, (int *)handlep);
- spin_unlock(&file_priv->table_lock);
- if (ret == -EAGAIN)
- goto again;
-
- if (ret != 0)
- return ret;
-
- drm_gem_object_handle_reference(obj);
- return 0;
-}
-EXPORT_SYMBOL(drm_gem_handle_create);
-
-/** Returns a reference to the object named by the handle. */
-struct drm_gem_object *
-drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
- u32 handle)
-{
- struct drm_gem_object *obj;
-
- spin_lock(&filp->table_lock);
-
- /* Check if we currently have a reference on the object */
- obj = idr_find(&filp->object_idr, handle);
- if (obj == NULL) {
- spin_unlock(&filp->table_lock);
- return NULL;
- }
-
- drm_gem_object_reference(obj);
-
- spin_unlock(&filp->table_lock);
-
- return obj;
-}
-EXPORT_SYMBOL(drm_gem_object_lookup);
-
-/**
- * Releases the handle to an mm object.
- */
-int
-drm_gem_close_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_gem_close *args = data;
- int ret;
-
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
- ret = drm_gem_handle_delete(file_priv, args->handle);
-
- return ret;
-}
-
-/**
- * Create a global name for an object, returning the name.
- *
- * Note that the name does not hold a reference; when the object
- * is freed, the name goes away.
- */
-int
-drm_gem_flink_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_gem_flink *args = data;
- struct drm_gem_object *obj;
- int ret;
-
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL)
- return -EBADF;
-
-again:
- if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) {
- ret = -ENOMEM;
- goto err;
- }
-
- spin_lock(&dev->object_name_lock);
- if (!obj->name) {
- ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
- &obj->name);
- args->name = (uint64_t) obj->name;
- spin_unlock(&dev->object_name_lock);
-
- if (ret == -EAGAIN)
- goto again;
-
- if (ret != 0)
- goto err;
-
- /* Allocate a reference for the name table. */
- drm_gem_object_reference(obj);
- } else {
- args->name = (uint64_t) obj->name;
- spin_unlock(&dev->object_name_lock);
- ret = 0;
- }
-
-err:
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- return ret;
-}
-
-/**
- * Open an object using the global name, returning a handle and the size.
- *
- * This handle (of course) holds a reference to the object, so the object
- * will not go away until the handle is deleted.
- */
-int
-drm_gem_open_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_gem_open *args = data;
- struct drm_gem_object *obj;
- int ret;
- u32 handle;
-
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
- spin_lock(&dev->object_name_lock);
- obj = idr_find(&dev->object_name_idr, (int) args->name);
- if (obj)
- drm_gem_object_reference(obj);
- spin_unlock(&dev->object_name_lock);
- if (!obj)
- return -ENOENT;
-
- ret = drm_gem_handle_create(file_priv, obj, &handle);
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- if (ret)
- return ret;
-
- args->handle = handle;
- args->size = obj->size;
-
- return 0;
-}
-
-/**
- * Called at device open time, sets up the structure for handling refcounting
- * of mm objects.
- */
-void
-drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
-{
- idr_init(&file_private->object_idr);
- spin_lock_init(&file_private->table_lock);
-}
-
-/**
- * Called at device close to release the file's
- * handle references on objects.
- */
-static int
-drm_gem_object_release_handle(int id, void *ptr, void *data)
-{
- struct drm_gem_object *obj = ptr;
-
- drm_gem_object_handle_unreference(obj);
-
- return 0;
-}
-
-/**
- * Called at close time when the filp is going away.
- *
- * Releases any remaining references on objects by this filp.
- */
-void
-drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
-{
- mutex_lock(&dev->struct_mutex);
- idr_for_each(&file_private->object_idr,
- &drm_gem_object_release_handle, NULL);
-
- idr_destroy(&file_private->object_idr);
- mutex_unlock(&dev->struct_mutex);
-}
-
-/**
- * Called after the last reference to the object has been lost.
- *
- * Frees the object
- */
-void
-drm_gem_object_free(struct kref *kref)
-{
- struct drm_gem_object *obj = (struct drm_gem_object *) kref;
- struct drm_device *dev = obj->dev;
-
- BUG_ON(!mutex_is_locked(&dev->struct_mutex));
-
- if (dev->driver->gem_free_object != NULL)
- dev->driver->gem_free_object(obj);
-
- fput(obj->filp);
- atomic_dec(&dev->object_count);
- atomic_sub(obj->size, &dev->object_memory);
- kfree(obj);
-}
-EXPORT_SYMBOL(drm_gem_object_free);
-
-/**
- * Called after the last handle to the object has been closed
- *
- * Removes any name for the object. Note that this must be
- * called before drm_gem_object_free or we'll be touching
- * freed memory
- */
-void
-drm_gem_object_handle_free(struct kref *kref)
-{
- struct drm_gem_object *obj = container_of(kref,
- struct drm_gem_object,
- handlecount);
- struct drm_device *dev = obj->dev;
-
- /* Remove any name for this object */
- spin_lock(&dev->object_name_lock);
- if (obj->name) {
- idr_remove(&dev->object_name_idr, obj->name);
- obj->name = 0;
- spin_unlock(&dev->object_name_lock);
- /*
- * The object name held a reference to this object, drop
- * that now.
- */
- drm_gem_object_unreference(obj);
- } else
- spin_unlock(&dev->object_name_lock);
-
-}
-EXPORT_SYMBOL(drm_gem_object_handle_free);
-
-void drm_gem_vm_open(struct vm_area_struct *vma)
-{
- struct drm_gem_object *obj = vma->vm_private_data;
-
- drm_gem_object_reference(obj);
-}
-EXPORT_SYMBOL(drm_gem_vm_open);
-
-void drm_gem_vm_close(struct vm_area_struct *vma)
-{
- struct drm_gem_object *obj = vma->vm_private_data;
- struct drm_device *dev = obj->dev;
-
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
-}
-EXPORT_SYMBOL(drm_gem_vm_close);
-
-
-/**
- * drm_gem_mmap - memory map routine for GEM objects
- * @filp: DRM file pointer
- * @vma: VMA for the area to be mapped
- *
- * If a driver supports GEM object mapping, mmap calls on the DRM file
- * descriptor will end up here.
- *
- * If we find the object based on the offset passed in (vma->vm_pgoff will
- * contain the fake offset we created when the GTT map ioctl was called on
- * the object), we set up the driver fault handler so that any accesses
- * to the object can be trapped, to perform migration, GTT binding, surface
- * register allocation, or performance monitoring.
- */
-int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- struct drm_file *priv = filp->private_data;
- struct drm_device *dev = priv->minor->dev;
- struct drm_gem_mm *mm = dev->mm_private;
- struct drm_local_map *map = NULL;
- struct drm_gem_object *obj;
- struct drm_hash_item *hash;
- int ret = 0;
-
- mutex_lock(&dev->struct_mutex);
-
- if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
- mutex_unlock(&dev->struct_mutex);
- return drm_mmap(filp, vma);
- }
-
- map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
- if (!map ||
- ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
- ret = -EPERM;
- goto out_unlock;
- }
-
- /* Check for valid size. */
- if (map->size < vma->vm_end - vma->vm_start) {
- ret = -EINVAL;
- goto out_unlock;
- }
-
- obj = map->handle;
- if (!obj->dev->driver->gem_vm_ops) {
- ret = -EINVAL;
- goto out_unlock;
- }
-
- vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
- vma->vm_ops = obj->dev->driver->gem_vm_ops;
- vma->vm_private_data = map->handle;
- vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
-
- /* Take a ref for this mapping of the object, so that the fault
- * handler can dereference the mmap offset's pointer to the object.
- * This reference is cleaned up by the corresponding vm_close
- * (which should happen whether the vma was created by this call, or
- * by a vm_open due to mremap or partial unmap or whatever).
- */
- drm_gem_object_reference(obj);
-
- vma->vm_file = filp; /* Needed for drm_vm_open() */
- drm_vm_open_locked(vma);
-
-out_unlock:
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
-}
-EXPORT_SYMBOL(drm_gem_mmap);
+++ /dev/null
-/**************************************************************************
- *
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- *
- **************************************************************************/
-/*
- * Simple open hash tab implementation.
- *
- * Authors:
- * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- */
-
-#include "drmP.h"
-#include "drm_hashtab.h"
-#include <linux/hash.h>
-
-int drm_ht_create(struct drm_open_hash *ht, unsigned int order)
-{
- unsigned int i;
-
- ht->size = 1 << order;
- ht->order = order;
- ht->fill = 0;
- ht->table = NULL;
- ht->use_vmalloc = ((ht->size * sizeof(*ht->table)) > PAGE_SIZE);
- if (!ht->use_vmalloc) {
- ht->table = kcalloc(ht->size, sizeof(*ht->table), GFP_KERNEL);
- }
- if (!ht->table) {
- ht->use_vmalloc = 1;
- ht->table = vmalloc(ht->size*sizeof(*ht->table));
- }
- if (!ht->table) {
- DRM_ERROR("Out of memory for hash table\n");
- return -ENOMEM;
- }
- for (i=0; i< ht->size; ++i) {
- INIT_HLIST_HEAD(&ht->table[i]);
- }
- return 0;
-}
-EXPORT_SYMBOL(drm_ht_create);
-
-void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
-{
- struct drm_hash_item *entry;
- struct hlist_head *h_list;
- struct hlist_node *list;
- unsigned int hashed_key;
- int count = 0;
-
- hashed_key = hash_long(key, ht->order);
- DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key);
- h_list = &ht->table[hashed_key];
- hlist_for_each(list, h_list) {
- entry = hlist_entry(list, struct drm_hash_item, head);
- DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key);
- }
-}
-
-static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
- unsigned long key)
-{
- struct drm_hash_item *entry;
- struct hlist_head *h_list;
- struct hlist_node *list;
- unsigned int hashed_key;
-
- hashed_key = hash_long(key, ht->order);
- h_list = &ht->table[hashed_key];
- hlist_for_each(list, h_list) {
- entry = hlist_entry(list, struct drm_hash_item, head);
- if (entry->key == key)
- return list;
- if (entry->key > key)
- break;
- }
- return NULL;
-}
-
-
-int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
-{
- struct drm_hash_item *entry;
- struct hlist_head *h_list;
- struct hlist_node *list, *parent;
- unsigned int hashed_key;
- unsigned long key = item->key;
-
- hashed_key = hash_long(key, ht->order);
- h_list = &ht->table[hashed_key];
- parent = NULL;
- hlist_for_each(list, h_list) {
- entry = hlist_entry(list, struct drm_hash_item, head);
- if (entry->key == key)
- return -EINVAL;
- if (entry->key > key)
- break;
- parent = list;
- }
- if (parent) {
- hlist_add_after(parent, &item->head);
- } else {
- hlist_add_head(&item->head, h_list);
- }
- return 0;
-}
-EXPORT_SYMBOL(drm_ht_insert_item);
-
-/*
- * Just insert an item and return any "bits" bit key that hasn't been
- * used before.
- */
-int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
- unsigned long seed, int bits, int shift,
- unsigned long add)
-{
- int ret;
- unsigned long mask = (1 << bits) - 1;
- unsigned long first, unshifted_key;
-
- unshifted_key = hash_long(seed, bits);
- first = unshifted_key;
- do {
- item->key = (unshifted_key << shift) + add;
- ret = drm_ht_insert_item(ht, item);
- if (ret)
- unshifted_key = (unshifted_key + 1) & mask;
- } while(ret && (unshifted_key != first));
-
- if (ret) {
- DRM_ERROR("Available key bit space exhausted\n");
- return -EINVAL;
- }
- return 0;
-}
-EXPORT_SYMBOL(drm_ht_just_insert_please);
-
-int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
- struct drm_hash_item **item)
-{
- struct hlist_node *list;
-
- list = drm_ht_find_key(ht, key);
- if (!list)
- return -EINVAL;
-
- *item = hlist_entry(list, struct drm_hash_item, head);
- return 0;
-}
-EXPORT_SYMBOL(drm_ht_find_item);
-
-int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
-{
- struct hlist_node *list;
-
- list = drm_ht_find_key(ht, key);
- if (list) {
- hlist_del_init(list);
- ht->fill--;
- return 0;
- }
- return -EINVAL;
-}
-
-int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
-{
- hlist_del_init(&item->head);
- ht->fill--;
- return 0;
-}
-EXPORT_SYMBOL(drm_ht_remove_item);
-
-void drm_ht_remove(struct drm_open_hash *ht)
-{
- if (ht->table) {
- if (ht->use_vmalloc)
- vfree(ht->table);
- else
- kfree(ht->table);
- ht->table = NULL;
- }
-}
-EXPORT_SYMBOL(drm_ht_remove);
+++ /dev/null
-/**
- * \file drm_info.c
- * DRM info file implementations
- *
- * \author Ben Gamari <bgamari@gmail.com>
- */
-
-/*
- * Created: Sun Dec 21 13:09:50 2008 by bgamari@gmail.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * Copyright 2008 Ben Gamari <bgamari@gmail.com>
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/seq_file.h>
-#include "drmP.h"
-
-/**
- * Called when "/proc/dri/.../name" is read.
- *
- * Prints the device name together with the bus id if available.
- */
-int drm_name_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_minor *minor = node->minor;
- struct drm_device *dev = minor->dev;
- struct drm_master *master = minor->master;
-
- if (!master)
- return 0;
-
- if (master->unique) {
- seq_printf(m, "%s %s %s\n",
- dev->driver->pci_driver.name,
- pci_name(dev->pdev), master->unique);
- } else {
- seq_printf(m, "%s %s\n", dev->driver->pci_driver.name,
- pci_name(dev->pdev));
- }
-
- return 0;
-}
-
-/**
- * Called when "/proc/dri/.../vm" is read.
- *
- * Prints information about all mappings in drm_device::maplist.
- */
-int drm_vm_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_local_map *map;
- struct drm_map_list *r_list;
-
- /* Hardcoded from _DRM_FRAME_BUFFER,
- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
- const char *type;
- int i;
-
- mutex_lock(&dev->struct_mutex);
- seq_printf(m, "slot offset size type flags address mtrr\n\n");
- i = 0;
- list_for_each_entry(r_list, &dev->maplist, head) {
- map = r_list->map;
- if (!map)
- continue;
- if (map->type < 0 || map->type > 5)
- type = "??";
- else
- type = types[map->type];
-
- seq_printf(m, "%4d 0x%016llx 0x%08lx %4.4s 0x%02x 0x%08lx ",
- i,
- (unsigned long long)map->offset,
- map->size, type, map->flags,
- (unsigned long) r_list->user_token);
- if (map->mtrr < 0)
- seq_printf(m, "none\n");
- else
- seq_printf(m, "%4d\n", map->mtrr);
- i++;
- }
- mutex_unlock(&dev->struct_mutex);
- return 0;
-}
-
-/**
- * Called when "/proc/dri/.../queues" is read.
- */
-int drm_queues_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- int i;
- struct drm_queue *q;
-
- mutex_lock(&dev->struct_mutex);
- seq_printf(m, " ctx/flags use fin"
- " blk/rw/rwf wait flushed queued"
- " locks\n\n");
- for (i = 0; i < dev->queue_count; i++) {
- q = dev->queuelist[i];
- atomic_inc(&q->use_count);
- seq_printf(m, "%5d/0x%03x %5d %5d"
- " %5d/%c%c/%c%c%c %5Zd\n",
- i,
- q->flags,
- atomic_read(&q->use_count),
- atomic_read(&q->finalization),
- atomic_read(&q->block_count),
- atomic_read(&q->block_read) ? 'r' : '-',
- atomic_read(&q->block_write) ? 'w' : '-',
- waitqueue_active(&q->read_queue) ? 'r' : '-',
- waitqueue_active(&q->write_queue) ? 'w' : '-',
- waitqueue_active(&q->flush_queue) ? 'f' : '-',
- DRM_BUFCOUNT(&q->waitlist));
- atomic_dec(&q->use_count);
- }
- mutex_unlock(&dev->struct_mutex);
- return 0;
-}
-
-/**
- * Called when "/proc/dri/.../bufs" is read.
- */
-int drm_bufs_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_device_dma *dma;
- int i, seg_pages;
-
- mutex_lock(&dev->struct_mutex);
- dma = dev->dma;
- if (!dma) {
- mutex_unlock(&dev->struct_mutex);
- return 0;
- }
-
- seq_printf(m, " o size count free segs pages kB\n\n");
- for (i = 0; i <= DRM_MAX_ORDER; i++) {
- if (dma->bufs[i].buf_count) {
- seg_pages = dma->bufs[i].seg_count * (1 << dma->bufs[i].page_order);
- seq_printf(m, "%2d %8d %5d %5d %5d %5d %5ld\n",
- i,
- dma->bufs[i].buf_size,
- dma->bufs[i].buf_count,
- atomic_read(&dma->bufs[i].freelist.count),
- dma->bufs[i].seg_count,
- seg_pages,
- seg_pages * PAGE_SIZE / 1024);
- }
- }
- seq_printf(m, "\n");
- for (i = 0; i < dma->buf_count; i++) {
- if (i && !(i % 32))
- seq_printf(m, "\n");
- seq_printf(m, " %d", dma->buflist[i]->list);
- }
- seq_printf(m, "\n");
- mutex_unlock(&dev->struct_mutex);
- return 0;
-}
-
-/**
- * Called when "/proc/dri/.../vblank" is read.
- */
-int drm_vblank_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- int crtc;
-
- mutex_lock(&dev->struct_mutex);
- for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
- seq_printf(m, "CRTC %d enable: %d\n",
- crtc, atomic_read(&dev->vblank_refcount[crtc]));
- seq_printf(m, "CRTC %d counter: %d\n",
- crtc, drm_vblank_count(dev, crtc));
- seq_printf(m, "CRTC %d last wait: %d\n",
- crtc, dev->last_vblank_wait[crtc]);
- seq_printf(m, "CRTC %d in modeset: %d\n",
- crtc, dev->vblank_inmodeset[crtc]);
- }
- mutex_unlock(&dev->struct_mutex);
- return 0;
-}
-
-/**
- * Called when "/proc/dri/.../clients" is read.
- *
- */
-int drm_clients_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_file *priv;
-
- mutex_lock(&dev->struct_mutex);
- seq_printf(m, "a dev pid uid magic ioctls\n\n");
- list_for_each_entry(priv, &dev->filelist, lhead) {
- seq_printf(m, "%c %3d %5d %5d %10u %10lu\n",
- priv->authenticated ? 'y' : 'n',
- priv->minor->index,
- priv->pid,
- priv->uid, priv->magic, priv->ioctl_count);
- }
- mutex_unlock(&dev->struct_mutex);
- return 0;
-}
-
-
-int drm_gem_one_name_info(int id, void *ptr, void *data)
-{
- struct drm_gem_object *obj = ptr;
- struct seq_file *m = data;
-
- seq_printf(m, "name %d size %zd\n", obj->name, obj->size);
-
- seq_printf(m, "%6d %8zd %7d %8d\n",
- obj->name, obj->size,
- atomic_read(&obj->handlecount.refcount),
- atomic_read(&obj->refcount.refcount));
- return 0;
-}
-
-int drm_gem_name_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
-
- seq_printf(m, " name size handles refcount\n");
- idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, m);
- return 0;
-}
-
-int drm_gem_object_info(struct seq_file *m, void* data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
-
- seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
- seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
- seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
- seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
- seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
- seq_printf(m, "%d gtt total\n", dev->gtt_total);
- return 0;
-}
-
-#if DRM_DEBUG_CODE
-
-int drm_vma_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_vma_entry *pt;
- struct vm_area_struct *vma;
-#if defined(__i386__)
- unsigned int pgprot;
-#endif
-
- mutex_lock(&dev->struct_mutex);
- seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
- atomic_read(&dev->vma_count),
- high_memory, (u64)virt_to_phys(high_memory));
-
- list_for_each_entry(pt, &dev->vmalist, head) {
- vma = pt->vma;
- if (!vma)
- continue;
- seq_printf(m,
- "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
- pt->pid, vma->vm_start, vma->vm_end,
- vma->vm_flags & VM_READ ? 'r' : '-',
- vma->vm_flags & VM_WRITE ? 'w' : '-',
- vma->vm_flags & VM_EXEC ? 'x' : '-',
- vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
- vma->vm_flags & VM_LOCKED ? 'l' : '-',
- vma->vm_flags & VM_IO ? 'i' : '-',
- vma->vm_pgoff);
-
-#if defined(__i386__)
- pgprot = pgprot_val(vma->vm_page_prot);
- seq_printf(m, " %c%c%c%c%c%c%c%c%c",
- pgprot & _PAGE_PRESENT ? 'p' : '-',
- pgprot & _PAGE_RW ? 'w' : 'r',
- pgprot & _PAGE_USER ? 'u' : 's',
- pgprot & _PAGE_PWT ? 't' : 'b',
- pgprot & _PAGE_PCD ? 'u' : 'c',
- pgprot & _PAGE_ACCESSED ? 'a' : '-',
- pgprot & _PAGE_DIRTY ? 'd' : '-',
- pgprot & _PAGE_PSE ? 'm' : 'k',
- pgprot & _PAGE_GLOBAL ? 'g' : 'l');
-#endif
- seq_printf(m, "\n");
- }
- mutex_unlock(&dev->struct_mutex);
- return 0;
-}
-
-#endif
-
+++ /dev/null
-/**
- * \file drm_ioctl.c
- * IOCTL processing for DRM
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Fri Jan 8 09:01:26 1999 by faith@valinux.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "drmP.h"
-#include "drm_core.h"
-
-#include "linux/pci.h"
-
-/**
- * Get the bus id.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_unique structure.
- * \return zero on success or a negative number on failure.
- *
- * Copies the bus id from drm_device::unique into user space.
- */
-int drm_getunique(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_unique *u = data;
- struct drm_master *master = file_priv->master;
-
- if (u->unique_len >= master->unique_len) {
- if (copy_to_user(u->unique, master->unique, master->unique_len))
- return -EFAULT;
- }
- u->unique_len = master->unique_len;
-
- return 0;
-}
-
-/**
- * Set the bus id.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_unique structure.
- * \return zero on success or a negative number on failure.
- *
- * Copies the bus id from userspace into drm_device::unique, and verifies that
- * it matches the device this DRM is attached to (EINVAL otherwise). Deprecated
- * in interface version 1.1 and will return EBUSY when setversion has requested
- * version 1.1 or greater.
- */
-int drm_setunique(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_unique *u = data;
- struct drm_master *master = file_priv->master;
- int domain, bus, slot, func, ret;
-
- if (master->unique_len || master->unique)
- return -EBUSY;
-
- if (!u->unique_len || u->unique_len > 1024)
- return -EINVAL;
-
- master->unique_len = u->unique_len;
- master->unique_size = u->unique_len + 1;
- master->unique = kmalloc(master->unique_size, GFP_KERNEL);
- if (!master->unique)
- return -ENOMEM;
- if (copy_from_user(master->unique, u->unique, master->unique_len))
- return -EFAULT;
-
- master->unique[master->unique_len] = '\0';
-
- dev->devname = kmalloc(strlen(dev->driver->pci_driver.name) +
- strlen(master->unique) + 2, GFP_KERNEL);
- if (!dev->devname)
- return -ENOMEM;
-
- sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name,
- master->unique);
-
- /* Return error if the busid submitted doesn't match the device's actual
- * busid.
- */
- ret = sscanf(master->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
- if (ret != 3)
- return -EINVAL;
- domain = bus >> 8;
- bus &= 0xff;
-
- if ((domain != drm_get_pci_domain(dev)) ||
- (bus != dev->pdev->bus->number) ||
- (slot != PCI_SLOT(dev->pdev->devfn)) ||
- (func != PCI_FUNC(dev->pdev->devfn)))
- return -EINVAL;
-
- return 0;
-}
-
-static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
-{
- struct drm_master *master = file_priv->master;
- int len;
-
- if (master->unique != NULL)
- return -EBUSY;
-
- master->unique_len = 40;
- master->unique_size = master->unique_len;
- master->unique = kmalloc(master->unique_size, GFP_KERNEL);
- if (master->unique == NULL)
- return -ENOMEM;
-
- len = snprintf(master->unique, master->unique_len, "pci:%04x:%02x:%02x.%d",
- drm_get_pci_domain(dev),
- dev->pdev->bus->number,
- PCI_SLOT(dev->pdev->devfn),
- PCI_FUNC(dev->pdev->devfn));
- if (len >= master->unique_len)
- DRM_ERROR("buffer overflow");
- else
- master->unique_len = len;
-
- dev->devname = kmalloc(strlen(dev->driver->pci_driver.name) +
- master->unique_len + 2, GFP_KERNEL);
- if (dev->devname == NULL)
- return -ENOMEM;
-
- sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name,
- master->unique);
-
- return 0;
-}
-
-/**
- * Get a mapping information.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_map structure.
- *
- * \return zero on success or a negative number on failure.
- *
- * Searches for the mapping with the specified offset and copies its information
- * into userspace
- */
-int drm_getmap(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_map *map = data;
- struct drm_map_list *r_list = NULL;
- struct list_head *list;
- int idx;
- int i;
-
- idx = map->offset;
-
- mutex_lock(&dev->struct_mutex);
- if (idx < 0) {
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
- }
-
- i = 0;
- list_for_each(list, &dev->maplist) {
- if (i == idx) {
- r_list = list_entry(list, struct drm_map_list, head);
- break;
- }
- i++;
- }
- if (!r_list || !r_list->map) {
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
- }
-
- map->offset = r_list->map->offset;
- map->size = r_list->map->size;
- map->type = r_list->map->type;
- map->flags = r_list->map->flags;
- map->handle = (void *)(unsigned long) r_list->user_token;
- map->mtrr = r_list->map->mtrr;
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
-/**
- * Get client information.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_client structure.
- *
- * \return zero on success or a negative number on failure.
- *
- * Searches for the client with the specified index and copies its information
- * into userspace
- */
-int drm_getclient(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_client *client = data;
- struct drm_file *pt;
- int idx;
- int i;
-
- idx = client->idx;
- mutex_lock(&dev->struct_mutex);
-
- i = 0;
- list_for_each_entry(pt, &dev->filelist, lhead) {
- if (i++ >= idx) {
- client->auth = pt->authenticated;
- client->pid = pt->pid;
- client->uid = pt->uid;
- client->magic = pt->magic;
- client->iocs = pt->ioctl_count;
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
- }
- }
- mutex_unlock(&dev->struct_mutex);
-
- return -EINVAL;
-}
-
-/**
- * Get statistics information.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_stats structure.
- *
- * \return zero on success or a negative number on failure.
- */
-int drm_getstats(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_stats *stats = data;
- int i;
-
- memset(stats, 0, sizeof(*stats));
-
- mutex_lock(&dev->struct_mutex);
-
- for (i = 0; i < dev->counters; i++) {
- if (dev->types[i] == _DRM_STAT_LOCK)
- stats->data[i].value =
- (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
- else
- stats->data[i].value = atomic_read(&dev->counts[i]);
- stats->data[i].type = dev->types[i];
- }
-
- stats->count = dev->counters;
-
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
-/**
- * Setversion ioctl.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_lock structure.
- * \return zero on success or negative number on failure.
- *
- * Sets the requested interface version
- */
-int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- struct drm_set_version *sv = data;
- int if_version, retcode = 0;
-
- if (sv->drm_di_major != -1) {
- if (sv->drm_di_major != DRM_IF_MAJOR ||
- sv->drm_di_minor < 0 || sv->drm_di_minor > DRM_IF_MINOR) {
- retcode = -EINVAL;
- goto done;
- }
- if_version = DRM_IF_VERSION(sv->drm_di_major,
- sv->drm_di_minor);
- dev->if_version = max(if_version, dev->if_version);
- if (sv->drm_di_minor >= 1) {
- /*
- * Version 1.1 includes tying of DRM to specific device
- */
- drm_set_busid(dev, file_priv);
- }
- }
-
- if (sv->drm_dd_major != -1) {
- if (sv->drm_dd_major != dev->driver->major ||
- sv->drm_dd_minor < 0 || sv->drm_dd_minor >
- dev->driver->minor) {
- retcode = -EINVAL;
- goto done;
- }
-
- if (dev->driver->set_version)
- dev->driver->set_version(dev, sv);
- }
-
-done:
- sv->drm_di_major = DRM_IF_MAJOR;
- sv->drm_di_minor = DRM_IF_MINOR;
- sv->drm_dd_major = dev->driver->major;
- sv->drm_dd_minor = dev->driver->minor;
-
- return retcode;
-}
-
-/** No-op ioctl. */
-int drm_noop(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- DRM_DEBUG("\n");
- return 0;
-}
+++ /dev/null
-/**
- * \file drm_irq.c
- * IRQ support
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
- *
- * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "drmP.h"
-
-#include <linux/interrupt.h> /* For task queue support */
-
-#include <linux/vgaarb.h>
-/**
- * Get interrupt from bus id.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_irq_busid structure.
- * \return zero on success or a negative number on failure.
- *
- * Finds the PCI device with the specified bus id and gets its IRQ number.
- * This IOCTL is deprecated, and will now return EINVAL for any busid not equal
- * to that of the device that this DRM instance is attached to.
- */
-int drm_irq_by_busid(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_irq_busid *p = data;
-
- if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
- return -EINVAL;
-
- if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
- (p->busnum & 0xff) != dev->pdev->bus->number ||
- p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
- return -EINVAL;
-
- p->irq = dev->pdev->irq;
-
- DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
- p->irq);
-
- return 0;
-}
-
-static void vblank_disable_fn(unsigned long arg)
-{
- struct drm_device *dev = (struct drm_device *)arg;
- unsigned long irqflags;
- int i;
-
- if (!dev->vblank_disable_allowed)
- return;
-
- for (i = 0; i < dev->num_crtcs; i++) {
- spin_lock_irqsave(&dev->vbl_lock, irqflags);
- if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
- dev->vblank_enabled[i]) {
- DRM_DEBUG("disabling vblank on crtc %d\n", i);
- dev->last_vblank[i] =
- dev->driver->get_vblank_counter(dev, i);
- dev->driver->disable_vblank(dev, i);
- dev->vblank_enabled[i] = 0;
- }
- spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
- }
-}
-
-void drm_vblank_cleanup(struct drm_device *dev)
-{
- /* Bail if the driver didn't call drm_vblank_init() */
- if (dev->num_crtcs == 0)
- return;
-
- del_timer(&dev->vblank_disable_timer);
-
- vblank_disable_fn((unsigned long)dev);
-
- kfree(dev->vbl_queue);
- kfree(dev->_vblank_count);
- kfree(dev->vblank_refcount);
- kfree(dev->vblank_enabled);
- kfree(dev->last_vblank);
- kfree(dev->last_vblank_wait);
- kfree(dev->vblank_inmodeset);
-
- dev->num_crtcs = 0;
-}
-
-int drm_vblank_init(struct drm_device *dev, int num_crtcs)
-{
- int i, ret = -ENOMEM;
-
- setup_timer(&dev->vblank_disable_timer, vblank_disable_fn,
- (unsigned long)dev);
- spin_lock_init(&dev->vbl_lock);
- dev->num_crtcs = num_crtcs;
-
- dev->vbl_queue = kmalloc(sizeof(wait_queue_head_t) * num_crtcs,
- GFP_KERNEL);
- if (!dev->vbl_queue)
- goto err;
-
- dev->_vblank_count = kmalloc(sizeof(atomic_t) * num_crtcs, GFP_KERNEL);
- if (!dev->_vblank_count)
- goto err;
-
- dev->vblank_refcount = kmalloc(sizeof(atomic_t) * num_crtcs,
- GFP_KERNEL);
- if (!dev->vblank_refcount)
- goto err;
-
- dev->vblank_enabled = kcalloc(num_crtcs, sizeof(int), GFP_KERNEL);
- if (!dev->vblank_enabled)
- goto err;
-
- dev->last_vblank = kcalloc(num_crtcs, sizeof(u32), GFP_KERNEL);
- if (!dev->last_vblank)
- goto err;
-
- dev->last_vblank_wait = kcalloc(num_crtcs, sizeof(u32), GFP_KERNEL);
- if (!dev->last_vblank_wait)
- goto err;
-
- dev->vblank_inmodeset = kcalloc(num_crtcs, sizeof(int), GFP_KERNEL);
- if (!dev->vblank_inmodeset)
- goto err;
-
- /* Zero per-crtc vblank stuff */
- for (i = 0; i < num_crtcs; i++) {
- init_waitqueue_head(&dev->vbl_queue[i]);
- atomic_set(&dev->_vblank_count[i], 0);
- atomic_set(&dev->vblank_refcount[i], 0);
- }
-
- dev->vblank_disable_allowed = 0;
-
- return 0;
-
-err:
- drm_vblank_cleanup(dev);
- return ret;
-}
-EXPORT_SYMBOL(drm_vblank_init);
-
-static void drm_irq_vgaarb_nokms(void *cookie, bool state)
-{
- struct drm_device *dev = cookie;
-
- if (dev->driver->vgaarb_irq) {
- dev->driver->vgaarb_irq(dev, state);
- return;
- }
-
- if (!dev->irq_enabled)
- return;
-
- if (state)
- dev->driver->irq_uninstall(dev);
- else {
- dev->driver->irq_preinstall(dev);
- dev->driver->irq_postinstall(dev);
- }
-}
-
-/**
- * Install IRQ handler.
- *
- * \param dev DRM device.
- *
- * Initializes the IRQ related data. Installs the handler, calling the driver
- * \c drm_driver_irq_preinstall() and \c drm_driver_irq_postinstall() functions
- * before and after the installation.
- */
-int drm_irq_install(struct drm_device *dev)
-{
- int ret = 0;
- unsigned long sh_flags = 0;
- char *irqname;
-
- if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
- return -EINVAL;
-
- if (dev->pdev->irq == 0)
- return -EINVAL;
-
- mutex_lock(&dev->struct_mutex);
-
- /* Driver must have been initialized */
- if (!dev->dev_private) {
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
- }
-
- if (dev->irq_enabled) {
- mutex_unlock(&dev->struct_mutex);
- return -EBUSY;
- }
- dev->irq_enabled = 1;
- mutex_unlock(&dev->struct_mutex);
-
- DRM_DEBUG("irq=%d\n", dev->pdev->irq);
-
- /* Before installing handler */
- dev->driver->irq_preinstall(dev);
-
- /* Install handler */
- if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
- sh_flags = IRQF_SHARED;
-
- if (dev->devname)
- irqname = dev->devname;
- else
- irqname = dev->driver->name;
-
- ret = request_irq(drm_dev_to_irq(dev), dev->driver->irq_handler,
- sh_flags, irqname, dev);
-
- if (ret < 0) {
- mutex_lock(&dev->struct_mutex);
- dev->irq_enabled = 0;
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
-
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- vga_client_register(dev->pdev, (void *)dev, drm_irq_vgaarb_nokms, NULL);
-
- /* After installing handler */
- ret = dev->driver->irq_postinstall(dev);
- if (ret < 0) {
- mutex_lock(&dev->struct_mutex);
- dev->irq_enabled = 0;
- mutex_unlock(&dev->struct_mutex);
- }
-
- return ret;
-}
-EXPORT_SYMBOL(drm_irq_install);
-
-/**
- * Uninstall the IRQ handler.
- *
- * \param dev DRM device.
- *
- * Calls the driver's \c drm_driver_irq_uninstall() function, and stops the irq.
- */
-int drm_irq_uninstall(struct drm_device * dev)
-{
- unsigned long irqflags;
- int irq_enabled, i;
-
- if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
- return -EINVAL;
-
- mutex_lock(&dev->struct_mutex);
- irq_enabled = dev->irq_enabled;
- dev->irq_enabled = 0;
- mutex_unlock(&dev->struct_mutex);
-
- /*
- * Wake up any waiters so they don't hang.
- */
- spin_lock_irqsave(&dev->vbl_lock, irqflags);
- for (i = 0; i < dev->num_crtcs; i++) {
- DRM_WAKEUP(&dev->vbl_queue[i]);
- dev->vblank_enabled[i] = 0;
- dev->last_vblank[i] = dev->driver->get_vblank_counter(dev, i);
- }
- spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
-
- if (!irq_enabled)
- return -EINVAL;
-
- DRM_DEBUG("irq=%d\n", dev->pdev->irq);
-
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- vga_client_register(dev->pdev, NULL, NULL, NULL);
-
- dev->driver->irq_uninstall(dev);
-
- free_irq(dev->pdev->irq, dev);
-
- return 0;
-}
-EXPORT_SYMBOL(drm_irq_uninstall);
-
-/**
- * IRQ control ioctl.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_control structure.
- * \return zero on success or a negative number on failure.
- *
- * Calls irq_install() or irq_uninstall() according to \p arg.
- */
-int drm_control(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_control *ctl = data;
-
- /* if we don't have an irq we fall back for compatibility reasons - this used to be a separate function in drm_dma.h */
-
-
- switch (ctl->func) {
- case DRM_INST_HANDLER:
- if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
- return 0;
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return 0;
- if (dev->if_version < DRM_IF_VERSION(1, 2) &&
- ctl->irq != dev->pdev->irq)
- return -EINVAL;
- return drm_irq_install(dev);
- case DRM_UNINST_HANDLER:
- if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
- return 0;
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return 0;
- return drm_irq_uninstall(dev);
- default:
- return -EINVAL;
- }
-}
-
-/**
- * drm_vblank_count - retrieve "cooked" vblank counter value
- * @dev: DRM device
- * @crtc: which counter to retrieve
- *
- * Fetches the "cooked" vblank count value that represents the number of
- * vblank events since the system was booted, including lost events due to
- * modesetting activity.
- */
-u32 drm_vblank_count(struct drm_device *dev, int crtc)
-{
- return atomic_read(&dev->_vblank_count[crtc]);
-}
-EXPORT_SYMBOL(drm_vblank_count);
-
-/**
- * drm_update_vblank_count - update the master vblank counter
- * @dev: DRM device
- * @crtc: counter to update
- *
- * Call back into the driver to update the appropriate vblank counter
- * (specified by @crtc). Deal with wraparound, if it occurred, and
- * update the last read value so we can deal with wraparound on the next
- * call if necessary.
- *
- * Only necessary when going from off->on, to account for frames we
- * didn't get an interrupt for.
- *
- * Note: caller must hold dev->vbl_lock since this reads & writes
- * device vblank fields.
- */
-static void drm_update_vblank_count(struct drm_device *dev, int crtc)
-{
- u32 cur_vblank, diff;
-
- /*
- * Interrupts were disabled prior to this call, so deal with counter
- * wrap if needed.
- * NOTE! It's possible we lost a full dev->max_vblank_count events
- * here if the register is small or we had vblank interrupts off for
- * a long time.
- */
- cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
- diff = cur_vblank - dev->last_vblank[crtc];
- if (cur_vblank < dev->last_vblank[crtc]) {
- diff += dev->max_vblank_count;
-
- DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
- crtc, dev->last_vblank[crtc], cur_vblank, diff);
- }
-
- DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
- crtc, diff);
-
- atomic_add(diff, &dev->_vblank_count[crtc]);
-}
-
-/**
- * drm_vblank_get - get a reference count on vblank events
- * @dev: DRM device
- * @crtc: which CRTC to own
- *
- * Acquire a reference count on vblank events to avoid having them disabled
- * while in use.
- *
- * RETURNS
- * Zero on success, nonzero on failure.
- */
-int drm_vblank_get(struct drm_device *dev, int crtc)
-{
- unsigned long irqflags;
- int ret = 0;
-
- spin_lock_irqsave(&dev->vbl_lock, irqflags);
- /* Going from 0->1 means we have to enable interrupts again */
- if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) {
- if (!dev->vblank_enabled[crtc]) {
- ret = dev->driver->enable_vblank(dev, crtc);
- DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
- if (ret)
- atomic_dec(&dev->vblank_refcount[crtc]);
- else {
- dev->vblank_enabled[crtc] = 1;
- drm_update_vblank_count(dev, crtc);
- }
- }
- } else {
- if (!dev->vblank_enabled[crtc]) {
- atomic_dec(&dev->vblank_refcount[crtc]);
- ret = -EINVAL;
- }
- }
- spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
-
- return ret;
-}
-EXPORT_SYMBOL(drm_vblank_get);
-
-/**
- * drm_vblank_put - give up ownership of vblank events
- * @dev: DRM device
- * @crtc: which counter to give up
- *
- * Release ownership of a given vblank counter, turning off interrupts
- * if possible.
- */
-void drm_vblank_put(struct drm_device *dev, int crtc)
-{
- BUG_ON (atomic_read (&dev->vblank_refcount[crtc]) == 0);
-
- /* Last user schedules interrupt disable */
- if (atomic_dec_and_test(&dev->vblank_refcount[crtc]))
- mod_timer(&dev->vblank_disable_timer, jiffies + 5*DRM_HZ);
-}
-EXPORT_SYMBOL(drm_vblank_put);
-
-void drm_vblank_off(struct drm_device *dev, int crtc)
-{
- unsigned long irqflags;
-
- spin_lock_irqsave(&dev->vbl_lock, irqflags);
- DRM_WAKEUP(&dev->vbl_queue[crtc]);
- dev->vblank_enabled[crtc] = 0;
- dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
- spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
-}
-EXPORT_SYMBOL(drm_vblank_off);
-
-/**
- * drm_vblank_pre_modeset - account for vblanks across mode sets
- * @dev: DRM device
- * @crtc: CRTC in question
- *
- * Account for vblank events across mode setting events, which will likely
- * reset the hardware frame counter.
- */
-void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
-{
- /*
- * To avoid all the problems that might happen if interrupts
- * were enabled/disabled around or between these calls, we just
- * have the kernel take a reference on the CRTC (just once though
- * to avoid corrupting the count if multiple, mismatched calls occur),
- * so that interrupts remain enabled in the interim.
- */
- if (!dev->vblank_inmodeset[crtc]) {
- dev->vblank_inmodeset[crtc] = 0x1;
- if (drm_vblank_get(dev, crtc) == 0)
- dev->vblank_inmodeset[crtc] |= 0x2;
- }
-}
-EXPORT_SYMBOL(drm_vblank_pre_modeset);
-
-void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
-{
- unsigned long irqflags;
-
- if (dev->vblank_inmodeset[crtc]) {
- spin_lock_irqsave(&dev->vbl_lock, irqflags);
- dev->vblank_disable_allowed = 1;
- spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
-
- if (dev->vblank_inmodeset[crtc] & 0x2)
- drm_vblank_put(dev, crtc);
-
- dev->vblank_inmodeset[crtc] = 0;
- }
-}
-EXPORT_SYMBOL(drm_vblank_post_modeset);
-
-/**
- * drm_modeset_ctl - handle vblank event counter changes across mode switch
- * @DRM_IOCTL_ARGS: standard ioctl arguments
- *
- * Applications should call the %_DRM_PRE_MODESET and %_DRM_POST_MODESET
- * ioctls around modesetting so that any lost vblank events are accounted for.
- *
- * Generally the counter will reset across mode sets. If interrupts are
- * enabled around this call, we don't have to do anything since the counter
- * will have already been incremented.
- */
-int drm_modeset_ctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_modeset_ctl *modeset = data;
- int crtc, ret = 0;
-
- /* If drm_vblank_init() hasn't been called yet, just no-op */
- if (!dev->num_crtcs)
- goto out;
-
- crtc = modeset->crtc;
- if (crtc >= dev->num_crtcs) {
- ret = -EINVAL;
- goto out;
- }
-
- switch (modeset->cmd) {
- case _DRM_PRE_MODESET:
- drm_vblank_pre_modeset(dev, crtc);
- break;
- case _DRM_POST_MODESET:
- drm_vblank_post_modeset(dev, crtc);
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
-out:
- return ret;
-}
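-
-/*
- * Userspace-side sketch (not part of this file): how an application is
- * expected to bracket a mode set with the ioctl handled above.  The open
- * DRM file descriptor "fd" and the CRTC index are illustrative only.
- */
-#if 0	/* example only */
-	struct drm_modeset_ctl ctl = { .crtc = 0, .cmd = _DRM_PRE_MODESET };
-
-	ioctl(fd, DRM_IOCTL_MODESET_CTL, &ctl);	/* before reprogramming the CRTC */
-	/* ... perform the mode set ... */
-	ctl.cmd = _DRM_POST_MODESET;
-	ioctl(fd, DRM_IOCTL_MODESET_CTL, &ctl);	/* after the mode set */
-#endif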
-
-static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
- union drm_wait_vblank *vblwait,
- struct drm_file *file_priv)
-{
- struct drm_pending_vblank_event *e;
- struct timeval now;
- unsigned long flags;
- unsigned int seq;
-
- e = kzalloc(sizeof *e, GFP_KERNEL);
- if (e == NULL)
- return -ENOMEM;
-
- e->pipe = pipe;
- e->event.base.type = DRM_EVENT_VBLANK;
- e->event.base.length = sizeof e->event;
- e->event.user_data = vblwait->request.signal;
- e->base.event = &e->event.base;
- e->base.file_priv = file_priv;
- e->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
-
- do_gettimeofday(&now);
- spin_lock_irqsave(&dev->event_lock, flags);
-
- if (file_priv->event_space < sizeof e->event) {
- spin_unlock_irqrestore(&dev->event_lock, flags);
- kfree(e);
- return -ENOMEM;
- }
-
- file_priv->event_space -= sizeof e->event;
- seq = drm_vblank_count(dev, pipe);
- if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) &&
- (seq - vblwait->request.sequence) <= (1 << 23)) {
- vblwait->request.sequence = seq + 1;
- vblwait->reply.sequence = vblwait->request.sequence;
- }
-
- DRM_DEBUG("event on vblank count %d, current %d, crtc %d\n",
- vblwait->request.sequence, seq, pipe);
-
- e->event.sequence = vblwait->request.sequence;
- if ((seq - vblwait->request.sequence) <= (1 << 23)) {
- e->event.tv_sec = now.tv_sec;
- e->event.tv_usec = now.tv_usec;
- drm_vblank_put(dev, e->pipe);
- list_add_tail(&e->base.link, &e->base.file_priv->event_list);
- wake_up_interruptible(&e->base.file_priv->event_wait);
- } else {
- list_add_tail(&e->base.link, &dev->vblank_event_list);
- }
-
- spin_unlock_irqrestore(&dev->event_lock, flags);
-
- return 0;
-}
-
-/**
- * Wait for VBLANK.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param data user argument, pointing to a drm_wait_vblank structure.
- * \return zero on success or a negative number on failure.
- *
- * This function enables the vblank interrupt on the pipe requested, then
- * sleeps waiting for the requested sequence number to occur, and drops
- * the vblank interrupt refcount afterwards. (The vblank interrupt is disabled
- * after a timeout if no further vblank waits have been scheduled.)
- */
-int drm_wait_vblank(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- union drm_wait_vblank *vblwait = data;
- int ret = 0;
- unsigned int flags, seq, crtc;
-
- if ((!dev->pdev->irq) || (!dev->irq_enabled))
- return -EINVAL;
-
- if (vblwait->request.type & _DRM_VBLANK_SIGNAL)
- return -EINVAL;
-
- if (vblwait->request.type &
- ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) {
- DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n",
- vblwait->request.type,
- (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK));
- return -EINVAL;
- }
-
- flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
- crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
-
- if (crtc >= dev->num_crtcs)
- return -EINVAL;
-
- ret = drm_vblank_get(dev, crtc);
- if (ret) {
- DRM_DEBUG("failed to acquire vblank counter, %d\n", ret);
- return ret;
- }
- seq = drm_vblank_count(dev, crtc);
-
- switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
- case _DRM_VBLANK_RELATIVE:
- vblwait->request.sequence += seq;
- vblwait->request.type &= ~_DRM_VBLANK_RELATIVE;
- /* fall through */
- case _DRM_VBLANK_ABSOLUTE:
- break;
- default:
- ret = -EINVAL;
- goto done;
- }
-
- if (flags & _DRM_VBLANK_EVENT)
- return drm_queue_vblank_event(dev, crtc, vblwait, file_priv);
-
- if ((flags & _DRM_VBLANK_NEXTONMISS) &&
- (seq - vblwait->request.sequence) <= (1<<23)) {
- vblwait->request.sequence = seq + 1;
- }
-
- DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
- vblwait->request.sequence, crtc);
- dev->last_vblank_wait[crtc] = vblwait->request.sequence;
- DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
- (((drm_vblank_count(dev, crtc) -
- vblwait->request.sequence) <= (1 << 23)) ||
- !dev->irq_enabled));
-
- if (ret != -EINTR) {
- struct timeval now;
-
- do_gettimeofday(&now);
-
- vblwait->reply.tval_sec = now.tv_sec;
- vblwait->reply.tval_usec = now.tv_usec;
- vblwait->reply.sequence = drm_vblank_count(dev, crtc);
- DRM_DEBUG("returning %d to client\n",
- vblwait->reply.sequence);
- } else {
- DRM_DEBUG("vblank wait interrupted by signal\n");
- }
-
-done:
- drm_vblank_put(dev, crtc);
- return ret;
-}
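-
-/*
- * Userspace-side sketch (not part of this file): a relative wait for the
- * next vblank on the primary CRTC through the ioctl implemented above.
- * "fd" is assumed to be an open DRM device node.
- */
-#if 0	/* example only */
-	union drm_wait_vblank vbl;
-
-	memset(&vbl, 0, sizeof(vbl));
-	vbl.request.type = _DRM_VBLANK_RELATIVE;	/* relative to the current count */
-	vbl.request.sequence = 1;			/* one vblank from now */
-	if (ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl) == 0)
-		printf("vblank %u at %ld.%06ld\n", vbl.reply.sequence,
-		       (long)vbl.reply.tval_sec, (long)vbl.reply.tval_usec);
-#endif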
-
-void drm_handle_vblank_events(struct drm_device *dev, int crtc)
-{
- struct drm_pending_vblank_event *e, *t;
- struct timeval now;
- unsigned long flags;
- unsigned int seq;
-
- do_gettimeofday(&now);
- seq = drm_vblank_count(dev, crtc);
-
- spin_lock_irqsave(&dev->event_lock, flags);
-
- list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
- if (e->pipe != crtc)
- continue;
- if ((seq - e->event.sequence) > (1<<23))
- continue;
-
- DRM_DEBUG("vblank event on %d, current %d\n",
- e->event.sequence, seq);
-
- e->event.sequence = seq;
- e->event.tv_sec = now.tv_sec;
- e->event.tv_usec = now.tv_usec;
- drm_vblank_put(dev, e->pipe);
- list_move_tail(&e->base.link, &e->base.file_priv->event_list);
- wake_up_interruptible(&e->base.file_priv->event_wait);
- }
-
- spin_unlock_irqrestore(&dev->event_lock, flags);
-}
-
-/**
- * drm_handle_vblank - handle a vblank event
- * @dev: DRM device
- * @crtc: where this event occurred
- *
- * Drivers should call this routine in their vblank interrupt handlers to
- * update the vblank counter and send any signals that may be pending.
- */
-void drm_handle_vblank(struct drm_device *dev, int crtc)
-{
- if (!dev->num_crtcs)
- return;
-
- atomic_inc(&dev->_vblank_count[crtc]);
- DRM_WAKEUP(&dev->vbl_queue[crtc]);
- drm_handle_vblank_events(dev, crtc);
-}
-EXPORT_SYMBOL(drm_handle_vblank);
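-
-/*
- * Illustrative sketch (not part of the original file): how a driver's
- * interrupt handler feeds this helper.  The status-register read and the
- * EXAMPLE_VBLANK_PENDING bit are hypothetical hardware details.
- */
-#if 0	/* example only */
-static irqreturn_t example_driver_irq_handler(int irq, void *arg)
-{
-	struct drm_device *dev = arg;
-	u32 status = example_read_irq_status(dev);	/* hypothetical helper */
-
-	if (status & EXAMPLE_VBLANK_PENDING)
-		drm_handle_vblank(dev, 0);	/* bump counter, wake waiters, fire events */
-
-	return status ? IRQ_HANDLED : IRQ_NONE;
-}
-#endif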
+++ /dev/null
-/**
- * \file drm_lock.c
- * IOCTLs for locking
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "drmP.h"
-
-static int drm_notifier(void *priv);
-
-/**
- * Lock ioctl.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_lock structure.
- * \return zero on success or negative number on failure.
- *
- * Add the current task to the lock wait queue, and attempt to take the lock.
- */
-int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- DECLARE_WAITQUEUE(entry, current);
- struct drm_lock *lock = data;
- struct drm_master *master = file_priv->master;
- int ret = 0;
-
- ++file_priv->lock_count;
-
- if (lock->context == DRM_KERNEL_CONTEXT) {
- DRM_ERROR("Process %d using kernel context %d\n",
- task_pid_nr(current), lock->context);
- return -EINVAL;
- }
-
- DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
- lock->context, task_pid_nr(current),
- master->lock.hw_lock->lock, lock->flags);
-
- if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE))
- if (lock->context < 0)
- return -EINVAL;
-
- add_wait_queue(&master->lock.lock_queue, &entry);
- spin_lock_bh(&master->lock.spinlock);
- master->lock.user_waiters++;
- spin_unlock_bh(&master->lock.spinlock);
-
- for (;;) {
- __set_current_state(TASK_INTERRUPTIBLE);
- if (!master->lock.hw_lock) {
- /* Device has been unregistered */
- send_sig(SIGTERM, current, 0);
- ret = -EINTR;
- break;
- }
- if (drm_lock_take(&master->lock, lock->context)) {
- master->lock.file_priv = file_priv;
- master->lock.lock_time = jiffies;
- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
- break; /* Got lock */
- }
-
- /* Contention */
- schedule();
- if (signal_pending(current)) {
- ret = -EINTR;
- break;
- }
- }
- spin_lock_bh(&master->lock.spinlock);
- master->lock.user_waiters--;
- spin_unlock_bh(&master->lock.spinlock);
- __set_current_state(TASK_RUNNING);
- remove_wait_queue(&master->lock.lock_queue, &entry);
-
- DRM_DEBUG("%d %s\n", lock->context,
- ret ? "interrupted" : "has lock");
- if (ret)
- return ret;
-
- /* Don't block all signals on the master process for now; this is
- * probably not the correct long-term answer, but it lets us debug
- * xkb/the X server for now. */
- if (!file_priv->is_master) {
- sigemptyset(&dev->sigmask);
- sigaddset(&dev->sigmask, SIGSTOP);
- sigaddset(&dev->sigmask, SIGTSTP);
- sigaddset(&dev->sigmask, SIGTTIN);
- sigaddset(&dev->sigmask, SIGTTOU);
- dev->sigdata.context = lock->context;
- dev->sigdata.lock = master->lock.hw_lock;
- block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);
- }
-
- if (dev->driver->dma_ready && (lock->flags & _DRM_LOCK_READY))
- dev->driver->dma_ready(dev);
-
- if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT))
- {
- if (dev->driver->dma_quiescent(dev)) {
- DRM_DEBUG("%d waiting for DMA quiescent\n",
- lock->context);
- return -EBUSY;
- }
- }
-
- if (dev->driver->kernel_context_switch &&
- dev->last_context != lock->context) {
- dev->driver->kernel_context_switch(dev, dev->last_context,
- lock->context);
- }
-
- return 0;
-}
-
-/**
- * Unlock ioctl.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_lock structure.
- * \return zero on success or negative number on failure.
- *
- * Transfer and free the lock.
- */
-int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- struct drm_lock *lock = data;
- struct drm_master *master = file_priv->master;
-
- if (lock->context == DRM_KERNEL_CONTEXT) {
- DRM_ERROR("Process %d using kernel context %d\n",
- task_pid_nr(current), lock->context);
- return -EINVAL;
- }
-
- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
-
- /* kernel_context_switch isn't used by any of the x86 drm
- * modules but is required by the Sparc driver.
- */
- if (dev->driver->kernel_context_switch_unlock)
- dev->driver->kernel_context_switch_unlock(dev);
- else {
- if (drm_lock_free(&master->lock, lock->context)) {
- /* FIXME: Should really bail out here. */
- }
- }
-
- unblock_all_signals();
- return 0;
-}
-
-/**
- * Take the heavyweight lock.
- *
- * \param lock lock pointer.
- * \param context locking context.
- * \return one if the lock is held, or zero otherwise.
- *
- * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
- */
-int drm_lock_take(struct drm_lock_data *lock_data,
- unsigned int context)
-{
- unsigned int old, new, prev;
- volatile unsigned int *lock = &lock_data->hw_lock->lock;
-
- spin_lock_bh(&lock_data->spinlock);
- do {
- old = *lock;
- if (old & _DRM_LOCK_HELD)
- new = old | _DRM_LOCK_CONT;
- else {
- new = context | _DRM_LOCK_HELD |
- ((lock_data->user_waiters + lock_data->kernel_waiters > 1) ?
- _DRM_LOCK_CONT : 0);
- }
- prev = cmpxchg(lock, old, new);
- } while (prev != old);
- spin_unlock_bh(&lock_data->spinlock);
-
- if (_DRM_LOCKING_CONTEXT(old) == context) {
- if (old & _DRM_LOCK_HELD) {
- if (context != DRM_KERNEL_CONTEXT) {
- DRM_ERROR("%d holds heavyweight lock\n",
- context);
- }
- return 0;
- }
- }
-
- if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) {
- /* Have lock */
- return 1;
- }
- return 0;
-}
-EXPORT_SYMBOL(drm_lock_take);
-
-/**
- * This takes a lock forcibly and hands it to context. Should ONLY be used
- * inside *_unlock to give the lock to the kernel before calling *_dma_schedule.
- *
- * \param dev DRM device.
- * \param lock lock pointer.
- * \param context locking context.
- * \return always one.
- *
- * Resets the lock file pointer.
- * Marks the lock as held by the given context, via the \p cmpxchg instruction.
- */
-static int drm_lock_transfer(struct drm_lock_data *lock_data,
- unsigned int context)
-{
- unsigned int old, new, prev;
- volatile unsigned int *lock = &lock_data->hw_lock->lock;
-
- lock_data->file_priv = NULL;
- do {
- old = *lock;
- new = context | _DRM_LOCK_HELD;
- prev = cmpxchg(lock, old, new);
- } while (prev != old);
- return 1;
-}
-
-/**
- * Free lock.
- *
- * \param dev DRM device.
- * \param lock lock.
- * \param context context.
- *
- * Resets the lock file pointer.
- * Marks the lock as not held, via the \p cmpxchg instruction. Wakes any task
- * waiting on the lock queue.
- */
-int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context)
-{
- unsigned int old, new, prev;
- volatile unsigned int *lock = &lock_data->hw_lock->lock;
-
- spin_lock_bh(&lock_data->spinlock);
- if (lock_data->kernel_waiters != 0) {
- drm_lock_transfer(lock_data, 0);
- lock_data->idle_has_lock = 1;
- spin_unlock_bh(&lock_data->spinlock);
- return 1;
- }
- spin_unlock_bh(&lock_data->spinlock);
-
- do {
- old = *lock;
- new = _DRM_LOCKING_CONTEXT(old);
- prev = cmpxchg(lock, old, new);
- } while (prev != old);
-
- if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
- DRM_ERROR("%d freed heavyweight lock held by %d\n",
- context, _DRM_LOCKING_CONTEXT(old));
- return 1;
- }
- wake_up_interruptible(&lock_data->lock_queue);
- return 0;
-}
-EXPORT_SYMBOL(drm_lock_free);
-
-/**
- * If we get here, it means that the process has called DRM_IOCTL_LOCK
- * without calling DRM_IOCTL_UNLOCK.
- *
- * If the lock is not held, then let the signal proceed as usual. If the lock
- * is held, then set the contended flag and keep the signal blocked.
- *
- * \param priv pointer to a drm_sigdata structure.
- * \return one if the signal should be delivered normally, or zero if the
- * signal should be blocked.
- */
-static int drm_notifier(void *priv)
-{
- struct drm_sigdata *s = (struct drm_sigdata *) priv;
- unsigned int old, new, prev;
-
- /* Allow signal delivery if lock isn't held */
- if (!s->lock || !_DRM_LOCK_IS_HELD(s->lock->lock)
- || _DRM_LOCKING_CONTEXT(s->lock->lock) != s->context)
- return 1;
-
- /* Otherwise, set flag to force call to drmUnlock */
- do {
- old = s->lock->lock;
- new = old | _DRM_LOCK_CONT;
- prev = cmpxchg(&s->lock->lock, old, new);
- } while (prev != old);
- return 0;
-}
-
-/**
- * This function returns immediately. It takes the hardware lock with the
- * kernel context if it is free; otherwise the kernel context gets the highest
- * priority to acquire it when and if it is eventually released.
- *
- * This guarantees that the kernel will _eventually_ have the lock _unless_ it is held
- * by a blocked process. (In the latter case an explicit wait for the hardware lock would cause
- * a deadlock, which is why the "idlelock" was invented).
- *
- * This should be sufficient to wait for GPU idle without
- * having to worry about starvation.
- */
-
-void drm_idlelock_take(struct drm_lock_data *lock_data)
-{
- int ret = 0;
-
- spin_lock_bh(&lock_data->spinlock);
- lock_data->kernel_waiters++;
- if (!lock_data->idle_has_lock) {
-
- spin_unlock_bh(&lock_data->spinlock);
- ret = drm_lock_take(lock_data, DRM_KERNEL_CONTEXT);
- spin_lock_bh(&lock_data->spinlock);
-
- if (ret == 1)
- lock_data->idle_has_lock = 1;
- }
- spin_unlock_bh(&lock_data->spinlock);
-}
-EXPORT_SYMBOL(drm_idlelock_take);
-
-void drm_idlelock_release(struct drm_lock_data *lock_data)
-{
- unsigned int old, prev;
- volatile unsigned int *lock = &lock_data->hw_lock->lock;
-
- spin_lock_bh(&lock_data->spinlock);
- if (--lock_data->kernel_waiters == 0) {
- if (lock_data->idle_has_lock) {
- do {
- old = *lock;
- prev = cmpxchg(lock, old, DRM_KERNEL_CONTEXT);
- } while (prev != old);
- wake_up_interruptible(&lock_data->lock_queue);
- lock_data->idle_has_lock = 0;
- }
- }
- spin_unlock_bh(&lock_data->spinlock);
-}
-EXPORT_SYMBOL(drm_idlelock_release);
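-
-/*
- * Illustrative sketch (not part of the original file): the intended use of
- * the idlelock around a wait for engine idle, as described above.  The
- * "wait for the engine to drain" step is a hypothetical driver operation.
- */
-#if 0	/* example only */
-static void example_wait_engine_idle(struct drm_lock_data *lock_data)
-{
-	/* Guarantees the kernel context will (eventually) hold the HW lock */
-	drm_idlelock_take(lock_data);
-
-	/* ... wait for the engine to drain ... */
-
-	drm_idlelock_release(lock_data);
-}
-#endif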
-
-
-int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
-{
- struct drm_master *master = file_priv->master;
- return (file_priv->lock_count && master->lock.hw_lock &&
- _DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) &&
- master->lock.file_priv == file_priv);
-}
-
-EXPORT_SYMBOL(drm_i_have_hw_lock);
+++ /dev/null
-/**
- * \file drm_memory.c
- * Memory management wrappers for DRM
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Thu Feb 4 14:00:34 1999 by faith@valinux.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/highmem.h>
-#include "drmP.h"
-
-/**
- * Called when "/proc/dri/%dev%/mem" is read.
- *
- * \param buf output buffer.
- * \param start start of output data.
- * \param offset requested start offset.
- * \param len requested number of bytes.
- * \param eof whether there is no more data to return.
- * \param data private data.
- * \return number of written bytes.
- *
- * No-op.
- */
-int drm_mem_info(char *buf, char **start, off_t offset,
- int len, int *eof, void *data)
-{
- return 0;
-}
-
-#if __OS_HAS_AGP
-static void *agp_remap(unsigned long offset, unsigned long size,
- struct drm_device * dev)
-{
- unsigned long i, num_pages =
- PAGE_ALIGN(size) / PAGE_SIZE;
- struct drm_agp_mem *agpmem;
- struct page **page_map;
- struct page **phys_page_map;
- void *addr;
-
- size = PAGE_ALIGN(size);
-
-#ifdef __alpha__
- offset -= dev->hose->mem_space->start;
-#endif
-
- list_for_each_entry(agpmem, &dev->agp->memory, head)
- if (agpmem->bound <= offset
- && (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >=
- (offset + size))
- break;
- /* no matching entry found: the loop ran off the end of the list */
- if (&agpmem->head == &dev->agp->memory)
- return NULL;
-
- /*
- * OK, we're mapping AGP space on a chipset/platform on which memory accesses by
- * the CPU do not get remapped by the GART. We fix this by using the kernel's
- * page-table instead (that's probably faster anyhow...).
- */
- /* note: use vmalloc() because num_pages could be large... */
- page_map = vmalloc(num_pages * sizeof(struct page *));
- if (!page_map)
- return NULL;
-
- phys_page_map = (agpmem->memory->pages + (offset - agpmem->bound) / PAGE_SIZE);
- for (i = 0; i < num_pages; ++i)
- page_map[i] = phys_page_map[i];
- addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP);
- vfree(page_map);
-
- return addr;
-}
-
-/** Wrapper around agp_allocate_memory() */
-DRM_AGP_MEM *drm_alloc_agp(struct drm_device * dev, int pages, u32 type)
-{
- return drm_agp_allocate_memory(dev->agp->bridge, pages, type);
-}
-
-/** Wrapper around agp_free_memory() */
-int drm_free_agp(DRM_AGP_MEM * handle, int pages)
-{
- return drm_agp_free_memory(handle) ? 0 : -EINVAL;
-}
-EXPORT_SYMBOL(drm_free_agp);
-
-/** Wrapper around agp_bind_memory() */
-int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
-{
- return drm_agp_bind_memory(handle, start);
-}
-
-/** Wrapper around agp_unbind_memory() */
-int drm_unbind_agp(DRM_AGP_MEM * handle)
-{
- return drm_agp_unbind_memory(handle);
-}
-EXPORT_SYMBOL(drm_unbind_agp);
-
-#else /* __OS_HAS_AGP */
-static inline void *agp_remap(unsigned long offset, unsigned long size,
- struct drm_device * dev)
-{
- return NULL;
-}
-
-#endif /* agp */
-
-void drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev)
-{
- if (drm_core_has_AGP(dev) &&
- dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
- map->handle = agp_remap(map->offset, map->size, dev);
- else
- map->handle = ioremap(map->offset, map->size);
-}
-EXPORT_SYMBOL(drm_core_ioremap);
-
-void drm_core_ioremap_wc(struct drm_local_map *map, struct drm_device *dev)
-{
- if (drm_core_has_AGP(dev) &&
- dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
- map->handle = agp_remap(map->offset, map->size, dev);
- else
- map->handle = ioremap_wc(map->offset, map->size);
-}
-EXPORT_SYMBOL(drm_core_ioremap_wc);
-
-void drm_core_ioremapfree(struct drm_local_map *map, struct drm_device *dev)
-{
- if (!map->handle || !map->size)
- return;
-
- if (drm_core_has_AGP(dev) &&
- dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
- vunmap(map->handle);
- else
- iounmap(map->handle);
-}
-EXPORT_SYMBOL(drm_core_ioremapfree);
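-
-/*
- * Illustrative sketch (not part of the original file): mapping and releasing
- * a register aperture with the helpers above.  The offset and size are made
- * up for the example.
- */
-#if 0	/* example only */
-static void *example_map_registers(struct drm_device *dev,
-				   struct drm_local_map *map)
-{
-	map->offset = 0xfd000000;	/* hypothetical MMIO base */
-	map->size = 0x80000;
-	map->type = _DRM_REGISTERS;
-
-	drm_core_ioremap(map, dev);	/* chooses agp_remap() or ioremap() */
-	return map->handle;		/* NULL on failure; undo with drm_core_ioremapfree() */
-}
-#endif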
+++ /dev/null
-/**************************************************************************
- *
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- *
- **************************************************************************/
-
-/*
- * Generic simple memory manager implementation. Intended to be used as a base
- * class implementation for more advanced memory managers.
- *
- * Note that the algorithm used is quite simple and there might be substantial
- * performance gains if a smarter free list is implemented. Currently it is just an
- * unordered stack of free regions. This could easily be improved if an RB-tree
- * is used instead. At least if we expect heavy fragmentation.
- *
- * Aligned allocations can also see improvement.
- *
- * Authors:
- * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- */
-
-#include "drmP.h"
-#include "drm_mm.h"
-#include <linux/slab.h>
-#include <linux/seq_file.h>
-
-#define MM_UNUSED_TARGET 4
-
-unsigned long drm_mm_tail_space(struct drm_mm *mm)
-{
- struct list_head *tail_node;
- struct drm_mm_node *entry;
-
- tail_node = mm->ml_entry.prev;
- entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
- if (!entry->free)
- return 0;
-
- return entry->size;
-}
-
-int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
-{
- struct list_head *tail_node;
- struct drm_mm_node *entry;
-
- tail_node = mm->ml_entry.prev;
- entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
- if (!entry->free)
- return -ENOMEM;
-
- if (entry->size <= size)
- return -ENOMEM;
-
- entry->size -= size;
- return 0;
-}
-
-static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
-{
- struct drm_mm_node *child;
-
- if (atomic)
- child = kmalloc(sizeof(*child), GFP_ATOMIC);
- else
- child = kmalloc(sizeof(*child), GFP_KERNEL);
-
- if (unlikely(child == NULL)) {
- spin_lock(&mm->unused_lock);
- if (list_empty(&mm->unused_nodes))
- child = NULL;
- else {
- child =
- list_entry(mm->unused_nodes.next,
- struct drm_mm_node, fl_entry);
- list_del(&child->fl_entry);
- --mm->num_unused;
- }
- spin_unlock(&mm->unused_lock);
- }
- return child;
-}
-
-/**
- * drm_mm_pre_get() - pre-allocate drm_mm_node structures
- * @mm: memory manager struct we are pre-allocating for
- *
- * Returns 0 on success or -ENOMEM if allocation fails.
- */
-int drm_mm_pre_get(struct drm_mm *mm)
-{
- struct drm_mm_node *node;
-
- spin_lock(&mm->unused_lock);
- while (mm->num_unused < MM_UNUSED_TARGET) {
- spin_unlock(&mm->unused_lock);
- node = kmalloc(sizeof(*node), GFP_KERNEL);
- spin_lock(&mm->unused_lock);
-
- if (unlikely(node == NULL)) {
- int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
- spin_unlock(&mm->unused_lock);
- return ret;
- }
- ++mm->num_unused;
- list_add_tail(&node->fl_entry, &mm->unused_nodes);
- }
- spin_unlock(&mm->unused_lock);
- return 0;
-}
-EXPORT_SYMBOL(drm_mm_pre_get);
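-
-/*
- * Illustrative sketch (not part of the original file): the pre-allocation
- * pattern drm_mm_pre_get() exists for.  The node cache is topped up outside
- * the caller's spinlock so that the search/get pair below can run with
- * atomic == 1 and never sleep.  The spinlock is a hypothetical driver lock.
- */
-#if 0	/* example only */
-static int example_alloc_atomic(struct drm_mm *mm, unsigned long size,
-				unsigned alignment, spinlock_t *lock,
-				struct drm_mm_node **out)
-{
-	struct drm_mm_node *free_space, *node = NULL;
-
-	if (drm_mm_pre_get(mm))		/* may sleep; refills the unused-node cache */
-		return -ENOMEM;
-
-	spin_lock(lock);
-	free_space = drm_mm_search_free(mm, size, alignment, 0);
-	if (free_space)
-		node = drm_mm_get_block_generic(free_space, size, alignment, 1);
-	spin_unlock(lock);
-
-	if (!node)
-		return -ENOMEM;
-	*out = node;
-	return 0;
-}
-#endif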
-
-static int drm_mm_create_tail_node(struct drm_mm *mm,
- unsigned long start,
- unsigned long size, int atomic)
-{
- struct drm_mm_node *child;
-
- child = drm_mm_kmalloc(mm, atomic);
- if (unlikely(child == NULL))
- return -ENOMEM;
-
- child->free = 1;
- child->size = size;
- child->start = start;
- child->mm = mm;
-
- list_add_tail(&child->ml_entry, &mm->ml_entry);
- list_add_tail(&child->fl_entry, &mm->fl_entry);
-
- return 0;
-}
-
-int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size, int atomic)
-{
- struct list_head *tail_node;
- struct drm_mm_node *entry;
-
- tail_node = mm->ml_entry.prev;
- entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
- if (!entry->free) {
- return drm_mm_create_tail_node(mm, entry->start + entry->size,
- size, atomic);
- }
- entry->size += size;
- return 0;
-}
-
-static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
- unsigned long size,
- int atomic)
-{
- struct drm_mm_node *child;
-
- child = drm_mm_kmalloc(parent->mm, atomic);
- if (unlikely(child == NULL))
- return NULL;
-
- INIT_LIST_HEAD(&child->fl_entry);
-
- child->free = 0;
- child->size = size;
- child->start = parent->start;
- child->mm = parent->mm;
-
- list_add_tail(&child->ml_entry, &parent->ml_entry);
- INIT_LIST_HEAD(&child->fl_entry);
-
- parent->size -= size;
- parent->start += size;
- return child;
-}
-
-
-struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
- unsigned long size,
- unsigned alignment,
- int atomic)
-{
-
- struct drm_mm_node *align_splitoff = NULL;
- unsigned tmp = 0;
-
- if (alignment)
- tmp = node->start % alignment;
-
- if (tmp) {
- align_splitoff =
- drm_mm_split_at_start(node, alignment - tmp, atomic);
- if (unlikely(align_splitoff == NULL))
- return NULL;
- }
-
- if (node->size == size) {
- list_del_init(&node->fl_entry);
- node->free = 0;
- } else {
- node = drm_mm_split_at_start(node, size, atomic);
- }
-
- if (align_splitoff)
- drm_mm_put_block(align_splitoff);
-
- return node;
-}
-EXPORT_SYMBOL(drm_mm_get_block_generic);
-
-struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node,
- unsigned long size,
- unsigned alignment,
- unsigned long start,
- unsigned long end,
- int atomic)
-{
- struct drm_mm_node *align_splitoff = NULL;
- unsigned tmp = 0;
- unsigned wasted = 0;
-
- if (node->start < start)
- wasted += start - node->start;
- if (alignment)
- tmp = ((node->start + wasted) % alignment);
-
- if (tmp)
- wasted += alignment - tmp;
- if (wasted) {
- align_splitoff = drm_mm_split_at_start(node, wasted, atomic);
- if (unlikely(align_splitoff == NULL))
- return NULL;
- }
-
- if (node->size == size) {
- list_del_init(&node->fl_entry);
- node->free = 0;
- } else {
- node = drm_mm_split_at_start(node, size, atomic);
- }
-
- if (align_splitoff)
- drm_mm_put_block(align_splitoff);
-
- return node;
-}
-EXPORT_SYMBOL(drm_mm_get_block_range_generic);
-
-/*
- * Put a block. Merge with the previous and / or next block if they are free.
- * Otherwise add to the free stack.
- */
-
-void drm_mm_put_block(struct drm_mm_node *cur)
-{
-
- struct drm_mm *mm = cur->mm;
- struct list_head *cur_head = &cur->ml_entry;
- struct list_head *root_head = &mm->ml_entry;
- struct drm_mm_node *prev_node = NULL;
- struct drm_mm_node *next_node;
-
- int merged = 0;
-
- if (cur_head->prev != root_head) {
- prev_node =
- list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
- if (prev_node->free) {
- prev_node->size += cur->size;
- merged = 1;
- }
- }
- if (cur_head->next != root_head) {
- next_node =
- list_entry(cur_head->next, struct drm_mm_node, ml_entry);
- if (next_node->free) {
- if (merged) {
- prev_node->size += next_node->size;
- list_del(&next_node->ml_entry);
- list_del(&next_node->fl_entry);
- spin_lock(&mm->unused_lock);
- if (mm->num_unused < MM_UNUSED_TARGET) {
- list_add(&next_node->fl_entry,
- &mm->unused_nodes);
- ++mm->num_unused;
- } else
- kfree(next_node);
- spin_unlock(&mm->unused_lock);
- } else {
- next_node->size += cur->size;
- next_node->start = cur->start;
- merged = 1;
- }
- }
- }
- if (!merged) {
- cur->free = 1;
- list_add(&cur->fl_entry, &mm->fl_entry);
- } else {
- list_del(&cur->ml_entry);
- spin_lock(&mm->unused_lock);
- if (mm->num_unused < MM_UNUSED_TARGET) {
- list_add(&cur->fl_entry, &mm->unused_nodes);
- ++mm->num_unused;
- } else
- kfree(cur);
- spin_unlock(&mm->unused_lock);
- }
-}
-
-EXPORT_SYMBOL(drm_mm_put_block);
-
-struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
- unsigned long size,
- unsigned alignment, int best_match)
-{
- struct list_head *list;
- const struct list_head *free_stack = &mm->fl_entry;
- struct drm_mm_node *entry;
- struct drm_mm_node *best;
- unsigned long best_size;
- unsigned wasted;
-
- best = NULL;
- best_size = ~0UL;
-
- list_for_each(list, free_stack) {
- entry = list_entry(list, struct drm_mm_node, fl_entry);
- wasted = 0;
-
- if (entry->size < size)
- continue;
-
- if (alignment) {
- register unsigned tmp = entry->start % alignment;
- if (tmp)
- wasted += alignment - tmp;
- }
-
- if (entry->size >= size + wasted) {
- if (!best_match)
- return entry;
- if (entry->size < best_size) {
- best = entry;
- best_size = entry->size;
- }
- }
- }
-
- return best;
-}
-EXPORT_SYMBOL(drm_mm_search_free);
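-
-/*
- * Illustrative sketch (not part of the original file): the simplest
- * allocate/free round trip against a manager covering a made-up 16 MiB
- * range, using the search/get/put helpers above.
- */
-#if 0	/* example only */
-static void example_mm_roundtrip(void)
-{
-	struct drm_mm mm;
-	struct drm_mm_node *free_space, *node;
-
-	if (drm_mm_init(&mm, 0, 16 * 1024 * 1024))
-		return;
-
-	free_space = drm_mm_search_free(&mm, 4096, 0, 0);
-	if (free_space) {
-		node = drm_mm_get_block_generic(free_space, 4096, 0, 0);
-		if (node) {
-			/* node->start is the offset of the allocation */
-			drm_mm_put_block(node);	/* merges back into free space */
-		}
-	}
-
-	drm_mm_takedown(&mm);
-}
-#endif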
-
-struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
- unsigned long size,
- unsigned alignment,
- unsigned long start,
- unsigned long end,
- int best_match)
-{
- struct list_head *list;
- const struct list_head *free_stack = &mm->fl_entry;
- struct drm_mm_node *entry;
- struct drm_mm_node *best;
- unsigned long best_size;
- unsigned wasted;
-
- best = NULL;
- best_size = ~0UL;
-
- list_for_each(list, free_stack) {
- entry = list_entry(list, struct drm_mm_node, fl_entry);
- wasted = 0;
-
- if (entry->size < size)
- continue;
-
- if (entry->start > end || (entry->start+entry->size) < start)
- continue;
-
- if (entry->start < start)
- wasted += start - entry->start;
-
- if (alignment) {
- register unsigned tmp = (entry->start + wasted) % alignment;
- if (tmp)
- wasted += alignment - tmp;
- }
-
- if (entry->size >= size + wasted) {
- if (!best_match)
- return entry;
- if (entry->size < best_size) {
- best = entry;
- best_size = entry->size;
- }
- }
- }
-
- return best;
-}
-EXPORT_SYMBOL(drm_mm_search_free_in_range);
-
-int drm_mm_clean(struct drm_mm * mm)
-{
- struct list_head *head = &mm->ml_entry;
-
- return (head->next->next == head);
-}
-EXPORT_SYMBOL(drm_mm_clean);
-
-int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
-{
- INIT_LIST_HEAD(&mm->ml_entry);
- INIT_LIST_HEAD(&mm->fl_entry);
- INIT_LIST_HEAD(&mm->unused_nodes);
- mm->num_unused = 0;
- spin_lock_init(&mm->unused_lock);
-
- return drm_mm_create_tail_node(mm, start, size, 0);
-}
-EXPORT_SYMBOL(drm_mm_init);
-
-void drm_mm_takedown(struct drm_mm * mm)
-{
- struct list_head *bnode = mm->fl_entry.next;
- struct drm_mm_node *entry;
- struct drm_mm_node *next;
-
- entry = list_entry(bnode, struct drm_mm_node, fl_entry);
-
- if (entry->ml_entry.next != &mm->ml_entry ||
- entry->fl_entry.next != &mm->fl_entry) {
- DRM_ERROR("Memory manager not clean. Delaying takedown\n");
- return;
- }
-
- list_del(&entry->fl_entry);
- list_del(&entry->ml_entry);
- kfree(entry);
-
- spin_lock(&mm->unused_lock);
- list_for_each_entry_safe(entry, next, &mm->unused_nodes, fl_entry) {
- list_del(&entry->fl_entry);
- kfree(entry);
- --mm->num_unused;
- }
- spin_unlock(&mm->unused_lock);
-
- BUG_ON(mm->num_unused != 0);
-}
-EXPORT_SYMBOL(drm_mm_takedown);
-
-void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
-{
- struct drm_mm_node *entry;
- int total_used = 0, total_free = 0, total = 0;
-
- list_for_each_entry(entry, &mm->ml_entry, ml_entry) {
- printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8ld: %s\n",
- prefix, entry->start, entry->start + entry->size,
- entry->size, entry->free ? "free" : "used");
- total += entry->size;
- if (entry->free)
- total_free += entry->size;
- else
- total_used += entry->size;
- }
- printk(KERN_DEBUG "%s total: %d, used %d free %d\n", prefix, total,
- total_used, total_free);
-}
-EXPORT_SYMBOL(drm_mm_debug_table);
-
-#if defined(CONFIG_DEBUG_FS)
-int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
-{
- struct drm_mm_node *entry;
- int total_used = 0, total_free = 0, total = 0;
-
- list_for_each_entry(entry, &mm->ml_entry, ml_entry) {
- seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: %s\n", entry->start, entry->start + entry->size, entry->size, entry->free ? "free" : "used");
- total += entry->size;
- if (entry->free)
- total_free += entry->size;
- else
- total_used += entry->size;
- }
- seq_printf(m, "total: %d, used %d free %d\n", total, total_used, total_free);
- return 0;
-}
-EXPORT_SYMBOL(drm_mm_dump_table);
-#endif
+++ /dev/null
-/*
- * The list_sort function is (presumably) licensed under the GPL (see the
- * top level "COPYING" file for details).
- *
- * The remainder of this file is:
- *
- * Copyright © 1997-2003 by The XFree86 Project, Inc.
- * Copyright © 2007 Dave Airlie
- * Copyright © 2007-2008 Intel Corporation
- * Jesse Barnes <jesse.barnes@intel.com>
- * Copyright 2005-2006 Luc Verhaegen
- * Copyright (c) 2001, Andy Ritger aritger@nvidia.com
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Except as contained in this notice, the name of the copyright holder(s)
- * and author(s) shall not be used in advertising or otherwise to promote
- * the sale, use or other dealings in this Software without prior written
- * authorization from the copyright holder(s) and author(s).
- */
-
-#include <linux/list.h>
-#include "drmP.h"
-#include "drm.h"
-#include "drm_crtc.h"
-
-/**
- * drm_mode_debug_printmodeline - debug print a mode
- * @mode: mode to print
- *
- * LOCKING:
- * None.
- *
- * Describe @mode using DRM_DEBUG.
- */
-void drm_mode_debug_printmodeline(struct drm_display_mode *mode)
-{
- DRM_DEBUG_KMS("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d "
- "0x%x 0x%x\n",
- mode->base.id, mode->name, mode->vrefresh, mode->clock,
- mode->hdisplay, mode->hsync_start,
- mode->hsync_end, mode->htotal,
- mode->vdisplay, mode->vsync_start,
- mode->vsync_end, mode->vtotal, mode->type, mode->flags);
-}
-EXPORT_SYMBOL(drm_mode_debug_printmodeline);
-
-/**
- * drm_cvt_mode - create a modeline based on the CVT algorithm
- * @dev: DRM device
- * @hdisplay: hdisplay size
- * @vdisplay: vdisplay size
- * @vrefresh: vrefresh rate
- * @reduced: whether to use reduced blanking (CVT-RB)
- * @interlaced: whether interlacing is supported
- * @margins: whether to add margins (borders)
- *
- * LOCKING:
- * none.
- *
- * Return the modeline based on the CVT algorithm.
- *
- * This function generates the modeline based on the CVT algorithm from the
- * requested hdisplay, vdisplay and vrefresh values.
- * It is based on the VESA(TM) Coordinated Video Timing Generator by
- * Graham Loveridge, April 9, 2003, available at
- * http://www.vesa.org/public/CVT/CVTd6r1.xls
- *
- * It is copied from xf86CVTmode in xserver/hw/xfree86/modes/xf86cvt.c,
- * translated to use integer arithmetic only.
- */
-#define HV_FACTOR 1000
-struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
- int vdisplay, int vrefresh,
- bool reduced, bool interlaced, bool margins)
-{
- /* 1) top/bottom margin size (% of height) - default: 1.8, */
-#define CVT_MARGIN_PERCENTAGE 18
- /* 2) character cell horizontal granularity (pixels) - default 8 */
-#define CVT_H_GRANULARITY 8
- /* 3) Minimum vertical porch (lines) - default 3 */
-#define CVT_MIN_V_PORCH 3
- /* 4) Minimum number of vertical back porch lines - default 6 */
-#define CVT_MIN_V_BPORCH 6
- /* Pixel Clock step (kHz) */
-#define CVT_CLOCK_STEP 250
- struct drm_display_mode *drm_mode;
- unsigned int vfieldrate, hperiod;
- int hdisplay_rnd, hmargin, vdisplay_rnd, vmargin, vsync;
- int interlace;
-
- /* allocate the drm_display_mode structure. If failure, we will
- * return directly
- */
- drm_mode = drm_mode_create(dev);
- if (!drm_mode)
- return NULL;
-
- /* the CVT default refresh rate is 60Hz */
- if (!vrefresh)
- vrefresh = 60;
-
- /* the required field refresh rate */
- if (interlaced)
- vfieldrate = vrefresh * 2;
- else
- vfieldrate = vrefresh;
-
- /* horizontal pixels */
- hdisplay_rnd = hdisplay - (hdisplay % CVT_H_GRANULARITY);
-
- /* determine the left&right borders */
- hmargin = 0;
- if (margins) {
- hmargin = hdisplay_rnd * CVT_MARGIN_PERCENTAGE / 1000;
- hmargin -= hmargin % CVT_H_GRANULARITY;
- }
- /* find the total active pixels */
- drm_mode->hdisplay = hdisplay_rnd + 2 * hmargin;
-
- /* find the number of lines per field */
- if (interlaced)
- vdisplay_rnd = vdisplay / 2;
- else
- vdisplay_rnd = vdisplay;
-
- /* find the top & bottom borders */
- vmargin = 0;
- if (margins)
- vmargin = vdisplay_rnd * CVT_MARGIN_PERCENTAGE / 1000;
-
- drm_mode->vdisplay = vdisplay + 2 * vmargin;
-
- /* Interlaced */
- if (interlaced)
- interlace = 1;
- else
- interlace = 0;
-
- /* Determine VSync Width from aspect ratio */
- if (!(vdisplay % 3) && ((vdisplay * 4 / 3) == hdisplay))
- vsync = 4;
- else if (!(vdisplay % 9) && ((vdisplay * 16 / 9) == hdisplay))
- vsync = 5;
- else if (!(vdisplay % 10) && ((vdisplay * 16 / 10) == hdisplay))
- vsync = 6;
- else if (!(vdisplay % 4) && ((vdisplay * 5 / 4) == hdisplay))
- vsync = 7;
- else if (!(vdisplay % 9) && ((vdisplay * 15 / 9) == hdisplay))
- vsync = 7;
- else /* custom */
- vsync = 10;
-
- if (!reduced) {
- /* simplify the GTF calculation */
- /* 4) Minimum time of vertical sync + back porch interval (µs)
- * default 550.0
- */
- int tmp1, tmp2;
-#define CVT_MIN_VSYNC_BP 550
- /* 3) Nominal HSync width (% of line period) - default 8 */
-#define CVT_HSYNC_PERCENTAGE 8
- unsigned int hblank_percentage;
- int vsyncandback_porch, vback_porch, hblank;
-
- /* estimated the horizontal period */
- tmp1 = HV_FACTOR * 1000000 -
- CVT_MIN_VSYNC_BP * HV_FACTOR * vfieldrate;
- tmp2 = (vdisplay_rnd + 2 * vmargin + CVT_MIN_V_PORCH) * 2 +
- interlace;
- hperiod = tmp1 * 2 / (tmp2 * vfieldrate);
-
- tmp1 = CVT_MIN_VSYNC_BP * HV_FACTOR / hperiod + 1;
- /* 9. Find number of lines in sync + backporch */
- if (tmp1 < (vsync + CVT_MIN_V_PORCH))
- vsyncandback_porch = vsync + CVT_MIN_V_PORCH;
- else
- vsyncandback_porch = tmp1;
- /* 10. Find number of lines in back porch */
- vback_porch = vsyncandback_porch - vsync;
- drm_mode->vtotal = vdisplay_rnd + 2 * vmargin +
- vsyncandback_porch + CVT_MIN_V_PORCH;
- /* 5) Definition of Horizontal blanking time limitation */
- /* Gradient (%/kHz) - default 600 */
-#define CVT_M_FACTOR 600
- /* Offset (%) - default 40 */
-#define CVT_C_FACTOR 40
- /* Blanking time scaling factor - default 128 */
-#define CVT_K_FACTOR 128
- /* Scaling factor weighting - default 20 */
-#define CVT_J_FACTOR 20
-#define CVT_M_PRIME (CVT_M_FACTOR * CVT_K_FACTOR / 256)
-#define CVT_C_PRIME ((CVT_C_FACTOR - CVT_J_FACTOR) * CVT_K_FACTOR / 256 + \
- CVT_J_FACTOR)
- /* 12. Find ideal blanking duty cycle from formula */
- hblank_percentage = CVT_C_PRIME * HV_FACTOR - CVT_M_PRIME *
- hperiod / 1000;
- /* 13. Blanking time */
- if (hblank_percentage < 20 * HV_FACTOR)
- hblank_percentage = 20 * HV_FACTOR;
- hblank = drm_mode->hdisplay * hblank_percentage /
- (100 * HV_FACTOR - hblank_percentage);
- hblank -= hblank % (2 * CVT_H_GRANULARITY);
- /* 14. find the total pixels per line */
- drm_mode->htotal = drm_mode->hdisplay + hblank;
- drm_mode->hsync_end = drm_mode->hdisplay + hblank / 2;
- drm_mode->hsync_start = drm_mode->hsync_end -
- (drm_mode->htotal * CVT_HSYNC_PERCENTAGE) / 100;
- drm_mode->hsync_start += CVT_H_GRANULARITY -
- drm_mode->hsync_start % CVT_H_GRANULARITY;
- /* fill the Vsync values */
- drm_mode->vsync_start = drm_mode->vdisplay + CVT_MIN_V_PORCH;
- drm_mode->vsync_end = drm_mode->vsync_start + vsync;
- } else {
- /* Reduced blanking */
- /* Minimum vertical blanking interval time (µs)- default 460 */
-#define CVT_RB_MIN_VBLANK 460
- /* Fixed number of clocks for horizontal sync */
-#define CVT_RB_H_SYNC 32
- /* Fixed number of clocks for horizontal blanking */
-#define CVT_RB_H_BLANK 160
- /* Fixed number of lines for vertical front porch - default 3*/
-#define CVT_RB_VFPORCH 3
- int vbilines;
- int tmp1, tmp2;
- /* 8. Estimate Horizontal period. */
- tmp1 = HV_FACTOR * 1000000 -
- CVT_RB_MIN_VBLANK * HV_FACTOR * vfieldrate;
- tmp2 = vdisplay_rnd + 2 * vmargin;
- hperiod = tmp1 / (tmp2 * vfieldrate);
- /* 9. Find number of lines in vertical blanking */
- vbilines = CVT_RB_MIN_VBLANK * HV_FACTOR / hperiod + 1;
- /* 10. Check if vertical blanking is sufficient */
- if (vbilines < (CVT_RB_VFPORCH + vsync + CVT_MIN_V_BPORCH))
- vbilines = CVT_RB_VFPORCH + vsync + CVT_MIN_V_BPORCH;
- /* 11. Find total number of lines in vertical field */
- drm_mode->vtotal = vdisplay_rnd + 2 * vmargin + vbilines;
- /* 12. Find total number of pixels in a line */
- drm_mode->htotal = drm_mode->hdisplay + CVT_RB_H_BLANK;
- /* Fill in HSync values */
- drm_mode->hsync_end = drm_mode->hdisplay + CVT_RB_H_BLANK / 2;
- drm_mode->hsync_start = drm_mode->hsync_end - CVT_RB_H_SYNC;
- }
- /* 15/13. Find pixel clock frequency (kHz for xf86) */
- drm_mode->clock = drm_mode->htotal * HV_FACTOR * 1000 / hperiod;
- drm_mode->clock -= drm_mode->clock % CVT_CLOCK_STEP;
- /* 18/16. Find actual vertical frame frequency */
- /* ignore - just set the mode flag for interlaced */
- if (interlaced)
- drm_mode->vtotal *= 2;
- /* Fill the mode line name */
- drm_mode_set_name(drm_mode);
- if (reduced)
- drm_mode->flags |= (DRM_MODE_FLAG_PHSYNC |
- DRM_MODE_FLAG_NVSYNC);
- else
- drm_mode->flags |= (DRM_MODE_FLAG_PVSYNC |
- DRM_MODE_FLAG_NHSYNC);
- if (interlaced)
- drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
-
- return drm_mode;
-}
-EXPORT_SYMBOL(drm_cvt_mode);
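-
-/*
- * Illustrative sketch (not part of the original file): generating a
- * 1920x1080 @ 60 Hz CVT mode with reduced blanking and dumping it to the
- * kernel log with the helper defined earlier in this file.
- */
-#if 0	/* example only */
-static void example_dump_cvt_mode(struct drm_device *dev)
-{
-	struct drm_display_mode *mode;
-
-	/* reduced blanking, non-interlaced, no margins */
-	mode = drm_cvt_mode(dev, 1920, 1080, 60, true, false, false);
-	if (mode)
-		drm_mode_debug_printmodeline(mode);
-}
-#endif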
-
-/**
- * drm_gtf_mode - create the modeline based on GTF algorithm
- *
- * @dev: DRM device
- * @hdisplay: hdisplay size
- * @vdisplay: vdisplay size
- * @vrefresh: vrefresh rate
- * @interlaced: whether interlacing is supported
- * @margins: whether margins are supported
- *
- * LOCKING:
- * none.
- *
- * return the modeline based on GTF algorithm
- *
- * This function is to create the modeline based on the GTF algorithm.
- * Generalized Timing Formula is derived from:
- * GTF Spreadsheet by Andy Morrish (1/5/97)
- * available at http://www.vesa.org
- *
- * It is copied from xserver/hw/xfree86/modes/xf86gtf.c, translated to use
- * integer arithmetic only, with reference to fb_get_mode() in
- * drivers/video/fbmon.c.
- */
-struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, int hdisplay,
- int vdisplay, int vrefresh,
- bool interlaced, int margins)
-{
- /* 1) top/bottom margin size (% of height) - default: 1.8, */
-#define GTF_MARGIN_PERCENTAGE 18
- /* 2) character cell horizontal granularity (pixels) - default 8 */
-#define GTF_CELL_GRAN 8
- /* 3) Minimum vertical porch (lines) - default 3 */
-#define GTF_MIN_V_PORCH 1
- /* width of vsync in lines */
-#define V_SYNC_RQD 3
- /* width of hsync as % of total line */
-#define H_SYNC_PERCENT 8
- /* min time of vsync + back porch (microsec) */
-#define MIN_VSYNC_PLUS_BP 550
- /* blanking formula gradient */
-#define GTF_M 600
- /* blanking formula offset */
-#define GTF_C 40
- /* blanking formula scaling factor */
-#define GTF_K 128
- /* blanking formula scaling factor */
-#define GTF_J 20
- /* C' and M' are part of the Blanking Duty Cycle computation */
-#define GTF_C_PRIME (((GTF_C - GTF_J) * GTF_K / 256) + GTF_J)
-#define GTF_M_PRIME (GTF_K * GTF_M / 256)
- struct drm_display_mode *drm_mode;
- unsigned int hdisplay_rnd, vdisplay_rnd, vfieldrate_rqd;
- int top_margin, bottom_margin;
- int interlace;
- unsigned int hfreq_est;
- int vsync_plus_bp, vback_porch;
- unsigned int vtotal_lines, vfieldrate_est, hperiod;
- unsigned int vfield_rate, vframe_rate;
- int left_margin, right_margin;
- unsigned int total_active_pixels, ideal_duty_cycle;
- unsigned int hblank, total_pixels, pixel_freq;
- int hsync, hfront_porch, vodd_front_porch_lines;
- unsigned int tmp1, tmp2;
-
- drm_mode = drm_mode_create(dev);
- if (!drm_mode)
- return NULL;
-
- /* 1. In order to give correct results, the number of horizontal
- * pixels requested is first processed to ensure that it is divisible
- * by the character size, by rounding it to the nearest character
- * cell boundary:
- */
- hdisplay_rnd = (hdisplay + GTF_CELL_GRAN / 2) / GTF_CELL_GRAN;
- hdisplay_rnd = hdisplay_rnd * GTF_CELL_GRAN;
-
- /* 2. If interlace is requested, the number of vertical lines assumed
- * by the calculation must be halved, as the computation calculates
- * the number of vertical lines per field.
- */
- if (interlaced)
- vdisplay_rnd = vdisplay / 2;
- else
- vdisplay_rnd = vdisplay;
-
- /* 3. Find the frame rate required: */
- if (interlaced)
- vfieldrate_rqd = vrefresh * 2;
- else
- vfieldrate_rqd = vrefresh;
-
- /* 4. Find number of lines in Top margin: */
- top_margin = 0;
- if (margins)
- top_margin = (vdisplay_rnd * GTF_MARGIN_PERCENTAGE + 500) /
- 1000;
- /* 5. Find number of lines in bottom margin: */
- bottom_margin = top_margin;
-
- /* 6. If interlace is required, then set variable interlace: */
- if (interlaced)
- interlace = 1;
- else
- interlace = 0;
-
- /* 7. Estimate the Horizontal frequency */
- {
- tmp1 = (1000000 - MIN_VSYNC_PLUS_BP * vfieldrate_rqd) / 500;
- tmp2 = (vdisplay_rnd + 2 * top_margin + GTF_MIN_V_PORCH) *
- 2 + interlace;
- hfreq_est = (tmp2 * 1000 * vfieldrate_rqd) / tmp1;
- }
-
- /* 8. Find the number of lines in V sync + back porch */
- /* [V SYNC+BP] = RINT(([MIN VSYNC+BP] * hfreq_est / 1000000)) */
- vsync_plus_bp = MIN_VSYNC_PLUS_BP * hfreq_est / 1000;
- vsync_plus_bp = (vsync_plus_bp + 500) / 1000;
- /* 9. Find the number of lines in V back porch alone: */
- vback_porch = vsync_plus_bp - V_SYNC_RQD;
- /* 10. Find the total number of lines in Vertical field period: */
- vtotal_lines = vdisplay_rnd + top_margin + bottom_margin +
- vsync_plus_bp + GTF_MIN_V_PORCH;
- /* 11. Estimate the Vertical field frequency: */
- vfieldrate_est = hfreq_est / vtotal_lines;
- /* 12. Find the actual horizontal period: */
- hperiod = 1000000 / (vfieldrate_rqd * vtotal_lines);
-
- /* 13. Find the actual Vertical field frequency: */
- vfield_rate = hfreq_est / vtotal_lines;
- /* 14. Find the Vertical frame frequency: */
- if (interlaced)
- vframe_rate = vfield_rate / 2;
- else
- vframe_rate = vfield_rate;
- /* 15. Find number of pixels in left margin: */
- if (margins)
- left_margin = (hdisplay_rnd * GTF_MARGIN_PERCENTAGE + 500) /
- 1000;
- else
- left_margin = 0;
-
- /* 16.Find number of pixels in right margin: */
- right_margin = left_margin;
- /* 17.Find total number of active pixels in image and left and right */
- total_active_pixels = hdisplay_rnd + left_margin + right_margin;
- /* 18.Find the ideal blanking duty cycle from blanking duty cycle */
- ideal_duty_cycle = GTF_C_PRIME * 1000 -
- (GTF_M_PRIME * 1000000 / hfreq_est);
- /* 19.Find the number of pixels in the blanking time to the nearest
- * double character cell: */
- hblank = total_active_pixels * ideal_duty_cycle /
- (100000 - ideal_duty_cycle);
- hblank = (hblank + GTF_CELL_GRAN) / (2 * GTF_CELL_GRAN);
- hblank = hblank * 2 * GTF_CELL_GRAN;
- /* 20.Find total number of pixels: */
- total_pixels = total_active_pixels + hblank;
- /* 21.Find pixel clock frequency: */
- pixel_freq = total_pixels * hfreq_est / 1000;
- /* Stage 1 computations are now complete; I should really pass
- * the results to another function and do the Stage 2 computations,
- * but I only need a few more values so I'll just append the
- * computations here for now */
- /* 17. Find the number of pixels in the horizontal sync period: */
- hsync = H_SYNC_PERCENT * total_pixels / 100;
- hsync = (hsync + GTF_CELL_GRAN / 2) / GTF_CELL_GRAN;
- hsync = hsync * GTF_CELL_GRAN;
- /* 18. Find the number of pixels in horizontal front porch period */
- hfront_porch = hblank / 2 - hsync;
- /* 36. Find the number of lines in the odd front porch period: */
- vodd_front_porch_lines = GTF_MIN_V_PORCH;
-
- /* finally, pack the results in the mode struct */
- drm_mode->hdisplay = hdisplay_rnd;
- drm_mode->hsync_start = hdisplay_rnd + hfront_porch;
- drm_mode->hsync_end = drm_mode->hsync_start + hsync;
- drm_mode->htotal = total_pixels;
- drm_mode->vdisplay = vdisplay_rnd;
- drm_mode->vsync_start = vdisplay_rnd + vodd_front_porch_lines;
- drm_mode->vsync_end = drm_mode->vsync_start + V_SYNC_RQD;
- drm_mode->vtotal = vtotal_lines;
-
- drm_mode->clock = pixel_freq;
-
- drm_mode_set_name(drm_mode);
- drm_mode->flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC;
-
- if (interlaced) {
- drm_mode->vtotal *= 2;
- drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
- }
-
- return drm_mode;
-}
-EXPORT_SYMBOL(drm_gtf_mode);
-/**
- * drm_mode_set_name - set the name on a mode
- * @mode: name will be set in this mode
- *
- * LOCKING:
- * None.
- *
- * Set the name of @mode to a standard format.
- */
-void drm_mode_set_name(struct drm_display_mode *mode)
-{
- snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d", mode->hdisplay,
- mode->vdisplay);
-}
-EXPORT_SYMBOL(drm_mode_set_name);
-
-/**
- * drm_mode_list_concat - move modes from one list to another
- * @head: source list
- * @new: dst list
- *
- * LOCKING:
- * Caller must ensure both lists are locked.
- *
- * Move all the modes from @head to @new.
- */
-void drm_mode_list_concat(struct list_head *head, struct list_head *new)
-{
-
- struct list_head *entry, *tmp;
-
- list_for_each_safe(entry, tmp, head) {
- list_move_tail(entry, new);
- }
-}
-EXPORT_SYMBOL(drm_mode_list_concat);
-
-/**
- * drm_mode_width - get the width of a mode
- * @mode: mode
- *
- * LOCKING:
- * None.
- *
- * Return @mode's width (hdisplay) value.
- *
- * FIXME: is this needed?
- *
- * RETURNS:
- * @mode->hdisplay
- */
-int drm_mode_width(struct drm_display_mode *mode)
-{
- return mode->hdisplay;
-
-}
-EXPORT_SYMBOL(drm_mode_width);
-
-/**
- * drm_mode_height - get the height of a mode
- * @mode: mode
- *
- * LOCKING:
- * None.
- *
- * Return @mode's height (vdisplay) value.
- *
- * FIXME: is this needed?
- *
- * RETURNS:
- * @mode->vdisplay
- */
-int drm_mode_height(struct drm_display_mode *mode)
-{
- return mode->vdisplay;
-}
-EXPORT_SYMBOL(drm_mode_height);
-
-/**
- * drm_mode_hsync - get the hsync of a mode
- * @mode: mode
- *
- * LOCKING:
- * None.
- *
- * Return @mode's hsync rate in kHz, rounded to the nearest int.
- */
-int drm_mode_hsync(struct drm_display_mode *mode)
-{
- unsigned int calc_val;
-
- if (mode->hsync)
- return mode->hsync;
-
- if (mode->htotal <= 0)
- return 0;
-
- calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */
- calc_val += 500; /* round to 1000Hz */
- calc_val /= 1000; /* truncate to kHz */
-
- return calc_val;
-}
-EXPORT_SYMBOL(drm_mode_hsync);
-
-/**
- * drm_mode_vrefresh - get the vrefresh of a mode
- * @mode: mode
- *
- * LOCKING:
- * None.
- *
- * Return @mode's vrefresh rate in Hz or calculate it if necessary.
- *
- * FIXME: why is this needed? shouldn't vrefresh be set already?
- *
- * RETURNS:
- * Vertical refresh rate rounded to the nearest integer (the actual value
- * plus 0.5, truncated): 70.288 returns 70Hz and 59.6 returns 60Hz.
- */
-int drm_mode_vrefresh(struct drm_display_mode *mode)
-{
- int refresh = 0;
- unsigned int calc_val;
-
- if (mode->vrefresh > 0)
- refresh = mode->vrefresh;
- else if (mode->htotal > 0 && mode->vtotal > 0) {
- int vtotal;
- vtotal = mode->vtotal;
- /* work out the vrefresh; the intermediate value is x1000 */
- calc_val = (mode->clock * 1000);
- calc_val /= mode->htotal;
- refresh = (calc_val + vtotal / 2) / vtotal;
-
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- refresh *= 2;
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
- refresh /= 2;
- if (mode->vscan > 1)
- refresh /= mode->vscan;
- }
- return refresh;
-}
-EXPORT_SYMBOL(drm_mode_vrefresh);
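The refresh calculation above is clock / (htotal * vtotal), then doubled for interlace, halved for doublescan and divided by the scan-line multiplier. A standalone sketch of that arithmetic (userspace illustration only, hypothetical names):

#include <stdio.h>

static int vrefresh_hz(int clock_khz, int htotal, int vtotal,
		       int interlaced, int doublescan, int vscan)
{
	unsigned int line_hz;
	int refresh;

	if (htotal <= 0 || vtotal <= 0)
		return 0;

	line_hz = (clock_khz * 1000) / htotal;		/* line rate in Hz */
	refresh = (line_hz + vtotal / 2) / vtotal;	/* rounded frame rate */

	if (interlaced)
		refresh *= 2;
	if (doublescan)
		refresh /= 2;
	if (vscan > 1)
		refresh /= vscan;
	return refresh;
}

int main(void)
{
	/* 1024x768@60: 65000 kHz, 1344x806 -> 60 Hz */
	printf("vrefresh = %d Hz\n", vrefresh_hz(65000, 1344, 806, 0, 0, 1));
	return 0;
}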
-
-/**
- * drm_mode_set_crtcinfo - set CRTC modesetting parameters
- * @p: mode
- * @adjust_flags: adjustment flags (e.g. %CRTC_INTERLACE_HALVE_V)
- *
- * LOCKING:
- * None.
- *
- * Setup the CRTC modesetting parameters for @p, adjusting if necessary.
- */
-void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
-{
- if ((p == NULL) || ((p->type & DRM_MODE_TYPE_CRTC_C) == DRM_MODE_TYPE_BUILTIN))
- return;
-
- p->crtc_hdisplay = p->hdisplay;
- p->crtc_hsync_start = p->hsync_start;
- p->crtc_hsync_end = p->hsync_end;
- p->crtc_htotal = p->htotal;
- p->crtc_hskew = p->hskew;
- p->crtc_vdisplay = p->vdisplay;
- p->crtc_vsync_start = p->vsync_start;
- p->crtc_vsync_end = p->vsync_end;
- p->crtc_vtotal = p->vtotal;
-
- if (p->flags & DRM_MODE_FLAG_INTERLACE) {
- if (adjust_flags & CRTC_INTERLACE_HALVE_V) {
- p->crtc_vdisplay /= 2;
- p->crtc_vsync_start /= 2;
- p->crtc_vsync_end /= 2;
- p->crtc_vtotal /= 2;
- }
-
- p->crtc_vtotal |= 1;
- }
-
- if (p->flags & DRM_MODE_FLAG_DBLSCAN) {
- p->crtc_vdisplay *= 2;
- p->crtc_vsync_start *= 2;
- p->crtc_vsync_end *= 2;
- p->crtc_vtotal *= 2;
- }
-
- if (p->vscan > 1) {
- p->crtc_vdisplay *= p->vscan;
- p->crtc_vsync_start *= p->vscan;
- p->crtc_vsync_end *= p->vscan;
- p->crtc_vtotal *= p->vscan;
- }
-
- p->crtc_vblank_start = min(p->crtc_vsync_start, p->crtc_vdisplay);
- p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal);
- p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay);
- p->crtc_hblank_end = max(p->crtc_hsync_end, p->crtc_htotal);
-
- p->crtc_hadjusted = false;
- p->crtc_vadjusted = false;
-}
-EXPORT_SYMBOL(drm_mode_set_crtcinfo);
-
-
-/**
- * drm_mode_duplicate - allocate and duplicate an existing mode
- * @m: mode to duplicate
- *
- * LOCKING:
- * None.
- *
- * Just allocate a new mode, copy the existing mode into it, and return
- * a pointer to it. Used to create new instances of established modes.
- */
-struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
- struct drm_display_mode *mode)
-{
- struct drm_display_mode *nmode;
- int new_id;
-
- nmode = drm_mode_create(dev);
- if (!nmode)
- return NULL;
-
- new_id = nmode->base.id;
- *nmode = *mode;
- nmode->base.id = new_id;
- INIT_LIST_HEAD(&nmode->head);
- return nmode;
-}
-EXPORT_SYMBOL(drm_mode_duplicate);
-
-/**
- * drm_mode_equal - test modes for equality
- * @mode1: first mode
- * @mode2: second mode
- *
- * LOCKING:
- * None.
- *
- * Check to see if @mode1 and @mode2 are equivalent.
- *
- * RETURNS:
- * True if the modes are equal, false otherwise.
- */
-bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2)
-{
- /* For the clock check, convert to picoseconds so fb modes get
- * matched consistently */
- if (mode1->clock && mode2->clock) {
- if (KHZ2PICOS(mode1->clock) != KHZ2PICOS(mode2->clock))
- return false;
- } else if (mode1->clock != mode2->clock)
- return false;
-
- if (mode1->hdisplay == mode2->hdisplay &&
- mode1->hsync_start == mode2->hsync_start &&
- mode1->hsync_end == mode2->hsync_end &&
- mode1->htotal == mode2->htotal &&
- mode1->hskew == mode2->hskew &&
- mode1->vdisplay == mode2->vdisplay &&
- mode1->vsync_start == mode2->vsync_start &&
- mode1->vsync_end == mode2->vsync_end &&
- mode1->vtotal == mode2->vtotal &&
- mode1->vscan == mode2->vscan &&
- mode1->flags == mode2->flags)
- return true;
-
- return false;
-}
-EXPORT_SYMBOL(drm_mode_equal);
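The clock comparison above converts kHz to a picosecond pixel period because fbdev timings are stored that way, so two clocks that round to the same period should compare equal. A standalone illustration (the KHZ2PICOS macro is reproduced locally under the usual 1e9 / kHz definition):

#include <stdio.h>

#define KHZ2PICOS(khz) (1000000000UL / (khz))

int main(void)
{
	unsigned long a = 65000, b = 65001;	/* kHz, differ by 1 */

	/* both map to the same 15384 ps pixel period, so they "match" */
	printf("%lu ps vs %lu ps -> %s\n", KHZ2PICOS(a), KHZ2PICOS(b),
	       KHZ2PICOS(a) == KHZ2PICOS(b) ? "equal" : "different");
	return 0;
}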
-
-/**
- * drm_mode_validate_size - make sure modes adhere to size constraints
- * @dev: DRM device
- * @mode_list: list of modes to check
- * @maxX: maximum width
- * @maxY: maximum height
- * @maxPitch: max pitch
- *
- * LOCKING:
- * Caller must hold a lock protecting @mode_list.
- *
- * The DRM device (@dev) has size and pitch limits. Here we validate the
- * modes we probed for @dev against those limits and set their status as
- * necessary.
- */
-void drm_mode_validate_size(struct drm_device *dev,
- struct list_head *mode_list,
- int maxX, int maxY, int maxPitch)
-{
- struct drm_display_mode *mode;
-
- list_for_each_entry(mode, mode_list, head) {
- if (maxPitch > 0 && mode->hdisplay > maxPitch)
- mode->status = MODE_BAD_WIDTH;
-
- if (maxX > 0 && mode->hdisplay > maxX)
- mode->status = MODE_VIRTUAL_X;
-
- if (maxY > 0 && mode->vdisplay > maxY)
- mode->status = MODE_VIRTUAL_Y;
- }
-}
-EXPORT_SYMBOL(drm_mode_validate_size);
-
-/**
- * drm_mode_validate_clocks - validate modes against clock limits
- * @dev: DRM device
- * @mode_list: list of modes to check
- * @min: minimum clock rate array
- * @max: maximum clock rate array
- * @n_ranges: number of clock ranges (size of arrays)
- *
- * LOCKING:
- * Caller must hold a lock protecting @mode_list.
- *
- * Some code may need to check a mode list against the clock limits of the
- * device in question. This function walks the mode list, testing to make
- * sure each mode falls within a given range (defined by @min and @max
- * arrays) and sets @mode->status as needed.
- */
-void drm_mode_validate_clocks(struct drm_device *dev,
- struct list_head *mode_list,
- int *min, int *max, int n_ranges)
-{
- struct drm_display_mode *mode;
- int i;
-
- list_for_each_entry(mode, mode_list, head) {
- bool good = false;
- for (i = 0; i < n_ranges; i++) {
- if (mode->clock >= min[i] && mode->clock <= max[i]) {
- good = true;
- break;
- }
- }
- if (!good)
- mode->status = MODE_CLOCK_RANGE;
- }
-}
-EXPORT_SYMBOL(drm_mode_validate_clocks);
-
-/**
- * drm_mode_prune_invalid - remove invalid modes from mode list
- * @dev: DRM device
- * @mode_list: list of modes to check
- * @verbose: be verbose about it
- *
- * LOCKING:
- * Caller must hold a lock protecting @mode_list.
- *
- * Once mode list generation is complete, a caller can use this routine to
- * remove invalid modes from a mode list. If any of the modes have a
- * status other than %MODE_OK, they are removed from @mode_list and freed.
- */
-void drm_mode_prune_invalid(struct drm_device *dev,
- struct list_head *mode_list, bool verbose)
-{
- struct drm_display_mode *mode, *t;
-
- list_for_each_entry_safe(mode, t, mode_list, head) {
- if (mode->status != MODE_OK) {
- list_del(&mode->head);
- if (verbose) {
- drm_mode_debug_printmodeline(mode);
- DRM_DEBUG_KMS("Not using %s mode %d\n",
- mode->name, mode->status);
- }
- drm_mode_destroy(dev, mode);
- }
- }
-}
-EXPORT_SYMBOL(drm_mode_prune_invalid);
-
-/**
- * drm_mode_compare - compare modes for favorability
- * @lh_a: list_head for first mode
- * @lh_b: list_head for second mode
- *
- * LOCKING:
- * None.
- *
- * Compare two modes, given by @lh_a and @lh_b, returning a value indicating
- * which is better.
- *
- * RETURNS:
- * Negative if @lh_a is better than @lh_b, zero if they're equivalent, or
- * positive if @lh_b is better than @lh_a.
- */
-static int drm_mode_compare(struct list_head *lh_a, struct list_head *lh_b)
-{
- struct drm_display_mode *a = list_entry(lh_a, struct drm_display_mode, head);
- struct drm_display_mode *b = list_entry(lh_b, struct drm_display_mode, head);
- int diff;
-
- diff = ((b->type & DRM_MODE_TYPE_PREFERRED) != 0) -
- ((a->type & DRM_MODE_TYPE_PREFERRED) != 0);
- if (diff)
- return diff;
- diff = b->hdisplay * b->vdisplay - a->hdisplay * a->vdisplay;
- if (diff)
- return diff;
- diff = b->clock - a->clock;
- return diff;
-}
-
-/* FIXME: why don't we have a generic list sort function? */
-/* list sort from Mark J Roberts (mjr@znex.org) */
-void list_sort(struct list_head *head,
- int (*cmp)(struct list_head *a, struct list_head *b))
-{
- struct list_head *p, *q, *e, *list, *tail, *oldhead;
- int insize, nmerges, psize, qsize, i;
-
- list = head->next;
- list_del(head);
- insize = 1;
- for (;;) {
- p = oldhead = list;
- list = tail = NULL;
- nmerges = 0;
-
- while (p) {
- nmerges++;
- q = p;
- psize = 0;
- for (i = 0; i < insize; i++) {
- psize++;
- q = q->next == oldhead ? NULL : q->next;
- if (!q)
- break;
- }
-
- qsize = insize;
- while (psize > 0 || (qsize > 0 && q)) {
- if (!psize) {
- e = q;
- q = q->next;
- qsize--;
- if (q == oldhead)
- q = NULL;
- } else if (!qsize || !q) {
- e = p;
- p = p->next;
- psize--;
- if (p == oldhead)
- p = NULL;
- } else if (cmp(p, q) <= 0) {
- e = p;
- p = p->next;
- psize--;
- if (p == oldhead)
- p = NULL;
- } else {
- e = q;
- q = q->next;
- qsize--;
- if (q == oldhead)
- q = NULL;
- }
- if (tail)
- tail->next = e;
- else
- list = e;
- e->prev = tail;
- tail = e;
- }
- p = q;
- }
-
- tail->next = list;
- list->prev = tail;
-
- if (nmerges <= 1)
- break;
-
- insize *= 2;
- }
-
- head->next = list;
- head->prev = list->prev;
- list->prev->next = head;
- list->prev = head;
-}
-
-/**
- * drm_mode_sort - sort mode list
- * @mode_list: list to sort
- *
- * LOCKING:
- * Caller must hold a lock protecting @mode_list.
- *
- * Sort @mode_list by favorability, putting good modes first.
- */
-void drm_mode_sort(struct list_head *mode_list)
-{
- list_sort(mode_list, drm_mode_compare);
-}
-EXPORT_SYMBOL(drm_mode_sort);
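drm_mode_sort() orders modes with the comparator above: preferred modes first, then larger resolutions, then higher clocks. The following standalone sketch applies the same ordering with qsort() on a hypothetical struct, purely to illustrate the comparator:

#include <stdio.h>
#include <stdlib.h>

struct fake_mode {
	const char *name;
	int preferred;
	int hdisplay, vdisplay;
	int clock;	/* kHz */
};

static int mode_cmp(const void *pa, const void *pb)
{
	const struct fake_mode *a = pa, *b = pb;
	int diff;

	diff = b->preferred - a->preferred;	/* preferred first */
	if (diff)
		return diff;
	diff = b->hdisplay * b->vdisplay - a->hdisplay * a->vdisplay;
	if (diff)
		return diff;			/* then larger area */
	return b->clock - a->clock;		/* then higher clock */
}

int main(void)
{
	struct fake_mode modes[] = {
		{ "1024x768",  0, 1024, 768,  65000 },
		{ "1920x1080", 1, 1920, 1080, 148500 },
		{ "1280x1024", 0, 1280, 1024, 108000 },
	};
	size_t i, n = sizeof(modes) / sizeof(modes[0]);

	qsort(modes, n, sizeof(modes[0]), mode_cmp);
	for (i = 0; i < n; i++)
		printf("%s\n", modes[i].name);	/* preferred mode prints first */
	return 0;
}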
-
-/**
- * drm_mode_connector_list_update - update the mode list for the connector
- * @connector: the connector to update
- *
- * LOCKING:
- * Caller must hold a lock protecting @mode_list.
- *
- * This moves the modes from the @connector probed_modes list
- * to the actual mode list. It compares each probed mode against the current
- * list and only adds modes that are not already present. Any modes left
- * unverified after this point will be removed by invalid-mode pruning.
- */
-void drm_mode_connector_list_update(struct drm_connector *connector)
-{
- struct drm_display_mode *mode;
- struct drm_display_mode *pmode, *pt;
- int found_it;
-
- list_for_each_entry_safe(pmode, pt, &connector->probed_modes,
- head) {
- found_it = 0;
- /* go through current modes checking for the new probed mode */
- list_for_each_entry(mode, &connector->modes, head) {
- if (drm_mode_equal(pmode, mode)) {
- found_it = 1;
- /* if equal delete the probed mode */
- mode->status = pmode->status;
- /* Merge type bits together */
- mode->type |= pmode->type;
- list_del(&pmode->head);
- drm_mode_destroy(connector->dev, pmode);
- break;
- }
- }
-
- if (!found_it) {
- list_move_tail(&pmode->head, &connector->modes);
- }
- }
-}
-EXPORT_SYMBOL(drm_mode_connector_list_update);
+++ /dev/null
-/* drm_pci.h -- PCI DMA memory management wrappers for DRM -*- linux-c -*- */
-/**
- * \file drm_pci.c
- * \brief Functions and ioctls to manage PCI memory
- *
- * \warning These interfaces aren't stable yet.
- *
- * \todo Implement the remaining ioctl's for the PCI pools.
- * \todo The wrappers here are so thin that they would be better off inlined.
- *
- * \author José Fonseca <jrfonseca@tungstengraphics.com>
- * \author Leif Delgass <ldelgass@retinalburn.net>
- */
-
-/*
- * Copyright 2003 José Fonseca.
- * Copyright 2003 Leif Delgass.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include "drmP.h"
-
-/**********************************************************************/
-/** \name PCI memory */
-/*@{*/
-
-/**
- * \brief Allocate a PCI consistent memory block, for DMA.
- */
-drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align,
- dma_addr_t maxaddr)
-{
- drm_dma_handle_t *dmah;
-#if 1
- unsigned long addr;
- size_t sz;
-#endif
-
- /* dma_alloc_coherent only guarantees alignment to the smallest
- * PAGE_SIZE order which is greater than or equal to the requested size.
- * Return NULL here for now to make sure nobody tries for larger alignment.
- */
- if (align > size)
- return NULL;
-
- if (pci_set_dma_mask(dev->pdev, maxaddr) != 0) {
- DRM_ERROR("Setting pci dma mask failed\n");
- return NULL;
- }
-
- dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
- if (!dmah)
- return NULL;
-
- dmah->size = size;
- dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, &dmah->busaddr, GFP_KERNEL | __GFP_COMP);
-
- if (dmah->vaddr == NULL) {
- kfree(dmah);
- return NULL;
- }
-
- memset(dmah->vaddr, 0, size);
-
- /* XXX - Is virt_to_page() legal for consistent mem? */
- /* Reserve */
- for (addr = (unsigned long)dmah->vaddr, sz = size;
- sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
- SetPageReserved(virt_to_page(addr));
- }
-
- return dmah;
-}
-
-EXPORT_SYMBOL(drm_pci_alloc);
-
-/**
- * \brief Free a PCI consistent memory block without freeing its descriptor.
- *
- * This function is for internal use in the Linux-specific DRM core code.
- */
-void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
-{
-#if 1
- unsigned long addr;
- size_t sz;
-#endif
-
- if (dmah->vaddr) {
- /* XXX - Is virt_to_page() legal for consistent mem? */
- /* Unreserve */
- for (addr = (unsigned long)dmah->vaddr, sz = dmah->size;
- sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
- ClearPageReserved(virt_to_page(addr));
- }
- dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
- dmah->busaddr);
- }
-}
-
-/**
- * \brief Free a PCI consistent memory block
- */
-void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
-{
- __drm_pci_free(dev, dmah);
- kfree(dmah);
-}
-
-EXPORT_SYMBOL(drm_pci_free);
-
-/*@}*/
+++ /dev/null
-/**
- * \file drm_proc.c
- * /proc support for DRM
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- *
- * \par Acknowledgements:
- * Matthew J Sottek <matthew.j.sottek@intel.com> sent in a patch to fix
- * the problem with the proc files not outputting all their information.
- */
-
-/*
- * Created: Mon Jan 11 09:48:47 1999 by faith@valinux.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/seq_file.h>
-#include "drmP.h"
-
-/***************************************************
- * Initialization, etc.
- **************************************************/
-
-/**
- * Proc file list.
- */
-static struct drm_info_list drm_proc_list[] = {
- {"name", drm_name_info, 0},
- {"vm", drm_vm_info, 0},
- {"clients", drm_clients_info, 0},
- {"queues", drm_queues_info, 0},
- {"bufs", drm_bufs_info, 0},
- {"gem_names", drm_gem_name_info, DRIVER_GEM},
- {"gem_objects", drm_gem_object_info, DRIVER_GEM},
-#if DRM_DEBUG_CODE
- {"vma", drm_vma_info, 0},
-#endif
-};
-#define DRM_PROC_ENTRIES ARRAY_SIZE(drm_proc_list)
-
-static int drm_proc_open(struct inode *inode, struct file *file)
-{
- struct drm_info_node* node = PDE(inode)->data;
-
- return single_open(file, node->info_ent->show, node);
-}
-
-static const struct file_operations drm_proc_fops = {
- .owner = THIS_MODULE,
- .open = drm_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-
-/**
- * Initialize a given set of proc files for a device
- *
- * \param files The array of files to create
- * \param count The number of files given
- * \param root DRI proc dir entry.
- * \param minor device minor number
- * \return Zero on success, non-zero on failure
- *
- * Create a given set of proc files represented by an array of
- * drm_info_list entries in the given root directory.
- */
-int drm_proc_create_files(struct drm_info_list *files, int count,
- struct proc_dir_entry *root, struct drm_minor *minor)
-{
- struct drm_device *dev = minor->dev;
- struct proc_dir_entry *ent;
- struct drm_info_node *tmp;
- int i, ret;
-
- for (i = 0; i < count; i++) {
- u32 features = files[i].driver_features;
-
- if (features != 0 &&
- (dev->driver->driver_features & features) != features)
- continue;
-
- tmp = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
- if (tmp == NULL) {
- ret = -1;
- goto fail;
- }
- tmp->minor = minor;
- tmp->info_ent = &files[i];
- list_add(&tmp->list, &minor->proc_nodes.list);
-
- ent = proc_create_data(files[i].name, S_IRUGO, root,
- &drm_proc_fops, tmp);
- if (!ent) {
- DRM_ERROR("Cannot create /proc/dri/%s/%s\n",
- name, files[i].name);
- list_del(&tmp->list);
- kfree(tmp);
- ret = -1;
- goto fail;
- }
-
- }
- return 0;
-
-fail:
- for (i = 0; i < count; i++)
- remove_proc_entry(drm_proc_list[i].name, minor->proc_root);
- return ret;
-}
-
-/**
- * Initialize the DRI proc filesystem for a device
- *
- * \param minor DRM minor to set up proc entries for
- * \param minor_id minor number
- * \param root DRI proc dir entry.
- * \return zero on success, or a negative number on failure.
- *
- * Create the DRI proc root entry "/proc/dri", the device proc root entry
- * "/proc/dri/%minor%/", and each entry in proc_list as
- * "/proc/dri/%minor%/%name%".
- */
-int drm_proc_init(struct drm_minor *minor, int minor_id,
- struct proc_dir_entry *root)
-{
- struct drm_device *dev = minor->dev;
- char name[64];
- int ret;
-
- INIT_LIST_HEAD(&minor->proc_nodes.list);
- sprintf(name, "%d", minor_id);
- minor->proc_root = proc_mkdir(name, root);
- if (!minor->proc_root) {
- DRM_ERROR("Cannot create /proc/dri/%s\n", name);
- return -1;
- }
-
- ret = drm_proc_create_files(drm_proc_list, DRM_PROC_ENTRIES,
- minor->proc_root, minor);
- if (ret) {
- remove_proc_entry(name, root);
- minor->proc_root = NULL;
- DRM_ERROR("Failed to create core drm proc files\n");
- return ret;
- }
-
- if (dev->driver->proc_init) {
- ret = dev->driver->proc_init(minor);
- if (ret) {
- DRM_ERROR("DRM: Driver failed to initialize "
- "/proc/dri.\n");
- return ret;
- }
- }
- return 0;
-}
-
-int drm_proc_remove_files(struct drm_info_list *files, int count,
- struct drm_minor *minor)
-{
- struct list_head *pos, *q;
- struct drm_info_node *tmp;
- int i;
-
- for (i = 0; i < count; i++) {
- list_for_each_safe(pos, q, &minor->proc_nodes.list) {
- tmp = list_entry(pos, struct drm_info_node, list);
- if (tmp->info_ent == &files[i]) {
- remove_proc_entry(files[i].name,
- minor->proc_root);
- list_del(pos);
- kfree(tmp);
- }
- }
- }
- return 0;
-}
-
-/**
- * Cleanup the proc filesystem resources.
- *
- * \param minor DRM minor whose proc entries should be removed.
- * \param root DRI proc dir entry.
- * \return always zero.
- *
- * Remove all proc entries created by proc_init().
- */
-int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root)
-{
- struct drm_device *dev = minor->dev;
- char name[64];
-
- if (!root || !minor->proc_root)
- return 0;
-
- if (dev->driver->proc_cleanup)
- dev->driver->proc_cleanup(minor);
-
- drm_proc_remove_files(drm_proc_list, DRM_PROC_ENTRIES, minor);
-
- sprintf(name, "%d", minor->index);
- remove_proc_entry(name, root);
-
- return 0;
-}
-
+++ /dev/null
-/**
- * \file drm_scatter.c
- * IOCTLs to manage scatter/gather memory
- *
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Mon Dec 18 23:20:54 2000 by gareth@valinux.com
- *
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/vmalloc.h>
-#include "drmP.h"
-
-#define DEBUG_SCATTER 0
-
-static inline void *drm_vmalloc_dma(unsigned long size)
-{
-#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
- return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL | _PAGE_NO_CACHE);
-#else
- return vmalloc_32(size);
-#endif
-}
-
-void drm_sg_cleanup(struct drm_sg_mem * entry)
-{
- struct page *page;
- int i;
-
- for (i = 0; i < entry->pages; i++) {
- page = entry->pagelist[i];
- if (page)
- ClearPageReserved(page);
- }
-
- vfree(entry->virtual);
-
- kfree(entry->busaddr);
- kfree(entry->pagelist);
- kfree(entry);
-}
-
-#ifdef _LP64
-# define ScatterHandle(x) (unsigned int)((x >> 32) + (x & ((1L << 32) - 1)))
-#else
-# define ScatterHandle(x) (unsigned int)(x)
-#endif
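On 64-bit builds, ScatterHandle() folds the vmalloc address into a 32-bit handle by adding its upper and lower halves. A standalone illustration of the folding (not meant to suggest the handle is reversible; the address below is hypothetical):

#include <stdio.h>

static unsigned int scatter_handle(unsigned long long x)
{
	/* add the upper and lower 32-bit halves, as the _LP64 macro does */
	return (unsigned int)((x >> 32) + (x & 0xffffffffULL));
}

int main(void)
{
	unsigned long long addr = 0xffffc90000123000ULL;	/* hypothetical vmalloc address */

	printf("handle = %08x\n", scatter_handle(addr));
	return 0;
}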
-
-int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
-{
- struct drm_sg_mem *entry;
- unsigned long pages, i, j;
-
- DRM_DEBUG("\n");
-
- if (!drm_core_check_feature(dev, DRIVER_SG))
- return -EINVAL;
-
- if (dev->sg)
- return -EINVAL;
-
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- return -ENOMEM;
-
- memset(entry, 0, sizeof(*entry));
- pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
- DRM_DEBUG("size=%ld pages=%ld\n", request->size, pages);
-
- entry->pages = pages;
- entry->pagelist = kmalloc(pages * sizeof(*entry->pagelist), GFP_KERNEL);
- if (!entry->pagelist) {
- kfree(entry);
- return -ENOMEM;
- }
-
- memset(entry->pagelist, 0, pages * sizeof(*entry->pagelist));
-
- entry->busaddr = kmalloc(pages * sizeof(*entry->busaddr), GFP_KERNEL);
- if (!entry->busaddr) {
- kfree(entry->pagelist);
- kfree(entry);
- return -ENOMEM;
- }
- memset((void *)entry->busaddr, 0, pages * sizeof(*entry->busaddr));
-
- entry->virtual = drm_vmalloc_dma(pages << PAGE_SHIFT);
- if (!entry->virtual) {
- kfree(entry->busaddr);
- kfree(entry->pagelist);
- kfree(entry);
- return -ENOMEM;
- }
-
- /* This also forces the mapping of COW pages, so our page list
- * will be valid. Please don't remove it...
- */
- memset(entry->virtual, 0, pages << PAGE_SHIFT);
-
- entry->handle = ScatterHandle((unsigned long)entry->virtual);
-
- DRM_DEBUG("handle = %08lx\n", entry->handle);
- DRM_DEBUG("virtual = %p\n", entry->virtual);
-
- for (i = (unsigned long)entry->virtual, j = 0; j < pages;
- i += PAGE_SIZE, j++) {
- entry->pagelist[j] = vmalloc_to_page((void *)i);
- if (!entry->pagelist[j])
- goto failed;
- SetPageReserved(entry->pagelist[j]);
- }
-
- request->handle = entry->handle;
-
- dev->sg = entry;
-
-#if DEBUG_SCATTER
- /* Verify that each page points to its virtual address, and vice
- * versa.
- */
- {
- int error = 0;
-
- for (i = 0; i < pages; i++) {
- unsigned long *tmp;
-
- tmp = page_address(entry->pagelist[i]);
- for (j = 0;
- j < PAGE_SIZE / sizeof(unsigned long);
- j++, tmp++) {
- *tmp = 0xcafebabe;
- }
- tmp = (unsigned long *)((u8 *) entry->virtual +
- (PAGE_SIZE * i));
- for (j = 0;
- j < PAGE_SIZE / sizeof(unsigned long);
- j++, tmp++) {
- if (*tmp != 0xcafebabe && error == 0) {
- error = 1;
- DRM_ERROR("Scatter allocation error, "
- "pagelist does not match "
- "virtual mapping\n");
- }
- }
- tmp = page_address(entry->pagelist[i]);
- for (j = 0;
- j < PAGE_SIZE / sizeof(unsigned long);
- j++, tmp++) {
- *tmp = 0;
- }
- }
- if (error == 0)
- DRM_ERROR("Scatter allocation matches pagelist\n");
- }
-#endif
-
- return 0;
-
- failed:
- drm_sg_cleanup(entry);
- return -ENOMEM;
-}
-EXPORT_SYMBOL(drm_sg_alloc);
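drm_sg_alloc() starts by rounding the requested byte count up to whole pages before sizing the page list and bus-address arrays. A tiny standalone sketch of that rounding (PAGE_SIZE is assumed to be 4096 purely for illustration):

#include <stdio.h>

#define PAGE_SIZE 4096UL

static unsigned long size_to_pages(unsigned long size)
{
	/* round up: any partial page still needs a whole page */
	return (size + PAGE_SIZE - 1) / PAGE_SIZE;
}

int main(void)
{
	printf("%lu bytes -> %lu pages\n", 10000UL, size_to_pages(10000UL));	/* 3 */
	printf("%lu bytes -> %lu pages\n", 8192UL, size_to_pages(8192UL));	/* 2 */
	return 0;
}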
-
-
-int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_scatter_gather *request = data;
-
- return drm_sg_alloc(dev, request);
-
-}
-
-int drm_sg_free(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_scatter_gather *request = data;
- struct drm_sg_mem *entry;
-
- if (!drm_core_check_feature(dev, DRIVER_SG))
- return -EINVAL;
-
- entry = dev->sg;
- dev->sg = NULL;
-
- if (!entry || entry->handle != request->handle)
- return -EINVAL;
-
- DRM_DEBUG("virtual = %p\n", entry->virtual);
-
- drm_sg_cleanup(entry);
-
- return 0;
-}
+++ /dev/null
-/**************************************************************************
- *
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck., ND., USA.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- *
- **************************************************************************/
-/*
- * Simple memory manager interface that keeps track of allocated regions on a
- * per "owner" basis. All regions associated with an "owner" can be released
- * with a single call, typically when the "owner" exits. The owner is any
- * "unsigned long" identifier, typically a pointer to a file-private
- * struct or a context identifier.
- *
- * Authors:
- * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- */
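The "owner" idea described above is just a key attached to every allocation so that everything belonging to one owner can be released in a single call. A userspace sketch of that bookkeeping (not the drm_sman API; the list handling below is a deliberately small, hypothetical stand-in):

#include <stdio.h>
#include <stdlib.h>

struct block {
	unsigned long owner;
	void *mem;
	struct block *next;
};

static struct block *blocks;	/* single global list, newest first */

static void *owner_alloc(unsigned long owner, size_t size)
{
	struct block *b = malloc(sizeof(*b));

	if (!b)
		return NULL;
	b->mem = malloc(size);
	if (!b->mem) {
		free(b);
		return NULL;
	}
	b->owner = owner;
	b->next = blocks;
	blocks = b;
	return b->mem;
}

/* release every block tagged with @owner in one call */
static void owner_cleanup(unsigned long owner)
{
	struct block **pp = &blocks;

	while (*pp) {
		struct block *b = *pp;

		if (b->owner == owner) {
			*pp = b->next;
			free(b->mem);
			free(b);
		} else {
			pp = &b->next;
		}
	}
}

int main(void)
{
	owner_alloc(0x1, 64);
	owner_alloc(0x2, 64);
	owner_alloc(0x1, 128);
	owner_cleanup(0x1);		/* frees both owner-1 blocks */
	printf("remaining owner: %#lx\n", blocks ? blocks->owner : 0UL);
	owner_cleanup(0x2);
	return 0;
}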
-
-#include "drm_sman.h"
-
-struct drm_owner_item {
- struct drm_hash_item owner_hash;
- struct list_head sman_list;
- struct list_head mem_blocks;
-};
-
-void drm_sman_takedown(struct drm_sman * sman)
-{
- drm_ht_remove(&sman->user_hash_tab);
- drm_ht_remove(&sman->owner_hash_tab);
- kfree(sman->mm);
-}
-
-EXPORT_SYMBOL(drm_sman_takedown);
-
-int
-drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
- unsigned int user_order, unsigned int owner_order)
-{
- int ret = 0;
-
- sman->mm = (struct drm_sman_mm *) kcalloc(num_managers,
- sizeof(*sman->mm),
- GFP_KERNEL);
- if (!sman->mm) {
- ret = -ENOMEM;
- goto out;
- }
- sman->num_managers = num_managers;
- INIT_LIST_HEAD(&sman->owner_items);
- ret = drm_ht_create(&sman->owner_hash_tab, owner_order);
- if (ret)
- goto out1;
- ret = drm_ht_create(&sman->user_hash_tab, user_order);
- if (!ret)
- goto out;
-
- drm_ht_remove(&sman->owner_hash_tab);
-out1:
- kfree(sman->mm);
-out:
- return ret;
-}
-
-EXPORT_SYMBOL(drm_sman_init);
-
-static void *drm_sman_mm_allocate(void *private, unsigned long size,
- unsigned alignment)
-{
- struct drm_mm *mm = (struct drm_mm *) private;
- struct drm_mm_node *tmp;
-
- tmp = drm_mm_search_free(mm, size, alignment, 1);
- if (!tmp) {
- return NULL;
- }
- tmp = drm_mm_get_block(tmp, size, alignment);
- return tmp;
-}
-
-static void drm_sman_mm_free(void *private, void *ref)
-{
- struct drm_mm_node *node = (struct drm_mm_node *) ref;
-
- drm_mm_put_block(node);
-}
-
-static void drm_sman_mm_destroy(void *private)
-{
- struct drm_mm *mm = (struct drm_mm *) private;
- drm_mm_takedown(mm);
- kfree(mm);
-}
-
-static unsigned long drm_sman_mm_offset(void *private, void *ref)
-{
- struct drm_mm_node *node = (struct drm_mm_node *) ref;
- return node->start;
-}
-
-int
-drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
- unsigned long start, unsigned long size)
-{
- struct drm_sman_mm *sman_mm;
- struct drm_mm *mm;
- int ret;
-
- BUG_ON(manager >= sman->num_managers);
-
- sman_mm = &sman->mm[manager];
- mm = kzalloc(sizeof(*mm), GFP_KERNEL);
- if (!mm) {
- return -ENOMEM;
- }
- sman_mm->private = mm;
- ret = drm_mm_init(mm, start, size);
-
- if (ret) {
- kfree(mm);
- return ret;
- }
-
- sman_mm->allocate = drm_sman_mm_allocate;
- sman_mm->free = drm_sman_mm_free;
- sman_mm->destroy = drm_sman_mm_destroy;
- sman_mm->offset = drm_sman_mm_offset;
-
- return 0;
-}
-
-EXPORT_SYMBOL(drm_sman_set_range);
-
-int
-drm_sman_set_manager(struct drm_sman * sman, unsigned int manager,
- struct drm_sman_mm * allocator)
-{
- BUG_ON(manager >= sman->num_managers);
- sman->mm[manager] = *allocator;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_sman_set_manager);
-
-static struct drm_owner_item *drm_sman_get_owner_item(struct drm_sman * sman,
- unsigned long owner)
-{
- int ret;
- struct drm_hash_item *owner_hash_item;
- struct drm_owner_item *owner_item;
-
- ret = drm_ht_find_item(&sman->owner_hash_tab, owner, &owner_hash_item);
- if (!ret) {
- return drm_hash_entry(owner_hash_item, struct drm_owner_item,
- owner_hash);
- }
-
- owner_item = kzalloc(sizeof(*owner_item), GFP_KERNEL);
- if (!owner_item)
- goto out;
-
- INIT_LIST_HEAD(&owner_item->mem_blocks);
- owner_item->owner_hash.key = owner;
- if (drm_ht_insert_item(&sman->owner_hash_tab, &owner_item->owner_hash))
- goto out1;
-
- list_add_tail(&owner_item->sman_list, &sman->owner_items);
- return owner_item;
-
-out1:
- kfree(owner_item);
-out:
- return NULL;
-}
-
-struct drm_memblock_item *drm_sman_alloc(struct drm_sman *sman, unsigned int manager,
- unsigned long size, unsigned alignment,
- unsigned long owner)
-{
- void *tmp;
- struct drm_sman_mm *sman_mm;
- struct drm_owner_item *owner_item;
- struct drm_memblock_item *memblock;
-
- BUG_ON(manager >= sman->num_managers);
-
- sman_mm = &sman->mm[manager];
- tmp = sman_mm->allocate(sman_mm->private, size, alignment);
-
- if (!tmp) {
- return NULL;
- }
-
- memblock = kzalloc(sizeof(*memblock), GFP_KERNEL);
-
- if (!memblock)
- goto out;
-
- memblock->mm_info = tmp;
- memblock->mm = sman_mm;
- memblock->sman = sman;
-
- if (drm_ht_just_insert_please
- (&sman->user_hash_tab, &memblock->user_hash,
- (unsigned long)memblock, 32, 0, 0))
- goto out1;
-
- owner_item = drm_sman_get_owner_item(sman, owner);
- if (!owner_item)
- goto out2;
-
- list_add_tail(&memblock->owner_list, &owner_item->mem_blocks);
-
- return memblock;
-
-out2:
- drm_ht_remove_item(&sman->user_hash_tab, &memblock->user_hash);
-out1:
- kfree(memblock);
-out:
- sman_mm->free(sman_mm->private, tmp);
-
- return NULL;
-}
-
-EXPORT_SYMBOL(drm_sman_alloc);
-
-static void drm_sman_free(struct drm_memblock_item *item)
-{
- struct drm_sman *sman = item->sman;
-
- list_del(&item->owner_list);
- drm_ht_remove_item(&sman->user_hash_tab, &item->user_hash);
- item->mm->free(item->mm->private, item->mm_info);
- kfree(item);
-}
-
-int drm_sman_free_key(struct drm_sman *sman, unsigned int key)
-{
- struct drm_hash_item *hash_item;
- struct drm_memblock_item *memblock_item;
-
- if (drm_ht_find_item(&sman->user_hash_tab, key, &hash_item))
- return -EINVAL;
-
- memblock_item = drm_hash_entry(hash_item, struct drm_memblock_item,
- user_hash);
- drm_sman_free(memblock_item);
- return 0;
-}
-
-EXPORT_SYMBOL(drm_sman_free_key);
-
-static void drm_sman_remove_owner(struct drm_sman *sman,
- struct drm_owner_item *owner_item)
-{
- list_del(&owner_item->sman_list);
- drm_ht_remove_item(&sman->owner_hash_tab, &owner_item->owner_hash);
- kfree(owner_item);
-}
-
-int drm_sman_owner_clean(struct drm_sman *sman, unsigned long owner)
-{
-
- struct drm_hash_item *hash_item;
- struct drm_owner_item *owner_item;
-
- if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) {
- return -1;
- }
-
- owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash);
- if (owner_item->mem_blocks.next == &owner_item->mem_blocks) {
- drm_sman_remove_owner(sman, owner_item);
- return -1;
- }
-
- return 0;
-}
-
-EXPORT_SYMBOL(drm_sman_owner_clean);
-
-static void drm_sman_do_owner_cleanup(struct drm_sman *sman,
- struct drm_owner_item *owner_item)
-{
- struct drm_memblock_item *entry, *next;
-
- list_for_each_entry_safe(entry, next, &owner_item->mem_blocks,
- owner_list) {
- drm_sman_free(entry);
- }
- drm_sman_remove_owner(sman, owner_item);
-}
-
-void drm_sman_owner_cleanup(struct drm_sman *sman, unsigned long owner)
-{
-
- struct drm_hash_item *hash_item;
- struct drm_owner_item *owner_item;
-
- if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) {
-
- return;
- }
-
- owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash);
- drm_sman_do_owner_cleanup(sman, owner_item);
-}
-
-EXPORT_SYMBOL(drm_sman_owner_cleanup);
-
-void drm_sman_cleanup(struct drm_sman *sman)
-{
- struct drm_owner_item *entry, *next;
- unsigned int i;
- struct drm_sman_mm *sman_mm;
-
- list_for_each_entry_safe(entry, next, &sman->owner_items, sman_list) {
- drm_sman_do_owner_cleanup(sman, entry);
- }
- if (sman->mm) {
- for (i = 0; i < sman->num_managers; ++i) {
- sman_mm = &sman->mm[i];
- if (sman_mm->private) {
- sman_mm->destroy(sman_mm->private);
- sman_mm->private = NULL;
- }
- }
- }
-}
-
-EXPORT_SYMBOL(drm_sman_cleanup);
+++ /dev/null
-/**
- * \file drm_stub.h
- * Stub support
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- */
-
-/*
- * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
- *
- * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include "drmP.h"
-#include "drm_core.h"
-
-unsigned int drm_debug = 0; /* bitmask of debug categories to enable */
-EXPORT_SYMBOL(drm_debug);
-
-MODULE_AUTHOR(CORE_AUTHOR);
-MODULE_DESCRIPTION(CORE_DESC);
-MODULE_LICENSE("GPL and additional rights");
-MODULE_PARM_DESC(debug, "Enable debug output");
-
-module_param_named(debug, drm_debug, int, 0600);
-
-struct idr drm_minors_idr;
-
-struct class *drm_class;
-struct proc_dir_entry *drm_proc_root;
-struct dentry *drm_debugfs_root;
-void drm_ut_debug_printk(unsigned int request_level,
- const char *prefix,
- const char *function_name,
- const char *format, ...)
-{
- va_list args;
-
- if (drm_debug & request_level) {
- if (function_name)
- printk(KERN_DEBUG "[%s:%s], ", prefix, function_name);
- va_start(args, format);
- vprintk(format, args);
- va_end(args);
- }
-}
-EXPORT_SYMBOL(drm_ut_debug_printk);
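drm_ut_debug_printk() only emits a message when the requested category bit is set in the global drm_debug mask. A standalone sketch of that gating pattern (names and categories below are hypothetical):

#include <stdarg.h>
#include <stdio.h>

#define DBG_CORE 0x01
#define DBG_KMS  0x02

static unsigned int debug_mask = DBG_KMS;	/* e.g. set from a module parameter */

static void debug_printk(unsigned int category, const char *func,
			 const char *fmt, ...)
{
	va_list args;

	if (!(debug_mask & category))
		return;			/* category not enabled: stay silent */
	printf("[drm:%s] ", func);
	va_start(args, fmt);
	vprintf(fmt, args);
	va_end(args);
}

int main(void)
{
	debug_printk(DBG_CORE, "probe", "suppressed\n");	/* bit not set */
	debug_printk(DBG_KMS, "modeset", "emitted: %d\n", 42);
	return 0;
}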
-static int drm_minor_get_id(struct drm_device *dev, int type)
-{
- int new_id;
- int ret;
- int base = 0, limit = 63;
-
- if (type == DRM_MINOR_CONTROL) {
- base += 64;
- limit = base + 127;
- } else if (type == DRM_MINOR_RENDER) {
- base += 128;
- limit = base + 255;
- }
-
-again:
- if (idr_pre_get(&drm_minors_idr, GFP_KERNEL) == 0) {
- DRM_ERROR("Out of memory expanding drawable idr\n");
- return -ENOMEM;
- }
- mutex_lock(&dev->struct_mutex);
- ret = idr_get_new_above(&drm_minors_idr, NULL,
- base, &new_id);
- mutex_unlock(&dev->struct_mutex);
- if (ret == -EAGAIN) {
- goto again;
- } else if (ret) {
- return ret;
- }
-
- if (new_id >= limit) {
- idr_remove(&drm_minors_idr, new_id);
- return -EINVAL;
- }
- return new_id;
-}
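drm_minor_get_id() carves the minor-number space into ranges: legacy minors in 0-63, control minors in 64-191, render minors in 128-383. A standalone sketch of that range selection (the enum values are hypothetical stand-ins for the DRM_MINOR_* constants):

#include <stdio.h>

enum minor_type { MINOR_LEGACY, MINOR_CONTROL, MINOR_RENDER };

static void minor_range(enum minor_type type, int *base, int *limit)
{
	*base = 0;
	*limit = 63;
	if (type == MINOR_CONTROL) {
		*base += 64;
		*limit = *base + 127;
	} else if (type == MINOR_RENDER) {
		*base += 128;
		*limit = *base + 255;
	}
}

int main(void)
{
	int base, limit;

	minor_range(MINOR_CONTROL, &base, &limit);
	printf("control minors: %d..%d\n", base, limit);	/* 64..191 */
	return 0;
}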
-
-struct drm_master *drm_master_create(struct drm_minor *minor)
-{
- struct drm_master *master;
-
- master = kzalloc(sizeof(*master), GFP_KERNEL);
- if (!master)
- return NULL;
-
- kref_init(&master->refcount);
- spin_lock_init(&master->lock.spinlock);
- init_waitqueue_head(&master->lock.lock_queue);
- drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER);
- INIT_LIST_HEAD(&master->magicfree);
- master->minor = minor;
-
- list_add_tail(&master->head, &minor->master_list);
-
- return master;
-}
-
-struct drm_master *drm_master_get(struct drm_master *master)
-{
- kref_get(&master->refcount);
- return master;
-}
-EXPORT_SYMBOL(drm_master_get);
-
-static void drm_master_destroy(struct kref *kref)
-{
- struct drm_master *master = container_of(kref, struct drm_master, refcount);
- struct drm_magic_entry *pt, *next;
- struct drm_device *dev = master->minor->dev;
- struct drm_map_list *r_list, *list_temp;
-
- list_del(&master->head);
-
- if (dev->driver->master_destroy)
- dev->driver->master_destroy(dev, master);
-
- list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
- if (r_list->master == master) {
- drm_rmmap_locked(dev, r_list->map);
- r_list = NULL;
- }
- }
-
- if (master->unique) {
- kfree(master->unique);
- master->unique = NULL;
- master->unique_len = 0;
- }
-
- list_for_each_entry_safe(pt, next, &master->magicfree, head) {
- list_del(&pt->head);
- drm_ht_remove_item(&master->magiclist, &pt->hash_item);
- kfree(pt);
- }
-
- drm_ht_remove(&master->magiclist);
-
- kfree(master);
-}
-
-void drm_master_put(struct drm_master **master)
-{
- kref_put(&(*master)->refcount, drm_master_destroy);
- *master = NULL;
-}
-EXPORT_SYMBOL(drm_master_put);
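drm_master_get()/drm_master_put() follow the usual kref pattern: get bumps the refcount, put drops it and runs the destructor when it hits zero, and put also clears the caller's pointer. A userspace sketch of that pattern (hypothetical struct and functions, not the kernel kref API):

#include <stdio.h>
#include <stdlib.h>

struct object {
	int refcount;
};

static struct object *object_get(struct object *obj)
{
	obj->refcount++;
	return obj;
}

static void object_put(struct object **objp)
{
	struct object *obj = *objp;

	if (--obj->refcount == 0) {
		printf("last reference dropped, destroying\n");
		free(obj);
	}
	*objp = NULL;	/* mirror drm_master_put() clearing the caller's pointer */
}

int main(void)
{
	struct object *a = calloc(1, sizeof(*a));
	struct object *b;

	a->refcount = 1;	/* creation holds the first reference */
	b = object_get(a);	/* second user */
	object_put(&a);		/* still one reference left */
	object_put(&b);		/* destructor runs here */
	return 0;
}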
-
-int drm_setmaster_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- int ret = 0;
-
- if (file_priv->is_master)
- return 0;
-
- if (file_priv->minor->master && file_priv->minor->master != file_priv->master)
- return -EINVAL;
-
- if (!file_priv->master)
- return -EINVAL;
-
- if (!file_priv->minor->master &&
- file_priv->minor->master != file_priv->master) {
- mutex_lock(&dev->struct_mutex);
- file_priv->minor->master = drm_master_get(file_priv->master);
- file_priv->is_master = 1;
- if (dev->driver->master_set) {
- ret = dev->driver->master_set(dev, file_priv, false);
- if (unlikely(ret != 0)) {
- file_priv->is_master = 0;
- drm_master_put(&file_priv->minor->master);
- }
- }
- mutex_unlock(&dev->struct_mutex);
- }
-
- return 0;
-}
-
-int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- if (!file_priv->is_master)
- return -EINVAL;
-
- if (!file_priv->minor->master)
- return -EINVAL;
-
- mutex_lock(&dev->struct_mutex);
- if (dev->driver->master_drop)
- dev->driver->master_drop(dev, file_priv, false);
- drm_master_put(&file_priv->minor->master);
- file_priv->is_master = 0;
- mutex_unlock(&dev->struct_mutex);
- return 0;
-}
-
-static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
- const struct pci_device_id *ent,
- struct drm_driver *driver)
-{
- int retcode;
-
- INIT_LIST_HEAD(&dev->filelist);
- INIT_LIST_HEAD(&dev->ctxlist);
- INIT_LIST_HEAD(&dev->vmalist);
- INIT_LIST_HEAD(&dev->maplist);
- INIT_LIST_HEAD(&dev->vblank_event_list);
-
- spin_lock_init(&dev->count_lock);
- spin_lock_init(&dev->drw_lock);
- spin_lock_init(&dev->event_lock);
- init_timer(&dev->timer);
- mutex_init(&dev->struct_mutex);
- mutex_init(&dev->ctxlist_mutex);
-
- idr_init(&dev->drw_idr);
-
- dev->pdev = pdev;
- dev->pci_device = pdev->device;
- dev->pci_vendor = pdev->vendor;
-
-#ifdef __alpha__
- dev->hose = pdev->sysdata;
-#endif
-
- if (drm_ht_create(&dev->map_hash, 12)) {
- return -ENOMEM;
- }
-
- /* the DRM has 6 basic counters */
- dev->counters = 6;
- dev->types[0] = _DRM_STAT_LOCK;
- dev->types[1] = _DRM_STAT_OPENS;
- dev->types[2] = _DRM_STAT_CLOSES;
- dev->types[3] = _DRM_STAT_IOCTLS;
- dev->types[4] = _DRM_STAT_LOCKS;
- dev->types[5] = _DRM_STAT_UNLOCKS;
-
- dev->driver = driver;
-
- if (drm_core_has_AGP(dev)) {
- if (drm_device_is_agp(dev))
- dev->agp = drm_agp_init(dev);
- if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP)
- && (dev->agp == NULL)) {
- DRM_ERROR("Cannot initialize the agpgart module.\n");
- retcode = -EINVAL;
- goto error_out_unreg;
- }
- if (drm_core_has_MTRR(dev)) {
- if (dev->agp)
- dev->agp->agp_mtrr =
- mtrr_add(dev->agp->agp_info.aper_base,
- dev->agp->agp_info.aper_size *
- 1024 * 1024, MTRR_TYPE_WRCOMB, 1);
- }
- }
-
-
- retcode = drm_ctxbitmap_init(dev);
- if (retcode) {
- DRM_ERROR("Cannot allocate memory for context bitmap.\n");
- goto error_out_unreg;
- }
-
- if (driver->driver_features & DRIVER_GEM) {
- retcode = drm_gem_init(dev);
- if (retcode) {
- DRM_ERROR("Cannot initialize graphics execution "
- "manager (GEM)\n");
- goto error_out_unreg;
- }
- }
-
- return 0;
-
- error_out_unreg:
- drm_lastclose(dev);
- return retcode;
-}
-
-
-/**
- * Get a secondary minor number.
- *
- * \param dev device data structure
- * \param minor structure to hold the assigned minor
- * \param type minor type to allocate
- * \return zero on success, or a negative number on failure.
- *
- * Search for an empty entry, initialize it to the given parameters, and
- * create the proc entry via drm_proc_init(). This routine assigns
- * minor numbers to secondary heads of multi-headed cards.
- */
-static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
-{
- struct drm_minor *new_minor;
- int ret;
- int minor_id;
-
- DRM_DEBUG("\n");
-
- minor_id = drm_minor_get_id(dev, type);
- if (minor_id < 0)
- return minor_id;
-
- new_minor = kzalloc(sizeof(struct drm_minor), GFP_KERNEL);
- if (!new_minor) {
- ret = -ENOMEM;
- goto err_idr;
- }
-
- new_minor->type = type;
- new_minor->device = MKDEV(DRM_MAJOR, minor_id);
- new_minor->dev = dev;
- new_minor->index = minor_id;
- INIT_LIST_HEAD(&new_minor->master_list);
-
- idr_replace(&drm_minors_idr, new_minor, minor_id);
-
- if (type == DRM_MINOR_LEGACY) {
- ret = drm_proc_init(new_minor, minor_id, drm_proc_root);
- if (ret) {
- DRM_ERROR("DRM: Failed to initialize /proc/dri.\n");
- goto err_mem;
- }
- } else
- new_minor->proc_root = NULL;
-
-#if defined(CONFIG_DEBUG_FS)
- ret = drm_debugfs_init(new_minor, minor_id, drm_debugfs_root);
- if (ret) {
- DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
- goto err_g2;
- }
-#endif
-
- ret = drm_sysfs_device_add(new_minor);
- if (ret) {
- printk(KERN_ERR
- "DRM: Error sysfs_device_add.\n");
- goto err_g2;
- }
- *minor = new_minor;
-
- DRM_DEBUG("new minor assigned %d\n", minor_id);
- return 0;
-
-
-err_g2:
- if (new_minor->type == DRM_MINOR_LEGACY)
- drm_proc_cleanup(new_minor, drm_proc_root);
-err_mem:
- kfree(new_minor);
-err_idr:
- idr_remove(&drm_minors_idr, minor_id);
- *minor = NULL;
- return ret;
-}
-
-/**
- * Register.
- *
- * \param pdev - PCI device structure
- * \param ent entry from the PCI ID table with device type flags
- * \return zero on success or a negative number on failure.
- *
- * Attempt to get inter-module "drm" information. If we are first,
- * register the character device and inter-module information.
- * Try to register; if we fail, back out the previous work.
- */
-int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
- struct drm_driver *driver)
-{
- struct drm_device *dev;
- int ret;
-
- DRM_DEBUG("\n");
-
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev)
- return -ENOMEM;
-
- ret = pci_enable_device(pdev);
- if (ret)
- goto err_g1;
-
- pci_set_master(pdev);
- if ((ret = drm_fill_in_dev(dev, pdev, ent, driver))) {
- printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
- goto err_g2;
- }
-
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- pci_set_drvdata(pdev, dev);
- ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
- if (ret)
- goto err_g2;
- }
-
- if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY)))
- goto err_g3;
-
- if (dev->driver->load) {
- ret = dev->driver->load(dev, ent->driver_data);
- if (ret)
- goto err_g4;
- }
-
- /* setup the grouping for the legacy output */
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = drm_mode_group_init_legacy_group(dev, &dev->primary->mode_group);
- if (ret)
- goto err_g4;
- }
-
- list_add_tail(&dev->driver_item, &driver->device_list);
-
- DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
- driver->name, driver->major, driver->minor, driver->patchlevel,
- driver->date, pci_name(pdev), dev->primary->index);
-
- return 0;
-
-err_g4:
- drm_put_minor(&dev->primary);
-err_g3:
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- drm_put_minor(&dev->control);
-err_g2:
- pci_disable_device(pdev);
-err_g1:
- kfree(dev);
- return ret;
-}
-EXPORT_SYMBOL(drm_get_dev);
-
-/**
- * Put a secondary minor number.
- *
- * \param sec_minor - structure to be released
- * \return always zero
- *
- * Cleans up the proc resources. Not legal for this to be the
- * last minor released.
- *
- */
-int drm_put_minor(struct drm_minor **minor_p)
-{
- struct drm_minor *minor = *minor_p;
-
- DRM_DEBUG("release secondary minor %d\n", minor->index);
-
- if (minor->type == DRM_MINOR_LEGACY)
- drm_proc_cleanup(minor, drm_proc_root);
-#if defined(CONFIG_DEBUG_FS)
- drm_debugfs_cleanup(minor);
-#endif
-
- drm_sysfs_device_remove(minor);
-
- idr_remove(&drm_minors_idr, minor->index);
-
- kfree(minor);
- *minor_p = NULL;
- return 0;
-}
-
-/**
- * Called via drm_exit() at module unload time or when pci device is
- * unplugged.
- *
- * Cleans up the DRM device, calling drm_lastclose().
- *
- * \sa drm_init
- */
-void drm_put_dev(struct drm_device *dev)
-{
- struct drm_driver *driver;
- struct drm_map_list *r_list, *list_temp;
-
- DRM_DEBUG("\n");
-
- if (!dev) {
- DRM_ERROR("cleanup called no dev\n");
- return;
- }
- driver = dev->driver;
-
- drm_vblank_cleanup(dev);
-
- drm_lastclose(dev);
-
- if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) &&
- dev->agp && dev->agp->agp_mtrr >= 0) {
- int retval;
- retval = mtrr_del(dev->agp->agp_mtrr,
- dev->agp->agp_info.aper_base,
- dev->agp->agp_info.aper_size * 1024 * 1024);
- DRM_DEBUG("mtrr_del=%d\n", retval);
- }
-
- if (dev->driver->unload)
- dev->driver->unload(dev);
-
- if (drm_core_has_AGP(dev) && dev->agp) {
- kfree(dev->agp);
- dev->agp = NULL;
- }
-
- list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
- drm_rmmap(dev, r_list->map);
- drm_ht_remove(&dev->map_hash);
-
- drm_ctxbitmap_cleanup(dev);
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- drm_put_minor(&dev->control);
-
- if (driver->driver_features & DRIVER_GEM)
- drm_gem_destroy(dev);
-
- drm_put_minor(&dev->primary);
-
- if (dev->devname) {
- kfree(dev->devname);
- dev->devname = NULL;
- }
- kfree(dev);
-}
-EXPORT_SYMBOL(drm_put_dev);
+++ /dev/null
-
-/*
- * drm_sysfs.c - Modifications to drm_sysfs_class.c to support
- * extra sysfs attribute from DRM. Normal drm_sysfs_class
- * does not allow adding attributes.
- *
- * Copyright (c) 2004 Jon Smirl <jonsmirl@gmail.com>
- * Copyright (c) 2003-2004 Greg Kroah-Hartman <greg@kroah.com>
- * Copyright (c) 2003-2004 IBM Corp.
- *
- * This file is released under the GPLv2
- *
- */
-
-#include <linux/device.h>
-#include <linux/kdev_t.h>
-#include <linux/err.h>
-
-#include "drm_sysfs.h"
-#include "drm_core.h"
-#include "drmP.h"
-
-#define to_drm_minor(d) container_of(d, struct drm_minor, kdev)
-#define to_drm_connector(d) container_of(d, struct drm_connector, kdev)
-
-static struct device_type drm_sysfs_device_minor = {
- .name = "drm_minor"
-};
-
-/**
- * drm_class_suspend - DRM class suspend hook
- * @dev: Linux device to suspend
- * @state: power state to enter
- *
- * Just figures out what the actual struct drm_device associated with
- * @dev is and calls its suspend hook, if present.
- */
-static int drm_class_suspend(struct device *dev, pm_message_t state)
-{
- if (dev->type == &drm_sysfs_device_minor) {
- struct drm_minor *drm_minor = to_drm_minor(dev);
- struct drm_device *drm_dev = drm_minor->dev;
-
- if (drm_minor->type == DRM_MINOR_LEGACY &&
- !drm_core_check_feature(drm_dev, DRIVER_MODESET) &&
- drm_dev->driver->suspend)
- return drm_dev->driver->suspend(drm_dev, state);
- }
- return 0;
-}
-
-/**
- * drm_class_resume - DRM class resume hook
- * @dev: Linux device to resume
- *
- * Just figures out what the actual struct drm_device associated with
- * @dev is and calls its resume hook, if present.
- */
-static int drm_class_resume(struct device *dev)
-{
- if (dev->type == &drm_sysfs_device_minor) {
- struct drm_minor *drm_minor = to_drm_minor(dev);
- struct drm_device *drm_dev = drm_minor->dev;
-
- if (drm_minor->type == DRM_MINOR_LEGACY &&
- !drm_core_check_feature(drm_dev, DRIVER_MODESET) &&
- drm_dev->driver->resume)
- return drm_dev->driver->resume(drm_dev);
- }
- return 0;
-}
-
-/* Display the version of drm_core. This doesn't work correctly in the current design */
-static ssize_t version_show(struct class *dev, char *buf)
-{
- return sprintf(buf, "%s %d.%d.%d %s\n", CORE_NAME, CORE_MAJOR,
- CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
-}
-
-static char *drm_devnode(struct device *dev, mode_t *mode)
-{
- return kasprintf(GFP_KERNEL, "dri/%s", dev_name(dev));
-}
-
-static CLASS_ATTR(version, S_IRUGO, version_show, NULL);
-
-/**
- * drm_sysfs_create - create a struct drm_sysfs_class structure
- * @owner: pointer to the module that is to "own" this struct drm_sysfs_class
- * @name: pointer to a string for the name of this class.
- *
- * This is used to create DRM class pointer that can then be used
- * in calls to drm_sysfs_device_add().
- *
- * Note, the pointer created here is to be destroyed when finished by making a
- * call to drm_sysfs_destroy().
- */
-struct class *drm_sysfs_create(struct module *owner, char *name)
-{
- struct class *class;
- int err;
-
- class = class_create(owner, name);
- if (IS_ERR(class)) {
- err = PTR_ERR(class);
- goto err_out;
- }
-
- class->suspend = drm_class_suspend;
- class->resume = drm_class_resume;
-
- err = class_create_file(class, &class_attr_version);
- if (err)
- goto err_out_class;
-
- class->devnode = drm_devnode;
-
- return class;
-
-err_out_class:
- class_destroy(class);
-err_out:
- return ERR_PTR(err);
-}
-
-/**
- * drm_sysfs_destroy - destroys DRM class
- *
- * Destroy the DRM device class.
- */
-void drm_sysfs_destroy(void)
-{
- if ((drm_class == NULL) || (IS_ERR(drm_class)))
- return;
- class_remove_file(drm_class, &class_attr_version);
- class_destroy(drm_class);
-}
-
-/**
- * drm_sysfs_device_release - do nothing
- * @dev: Linux device
- *
- * Normally, this would free the DRM device associated with @dev, along
- * with cleaning up any other stuff. But we do that in the DRM core, so
- * this function can just return and hope that the core does its job.
- */
-static void drm_sysfs_device_release(struct device *dev)
-{
- memset(dev, 0, sizeof(struct device));
- return;
-}
-
-/*
- * Connector properties
- */
-static ssize_t status_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_connector *connector = to_drm_connector(device);
- enum drm_connector_status status;
-
- status = connector->funcs->detect(connector);
- return snprintf(buf, PAGE_SIZE, "%s\n",
- drm_get_connector_status_name(status));
-}
-
-static ssize_t dpms_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_connector *connector = to_drm_connector(device);
- struct drm_device *dev = connector->dev;
- uint64_t dpms_status;
- int ret;
-
- ret = drm_connector_property_get_value(connector,
- dev->mode_config.dpms_property,
- &dpms_status);
- if (ret)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%s\n",
- drm_get_dpms_name((int)dpms_status));
-}
-
-static ssize_t enabled_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_connector *connector = to_drm_connector(device);
-
- return snprintf(buf, PAGE_SIZE, "%s\n", connector->encoder ? "enabled" :
- "disabled");
-}
-
-static ssize_t edid_show(struct kobject *kobj, struct bin_attribute *attr,
- char *buf, loff_t off, size_t count)
-{
- struct device *connector_dev = container_of(kobj, struct device, kobj);
- struct drm_connector *connector = to_drm_connector(connector_dev);
- unsigned char *edid;
- size_t size;
-
- if (!connector->edid_blob_ptr)
- return 0;
-
- edid = connector->edid_blob_ptr->data;
- size = connector->edid_blob_ptr->length;
- if (!edid)
- return 0;
-
- if (off >= size)
- return 0;
-
- if (off + count > size)
- count = size - off;
- memcpy(buf, edid + off, count);
-
- return count;
-}
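edid_show() implements the usual clamping for a partial sysfs binary read: an offset past the end returns 0, and a read crossing the end is truncated. A standalone sketch of that clamping (the buffer contents are hypothetical):

#include <stdio.h>
#include <string.h>

static size_t blob_read(const unsigned char *blob, size_t size,
			unsigned char *buf, size_t off, size_t count)
{
	if (off >= size)
		return 0;		/* nothing left to read */
	if (off + count > size)
		count = size - off;	/* truncate at the end of the blob */
	memcpy(buf, blob + off, count);
	return count;
}

int main(void)
{
	unsigned char edid[128] = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
	unsigned char buf[64];

	/* a 64-byte read at offset 96 only returns the last 32 bytes */
	printf("read %zu bytes\n", blob_read(edid, sizeof(edid), buf, 96, 64));
	return 0;
}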
-
-static ssize_t modes_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_connector *connector = to_drm_connector(device);
- struct drm_display_mode *mode;
- int written = 0;
-
- list_for_each_entry(mode, &connector->modes, head) {
- written += snprintf(buf + written, PAGE_SIZE - written, "%s\n",
- mode->name);
- }
-
- return written;
-}
-
-static ssize_t subconnector_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_connector *connector = to_drm_connector(device);
- struct drm_device *dev = connector->dev;
- struct drm_property *prop = NULL;
- uint64_t subconnector;
- int is_tv = 0;
- int ret;
-
- switch (connector->connector_type) {
- case DRM_MODE_CONNECTOR_DVII:
- prop = dev->mode_config.dvi_i_subconnector_property;
- break;
- case DRM_MODE_CONNECTOR_Composite:
- case DRM_MODE_CONNECTOR_SVIDEO:
- case DRM_MODE_CONNECTOR_Component:
- case DRM_MODE_CONNECTOR_TV:
- prop = dev->mode_config.tv_subconnector_property;
- is_tv = 1;
- break;
- default:
- DRM_ERROR("Wrong connector type for this property\n");
- return 0;
- }
-
- if (!prop) {
- DRM_ERROR("Unable to find subconnector property\n");
- return 0;
- }
-
- ret = drm_connector_property_get_value(connector, prop, &subconnector);
- if (ret)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%s", is_tv ?
- drm_get_tv_subconnector_name((int)subconnector) :
- drm_get_dvi_i_subconnector_name((int)subconnector));
-}
-
-static ssize_t select_subconnector_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_connector *connector = to_drm_connector(device);
- struct drm_device *dev = connector->dev;
- struct drm_property *prop = NULL;
- uint64_t subconnector;
- int is_tv = 0;
- int ret;
-
- switch (connector->connector_type) {
- case DRM_MODE_CONNECTOR_DVII:
- prop = dev->mode_config.dvi_i_select_subconnector_property;
- break;
- case DRM_MODE_CONNECTOR_Composite:
- case DRM_MODE_CONNECTOR_SVIDEO:
- case DRM_MODE_CONNECTOR_Component:
- case DRM_MODE_CONNECTOR_TV:
- prop = dev->mode_config.tv_select_subconnector_property;
- is_tv = 1;
- break;
- default:
- DRM_ERROR("Wrong connector type for this property\n");
- return 0;
- }
-
- if (!prop) {
- DRM_ERROR("Unable to find select subconnector property\n");
- return 0;
- }
-
- ret = drm_connector_property_get_value(connector, prop, &subconnector);
- if (ret)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%s", is_tv ?
- drm_get_tv_select_name((int)subconnector) :
- drm_get_dvi_i_select_name((int)subconnector));
-}
-
-static struct device_attribute connector_attrs[] = {
- __ATTR_RO(status),
- __ATTR_RO(enabled),
- __ATTR_RO(dpms),
- __ATTR_RO(modes),
-};
-
-/* These attributes are for both DVI-I connectors and all types of tv-out. */
-static struct device_attribute connector_attrs_opt1[] = {
- __ATTR_RO(subconnector),
- __ATTR_RO(select_subconnector),
-};
-
-static struct bin_attribute edid_attr = {
- .attr.name = "edid",
- .attr.mode = 0444,
- .size = 128,
- .read = edid_show,
-};
-
-/**
- * drm_sysfs_connector_add - add a connector to sysfs
- * @connector: connector to add
- *
- * Create a connector device in sysfs, along with its associated connector
- * properties (so far, connection status, dpms, mode list & edid) and
- * generate a hotplug event so userspace knows there's a new connector
- * available.
- *
- * Note:
- * This routine should only be called *once* for each registered connector.
- * A second call for an already registered connector will trigger the BUG_ON
- * below.
- */
-int drm_sysfs_connector_add(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- int ret = 0, i, j;
-
- /* We shouldn't get called more than once for the same connector */
- BUG_ON(device_is_registered(&connector->kdev));
-
- connector->kdev.parent = &dev->primary->kdev;
- connector->kdev.class = drm_class;
- connector->kdev.release = drm_sysfs_device_release;
-
- DRM_DEBUG("adding \"%s\" to sysfs\n",
- drm_get_connector_name(connector));
-
- dev_set_name(&connector->kdev, "card%d-%s",
- dev->primary->index, drm_get_connector_name(connector));
- ret = device_register(&connector->kdev);
-
- if (ret) {
- DRM_ERROR("failed to register connector device: %d\n", ret);
- goto out;
- }
-
- /* Standard attributes */
-
- for (i = 0; i < ARRAY_SIZE(connector_attrs); i++) {
- ret = device_create_file(&connector->kdev, &connector_attrs[i]);
- if (ret)
- goto err_out_files;
- }
-
- /* Optional attributes */
- /*
- * In the long run it maybe a good idea to make one set of
- * optionals per connector type.
- */
- switch (connector->connector_type) {
- case DRM_MODE_CONNECTOR_DVII:
- case DRM_MODE_CONNECTOR_Composite:
- case DRM_MODE_CONNECTOR_SVIDEO:
- case DRM_MODE_CONNECTOR_Component:
- case DRM_MODE_CONNECTOR_TV:
- for (i = 0; i < ARRAY_SIZE(connector_attrs_opt1); i++) {
- ret = device_create_file(&connector->kdev, &connector_attrs_opt1[i]);
- if (ret)
- goto err_out_files;
- }
- break;
- default:
- break;
- }
-
- ret = sysfs_create_bin_file(&connector->kdev.kobj, &edid_attr);
- if (ret)
- goto err_out_files;
-
- /* Let userspace know we have a new connector */
- drm_sysfs_hotplug_event(dev);
-
- return 0;
-
-err_out_files:
- if (i > 0)
- for (j = 0; j < i; j++)
- device_remove_file(&connector->kdev,
- &connector_attrs[j]);
- device_unregister(&connector->kdev);
-
-out:
- return ret;
-}
-EXPORT_SYMBOL(drm_sysfs_connector_add);
-
-/**
- * drm_sysfs_connector_remove - remove a connector device from sysfs
- * @connector: connector to remove
- *
- * Remove @connector and its associated attributes from sysfs. Note that
- * the device model core will take care of sending the "remove" uevent
- * at this time, so we don't need to do it.
- *
- * Note:
- * This routine should only be called if the connector was previously
- * successfully registered. If @connector hasn't been registered yet,
- * you'll likely see a panic somewhere deep in sysfs code when called.
- */
-void drm_sysfs_connector_remove(struct drm_connector *connector)
-{
- int i;
-
- DRM_DEBUG("removing \"%s\" from sysfs\n",
- drm_get_connector_name(connector));
-
- for (i = 0; i < ARRAY_SIZE(connector_attrs); i++)
- device_remove_file(&connector->kdev, &connector_attrs[i]);
- sysfs_remove_bin_file(&connector->kdev.kobj, &edid_attr);
- device_unregister(&connector->kdev);
-}
-EXPORT_SYMBOL(drm_sysfs_connector_remove);
-
-/**
- * drm_sysfs_hotplug_event - generate a DRM uevent
- * @dev: DRM device
- *
- * Send a uevent for the DRM device specified by @dev. Currently we only
- * set HOTPLUG=1 in the uevent environment, but this could be expanded to
- * deal with other types of events.
- */
-void drm_sysfs_hotplug_event(struct drm_device *dev)
-{
- char *event_string = "HOTPLUG=1";
- char *envp[] = { event_string, NULL };
-
- DRM_DEBUG("generating hotplug event\n");
-
- kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, envp);
-}
-EXPORT_SYMBOL(drm_sysfs_hotplug_event);
-
-/**
- * drm_sysfs_device_add - adds a class device to sysfs for a character driver
- * @minor: DRM minor to be added
- *
- * Add a DRM minor to the DRM device model class. We use the minor's PCI
- * device as the parent for the Linux device and name it after the minor
- * type (card, control or render node) and index.
- */
-int drm_sysfs_device_add(struct drm_minor *minor)
-{
- int err;
- char *minor_str;
-
- minor->kdev.parent = &minor->dev->pdev->dev;
- minor->kdev.class = drm_class;
- minor->kdev.release = drm_sysfs_device_release;
- minor->kdev.devt = minor->device;
- minor->kdev.type = &drm_sysfs_device_minor;
- if (minor->type == DRM_MINOR_CONTROL)
- minor_str = "controlD%d";
- else if (minor->type == DRM_MINOR_RENDER)
- minor_str = "renderD%d";
- else
- minor_str = "card%d";
-
- dev_set_name(&minor->kdev, minor_str, minor->index);
-
- err = device_register(&minor->kdev);
- if (err) {
- DRM_ERROR("device add failed: %d\n", err);
- goto err_out;
- }
-
- return 0;
-
-err_out:
- return err;
-}
-
-/**
- * drm_sysfs_device_remove - remove a DRM minor from sysfs
- * @minor: DRM minor to remove
- *
- * This call unregisters and cleans up a class device that was created with a
- * call to drm_sysfs_device_add()
- */
-void drm_sysfs_device_remove(struct drm_minor *minor)
-{
- device_unregister(&minor->kdev);
-}
-
-
-/**
- * drm_class_device_register - Register a struct device in the drm class.
- *
- * @dev: pointer to struct device to register.
- *
- * @dev should have all relevant members pre-filled with the exception
- * of the class member. In particular, the device_type member must
- * be set.
- */
-
-int drm_class_device_register(struct device *dev)
-{
- dev->class = drm_class;
- return device_register(dev);
-}
-EXPORT_SYMBOL_GPL(drm_class_device_register);
-
-void drm_class_device_unregister(struct device *dev)
-{
- return device_unregister(dev);
-}
-EXPORT_SYMBOL_GPL(drm_class_device_unregister);
+++ /dev/null
-/**
- * \file drm_vm.c
- * Memory mapping for DRM
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "drmP.h"
-#if defined(__ia64__)
-#include <linux/efi.h>
-#endif
-
-static void drm_vm_open(struct vm_area_struct *vma);
-static void drm_vm_close(struct vm_area_struct *vma);
-
-static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
-{
- pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
-
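- /*
- * Register and framebuffer mappings must not be cached by the CPU, so
- * pick an uncached or write-combined protection per architecture.
- */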
-#if defined(__i386__) || defined(__x86_64__)
- if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
- pgprot_val(tmp) |= _PAGE_PCD;
- pgprot_val(tmp) &= ~_PAGE_PWT;
- }
-#elif defined(__powerpc__)
- pgprot_val(tmp) |= _PAGE_NO_CACHE;
- if (map_type == _DRM_REGISTERS)
- pgprot_val(tmp) |= _PAGE_GUARDED;
-#elif defined(__ia64__)
- if (efi_range_is_wc(vma->vm_start, vma->vm_end -
- vma->vm_start))
- tmp = pgprot_writecombine(tmp);
- else
- tmp = pgprot_noncached(tmp);
-#elif defined(__sparc__)
- tmp = pgprot_noncached(tmp);
-#endif
- return tmp;
-}
-
-static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
-{
- pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
-
-#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
- tmp |= _PAGE_NO_CACHE;
-#endif
- return tmp;
-}
-
-/**
- * \c fault method for AGP virtual memory.
- *
- * \param vma virtual memory area.
- * \param vmf fault information.
- * \return 0 on success, or VM_FAULT_SIGBUS on error; the faulted page is
- * returned via vmf->page.
- *
- * Find the right map and if it's AGP memory find the real physical page to
- * map, get the page, increment the use count and return it.
- */
-#if __OS_HAS_AGP
-static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- struct drm_file *priv = vma->vm_file->private_data;
- struct drm_device *dev = priv->minor->dev;
- struct drm_local_map *map = NULL;
- struct drm_map_list *r_list;
- struct drm_hash_item *hash;
-
- /*
- * Find the right map
- */
- if (!drm_core_has_AGP(dev))
- goto vm_fault_error;
-
- if (!dev->agp || !dev->agp->cant_use_aperture)
- goto vm_fault_error;
-
- if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
- goto vm_fault_error;
-
- r_list = drm_hash_entry(hash, struct drm_map_list, hash);
- map = r_list->map;
-
- if (map && map->type == _DRM_AGP) {
- /*
- * Using vm_pgoff as a selector forces us to use this unusual
- * addressing scheme.
- */
- resource_size_t offset = (unsigned long)vmf->virtual_address -
- vma->vm_start;
- resource_size_t baddr = map->offset + offset;
- struct drm_agp_mem *agpmem;
- struct page *page;
-
-#ifdef __alpha__
- /*
- * Adjust to a bus-relative address
- */
- baddr -= dev->hose->mem_space->start;
-#endif
-
- /*
- * It's AGP memory - find the real physical page to map
- */
- list_for_each_entry(agpmem, &dev->agp->memory, head) {
- if (agpmem->bound <= baddr &&
- agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
- break;
- }
-
- if (!agpmem)
- goto vm_fault_error;
-
- /*
- * Get the page, inc the use count, and return it
- */
- offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
- page = agpmem->memory->pages[offset];
- get_page(page);
- vmf->page = page;
-
- DRM_DEBUG
- ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
- (unsigned long long)baddr,
- agpmem->memory->pages[offset],
- (unsigned long long)offset,
- page_count(page));
- return 0;
- }
-vm_fault_error:
- return VM_FAULT_SIGBUS; /* Disallow mremap */
-}
-#else /* __OS_HAS_AGP */
-static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- return VM_FAULT_SIGBUS;
-}
-#endif /* __OS_HAS_AGP */
-
-/**
- * \c fault method for shared virtual memory.
- *
- * \param vma virtual memory area.
- * \param vmf fault information.
- * \return 0 on success, or VM_FAULT_SIGBUS on error; the faulted page is
- * returned via vmf->page.
- *
- * Get the mapping, find the real physical page to map, get the page, and
- * return it.
- */
-static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- struct drm_local_map *map = vma->vm_private_data;
- unsigned long offset;
- unsigned long i;
- struct page *page;
-
- if (!map)
- return VM_FAULT_SIGBUS; /* Nothing allocated */
-
- offset = (unsigned long)vmf->virtual_address - vma->vm_start;
- i = (unsigned long)map->handle + offset;
- page = vmalloc_to_page((void *)i);
- if (!page)
- return VM_FAULT_SIGBUS;
- get_page(page);
- vmf->page = page;
-
- DRM_DEBUG("shm_fault 0x%lx\n", offset);
- return 0;
-}
-
-/**
- * \c close method for shared virtual memory.
- *
- * \param vma virtual memory area.
- *
- * Deletes map information if we are the last
- * person to close a mapping and it's not in the global maplist.
- */
-static void drm_vm_shm_close(struct vm_area_struct *vma)
-{
- struct drm_file *priv = vma->vm_file->private_data;
- struct drm_device *dev = priv->minor->dev;
- struct drm_vma_entry *pt, *temp;
- struct drm_local_map *map;
- struct drm_map_list *r_list;
- int found_maps = 0;
-
- DRM_DEBUG("0x%08lx,0x%08lx\n",
- vma->vm_start, vma->vm_end - vma->vm_start);
- atomic_dec(&dev->vma_count);
-
- map = vma->vm_private_data;
-
- mutex_lock(&dev->struct_mutex);
- list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
- if (pt->vma->vm_private_data == map)
- found_maps++;
- if (pt->vma == vma) {
- list_del(&pt->head);
- kfree(pt);
- }
- }
-
- /* We were the only map that was found */
- if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
- /* Check to see if we are in the maplist, if we are not, then
- * we delete this mappings information.
- */
- found_maps = 0;
- list_for_each_entry(r_list, &dev->maplist, head) {
- if (r_list->map == map)
- found_maps++;
- }
-
- if (!found_maps) {
- drm_dma_handle_t dmah;
-
- switch (map->type) {
- case _DRM_REGISTERS:
- case _DRM_FRAME_BUFFER:
- if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
- int retcode;
- retcode = mtrr_del(map->mtrr,
- map->offset,
- map->size);
- DRM_DEBUG("mtrr_del = %d\n", retcode);
- }
- iounmap(map->handle);
- break;
- case _DRM_SHM:
- vfree(map->handle);
- break;
- case _DRM_AGP:
- case _DRM_SCATTER_GATHER:
- break;
- case _DRM_CONSISTENT:
- dmah.vaddr = map->handle;
- dmah.busaddr = map->offset;
- dmah.size = map->size;
- __drm_pci_free(dev, &dmah);
- break;
- case _DRM_GEM:
- DRM_ERROR("tried to rmmap GEM object\n");
- break;
- }
- kfree(map);
- }
- }
- mutex_unlock(&dev->struct_mutex);
-}
-
-/**
- * \c fault method for DMA virtual memory.
- *
- * \param vma virtual memory area.
- * \param vmf fault information.
- * \return 0 on success, or VM_FAULT_SIGBUS on error; the faulted page is
- * returned via vmf->page.
- *
- * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
- */
-static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- struct drm_file *priv = vma->vm_file->private_data;
- struct drm_device *dev = priv->minor->dev;
- struct drm_device_dma *dma = dev->dma;
- unsigned long offset;
- unsigned long page_nr;
- struct page *page;
-
- if (!dma)
- return VM_FAULT_SIGBUS; /* Error */
- if (!dma->pagelist)
- return VM_FAULT_SIGBUS; /* Nothing allocated */
-
- offset = (unsigned long)vmf->virtual_address - vma->vm_start; /* vm_[pg]off[set] should be 0 */
- page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
- page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));
-
- get_page(page);
- vmf->page = page;
-
- DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
- return 0;
-}
-
-/**
- * \c fault method for scatter-gather virtual memory.
- *
- * \param vma virtual memory area.
- * \param vmf fault information.
- * \return 0 on success, or VM_FAULT_SIGBUS on error; the faulted page is
- * returned via vmf->page.
- *
- * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
- */
-static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- struct drm_local_map *map = vma->vm_private_data;
- struct drm_file *priv = vma->vm_file->private_data;
- struct drm_device *dev = priv->minor->dev;
- struct drm_sg_mem *entry = dev->sg;
- unsigned long offset;
- unsigned long map_offset;
- unsigned long page_offset;
- struct page *page;
-
- if (!entry)
- return VM_FAULT_SIGBUS; /* Error */
- if (!entry->pagelist)
- return VM_FAULT_SIGBUS; /* Nothing allocated */
-
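- /*
- * Translate the faulting address into an index into the scatter/gather
- * page list, accounting for where this map starts within the SG area.
- */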
- offset = (unsigned long)vmf->virtual_address - vma->vm_start;
- map_offset = map->offset - (unsigned long)dev->sg->virtual;
- page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
- page = entry->pagelist[page_offset];
- get_page(page);
- vmf->page = page;
-
- return 0;
-}
-
-static int drm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- return drm_do_vm_fault(vma, vmf);
-}
-
-static int drm_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- return drm_do_vm_shm_fault(vma, vmf);
-}
-
-static int drm_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- return drm_do_vm_dma_fault(vma, vmf);
-}
-
-static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- return drm_do_vm_sg_fault(vma, vmf);
-}
-
-/** AGP virtual memory operations */
-static const struct vm_operations_struct drm_vm_ops = {
- .fault = drm_vm_fault,
- .open = drm_vm_open,
- .close = drm_vm_close,
-};
-
-/** Shared virtual memory operations */
-static const struct vm_operations_struct drm_vm_shm_ops = {
- .fault = drm_vm_shm_fault,
- .open = drm_vm_open,
- .close = drm_vm_shm_close,
-};
-
-/** DMA virtual memory operations */
-static const struct vm_operations_struct drm_vm_dma_ops = {
- .fault = drm_vm_dma_fault,
- .open = drm_vm_open,
- .close = drm_vm_close,
-};
-
-/** Scatter-gather virtual memory operations */
-static const struct vm_operations_struct drm_vm_sg_ops = {
- .fault = drm_vm_sg_fault,
- .open = drm_vm_open,
- .close = drm_vm_close,
-};
-
-/**
- * \c open method for shared virtual memory.
- *
- * \param vma virtual memory area.
- *
- * Create a new drm_vma_entry structure as the \p vma private data entry and
- * add it to drm_device::vmalist.
- */
-void drm_vm_open_locked(struct vm_area_struct *vma)
-{
- struct drm_file *priv = vma->vm_file->private_data;
- struct drm_device *dev = priv->minor->dev;
- struct drm_vma_entry *vma_entry;
-
- DRM_DEBUG("0x%08lx,0x%08lx\n",
- vma->vm_start, vma->vm_end - vma->vm_start);
- atomic_inc(&dev->vma_count);
-
- vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
- if (vma_entry) {
- vma_entry->vma = vma;
- vma_entry->pid = current->pid;
- list_add(&vma_entry->head, &dev->vmalist);
- }
-}
-
-static void drm_vm_open(struct vm_area_struct *vma)
-{
- struct drm_file *priv = vma->vm_file->private_data;
- struct drm_device *dev = priv->minor->dev;
-
- mutex_lock(&dev->struct_mutex);
- drm_vm_open_locked(vma);
- mutex_unlock(&dev->struct_mutex);
-}
-
-/**
- * \c close method for all virtual memory types.
- *
- * \param vma virtual memory area.
- *
- * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
- * free it.
- */
-static void drm_vm_close(struct vm_area_struct *vma)
-{
- struct drm_file *priv = vma->vm_file->private_data;
- struct drm_device *dev = priv->minor->dev;
- struct drm_vma_entry *pt, *temp;
-
- DRM_DEBUG("0x%08lx,0x%08lx\n",
- vma->vm_start, vma->vm_end - vma->vm_start);
- atomic_dec(&dev->vma_count);
-
- mutex_lock(&dev->struct_mutex);
- list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
- if (pt->vma == vma) {
- list_del(&pt->head);
- kfree(pt);
- break;
- }
- }
- mutex_unlock(&dev->struct_mutex);
-}
-
-/**
- * mmap DMA memory.
- *
- * \param file_priv DRM file private.
- * \param vma virtual memory area.
- * \return zero on success or a negative number on failure.
- *
- * Sets the virtual memory area operations structure to vm_dma_ops, the file
- * pointer, and calls vm_open().
- */
-static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
-{
- struct drm_file *priv = filp->private_data;
- struct drm_device *dev;
- struct drm_device_dma *dma;
- unsigned long length = vma->vm_end - vma->vm_start;
-
- dev = priv->minor->dev;
- dma = dev->dma;
- DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
- vma->vm_start, vma->vm_end, vma->vm_pgoff);
-
- /* Length must match exact page count */
- if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
- return -EINVAL;
- }
-
- if (!capable(CAP_SYS_ADMIN) &&
- (dma->flags & _DRM_DMA_USE_PCI_RO)) {
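- /* Unprivileged clients get a read-only mapping of read-only DMA buffers. */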
- vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
-#if defined(__i386__) || defined(__x86_64__)
- pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
-#else
- /* Ye gads this is ugly. With more thought
- we could move this up higher and use
- `protection_map' instead. */
- vma->vm_page_prot =
- __pgprot(pte_val
- (pte_wrprotect
- (__pte(pgprot_val(vma->vm_page_prot)))));
-#endif
- }
-
- vma->vm_ops = &drm_vm_dma_ops;
-
- vma->vm_flags |= VM_RESERVED; /* Don't swap */
- vma->vm_flags |= VM_DONTEXPAND;
-
- vma->vm_file = filp; /* Needed for drm_vm_open() */
- drm_vm_open_locked(vma);
- return 0;
-}
-
-resource_size_t drm_core_get_map_ofs(struct drm_local_map * map)
-{
- return map->offset;
-}
-
-EXPORT_SYMBOL(drm_core_get_map_ofs);
-
-resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
-{
-#ifdef __alpha__
- return dev->hose->dense_mem_base - dev->hose->mem_space->start;
-#else
- return 0;
-#endif
-}
-
-EXPORT_SYMBOL(drm_core_get_reg_ofs);
-
-/**
- * mmap DRM memory.
- *
- * \param file_priv DRM file private.
- * \param vma virtual memory area.
- * \return zero on success or a negative number on failure.
- *
- * If the virtual memory area has no offset associated with it then it's a DMA
- * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist,
- * checks that the restricted flag is not set, sets the virtual memory operations
- * according to the mapping type and remaps the pages. Finally sets the file
- * pointer and calls vm_open().
- */
-int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
-{
- struct drm_file *priv = filp->private_data;
- struct drm_device *dev = priv->minor->dev;
- struct drm_local_map *map = NULL;
- resource_size_t offset = 0;
- struct drm_hash_item *hash;
-
- DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
- vma->vm_start, vma->vm_end, vma->vm_pgoff);
-
- if (!priv->authenticated)
- return -EACCES;
-
- /* We check for "dma". On Apple's UniNorth, it's valid to have
- * the AGP mapped at physical address 0
- * --BenH.
- */
- if (!vma->vm_pgoff
-#if __OS_HAS_AGP
- && (!dev->agp
- || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
-#endif
- )
- return drm_mmap_dma(filp, vma);
-
- if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
- DRM_ERROR("Could not find map\n");
- return -EINVAL;
- }
-
- map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
- if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
- return -EPERM;
-
- /* Check for valid size. */
- if (map->size < vma->vm_end - vma->vm_start)
- return -EINVAL;
-
- if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
- vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
-#if defined(__i386__) || defined(__x86_64__)
- pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
-#else
- /* Ye gads this is ugly. With more thought
- we could move this up higher and use
- `protection_map' instead. */
- vma->vm_page_prot =
- __pgprot(pte_val
- (pte_wrprotect
- (__pte(pgprot_val(vma->vm_page_prot)))));
-#endif
- }
-
- switch (map->type) {
- case _DRM_AGP:
- if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
- /*
- * On some platforms we can't talk to bus dma address from the CPU, so for
- * memory of type DRM_AGP, we'll deal with sorting out the real physical
- * pages and mappings in fault()
- */
-#if defined(__powerpc__)
- pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
-#endif
- vma->vm_ops = &drm_vm_ops;
- break;
- }
- /* fall through to _DRM_FRAME_BUFFER... */
- case _DRM_FRAME_BUFFER:
- case _DRM_REGISTERS:
- offset = dev->driver->get_reg_ofs(dev);
- vma->vm_flags |= VM_IO; /* not in core dump */
- vma->vm_page_prot = drm_io_prot(map->type, vma);
- if (io_remap_pfn_range(vma, vma->vm_start,
- (map->offset + offset) >> PAGE_SHIFT,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot))
- return -EAGAIN;
- DRM_DEBUG(" Type = %d; start = 0x%lx, end = 0x%lx,"
- " offset = 0x%llx\n",
- map->type,
- vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset));
- vma->vm_ops = &drm_vm_ops;
- break;
- case _DRM_CONSISTENT:
- /* Consistent memory is really like shared memory. But
- * it's allocated in a different way, so avoid fault */
- if (remap_pfn_range(vma, vma->vm_start,
- page_to_pfn(virt_to_page(map->handle)),
- vma->vm_end - vma->vm_start, vma->vm_page_prot))
- return -EAGAIN;
- vma->vm_page_prot = drm_dma_prot(map->type, vma);
- /* fall through to _DRM_SHM */
- case _DRM_SHM:
- vma->vm_ops = &drm_vm_shm_ops;
- vma->vm_private_data = (void *)map;
- /* Don't let this area swap. Change when
- DRM_KERNEL advisory is supported. */
- vma->vm_flags |= VM_RESERVED;
- break;
- case _DRM_SCATTER_GATHER:
- vma->vm_ops = &drm_vm_sg_ops;
- vma->vm_private_data = (void *)map;
- vma->vm_flags |= VM_RESERVED;
- vma->vm_page_prot = drm_dma_prot(map->type, vma);
- break;
- default:
- return -EINVAL; /* This should never happen. */
- }
- vma->vm_flags |= VM_RESERVED; /* Don't swap */
- vma->vm_flags |= VM_DONTEXPAND;
-
- vma->vm_file = filp; /* Needed for drm_vm_open() */
- drm_vm_open_locked(vma);
- return 0;
-}
-
-int drm_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- struct drm_file *priv = filp->private_data;
- struct drm_device *dev = priv->minor->dev;
- int ret;
-
- mutex_lock(&dev->struct_mutex);
- ret = drm_mmap_locked(filp, vma);
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
-}
-EXPORT_SYMBOL(drm_mmap);
+++ /dev/null
-/**
- * \file ati_pcigart.c
- * ATI PCI GART support
- *
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Wed Dec 13 21:52:19 2000 by gareth@valinux.com
- *
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include "drmP.h"
-
-# define ATI_PCIGART_PAGE_SIZE 4096 /**< PCI GART page size */
-
-static int drm_ati_alloc_pcigart_table(struct drm_device *dev,
- struct drm_ati_pcigart_info *gart_info)
-{
- gart_info->table_handle = drm_pci_alloc(dev, gart_info->table_size,
- PAGE_SIZE,
- gart_info->table_mask);
- if (gart_info->table_handle == NULL)
- return -ENOMEM;
-
- return 0;
-}
-
-static void drm_ati_free_pcigart_table(struct drm_device *dev,
- struct drm_ati_pcigart_info *gart_info)
-{
- drm_pci_free(dev, gart_info->table_handle);
- gart_info->table_handle = NULL;
-}
-
-int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
-{
- struct drm_sg_mem *entry = dev->sg;
- unsigned long pages;
- int i;
- int max_pages;
-
- /* we need to support large memory configurations */
- if (!entry) {
- DRM_ERROR("no scatter/gather memory!\n");
- return 0;
- }
-
- if (gart_info->bus_addr) {
-
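- /*
- * Unmap every scatter/gather page that was mapped into the GART,
- * stopping at the first slot that was never mapped.
- */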
- max_pages = (gart_info->table_size / sizeof(u32));
- pages = (entry->pages <= max_pages)
- ? entry->pages : max_pages;
-
- for (i = 0; i < pages; i++) {
- if (!entry->busaddr[i])
- break;
- pci_unmap_page(dev->pdev, entry->busaddr[i],
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- }
-
- if (gart_info->gart_table_location == DRM_ATI_GART_MAIN)
- gart_info->bus_addr = 0;
- }
-
- if (gart_info->gart_table_location == DRM_ATI_GART_MAIN &&
- gart_info->table_handle) {
- drm_ati_free_pcigart_table(dev, gart_info);
- }
-
- return 1;
-}
-EXPORT_SYMBOL(drm_ati_pcigart_cleanup);
-
-int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
-{
- struct drm_local_map *map = &gart_info->mapping;
- struct drm_sg_mem *entry = dev->sg;
- void *address = NULL;
- unsigned long pages;
- u32 *pci_gart = NULL, page_base, gart_idx;
- dma_addr_t bus_address = 0;
- int i, j, ret = 0;
- int max_ati_pages, max_real_pages;
-
- if (!entry) {
- DRM_ERROR("no scatter/gather memory!\n");
- goto done;
- }
-
- if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
- DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n");
-
- ret = drm_ati_alloc_pcigart_table(dev, gart_info);
- if (ret) {
- DRM_ERROR("cannot allocate PCI GART page!\n");
- goto done;
- }
-
- pci_gart = gart_info->table_handle->vaddr;
- address = gart_info->table_handle->vaddr;
- bus_address = gart_info->table_handle->busaddr;
- } else {
- address = gart_info->addr;
- bus_address = gart_info->bus_addr;
- DRM_DEBUG("PCI: Gart Table: VRAM %08LX mapped at %08lX\n",
- (unsigned long long)bus_address,
- (unsigned long)address);
- }
-
-
- max_ati_pages = (gart_info->table_size / sizeof(u32));
- max_real_pages = max_ati_pages / (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE);
- pages = (entry->pages <= max_real_pages)
- ? entry->pages : max_real_pages;
-
- if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
- memset(pci_gart, 0, max_ati_pages * sizeof(u32));
- } else {
- memset_io((void __iomem *)map->handle, 0, max_ati_pages * sizeof(u32));
- }
-
- gart_idx = 0;
- for (i = 0; i < pages; i++) {
- /* we need to support large memory configurations */
- entry->busaddr[i] = pci_map_page(dev->pdev, entry->pagelist[i],
- 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- if (entry->busaddr[i] == 0) {
- DRM_ERROR("unable to map PCIGART pages!\n");
- drm_ati_pcigart_cleanup(dev, gart_info);
- address = NULL;
- bus_address = 0;
- goto done;
- }
- page_base = (u32) entry->busaddr[i];
-
- for (j = 0; j < (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); j++) {
- u32 val;
-
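- /*
- * Build the GART entry for this 4K page: the raw bus address
- * for PCI, address | 0xc for IGP, and (address >> 8) | 0xc
- * for PCIE.
- */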
- switch(gart_info->gart_reg_if) {
- case DRM_ATI_GART_IGP:
- val = page_base | 0xc;
- break;
- case DRM_ATI_GART_PCIE:
- val = (page_base >> 8) | 0xc;
- break;
- default:
- case DRM_ATI_GART_PCI:
- val = page_base;
- break;
- }
- if (gart_info->gart_table_location ==
- DRM_ATI_GART_MAIN)
- pci_gart[gart_idx] = cpu_to_le32(val);
- else
- DRM_WRITE32(map, gart_idx * sizeof(u32), val);
- gart_idx++;
- page_base += ATI_PCIGART_PAGE_SIZE;
- }
- }
- ret = 1;
-
-#if defined(__i386__) || defined(__x86_64__)
- wbinvd();
-#else
- mb();
-#endif
-
- done:
- gart_info->addr = address;
- gart_info->bus_addr = bus_address;
- return ret;
-}
-EXPORT_SYMBOL(drm_ati_pcigart_init);
+++ /dev/null
-/**
- * \file drm_agpsupport.c
- * DRM support for AGP/GART backend
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "drmP.h"
-#include <linux/module.h>
-
-#if __OS_HAS_AGP
-
-#include <asm/agp.h>
-
-/**
- * Get AGP information.
- *
- * \param dev DRM device.
- * \param info drm_agp_info structure to fill in (output).
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device has been initialized and acquired and fills in the
- * drm_agp_info structure with the information in drm_agp_head::agp_info.
- */
-int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info)
-{
- DRM_AGP_KERN *kern;
-
- if (!dev->agp || !dev->agp->acquired)
- return -EINVAL;
-
- kern = &dev->agp->agp_info;
- info->agp_version_major = kern->version.major;
- info->agp_version_minor = kern->version.minor;
- info->mode = kern->mode;
- info->aperture_base = kern->aper_base;
- info->aperture_size = kern->aper_size * 1024 * 1024;
- info->memory_allowed = kern->max_memory << PAGE_SHIFT;
- info->memory_used = kern->current_memory << PAGE_SHIFT;
- info->id_vendor = kern->device->vendor;
- info->id_device = kern->device->device;
-
- return 0;
-}
-
-EXPORT_SYMBOL(drm_agp_info);
-
-int drm_agp_info_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_agp_info *info = data;
- int err;
-
- err = drm_agp_info(dev, info);
- if (err)
- return err;
-
- return 0;
-}
-
-/**
- * Acquire the AGP device.
- *
- * \param dev DRM device that is to acquire AGP.
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device hasn't been acquired before and calls
- * \c agp_backend_acquire.
- */
-int drm_agp_acquire(struct drm_device * dev)
-{
- if (!dev->agp)
- return -ENODEV;
- if (dev->agp->acquired)
- return -EBUSY;
- if (!(dev->agp->bridge = agp_backend_acquire(dev->pdev)))
- return -ENODEV;
- dev->agp->acquired = 1;
- return 0;
-}
-
-EXPORT_SYMBOL(drm_agp_acquire);
-
-/**
- * Acquire the AGP device (ioctl).
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument.
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device hasn't been acquired before and calls
- * \c agp_backend_acquire.
- */
-int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- return drm_agp_acquire((struct drm_device *) file_priv->minor->dev);
-}
-
-/**
- * Release the AGP device.
- *
- * \param dev DRM device that is to release AGP.
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device has been acquired and calls \c agp_backend_release.
- */
-int drm_agp_release(struct drm_device * dev)
-{
- if (!dev->agp || !dev->agp->acquired)
- return -EINVAL;
- agp_backend_release(dev->agp->bridge);
- dev->agp->acquired = 0;
- return 0;
-}
-EXPORT_SYMBOL(drm_agp_release);
-
-int drm_agp_release_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- return drm_agp_release(dev);
-}
-
-/**
- * Enable the AGP bus.
- *
- * \param dev DRM device that has previously acquired AGP.
- * \param mode Requested AGP mode.
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device has been acquired but not enabled, and calls
- * \c agp_enable.
- */
-int drm_agp_enable(struct drm_device * dev, struct drm_agp_mode mode)
-{
- if (!dev->agp || !dev->agp->acquired)
- return -EINVAL;
-
- dev->agp->mode = mode.mode;
- agp_enable(dev->agp->bridge, mode.mode);
- dev->agp->enabled = 1;
- return 0;
-}
-
-EXPORT_SYMBOL(drm_agp_enable);
-
-int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_agp_mode *mode = data;
-
- return drm_agp_enable(dev, *mode);
-}
-
-/**
- * Allocate AGP memory.
- *
- * \param inode device inode.
- * \param file_priv file private pointer.
- * \param cmd command.
- * \param arg pointer to a drm_agp_buffer structure.
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device is present and has been acquired, allocates the
- * memory via alloc_agp() and creates a drm_agp_mem entry for it.
- */
-int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
-{
- struct drm_agp_mem *entry;
- DRM_AGP_MEM *memory;
- unsigned long pages;
- u32 type;
-
- if (!dev->agp || !dev->agp->acquired)
- return -EINVAL;
- if (!(entry = kmalloc(sizeof(*entry), GFP_KERNEL)))
- return -ENOMEM;
-
- memset(entry, 0, sizeof(*entry));
-
- pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
- type = (u32) request->type;
- if (!(memory = drm_alloc_agp(dev, pages, type))) {
- kfree(entry);
- return -ENOMEM;
- }
-
- entry->handle = (unsigned long)memory->key + 1;
- entry->memory = memory;
- entry->bound = 0;
- entry->pages = pages;
- list_add(&entry->head, &dev->agp->memory);
-
- request->handle = entry->handle;
- request->physical = memory->physical;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_agp_alloc);
-
-
-int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_agp_buffer *request = data;
-
- return drm_agp_alloc(dev, request);
-}
-
-/**
- * Search for the AGP memory entry associated with a handle.
- *
- * \param dev DRM device structure.
- * \param handle AGP memory handle.
- * \return pointer to the drm_agp_mem structure associated with \p handle.
- *
- * Walks through drm_agp_head::memory until finding a matching handle.
- */
-static struct drm_agp_mem *drm_agp_lookup_entry(struct drm_device * dev,
- unsigned long handle)
-{
- struct drm_agp_mem *entry;
-
- list_for_each_entry(entry, &dev->agp->memory, head) {
- if (entry->handle == handle)
- return entry;
- }
- return NULL;
-}
-
-/**
- * Unbind AGP memory from the GATT (ioctl).
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a drm_agp_binding structure.
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device is present and acquired, looks-up the AGP memory
- * entry and passes it to the unbind_agp() function.
- */
-int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request)
-{
- struct drm_agp_mem *entry;
- int ret;
-
- if (!dev->agp || !dev->agp->acquired)
- return -EINVAL;
- if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
- return -EINVAL;
- if (!entry->bound)
- return -EINVAL;
- ret = drm_unbind_agp(entry->memory);
- if (ret == 0)
- entry->bound = 0;
- return ret;
-}
-EXPORT_SYMBOL(drm_agp_unbind);
-
-
-int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_agp_binding *request = data;
-
- return drm_agp_unbind(dev, request);
-}
-
-/**
- * Bind AGP memory into the GATT (ioctl)
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a drm_agp_binding structure.
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device is present and has been acquired and that no memory
- * is currently bound into the GATT. Looks-up the AGP memory entry and passes
- * it to bind_agp() function.
- */
-int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request)
-{
- struct drm_agp_mem *entry;
- int retcode;
- int page;
-
- if (!dev->agp || !dev->agp->acquired)
- return -EINVAL;
- if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
- return -EINVAL;
- if (entry->bound)
- return -EINVAL;
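- /* Round the GATT byte offset up to a whole page index before binding. */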
- page = (request->offset + PAGE_SIZE - 1) / PAGE_SIZE;
- if ((retcode = drm_bind_agp(entry->memory, page)))
- return retcode;
- entry->bound = dev->agp->base + (page << PAGE_SHIFT);
- DRM_DEBUG("base = 0x%lx entry->bound = 0x%lx\n",
- dev->agp->base, entry->bound);
- return 0;
-}
-EXPORT_SYMBOL(drm_agp_bind);
-
-
-int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_agp_binding *request = data;
-
- return drm_agp_bind(dev, request);
-}
-
-/**
- * Free AGP memory (ioctl).
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a drm_agp_buffer structure.
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device is present and has been acquired and looks up the
- * AGP memory entry. If the memory is currently bound, unbind it via
- * unbind_agp(). Frees it via free_agp() as well as the entry itself
- * and unlinks it from the doubly linked list it's inserted in.
- */
-int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request)
-{
- struct drm_agp_mem *entry;
-
- if (!dev->agp || !dev->agp->acquired)
- return -EINVAL;
- if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
- return -EINVAL;
- if (entry->bound)
- drm_unbind_agp(entry->memory);
-
- list_del(&entry->head);
-
- drm_free_agp(entry->memory, entry->pages);
- kfree(entry);
- return 0;
-}
-EXPORT_SYMBOL(drm_agp_free);
-
-
-
-int drm_agp_free_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_agp_buffer *request = data;
-
- return drm_agp_free(dev, request);
-}
-
-/**
- * Initialize the AGP resources.
- *
- * \return pointer to a drm_agp_head structure.
- *
- * Gets the AGP bridge from the agpgart module via agp_find_bridge() (or a
- * temporary agp_backend_acquire()), then creates and initializes a
- * drm_agp_head structure from the reported AGP info.
- */
-struct drm_agp_head *drm_agp_init(struct drm_device *dev)
-{
- struct drm_agp_head *head = NULL;
-
- if (!(head = kmalloc(sizeof(*head), GFP_KERNEL)))
- return NULL;
- memset((void *)head, 0, sizeof(*head));
- head->bridge = agp_find_bridge(dev->pdev);
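- /*
- * Prefer a bridge that is already known to agpgart; otherwise acquire
- * the backend just long enough to copy its AGP info.
- */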
- if (!head->bridge) {
- if (!(head->bridge = agp_backend_acquire(dev->pdev))) {
- kfree(head);
- return NULL;
- }
- agp_copy_info(head->bridge, &head->agp_info);
- agp_backend_release(head->bridge);
- } else {
- agp_copy_info(head->bridge, &head->agp_info);
- }
- if (head->agp_info.chipset == NOT_SUPPORTED) {
- kfree(head);
- return NULL;
- }
- INIT_LIST_HEAD(&head->memory);
- head->cant_use_aperture = head->agp_info.cant_use_aperture;
- head->page_mask = head->agp_info.page_mask;
- head->base = head->agp_info.aper_base;
- return head;
-}
-
-/** Calls agp_allocate_memory() */
-DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data * bridge,
- size_t pages, u32 type)
-{
- return agp_allocate_memory(bridge, pages, type);
-}
-
-/** Calls agp_free_memory() */
-int drm_agp_free_memory(DRM_AGP_MEM * handle)
-{
- if (!handle)
- return 0;
- agp_free_memory(handle);
- return 1;
-}
-
-/** Calls agp_bind_memory() */
-int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start)
-{
- if (!handle)
- return -EINVAL;
- return agp_bind_memory(handle, start);
-}
-
-/** Calls agp_unbind_memory() */
-int drm_agp_unbind_memory(DRM_AGP_MEM * handle)
-{
- if (!handle)
- return -EINVAL;
- return agp_unbind_memory(handle);
-}
-
-/**
- * Binds a collection of pages into AGP memory at the given offset, returning
- * the AGP memory structure containing them.
- *
- * No reference is held on the pages during this time -- it is up to the
- * caller to handle that.
- */
-DRM_AGP_MEM *
-drm_agp_bind_pages(struct drm_device *dev,
- struct page **pages,
- unsigned long num_pages,
- uint32_t gtt_offset,
- u32 type)
-{
- DRM_AGP_MEM *mem;
- int ret, i;
-
- DRM_DEBUG("\n");
-
- mem = drm_agp_allocate_memory(dev->agp->bridge, num_pages,
- type);
- if (mem == NULL) {
- DRM_ERROR("Failed to allocate memory for %ld pages\n",
- num_pages);
- return NULL;
- }
-
- for (i = 0; i < num_pages; i++)
- mem->pages[i] = pages[i];
- mem->page_count = num_pages;
-
- mem->is_flushed = true;
- ret = drm_agp_bind_memory(mem, gtt_offset / PAGE_SIZE);
- if (ret != 0) {
- DRM_ERROR("Failed to bind AGP memory: %d\n", ret);
- agp_free_memory(mem);
- return NULL;
- }
-
- return mem;
-}
-EXPORT_SYMBOL(drm_agp_bind_pages);
-
-void drm_agp_chipset_flush(struct drm_device *dev)
-{
- agp_flush_chipset(dev->agp->bridge);
-}
-EXPORT_SYMBOL(drm_agp_chipset_flush);
-
-#endif /* __OS_HAS_AGP */
+++ /dev/null
-/**
- * \file drm_auth.c
- * IOCTLs for authentication
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "drmP.h"
-
-/**
- * Find the file with the given magic number.
- *
- * \param dev DRM device.
- * \param magic magic number.
- *
- * Searches in drm_device::magiclist within all files with the same hash key
- * the one with matching magic number, while holding the drm_device::struct_mutex
- * lock.
- */
-static struct drm_file *drm_find_file(struct drm_master *master, drm_magic_t magic)
-{
- struct drm_file *retval = NULL;
- struct drm_magic_entry *pt;
- struct drm_hash_item *hash;
- struct drm_device *dev = master->minor->dev;
-
- mutex_lock(&dev->struct_mutex);
- if (!drm_ht_find_item(&master->magiclist, (unsigned long)magic, &hash)) {
- pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item);
- retval = pt->priv;
- }
- mutex_unlock(&dev->struct_mutex);
- return retval;
-}
-
-/**
- * Adds a magic number.
- *
- * \param dev DRM device.
- * \param priv file private data.
- * \param magic magic number.
- *
- * Creates a drm_magic_entry structure and appends to the linked list
- * associated the magic number hash key in drm_device::magiclist, while holding
- * the drm_device::struct_mutex lock.
- */
-static int drm_add_magic(struct drm_master *master, struct drm_file *priv,
- drm_magic_t magic)
-{
- struct drm_magic_entry *entry;
- struct drm_device *dev = master->minor->dev;
- DRM_DEBUG("%d\n", magic);
-
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- return -ENOMEM;
- memset(entry, 0, sizeof(*entry));
- entry->priv = priv;
- entry->hash_item.key = (unsigned long)magic;
- mutex_lock(&dev->struct_mutex);
- drm_ht_insert_item(&master->magiclist, &entry->hash_item);
- list_add_tail(&entry->head, &master->magicfree);
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
-/**
- * Remove a magic number.
- *
- * \param dev DRM device.
- * \param magic magic number.
- *
- * Searches and unlinks the entry in drm_device::magiclist with the magic
- * number hash key, while holding the drm_device::struct_mutex lock.
- */
-static int drm_remove_magic(struct drm_master *master, drm_magic_t magic)
-{
- struct drm_magic_entry *pt;
- struct drm_hash_item *hash;
- struct drm_device *dev = master->minor->dev;
-
- DRM_DEBUG("%d\n", magic);
-
- mutex_lock(&dev->struct_mutex);
- if (drm_ht_find_item(&master->magiclist, (unsigned long)magic, &hash)) {
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
- }
- pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item);
- drm_ht_remove_item(&master->magiclist, hash);
- list_del(&pt->head);
- mutex_unlock(&dev->struct_mutex);
-
- kfree(pt);
-
- return 0;
-}
-
-/**
- * Get a unique magic number (ioctl).
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a resulting drm_auth structure.
- * \return zero on success, or a negative number on failure.
- *
- * If there is a magic number in drm_file::magic then use it, otherwise
- * generate a unique non-zero magic number and associate it with \p
- * file_priv.
- */
-int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- static drm_magic_t sequence = 0;
- static DEFINE_SPINLOCK(lock);
- struct drm_auth *auth = data;
-
- /* Find unique magic */
- if (file_priv->magic) {
- auth->magic = file_priv->magic;
- } else {
- do {
- spin_lock(&lock);
- if (!sequence)
- ++sequence; /* reserve 0 */
- auth->magic = sequence++;
- spin_unlock(&lock);
- } while (drm_find_file(file_priv->master, auth->magic));
- file_priv->magic = auth->magic;
- drm_add_magic(file_priv->master, file_priv, auth->magic);
- }
-
- DRM_DEBUG("%u\n", auth->magic);
-
- return 0;
-}
-
-/**
- * Authenticate with a magic.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a drm_auth structure.
- * \return zero if authentication succeeded, or a negative number otherwise.
- *
- * Checks if \p file_priv is associated with the magic number passed in \p arg.
- */
-int drm_authmagic(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_auth *auth = data;
- struct drm_file *file;
-
- DRM_DEBUG("%u\n", auth->magic);
- if ((file = drm_find_file(file_priv->master, auth->magic))) {
- file->authenticated = 1;
- drm_remove_magic(file_priv->master, auth->magic);
- return 0;
- }
- return -EINVAL;
-}
+++ /dev/null
-/**
- * \file drm_bufs.c
- * Generic buffer template
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
- *
- * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/vmalloc.h>
-#include <linux/log2.h>
-#include <asm/shmparam.h>
-#include "drmP.h"
-
-resource_size_t drm_get_resource_start(struct drm_device *dev, unsigned int resource)
-{
- return pci_resource_start(dev->pdev, resource);
-}
-EXPORT_SYMBOL(drm_get_resource_start);
-
-resource_size_t drm_get_resource_len(struct drm_device *dev, unsigned int resource)
-{
- return pci_resource_len(dev->pdev, resource);
-}
-
-EXPORT_SYMBOL(drm_get_resource_len);
-
-static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
- struct drm_local_map *map)
-{
- struct drm_map_list *entry;
- list_for_each_entry(entry, &dev->maplist, head) {
- /*
- * Because the kernel-userspace ABI is fixed at a 32-bit offset
- * while PCI resources may live above that, we ignore the map
- * offset for maps of type _DRM_FRAME_BUFFER or _DRM_REGISTERS.
- * It is assumed that each driver will have only one resource of
- * each type.
- */
- if (!entry->map ||
- map->type != entry->map->type ||
- entry->master != dev->primary->master)
- continue;
- switch (map->type) {
- case _DRM_SHM:
- if (map->flags != _DRM_CONTAINS_LOCK)
- break;
- case _DRM_REGISTERS:
- case _DRM_FRAME_BUFFER:
- return entry;
- default: /* Make gcc happy */
- ;
- }
- if (entry->map->offset == map->offset)
- return entry;
- }
-
- return NULL;
-}
-
-static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
- unsigned long user_token, int hashed_handle, int shm)
-{
- int use_hashed_handle, shift;
- unsigned long add;
-
-#if (BITS_PER_LONG == 64)
- use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
-#elif (BITS_PER_LONG == 32)
- use_hashed_handle = hashed_handle;
-#else
-#error Unsupported long size. Neither 64 nor 32 bits.
-#endif
-
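- /*
- * When the user token fits the 32-bit map handle space, try to use it
- * (shifted down by PAGE_SHIFT) directly as the hash key; fall back to a
- * generated handle above DRM_MAP_HASH_OFFSET on collision or overflow.
- */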
- if (!use_hashed_handle) {
- int ret;
- hash->key = user_token >> PAGE_SHIFT;
- ret = drm_ht_insert_item(&dev->map_hash, hash);
- if (ret != -EINVAL)
- return ret;
- }
-
- shift = 0;
- add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
- if (shm && (SHMLBA > PAGE_SIZE)) {
- int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;
-
- /* For shared memory, we have to preserve the SHMLBA
- * bits of the eventual vma->vm_pgoff value during
- * mmap(). Otherwise we run into cache aliasing problems
- * on some platforms. On these platforms, the pgoff of
- * a mmap() request is used to pick a suitable virtual
- * address for the mmap() region such that it will not
- * cause cache aliasing problems.
- *
- * Therefore, make sure the SHMLBA relevant bits of the
- * hash value we use are equal to those in the original
- * kernel virtual address.
- */
- shift = bits;
- add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
- }
-
- return drm_ht_just_insert_please(&dev->map_hash, hash,
- user_token, 32 - PAGE_SHIFT - 3,
- shift, add);
-}
-
-/**
- * Core function to create a range of memory available for mapping by a
- * non-root process.
- *
- * Adjusts the memory offset to its absolute value according to the mapping
- * type. Adds the map to the map list drm_device::maplist. Adds MTRR's where
- * applicable and if supported by the kernel.
- */
-static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
- unsigned int size, enum drm_map_type type,
- enum drm_map_flags flags,
- struct drm_map_list ** maplist)
-{
- struct drm_local_map *map;
- struct drm_map_list *list;
- drm_dma_handle_t *dmah;
- unsigned long user_token;
- int ret;
-
- map = kmalloc(sizeof(*map), GFP_KERNEL);
- if (!map)
- return -ENOMEM;
-
- map->offset = offset;
- map->size = size;
- map->flags = flags;
- map->type = type;
-
- /* Only allow shared memory to be removable since we only keep enough
- * book keeping information about shared memory to allow for removal
- * when processes fork.
- */
- if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
- kfree(map);
- return -EINVAL;
- }
- DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
- (unsigned long long)map->offset, map->size, map->type);
-
-	/* Page-align _DRM_SHM maps. They are allocated here, so this creates no
-	 * security hole, and it works around various broken drivers that use a
-	 * non-aligned quantity to map the SAREA. --BenH
- */
- if (map->type == _DRM_SHM)
- map->size = PAGE_ALIGN(map->size);
-
- if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
- kfree(map);
- return -EINVAL;
- }
- map->mtrr = -1;
- map->handle = NULL;
-
- switch (map->type) {
- case _DRM_REGISTERS:
- case _DRM_FRAME_BUFFER:
-#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__)
- if (map->offset + (map->size-1) < map->offset ||
- map->offset < virt_to_phys(high_memory)) {
- kfree(map);
- return -EINVAL;
- }
-#endif
-#ifdef __alpha__
- map->offset += dev->hose->mem_space->start;
-#endif
- /* Some drivers preinitialize some maps, without the X Server
- * needing to be aware of it. Therefore, we just return success
- * when the server tries to create a duplicate map.
- */
- list = drm_find_matching_map(dev, map);
- if (list != NULL) {
- if (list->map->size != map->size) {
- DRM_DEBUG("Matching maps of type %d with "
- "mismatched sizes, (%ld vs %ld)\n",
- map->type, map->size,
- list->map->size);
- list->map->size = map->size;
- }
-
- kfree(map);
- *maplist = list;
- return 0;
- }
-
- if (drm_core_has_MTRR(dev)) {
- if (map->type == _DRM_FRAME_BUFFER ||
- (map->flags & _DRM_WRITE_COMBINING)) {
- map->mtrr = mtrr_add(map->offset, map->size,
- MTRR_TYPE_WRCOMB, 1);
- }
- }
- if (map->type == _DRM_REGISTERS) {
- map->handle = ioremap(map->offset, map->size);
- if (!map->handle) {
- kfree(map);
- return -ENOMEM;
- }
- }
-
- break;
- case _DRM_SHM:
- list = drm_find_matching_map(dev, map);
- if (list != NULL) {
-			if (list->map->size != map->size) {
- DRM_DEBUG("Matching maps of type %d with "
- "mismatched sizes, (%ld vs %ld)\n",
- map->type, map->size, list->map->size);
- list->map->size = map->size;
- }
-
- kfree(map);
- *maplist = list;
- return 0;
- }
- map->handle = vmalloc_user(map->size);
- DRM_DEBUG("%lu %d %p\n",
- map->size, drm_order(map->size), map->handle);
- if (!map->handle) {
- kfree(map);
- return -ENOMEM;
- }
- map->offset = (unsigned long)map->handle;
- if (map->flags & _DRM_CONTAINS_LOCK) {
- /* Prevent a 2nd X Server from creating a 2nd lock */
- if (dev->primary->master->lock.hw_lock != NULL) {
- vfree(map->handle);
- kfree(map);
- return -EBUSY;
- }
- dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle; /* Pointer to lock */
- }
- break;
- case _DRM_AGP: {
- struct drm_agp_mem *entry;
- int valid = 0;
-
- if (!drm_core_has_AGP(dev)) {
- kfree(map);
- return -EINVAL;
- }
-#ifdef __alpha__
- map->offset += dev->hose->mem_space->start;
-#endif
- /* In some cases (i810 driver), user space may have already
- * added the AGP base itself, because dev->agp->base previously
- * only got set during AGP enable. So, only add the base
- * address if the map's offset isn't already within the
- * aperture.
- */
- if (map->offset < dev->agp->base ||
- map->offset > dev->agp->base +
- dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
- map->offset += dev->agp->base;
- }
- map->mtrr = dev->agp->agp_mtrr; /* for getmap */
-
-		/* This assumes the DRM has total control of AGP space, which
-		 * is not always the case: AGP can be controlled by user space
-		 * (e.g. the i810 driver). In that case this loop finds no match,
-		 * so before returning -EPERM we also check that dev->agp->memory
-		 * is actually non-empty.
- */
- list_for_each_entry(entry, &dev->agp->memory, head) {
- if ((map->offset >= entry->bound) &&
- (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
- valid = 1;
- break;
- }
- }
- if (!list_empty(&dev->agp->memory) && !valid) {
- kfree(map);
- return -EPERM;
- }
- DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
- (unsigned long long)map->offset, map->size);
-
- break;
- }
- case _DRM_GEM:
- DRM_ERROR("tried to addmap GEM object\n");
- break;
- case _DRM_SCATTER_GATHER:
- if (!dev->sg) {
- kfree(map);
- return -EINVAL;
- }
- map->offset += (unsigned long)dev->sg->virtual;
- break;
- case _DRM_CONSISTENT:
-		/* dma_addr_t is 64-bit on i386 with CONFIG_HIGHMEM64G. As we're
-		 * limiting the address to 2^32-1 (or less), casting it down to
-		 * 32 bits is no problem, but we need to point to a 64-bit
-		 * variable first. */
- dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
- if (!dmah) {
- kfree(map);
- return -ENOMEM;
- }
- map->handle = dmah->vaddr;
- map->offset = (unsigned long)dmah->busaddr;
- kfree(dmah);
- break;
- default:
- kfree(map);
- return -EINVAL;
- }
-
- list = kmalloc(sizeof(*list), GFP_KERNEL);
- if (!list) {
- if (map->type == _DRM_REGISTERS)
- iounmap(map->handle);
- kfree(map);
- return -EINVAL;
- }
- memset(list, 0, sizeof(*list));
- list->map = map;
-
- mutex_lock(&dev->struct_mutex);
- list_add(&list->head, &dev->maplist);
-
- /* Assign a 32-bit handle */
- /* We do it here so that dev->struct_mutex protects the increment */
- user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
- map->offset;
- ret = drm_map_handle(dev, &list->hash, user_token, 0,
- (map->type == _DRM_SHM));
- if (ret) {
- if (map->type == _DRM_REGISTERS)
- iounmap(map->handle);
- kfree(map);
- kfree(list);
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
-
- list->user_token = list->hash.key << PAGE_SHIFT;
- mutex_unlock(&dev->struct_mutex);
-
- if (!(map->flags & _DRM_DRIVER))
- list->master = dev->primary->master;
- *maplist = list;
- return 0;
- }
-
-int drm_addmap(struct drm_device * dev, resource_size_t offset,
- unsigned int size, enum drm_map_type type,
- enum drm_map_flags flags, struct drm_local_map ** map_ptr)
-{
- struct drm_map_list *list;
- int rc;
-
- rc = drm_addmap_core(dev, offset, size, type, flags, &list);
- if (!rc)
- *map_ptr = list->map;
- return rc;
-}
-
-EXPORT_SYMBOL(drm_addmap);
-
-/**
- * Ioctl to specify a range of memory that is available for mapping by a
- * non-root process.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a drm_map structure.
- * \return zero on success or a negative value on error.
- *
- */
-int drm_addmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_map *map = data;
- struct drm_map_list *maplist;
- int err;
-
- if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
- return -EPERM;
-
- err = drm_addmap_core(dev, map->offset, map->size, map->type,
- map->flags, &maplist);
-
- if (err)
- return err;
-
-	/* avoid a warning on 64-bit; this cast isn't very nice, but the API is already set, so it's too late to change */
- map->handle = (void *)(unsigned long)maplist->user_token;
- return 0;
-}
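
(Editorial note, not part of the patch.) As the comment above describes, the token written back in map->handle is what user space later passes to mmap() as the offset. A hedged user-space sketch of that flow follows; it assumes the legacy map ABI is enabled, that the process opens /dev/dri/card0 as the DRM master (and is therefore authenticated), and that a drm.h UAPI header is available on the include path. The device path and sizes are purely illustrative.

    /* User-space sketch: create a shared-memory map via ADD_MAP and mmap()
     * it through the returned handle/token. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <unistd.h>
    #include <drm/drm.h>

    int main(void)
    {
            int fd = open("/dev/dri/card0", O_RDWR);
            struct drm_map map;
            void *ptr;

            if (fd < 0) {
                    perror("open");
                    return 1;
            }

            memset(&map, 0, sizeof(map));
            map.size = 4096;          /* one page of shared memory */
            map.type = _DRM_SHM;      /* SHM maps are allowed for non-root callers */

            if (ioctl(fd, DRM_IOCTL_ADD_MAP, &map) < 0) {
                    perror("DRM_IOCTL_ADD_MAP");
                    return 1;
            }

            /* map.handle now holds the 32-bit user token assigned by the driver;
             * it is used as the mmap() offset for this mapping. */
            ptr = mmap(NULL, map.size, PROT_READ | PROT_WRITE, MAP_SHARED,
                       fd, (off_t)(unsigned long)map.handle);
            if (ptr == MAP_FAILED)
                    perror("mmap");
            else
                    munmap(ptr, map.size);

            close(fd);
            return 0;
    }
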
-
-/**
- * Remove a map private from the list and deallocate resources if the mapping
- * isn't in use.
- *
- * Searches for the map on drm_device::maplist, removes it from the list, checks
- * whether it is still in use, and frees any associated resources (such as MTRRs)
- * if it is not.
- *
- * \sa drm_addmap
- */
-int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
-{
- struct drm_map_list *r_list = NULL, *list_t;
- drm_dma_handle_t dmah;
- int found = 0;
- struct drm_master *master;
-
- /* Find the list entry for the map and remove it */
- list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
- if (r_list->map == map) {
- master = r_list->master;
- list_del(&r_list->head);
- drm_ht_remove_key(&dev->map_hash,
- r_list->user_token >> PAGE_SHIFT);
- kfree(r_list);
- found = 1;
- break;
- }
- }
-
- if (!found)
- return -EINVAL;
-
- switch (map->type) {
- case _DRM_REGISTERS:
- iounmap(map->handle);
- /* FALLTHROUGH */
- case _DRM_FRAME_BUFFER:
- if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
- int retcode;
- retcode = mtrr_del(map->mtrr, map->offset, map->size);
- DRM_DEBUG("mtrr_del=%d\n", retcode);
- }
- break;
- case _DRM_SHM:
- vfree(map->handle);
- if (master) {
- if (dev->sigdata.lock == master->lock.hw_lock)
- dev->sigdata.lock = NULL;
- master->lock.hw_lock = NULL; /* SHM removed */
- master->lock.file_priv = NULL;
- wake_up_interruptible_all(&master->lock.lock_queue);
- }
- break;
- case _DRM_AGP:
- case _DRM_SCATTER_GATHER:
- break;
- case _DRM_CONSISTENT:
- dmah.vaddr = map->handle;
- dmah.busaddr = map->offset;
- dmah.size = map->size;
- __drm_pci_free(dev, &dmah);
- break;
- case _DRM_GEM:
- DRM_ERROR("tried to rmmap GEM object\n");
- break;
- }
- kfree(map);
-
- return 0;
-}
-EXPORT_SYMBOL(drm_rmmap_locked);
-
-int drm_rmmap(struct drm_device *dev, struct drm_local_map *map)
-{
- int ret;
-
- mutex_lock(&dev->struct_mutex);
- ret = drm_rmmap_locked(dev, map);
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
-}
-EXPORT_SYMBOL(drm_rmmap);
-
-/* The rmmap ioctl appears to be unnecessary. All mappings are torn down on
- * the last close of the device, and this is necessary for cleanup when things
- * exit uncleanly. Therefore, having userland manually remove mappings seems
- * like a pointless exercise since they're going away anyway.
- *
- * One use case might be after addmap is allowed for normal users for SHM and
- * gets used by drivers that the server doesn't need to care about. This seems
- * unlikely.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a struct drm_map structure.
- * \return zero on success or a negative value on error.
- */
-int drm_rmmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_map *request = data;
- struct drm_local_map *map = NULL;
- struct drm_map_list *r_list;
- int ret;
-
- mutex_lock(&dev->struct_mutex);
- list_for_each_entry(r_list, &dev->maplist, head) {
- if (r_list->map &&
- r_list->user_token == (unsigned long)request->handle &&
- r_list->map->flags & _DRM_REMOVABLE) {
- map = r_list->map;
- break;
- }
- }
-
-	/* List has wrapped around to the head pointer, or it's empty and we
-	 * didn't find anything.
- */
- if (list_empty(&dev->maplist) || !map) {
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
- }
-
- /* Register and framebuffer maps are permanent */
- if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
- mutex_unlock(&dev->struct_mutex);
- return 0;
- }
-
- ret = drm_rmmap_locked(dev, map);
-
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
-}
-
-/**
- * Cleanup after an error on one of the addbufs() functions.
- *
- * \param dev DRM device.
- * \param entry buffer entry where the error occurred.
- *
- * Frees any pages and buffers associated with the given entry.
- */
-static void drm_cleanup_buf_error(struct drm_device * dev,
- struct drm_buf_entry * entry)
-{
- int i;
-
- if (entry->seg_count) {
- for (i = 0; i < entry->seg_count; i++) {
- if (entry->seglist[i]) {
- drm_pci_free(dev, entry->seglist[i]);
- }
- }
- kfree(entry->seglist);
-
- entry->seg_count = 0;
- }
-
- if (entry->buf_count) {
- for (i = 0; i < entry->buf_count; i++) {
- kfree(entry->buflist[i].dev_private);
- }
- kfree(entry->buflist);
-
- entry->buf_count = 0;
- }
-}
-
-#if __OS_HAS_AGP
-/**
- * Add AGP buffers for DMA transfers.
- *
- * \param dev struct drm_device to which the buffers are to be added.
- * \param request pointer to a struct drm_buf_desc describing the request.
- * \return zero on success or a negative number on failure.
- *
- * After some sanity checks, creates a drm_buf structure for each buffer and
- * reallocates the buffer list of the same size order to accommodate the new
- * buffers.
- */
-int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
-{
- struct drm_device_dma *dma = dev->dma;
- struct drm_buf_entry *entry;
- struct drm_agp_mem *agp_entry;
- struct drm_buf *buf;
- unsigned long offset;
- unsigned long agp_offset;
- int count;
- int order;
- int size;
- int alignment;
- int page_order;
- int total;
- int byte_count;
- int i, valid;
- struct drm_buf **temp_buflist;
-
- if (!dma)
- return -EINVAL;
-
- count = request->count;
- order = drm_order(request->size);
- size = 1 << order;
-
- alignment = (request->flags & _DRM_PAGE_ALIGN)
- ? PAGE_ALIGN(size) : size;
- page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
- total = PAGE_SIZE << page_order;
-
- byte_count = 0;
- agp_offset = dev->agp->base + request->agp_start;
-
- DRM_DEBUG("count: %d\n", count);
- DRM_DEBUG("order: %d\n", order);
- DRM_DEBUG("size: %d\n", size);
- DRM_DEBUG("agp_offset: %lx\n", agp_offset);
- DRM_DEBUG("alignment: %d\n", alignment);
- DRM_DEBUG("page_order: %d\n", page_order);
- DRM_DEBUG("total: %d\n", total);
-
- if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
- return -EINVAL;
- if (dev->queue_count)
- return -EBUSY; /* Not while in use */
-
- /* Make sure buffers are located in AGP memory that we own */
- valid = 0;
- list_for_each_entry(agp_entry, &dev->agp->memory, head) {
- if ((agp_offset >= agp_entry->bound) &&
- (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
- valid = 1;
- break;
- }
- }
- if (!list_empty(&dev->agp->memory) && !valid) {
- DRM_DEBUG("zone invalid\n");
- return -EINVAL;
- }
- spin_lock(&dev->count_lock);
- if (dev->buf_use) {
- spin_unlock(&dev->count_lock);
- return -EBUSY;
- }
- atomic_inc(&dev->buf_alloc);
- spin_unlock(&dev->count_lock);
-
- mutex_lock(&dev->struct_mutex);
- entry = &dma->bufs[order];
- if (entry->buf_count) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM; /* May only call once for each order */
- }
-
- if (count < 0 || count > 4096) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -EINVAL;
- }
-
- entry->buflist = kmalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
- if (!entry->buflist) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
- memset(entry->buflist, 0, count * sizeof(*entry->buflist));
-
- entry->buf_size = size;
- entry->page_order = page_order;
-
- offset = 0;
-
- while (entry->buf_count < count) {
- buf = &entry->buflist[entry->buf_count];
- buf->idx = dma->buf_count + entry->buf_count;
- buf->total = alignment;
- buf->order = order;
- buf->used = 0;
-
- buf->offset = (dma->byte_count + offset);
- buf->bus_address = agp_offset + offset;
- buf->address = (void *)(agp_offset + offset);
- buf->next = NULL;
- buf->waiting = 0;
- buf->pending = 0;
- init_waitqueue_head(&buf->dma_wait);
- buf->file_priv = NULL;
-
- buf->dev_priv_size = dev->driver->dev_priv_size;
- buf->dev_private = kmalloc(buf->dev_priv_size, GFP_KERNEL);
- if (!buf->dev_private) {
- /* Set count correctly so we free the proper amount. */
- entry->buf_count = count;
- drm_cleanup_buf_error(dev, entry);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
- memset(buf->dev_private, 0, buf->dev_priv_size);
-
- DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
-
- offset += alignment;
- entry->buf_count++;
- byte_count += PAGE_SIZE << page_order;
- }
-
- DRM_DEBUG("byte_count: %d\n", byte_count);
-
- temp_buflist = krealloc(dma->buflist,
- (dma->buf_count + entry->buf_count) *
- sizeof(*dma->buflist), GFP_KERNEL);
- if (!temp_buflist) {
- /* Free the entry because it isn't valid */
- drm_cleanup_buf_error(dev, entry);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
- dma->buflist = temp_buflist;
-
- for (i = 0; i < entry->buf_count; i++) {
- dma->buflist[i + dma->buf_count] = &entry->buflist[i];
- }
-
- dma->buf_count += entry->buf_count;
- dma->seg_count += entry->seg_count;
- dma->page_count += byte_count >> PAGE_SHIFT;
- dma->byte_count += byte_count;
-
- DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
- DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
-
- mutex_unlock(&dev->struct_mutex);
-
- request->count = entry->buf_count;
- request->size = size;
-
- dma->flags = _DRM_DMA_USE_AGP;
-
- atomic_dec(&dev->buf_alloc);
- return 0;
-}
-EXPORT_SYMBOL(drm_addbufs_agp);
-#endif /* __OS_HAS_AGP */
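
(Editorial note, not part of the patch.) The AGP, PCI, SG and FB addbufs variants all share the same request-size arithmetic: drm_order(), optional page alignment, page_order and total. A small stand-alone sketch of that arithmetic, assuming a 4 KiB page and a 20 KiB request purely for illustration (order_of() mirrors drm_order() further down in this file):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    /* Same computation as drm_order(): exponent of the smallest power of two
     * greater than or equal to size. */
    static int order_of(unsigned long size)
    {
            int order;
            unsigned long tmp;

            for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++)
                    ;
            if (size & (size - 1))
                    ++order;
            return order;
    }

    int main(void)
    {
            unsigned long request_size = 20 * 1024;   /* example request */
            int order = order_of(request_size);
            unsigned long size = 1UL << order;
            unsigned long alignment = PAGE_ALIGN(size);   /* _DRM_PAGE_ALIGN case */
            int page_order = order > PAGE_SHIFT ? order - PAGE_SHIFT : 0;
            unsigned long total = PAGE_SIZE << page_order;

            /* Prints: order=15 size=32768 alignment=32768 page_order=3 total=32768 */
            printf("order=%d size=%lu alignment=%lu page_order=%d total=%lu\n",
                   order, size, alignment, page_order, total);
            return 0;
    }
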
-
-int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
-{
- struct drm_device_dma *dma = dev->dma;
- int count;
- int order;
- int size;
- int total;
- int page_order;
- struct drm_buf_entry *entry;
- drm_dma_handle_t *dmah;
- struct drm_buf *buf;
- int alignment;
- unsigned long offset;
- int i;
- int byte_count;
- int page_count;
- unsigned long *temp_pagelist;
- struct drm_buf **temp_buflist;
-
- if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
- return -EINVAL;
-
- if (!dma)
- return -EINVAL;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- count = request->count;
- order = drm_order(request->size);
- size = 1 << order;
-
- DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n",
- request->count, request->size, size, order, dev->queue_count);
-
- if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
- return -EINVAL;
- if (dev->queue_count)
- return -EBUSY; /* Not while in use */
-
- alignment = (request->flags & _DRM_PAGE_ALIGN)
- ? PAGE_ALIGN(size) : size;
- page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
- total = PAGE_SIZE << page_order;
-
- spin_lock(&dev->count_lock);
- if (dev->buf_use) {
- spin_unlock(&dev->count_lock);
- return -EBUSY;
- }
- atomic_inc(&dev->buf_alloc);
- spin_unlock(&dev->count_lock);
-
- mutex_lock(&dev->struct_mutex);
- entry = &dma->bufs[order];
- if (entry->buf_count) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM; /* May only call once for each order */
- }
-
- if (count < 0 || count > 4096) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -EINVAL;
- }
-
- entry->buflist = kmalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
- if (!entry->buflist) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
- memset(entry->buflist, 0, count * sizeof(*entry->buflist));
-
- entry->seglist = kmalloc(count * sizeof(*entry->seglist), GFP_KERNEL);
- if (!entry->seglist) {
- kfree(entry->buflist);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
- memset(entry->seglist, 0, count * sizeof(*entry->seglist));
-
- /* Keep the original pagelist until we know all the allocations
- * have succeeded
- */
- temp_pagelist = kmalloc((dma->page_count + (count << page_order)) *
- sizeof(*dma->pagelist), GFP_KERNEL);
- if (!temp_pagelist) {
- kfree(entry->buflist);
- kfree(entry->seglist);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
- memcpy(temp_pagelist,
- dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
- DRM_DEBUG("pagelist: %d entries\n",
- dma->page_count + (count << page_order));
-
- entry->buf_size = size;
- entry->page_order = page_order;
- byte_count = 0;
- page_count = 0;
-
- while (entry->buf_count < count) {
-
- dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful);
-
- if (!dmah) {
- /* Set count correctly so we free the proper amount. */
- entry->buf_count = count;
- entry->seg_count = count;
- drm_cleanup_buf_error(dev, entry);
- kfree(temp_pagelist);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
- entry->seglist[entry->seg_count++] = dmah;
- for (i = 0; i < (1 << page_order); i++) {
- DRM_DEBUG("page %d @ 0x%08lx\n",
- dma->page_count + page_count,
- (unsigned long)dmah->vaddr + PAGE_SIZE * i);
- temp_pagelist[dma->page_count + page_count++]
- = (unsigned long)dmah->vaddr + PAGE_SIZE * i;
- }
- for (offset = 0;
- offset + size <= total && entry->buf_count < count;
- offset += alignment, ++entry->buf_count) {
- buf = &entry->buflist[entry->buf_count];
- buf->idx = dma->buf_count + entry->buf_count;
- buf->total = alignment;
- buf->order = order;
- buf->used = 0;
- buf->offset = (dma->byte_count + byte_count + offset);
- buf->address = (void *)(dmah->vaddr + offset);
- buf->bus_address = dmah->busaddr + offset;
- buf->next = NULL;
- buf->waiting = 0;
- buf->pending = 0;
- init_waitqueue_head(&buf->dma_wait);
- buf->file_priv = NULL;
-
- buf->dev_priv_size = dev->driver->dev_priv_size;
- buf->dev_private = kmalloc(buf->dev_priv_size,
- GFP_KERNEL);
- if (!buf->dev_private) {
- /* Set count correctly so we free the proper amount. */
- entry->buf_count = count;
- entry->seg_count = count;
- drm_cleanup_buf_error(dev, entry);
- kfree(temp_pagelist);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
- memset(buf->dev_private, 0, buf->dev_priv_size);
-
- DRM_DEBUG("buffer %d @ %p\n",
- entry->buf_count, buf->address);
- }
- byte_count += PAGE_SIZE << page_order;
- }
-
- temp_buflist = krealloc(dma->buflist,
- (dma->buf_count + entry->buf_count) *
- sizeof(*dma->buflist), GFP_KERNEL);
- if (!temp_buflist) {
- /* Free the entry because it isn't valid */
- drm_cleanup_buf_error(dev, entry);
- kfree(temp_pagelist);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
- dma->buflist = temp_buflist;
-
- for (i = 0; i < entry->buf_count; i++) {
- dma->buflist[i + dma->buf_count] = &entry->buflist[i];
- }
-
-	/* No allocations failed, so now we can replace the original pagelist
- * with the new one.
- */
- if (dma->page_count) {
- kfree(dma->pagelist);
- }
- dma->pagelist = temp_pagelist;
-
- dma->buf_count += entry->buf_count;
- dma->seg_count += entry->seg_count;
- dma->page_count += entry->seg_count << page_order;
- dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
-
- mutex_unlock(&dev->struct_mutex);
-
- request->count = entry->buf_count;
- request->size = size;
-
- if (request->flags & _DRM_PCI_BUFFER_RO)
- dma->flags = _DRM_DMA_USE_PCI_RO;
-
- atomic_dec(&dev->buf_alloc);
- return 0;
-
-}
-EXPORT_SYMBOL(drm_addbufs_pci);
-
-static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request)
-{
- struct drm_device_dma *dma = dev->dma;
- struct drm_buf_entry *entry;
- struct drm_buf *buf;
- unsigned long offset;
- unsigned long agp_offset;
- int count;
- int order;
- int size;
- int alignment;
- int page_order;
- int total;
- int byte_count;
- int i;
- struct drm_buf **temp_buflist;
-
- if (!drm_core_check_feature(dev, DRIVER_SG))
- return -EINVAL;
-
- if (!dma)
- return -EINVAL;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- count = request->count;
- order = drm_order(request->size);
- size = 1 << order;
-
- alignment = (request->flags & _DRM_PAGE_ALIGN)
- ? PAGE_ALIGN(size) : size;
- page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
- total = PAGE_SIZE << page_order;
-
- byte_count = 0;
- agp_offset = request->agp_start;
-
- DRM_DEBUG("count: %d\n", count);
- DRM_DEBUG("order: %d\n", order);
- DRM_DEBUG("size: %d\n", size);
- DRM_DEBUG("agp_offset: %lu\n", agp_offset);
- DRM_DEBUG("alignment: %d\n", alignment);
- DRM_DEBUG("page_order: %d\n", page_order);
- DRM_DEBUG("total: %d\n", total);
-
- if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
- return -EINVAL;
- if (dev->queue_count)
- return -EBUSY; /* Not while in use */
-
- spin_lock(&dev->count_lock);
- if (dev->buf_use) {
- spin_unlock(&dev->count_lock);
- return -EBUSY;
- }
- atomic_inc(&dev->buf_alloc);
- spin_unlock(&dev->count_lock);
-
- mutex_lock(&dev->struct_mutex);
- entry = &dma->bufs[order];
- if (entry->buf_count) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM; /* May only call once for each order */
- }
-
- if (count < 0 || count > 4096) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -EINVAL;
- }
-
- entry->buflist = kmalloc(count * sizeof(*entry->buflist),
- GFP_KERNEL);
- if (!entry->buflist) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
- memset(entry->buflist, 0, count * sizeof(*entry->buflist));
-
- entry->buf_size = size;
- entry->page_order = page_order;
-
- offset = 0;
-
- while (entry->buf_count < count) {
- buf = &entry->buflist[entry->buf_count];
- buf->idx = dma->buf_count + entry->buf_count;
- buf->total = alignment;
- buf->order = order;
- buf->used = 0;
-
- buf->offset = (dma->byte_count + offset);
- buf->bus_address = agp_offset + offset;
- buf->address = (void *)(agp_offset + offset
- + (unsigned long)dev->sg->virtual);
- buf->next = NULL;
- buf->waiting = 0;
- buf->pending = 0;
- init_waitqueue_head(&buf->dma_wait);
- buf->file_priv = NULL;
-
- buf->dev_priv_size = dev->driver->dev_priv_size;
- buf->dev_private = kmalloc(buf->dev_priv_size, GFP_KERNEL);
- if (!buf->dev_private) {
- /* Set count correctly so we free the proper amount. */
- entry->buf_count = count;
- drm_cleanup_buf_error(dev, entry);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
-
- memset(buf->dev_private, 0, buf->dev_priv_size);
-
- DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
-
- offset += alignment;
- entry->buf_count++;
- byte_count += PAGE_SIZE << page_order;
- }
-
- DRM_DEBUG("byte_count: %d\n", byte_count);
-
- temp_buflist = krealloc(dma->buflist,
- (dma->buf_count + entry->buf_count) *
- sizeof(*dma->buflist), GFP_KERNEL);
- if (!temp_buflist) {
- /* Free the entry because it isn't valid */
- drm_cleanup_buf_error(dev, entry);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
- dma->buflist = temp_buflist;
-
- for (i = 0; i < entry->buf_count; i++) {
- dma->buflist[i + dma->buf_count] = &entry->buflist[i];
- }
-
- dma->buf_count += entry->buf_count;
- dma->seg_count += entry->seg_count;
- dma->page_count += byte_count >> PAGE_SHIFT;
- dma->byte_count += byte_count;
-
- DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
- DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
-
- mutex_unlock(&dev->struct_mutex);
-
- request->count = entry->buf_count;
- request->size = size;
-
- dma->flags = _DRM_DMA_USE_SG;
-
- atomic_dec(&dev->buf_alloc);
- return 0;
-}
-
-static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request)
-{
- struct drm_device_dma *dma = dev->dma;
- struct drm_buf_entry *entry;
- struct drm_buf *buf;
- unsigned long offset;
- unsigned long agp_offset;
- int count;
- int order;
- int size;
- int alignment;
- int page_order;
- int total;
- int byte_count;
- int i;
- struct drm_buf **temp_buflist;
-
- if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
- return -EINVAL;
-
- if (!dma)
- return -EINVAL;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- count = request->count;
- order = drm_order(request->size);
- size = 1 << order;
-
- alignment = (request->flags & _DRM_PAGE_ALIGN)
- ? PAGE_ALIGN(size) : size;
- page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
- total = PAGE_SIZE << page_order;
-
- byte_count = 0;
- agp_offset = request->agp_start;
-
- DRM_DEBUG("count: %d\n", count);
- DRM_DEBUG("order: %d\n", order);
- DRM_DEBUG("size: %d\n", size);
- DRM_DEBUG("agp_offset: %lu\n", agp_offset);
- DRM_DEBUG("alignment: %d\n", alignment);
- DRM_DEBUG("page_order: %d\n", page_order);
- DRM_DEBUG("total: %d\n", total);
-
- if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
- return -EINVAL;
- if (dev->queue_count)
- return -EBUSY; /* Not while in use */
-
- spin_lock(&dev->count_lock);
- if (dev->buf_use) {
- spin_unlock(&dev->count_lock);
- return -EBUSY;
- }
- atomic_inc(&dev->buf_alloc);
- spin_unlock(&dev->count_lock);
-
- mutex_lock(&dev->struct_mutex);
- entry = &dma->bufs[order];
- if (entry->buf_count) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM; /* May only call once for each order */
- }
-
- if (count < 0 || count > 4096) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -EINVAL;
- }
-
- entry->buflist = kmalloc(count * sizeof(*entry->buflist),
- GFP_KERNEL);
- if (!entry->buflist) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
- memset(entry->buflist, 0, count * sizeof(*entry->buflist));
-
- entry->buf_size = size;
- entry->page_order = page_order;
-
- offset = 0;
-
- while (entry->buf_count < count) {
- buf = &entry->buflist[entry->buf_count];
- buf->idx = dma->buf_count + entry->buf_count;
- buf->total = alignment;
- buf->order = order;
- buf->used = 0;
-
- buf->offset = (dma->byte_count + offset);
- buf->bus_address = agp_offset + offset;
- buf->address = (void *)(agp_offset + offset);
- buf->next = NULL;
- buf->waiting = 0;
- buf->pending = 0;
- init_waitqueue_head(&buf->dma_wait);
- buf->file_priv = NULL;
-
- buf->dev_priv_size = dev->driver->dev_priv_size;
- buf->dev_private = kmalloc(buf->dev_priv_size, GFP_KERNEL);
- if (!buf->dev_private) {
- /* Set count correctly so we free the proper amount. */
- entry->buf_count = count;
- drm_cleanup_buf_error(dev, entry);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
- memset(buf->dev_private, 0, buf->dev_priv_size);
-
- DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
-
- offset += alignment;
- entry->buf_count++;
- byte_count += PAGE_SIZE << page_order;
- }
-
- DRM_DEBUG("byte_count: %d\n", byte_count);
-
- temp_buflist = krealloc(dma->buflist,
- (dma->buf_count + entry->buf_count) *
- sizeof(*dma->buflist), GFP_KERNEL);
- if (!temp_buflist) {
- /* Free the entry because it isn't valid */
- drm_cleanup_buf_error(dev, entry);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
- dma->buflist = temp_buflist;
-
- for (i = 0; i < entry->buf_count; i++) {
- dma->buflist[i + dma->buf_count] = &entry->buflist[i];
- }
-
- dma->buf_count += entry->buf_count;
- dma->seg_count += entry->seg_count;
- dma->page_count += byte_count >> PAGE_SHIFT;
- dma->byte_count += byte_count;
-
- DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
- DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
-
- mutex_unlock(&dev->struct_mutex);
-
- request->count = entry->buf_count;
- request->size = size;
-
- dma->flags = _DRM_DMA_USE_FB;
-
- atomic_dec(&dev->buf_alloc);
- return 0;
-}
-
-
-/**
- * Add buffers for DMA transfers (ioctl).
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a struct drm_buf_desc request.
- * \return zero on success or a negative number on failure.
- *
- * According to the memory type specified in drm_buf_desc::flags and the
- * build options, it dispatches the call either to addbufs_agp(),
- * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
- * PCI memory respectively.
- */
-int drm_addbufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_buf_desc *request = data;
- int ret;
-
- if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
- return -EINVAL;
-
-#if __OS_HAS_AGP
- if (request->flags & _DRM_AGP_BUFFER)
- ret = drm_addbufs_agp(dev, request);
- else
-#endif
- if (request->flags & _DRM_SG_BUFFER)
- ret = drm_addbufs_sg(dev, request);
- else if (request->flags & _DRM_FB_BUFFER)
- ret = drm_addbufs_fb(dev, request);
- else
- ret = drm_addbufs_pci(dev, request);
-
- return ret;
-}
-
-/**
- * Get information about the buffer mappings.
- *
- * This was originally meant for debugging purposes, or for use by a sophisticated
- * client library to determine how best to use the available buffers (e.g.,
- * large buffers can be used for image transfer).
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a drm_buf_info structure.
- * \return zero on success or a negative number on failure.
- *
- * Increments drm_device::buf_use while holding the drm_device::count_lock
- * lock, preventing the allocation of more buffers after this call. Information
- * about each requested buffer is then copied into user space.
- */
-int drm_infobufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_device_dma *dma = dev->dma;
- struct drm_buf_info *request = data;
- int i;
- int count;
-
- if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
- return -EINVAL;
-
- if (!dma)
- return -EINVAL;
-
- spin_lock(&dev->count_lock);
- if (atomic_read(&dev->buf_alloc)) {
- spin_unlock(&dev->count_lock);
- return -EBUSY;
- }
- ++dev->buf_use; /* Can't allocate more after this call */
- spin_unlock(&dev->count_lock);
-
- for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
- if (dma->bufs[i].buf_count)
- ++count;
- }
-
- DRM_DEBUG("count = %d\n", count);
-
- if (request->count >= count) {
- for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
- if (dma->bufs[i].buf_count) {
- struct drm_buf_desc __user *to =
- &request->list[count];
- struct drm_buf_entry *from = &dma->bufs[i];
- struct drm_freelist *list = &dma->bufs[i].freelist;
- if (copy_to_user(&to->count,
- &from->buf_count,
- sizeof(from->buf_count)) ||
- copy_to_user(&to->size,
- &from->buf_size,
- sizeof(from->buf_size)) ||
- copy_to_user(&to->low_mark,
- &list->low_mark,
- sizeof(list->low_mark)) ||
- copy_to_user(&to->high_mark,
- &list->high_mark,
- sizeof(list->high_mark)))
- return -EFAULT;
-
- DRM_DEBUG("%d %d %d %d %d\n",
- i,
- dma->bufs[i].buf_count,
- dma->bufs[i].buf_size,
- dma->bufs[i].freelist.low_mark,
- dma->bufs[i].freelist.high_mark);
- ++count;
- }
- }
- }
- request->count = count;
-
- return 0;
-}
-
-/**
- * Specifies a low and high water mark for buffer allocation
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg a pointer to a drm_buf_desc structure.
- * \return zero on success or a negative number on failure.
- *
- * Verifies that the size order is within the admissible range and updates the
- * low and high water marks of the respective drm_device_dma::bufs entry.
- *
- * \note This ioctl is deprecated and mostly never used.
- */
-int drm_markbufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_device_dma *dma = dev->dma;
- struct drm_buf_desc *request = data;
- int order;
- struct drm_buf_entry *entry;
-
- if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
- return -EINVAL;
-
- if (!dma)
- return -EINVAL;
-
- DRM_DEBUG("%d, %d, %d\n",
- request->size, request->low_mark, request->high_mark);
- order = drm_order(request->size);
- if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
- return -EINVAL;
- entry = &dma->bufs[order];
-
- if (request->low_mark < 0 || request->low_mark > entry->buf_count)
- return -EINVAL;
- if (request->high_mark < 0 || request->high_mark > entry->buf_count)
- return -EINVAL;
-
- entry->freelist.low_mark = request->low_mark;
- entry->freelist.high_mark = request->high_mark;
-
- return 0;
-}
-
-/**
- * Unreserve the buffers in list, previously reserved using drmDMA.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a drm_buf_free structure.
- * \return zero on success or a negative number on failure.
- *
- * Calls free_buffer() for each used buffer.
- * This function is primarily used for debugging.
- */
-int drm_freebufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_device_dma *dma = dev->dma;
- struct drm_buf_free *request = data;
- int i;
- int idx;
- struct drm_buf *buf;
-
- if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
- return -EINVAL;
-
- if (!dma)
- return -EINVAL;
-
- DRM_DEBUG("%d\n", request->count);
- for (i = 0; i < request->count; i++) {
- if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
- return -EFAULT;
- if (idx < 0 || idx >= dma->buf_count) {
- DRM_ERROR("Index %d (of %d max)\n",
- idx, dma->buf_count - 1);
- return -EINVAL;
- }
- buf = dma->buflist[idx];
- if (buf->file_priv != file_priv) {
- DRM_ERROR("Process %d freeing buffer not owned\n",
- task_pid_nr(current));
- return -EINVAL;
- }
- drm_free_buffer(dev, buf);
- }
-
- return 0;
-}
-
-/**
- * Maps all of the DMA buffers into client-virtual space (ioctl).
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a drm_buf_map structure.
- * \return zero on success or a negative number on failure.
- *
- * Maps the AGP, SG or PCI buffer region with do_mmap(), and copies information
- * about each buffer into user space. For PCI buffers, it calls do_mmap() with
- * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
- * drm_mmap_dma().
- */
-int drm_mapbufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_device_dma *dma = dev->dma;
- int retcode = 0;
- const int zero = 0;
- unsigned long virtual;
- unsigned long address;
- struct drm_buf_map *request = data;
- int i;
-
- if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
- return -EINVAL;
-
- if (!dma)
- return -EINVAL;
-
- spin_lock(&dev->count_lock);
- if (atomic_read(&dev->buf_alloc)) {
- spin_unlock(&dev->count_lock);
- return -EBUSY;
- }
- dev->buf_use++; /* Can't allocate more after this call */
- spin_unlock(&dev->count_lock);
-
- if (request->count >= dma->buf_count) {
- if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
- || (drm_core_check_feature(dev, DRIVER_SG)
- && (dma->flags & _DRM_DMA_USE_SG))
- || (drm_core_check_feature(dev, DRIVER_FB_DMA)
- && (dma->flags & _DRM_DMA_USE_FB))) {
- struct drm_local_map *map = dev->agp_buffer_map;
- unsigned long token = dev->agp_buffer_token;
-
- if (!map) {
- retcode = -EINVAL;
- goto done;
- }
-			down_write(&current->mm->mmap_sem);
- virtual = do_mmap(file_priv->filp, 0, map->size,
- PROT_READ | PROT_WRITE,
- MAP_SHARED,
- token);
-			up_write(&current->mm->mmap_sem);
- } else {
-			down_write(&current->mm->mmap_sem);
- virtual = do_mmap(file_priv->filp, 0, dma->byte_count,
- PROT_READ | PROT_WRITE,
- MAP_SHARED, 0);
-			up_write(&current->mm->mmap_sem);
- }
- if (virtual > -1024UL) {
- /* Real error */
- retcode = (signed long)virtual;
- goto done;
- }
- request->virtual = (void __user *)virtual;
-
- for (i = 0; i < dma->buf_count; i++) {
- if (copy_to_user(&request->list[i].idx,
- &dma->buflist[i]->idx,
- sizeof(request->list[0].idx))) {
- retcode = -EFAULT;
- goto done;
- }
- if (copy_to_user(&request->list[i].total,
- &dma->buflist[i]->total,
- sizeof(request->list[0].total))) {
- retcode = -EFAULT;
- goto done;
- }
- if (copy_to_user(&request->list[i].used,
- &zero, sizeof(zero))) {
- retcode = -EFAULT;
- goto done;
- }
- address = virtual + dma->buflist[i]->offset; /* *** */
- if (copy_to_user(&request->list[i].address,
- &address, sizeof(address))) {
- retcode = -EFAULT;
- goto done;
- }
- }
- }
- done:
- request->count = dma->buf_count;
- DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);
-
- return retcode;
-}
-
-/**
- * Compute size order. Returns the exponent of the smallest power of two which
- * is greater than or equal to the given number.
- *
- * \param size size.
- * \return order.
- *
- * \todo Can be made faster.
- */
-int drm_order(unsigned long size)
-{
- int order;
- unsigned long tmp;
-
- for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;
-
- if (size & (size - 1))
- ++order;
-
- return order;
-}
-EXPORT_SYMBOL(drm_order);
+++ /dev/null
-/**************************************************************************
- *
- * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- */
-
-#include "drmP.h"
-
-#if defined(CONFIG_X86)
-static void
-drm_clflush_page(struct page *page)
-{
- uint8_t *page_virtual;
- unsigned int i;
-
- if (unlikely(page == NULL))
- return;
-
- page_virtual = kmap_atomic(page, KM_USER0);
- for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
- clflush(page_virtual + i);
- kunmap_atomic(page_virtual, KM_USER0);
-}
-
-static void drm_cache_flush_clflush(struct page *pages[],
- unsigned long num_pages)
-{
- unsigned long i;
-
- mb();
- for (i = 0; i < num_pages; i++)
- drm_clflush_page(*pages++);
- mb();
-}
-
-static void
-drm_clflush_ipi_handler(void *null)
-{
- wbinvd();
-}
-#endif
-
-void
-drm_clflush_pages(struct page *pages[], unsigned long num_pages)
-{
-
-#if defined(CONFIG_X86)
- if (cpu_has_clflush) {
- drm_cache_flush_clflush(pages, num_pages);
- return;
- }
-
- if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
- printk(KERN_ERR "Timed out waiting for cache flush.\n");
-
-#elif defined(__powerpc__)
- unsigned long i;
- for (i = 0; i < num_pages; i++) {
- struct page *page = pages[i];
- void *page_virtual;
-
- if (unlikely(page == NULL))
- continue;
-
- page_virtual = kmap_atomic(page, KM_USER0);
- flush_dcache_range((unsigned long)page_virtual,
- (unsigned long)page_virtual + PAGE_SIZE);
- kunmap_atomic(page_virtual, KM_USER0);
- }
-#else
- printk(KERN_ERR "Architecture has no drm_cache.c support\n");
- WARN_ON_ONCE(1);
-#endif
-}
-EXPORT_SYMBOL(drm_clflush_pages);
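
(Editorial note, not part of the patch.) The x86 path above is simply "fence, clflush each cache line, fence". Below is a hedged, stand-alone user-space equivalent using the SSE2 intrinsics; the 64-byte line size is an assumption for illustration (the kernel reads the real value from boot_cpu_data.x86_clflush_size).

    #include <emmintrin.h>
    #include <stdlib.h>

    #define CACHE_LINE 64   /* assumed line size */

    static void flush_buffer(void *buf, size_t len)
    {
            char *p = buf;
            size_t i;

            _mm_mfence();
            for (i = 0; i < len; i += CACHE_LINE)
                    _mm_clflush(p + i);
            _mm_mfence();
    }

    int main(void)
    {
            void *buf = aligned_alloc(CACHE_LINE, 4096);

            if (!buf)
                    return 1;
            flush_buffer(buf, 4096);
            free(buf);
            return 0;
    }
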
+++ /dev/null
-/**
- * \file drm_context.c
- * IOCTLs for generic contexts
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Fri Nov 24 18:31:37 2000 by gareth@valinux.com
- *
- * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-/*
- * ChangeLog:
- * 2001-11-16 Torsten Duwe <duwe@caldera.de>
- * added context constructor/destructor hooks,
- * needed by SiS driver's memory management.
- */
-
-#include "drmP.h"
-
-/******************************************************************/
-/** \name Context bitmap support */
-/*@{*/
-
-/**
- * Free a handle from the context bitmap.
- *
- * \param dev DRM device.
- * \param ctx_handle context handle.
- *
- * Clears the bit specified by \p ctx_handle in drm_device::ctx_bitmap and the entry
- * in drm_device::ctx_idr, while holding the drm_device::struct_mutex
- * lock.
- */
-void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
-{
- mutex_lock(&dev->struct_mutex);
- idr_remove(&dev->ctx_idr, ctx_handle);
- mutex_unlock(&dev->struct_mutex);
-}
-
-/**
- * Context bitmap allocation.
- *
- * \param dev DRM device.
- * \return (non-negative) context handle on success or a negative number on failure.
- *
- * Allocate a new idr from drm_device::ctx_idr while holding the
- * drm_device::struct_mutex lock.
- */
-static int drm_ctxbitmap_next(struct drm_device * dev)
-{
- int new_id;
- int ret;
-
-again:
- if (idr_pre_get(&dev->ctx_idr, GFP_KERNEL) == 0) {
- DRM_ERROR("Out of memory expanding drawable idr\n");
- return -ENOMEM;
- }
- mutex_lock(&dev->struct_mutex);
- ret = idr_get_new_above(&dev->ctx_idr, NULL,
- DRM_RESERVED_CONTEXTS, &new_id);
- if (ret == -EAGAIN) {
- mutex_unlock(&dev->struct_mutex);
- goto again;
- }
- mutex_unlock(&dev->struct_mutex);
- return new_id;
-}
-
-/**
- * Context bitmap initialization.
- *
- * \param dev DRM device.
- *
- * Initialise the drm_device::ctx_idr
- */
-int drm_ctxbitmap_init(struct drm_device * dev)
-{
- idr_init(&dev->ctx_idr);
- return 0;
-}
-
-/**
- * Context bitmap cleanup.
- *
- * \param dev DRM device.
- *
- * Free all idr members using drm_ctx_sarea_free helper function
- * while holding the drm_device::struct_mutex lock.
- */
-void drm_ctxbitmap_cleanup(struct drm_device * dev)
-{
- mutex_lock(&dev->struct_mutex);
- idr_remove_all(&dev->ctx_idr);
- mutex_unlock(&dev->struct_mutex);
-}
-
-/*@}*/
-
-/******************************************************************/
-/** \name Per Context SAREA Support */
-/*@{*/
-
-/**
- * Get per-context SAREA.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument pointing to a drm_ctx_priv_map structure.
- * \return zero on success or a negative number on failure.
- *
- * Gets the map from drm_device::ctx_idr with the handle specified and
- * returns its handle.
- */
-int drm_getsareactx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_ctx_priv_map *request = data;
- struct drm_local_map *map;
- struct drm_map_list *_entry;
-
- mutex_lock(&dev->struct_mutex);
-
- map = idr_find(&dev->ctx_idr, request->ctx_id);
- if (!map) {
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
- }
-
- mutex_unlock(&dev->struct_mutex);
-
- request->handle = NULL;
- list_for_each_entry(_entry, &dev->maplist, head) {
- if (_entry->map == map) {
- request->handle =
- (void *)(unsigned long)_entry->user_token;
- break;
- }
- }
- if (request->handle == NULL)
- return -EINVAL;
-
- return 0;
-}
-
-/**
- * Set per-context SAREA.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument pointing to a drm_ctx_priv_map structure.
- * \return zero on success or a negative number on failure.
- *
- * Searches for the mapping specified in \p arg and updates the entry in
- * drm_device::ctx_idr with it.
- */
-int drm_setsareactx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_ctx_priv_map *request = data;
- struct drm_local_map *map = NULL;
- struct drm_map_list *r_list = NULL;
-
- mutex_lock(&dev->struct_mutex);
- list_for_each_entry(r_list, &dev->maplist, head) {
- if (r_list->map
- && r_list->user_token == (unsigned long) request->handle)
- goto found;
- }
- bad:
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
-
- found:
- map = r_list->map;
- if (!map)
- goto bad;
-
- if (IS_ERR(idr_replace(&dev->ctx_idr, map, request->ctx_id)))
- goto bad;
-
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
-/*@}*/
-
-/******************************************************************/
-/** \name The actual DRM context handling routines */
-/*@{*/
-
-/**
- * Switch context.
- *
- * \param dev DRM device.
- * \param old old context handle.
- * \param new new context handle.
- * \return zero on success or a negative number on failure.
- *
- * Attempt to set drm_device::context_flag.
- */
-static int drm_context_switch(struct drm_device * dev, int old, int new)
-{
- if (test_and_set_bit(0, &dev->context_flag)) {
- DRM_ERROR("Reentering -- FIXME\n");
- return -EBUSY;
- }
-
- DRM_DEBUG("Context switch from %d to %d\n", old, new);
-
- if (new == dev->last_context) {
- clear_bit(0, &dev->context_flag);
- return 0;
- }
-
- return 0;
-}
-
-/**
- * Complete context switch.
- *
- * \param dev DRM device.
- * \param new new context handle.
- * \return zero on success or a negative number on failure.
- *
- * Updates drm_device::last_context and drm_device::last_switch. Verifies the
- * hardware lock is held, clears the drm_device::context_flag and wakes up
- * drm_device::context_wait.
- */
-static int drm_context_switch_complete(struct drm_device *dev,
- struct drm_file *file_priv, int new)
-{
- dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
- dev->last_switch = jiffies;
-
- if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) {
- DRM_ERROR("Lock isn't held after context switch\n");
- }
-
- /* If a context switch is ever initiated
- when the kernel holds the lock, release
- that lock here. */
- clear_bit(0, &dev->context_flag);
- wake_up(&dev->context_wait);
-
- return 0;
-}
-
-/**
- * Reserve contexts.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument pointing to a drm_ctx_res structure.
- * \return zero on success or a negative number on failure.
- */
-int drm_resctx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_ctx_res *res = data;
- struct drm_ctx ctx;
- int i;
-
- if (res->count >= DRM_RESERVED_CONTEXTS) {
- memset(&ctx, 0, sizeof(ctx));
- for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
- ctx.handle = i;
- if (copy_to_user(&res->contexts[i], &ctx, sizeof(ctx)))
- return -EFAULT;
- }
- }
- res->count = DRM_RESERVED_CONTEXTS;
-
- return 0;
-}
-
-/**
- * Add context.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument pointing to a drm_ctx structure.
- * \return zero on success or a negative number on failure.
- *
- * Get a new handle for the context and copy to userspace.
- */
-int drm_addctx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_ctx_list *ctx_entry;
- struct drm_ctx *ctx = data;
-
- ctx->handle = drm_ctxbitmap_next(dev);
- if (ctx->handle == DRM_KERNEL_CONTEXT) {
- /* Skip kernel's context and get a new one. */
- ctx->handle = drm_ctxbitmap_next(dev);
- }
- DRM_DEBUG("%d\n", ctx->handle);
- if (ctx->handle == -1) {
- DRM_DEBUG("Not enough free contexts.\n");
- /* Should this return -EBUSY instead? */
- return -ENOMEM;
- }
-
- if (ctx->handle != DRM_KERNEL_CONTEXT) {
- if (dev->driver->context_ctor)
- if (!dev->driver->context_ctor(dev, ctx->handle)) {
- DRM_DEBUG("Running out of ctxs or memory.\n");
- return -ENOMEM;
- }
- }
-
- ctx_entry = kmalloc(sizeof(*ctx_entry), GFP_KERNEL);
- if (!ctx_entry) {
- DRM_DEBUG("out of memory\n");
- return -ENOMEM;
- }
-
- INIT_LIST_HEAD(&ctx_entry->head);
- ctx_entry->handle = ctx->handle;
- ctx_entry->tag = file_priv;
-
- mutex_lock(&dev->ctxlist_mutex);
- list_add(&ctx_entry->head, &dev->ctxlist);
- ++dev->ctx_count;
- mutex_unlock(&dev->ctxlist_mutex);
-
- return 0;
-}
-
-int drm_modctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- /* This does nothing */
- return 0;
-}
-
-/**
- * Get context.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument pointing to a drm_ctx structure.
- * \return zero on success or a negative number on failure.
- */
-int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- struct drm_ctx *ctx = data;
-
- /* This is 0, because we don't handle any context flags */
- ctx->flags = 0;
-
- return 0;
-}
-
-/**
- * Switch context.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument pointing to a drm_ctx structure.
- * \return zero on success or a negative number on failure.
- *
- * Calls context_switch().
- */
-int drm_switchctx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_ctx *ctx = data;
-
- DRM_DEBUG("%d\n", ctx->handle);
- return drm_context_switch(dev, dev->last_context, ctx->handle);
-}
-
-/**
- * New context.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument pointing to a drm_ctx structure.
- * \return zero on success or a negative number on failure.
- *
- * Calls context_switch_complete().
- */
-int drm_newctx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_ctx *ctx = data;
-
- DRM_DEBUG("%d\n", ctx->handle);
- drm_context_switch_complete(dev, file_priv, ctx->handle);
-
- return 0;
-}
-
-/**
- * Remove context.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument pointing to a drm_ctx structure.
- * \return zero on success or a negative number on failure.
- *
- * If not the special kernel context, calls ctxbitmap_free() to free the specified context.
- */
-int drm_rmctx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_ctx *ctx = data;
-
- DRM_DEBUG("%d\n", ctx->handle);
- if (ctx->handle != DRM_KERNEL_CONTEXT) {
- if (dev->driver->context_dtor)
- dev->driver->context_dtor(dev, ctx->handle);
- drm_ctxbitmap_free(dev, ctx->handle);
- }
-
- mutex_lock(&dev->ctxlist_mutex);
- if (!list_empty(&dev->ctxlist)) {
- struct drm_ctx_list *pos, *n;
-
- list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
- if (pos->handle == ctx->handle) {
- list_del(&pos->head);
- kfree(pos);
- --dev->ctx_count;
- }
- }
- }
- mutex_unlock(&dev->ctxlist_mutex);
-
- return 0;
-}
-
-/*@}*/
+++ /dev/null
-/*
- * Copyright (c) 2006-2008 Intel Corporation
- * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
- * Copyright (c) 2008 Red Hat Inc.
- *
- * DRM core CRTC related functions
- *
- * Permission to use, copy, modify, distribute, and sell this software and its
- * documentation for any purpose is hereby granted without fee, provided that
- * the above copyright notice appear in all copies and that both that copyright
- * notice and this permission notice appear in supporting documentation, and
- * that the name of the copyright holders not be used in advertising or
- * publicity pertaining to distribution of the software without specific,
- * written prior permission. The copyright holders make no representations
- * about the suitability of this software for any purpose. It is provided "as
- * is" without express or implied warranty.
- *
- * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
- * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
- * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
- * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
- * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
- * OF THIS SOFTWARE.
- *
- * Authors:
- * Keith Packard
- * Eric Anholt <eric@anholt.net>
- * Dave Airlie <airlied@linux.ie>
- * Jesse Barnes <jesse.barnes@intel.com>
- */
-#include <linux/list.h>
-#include "drm.h"
-#include "drmP.h"
-#include "drm_crtc.h"
-
-struct drm_prop_enum_list {
- int type;
- char *name;
-};
-
-/* Avoid boilerplate. I'm tired of typing. */
-#define DRM_ENUM_NAME_FN(fnname, list) \
- char *fnname(int val) \
- { \
- int i; \
- for (i = 0; i < ARRAY_SIZE(list); i++) { \
- if (list[i].type == val) \
- return list[i].name; \
- } \
- return "(unknown)"; \
- }
-
-/*
- * Global properties
- */
-static struct drm_prop_enum_list drm_dpms_enum_list[] =
-{ { DRM_MODE_DPMS_ON, "On" },
- { DRM_MODE_DPMS_STANDBY, "Standby" },
- { DRM_MODE_DPMS_SUSPEND, "Suspend" },
- { DRM_MODE_DPMS_OFF, "Off" }
-};
-
-DRM_ENUM_NAME_FN(drm_get_dpms_name, drm_dpms_enum_list)
-
-/*
- * Optional properties
- */
-static struct drm_prop_enum_list drm_scaling_mode_enum_list[] =
-{
- { DRM_MODE_SCALE_NONE, "None" },
- { DRM_MODE_SCALE_FULLSCREEN, "Full" },
- { DRM_MODE_SCALE_CENTER, "Center" },
- { DRM_MODE_SCALE_ASPECT, "Full aspect" },
-};
-
-static struct drm_prop_enum_list drm_dithering_mode_enum_list[] =
-{
- { DRM_MODE_DITHERING_OFF, "Off" },
- { DRM_MODE_DITHERING_ON, "On" },
-};
-
-/*
- * Non-global properties, but "required" for certain connectors.
- */
-static struct drm_prop_enum_list drm_dvi_i_select_enum_list[] =
-{
- { DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
- { DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */
- { DRM_MODE_SUBCONNECTOR_DVIA, "DVI-A" }, /* DVI-I */
-};
-
-DRM_ENUM_NAME_FN(drm_get_dvi_i_select_name, drm_dvi_i_select_enum_list)
-
-static struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] =
-{
- { DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */
- { DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */
- { DRM_MODE_SUBCONNECTOR_DVIA, "DVI-A" }, /* DVI-I */
-};
-
-DRM_ENUM_NAME_FN(drm_get_dvi_i_subconnector_name,
- drm_dvi_i_subconnector_enum_list)
-
-static struct drm_prop_enum_list drm_tv_select_enum_list[] =
-{
- { DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
- { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
- { DRM_MODE_SUBCONNECTOR_SVIDEO, "SVIDEO" }, /* TV-out */
- { DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
- { DRM_MODE_SUBCONNECTOR_SCART, "SCART" }, /* TV-out */
-};
-
-DRM_ENUM_NAME_FN(drm_get_tv_select_name, drm_tv_select_enum_list)
-
-static struct drm_prop_enum_list drm_tv_subconnector_enum_list[] =
-{
- { DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */
- { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
- { DRM_MODE_SUBCONNECTOR_SVIDEO, "SVIDEO" }, /* TV-out */
- { DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
- { DRM_MODE_SUBCONNECTOR_SCART, "SCART" }, /* TV-out */
-};
-
-DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
- drm_tv_subconnector_enum_list)
-
-static struct drm_prop_enum_list drm_dirty_info_enum_list[] = {
- { DRM_MODE_DIRTY_OFF, "Off" },
- { DRM_MODE_DIRTY_ON, "On" },
- { DRM_MODE_DIRTY_ANNOTATE, "Annotate" },
-};
-
-DRM_ENUM_NAME_FN(drm_get_dirty_info_name,
- drm_dirty_info_enum_list)
-
-struct drm_conn_prop_enum_list {
- int type;
- char *name;
- int count;
-};
-
-/*
- * Connector and encoder types.
- */
-static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
-{ { DRM_MODE_CONNECTOR_Unknown, "Unknown", 0 },
- { DRM_MODE_CONNECTOR_VGA, "VGA", 0 },
- { DRM_MODE_CONNECTOR_DVII, "DVI-I", 0 },
- { DRM_MODE_CONNECTOR_DVID, "DVI-D", 0 },
- { DRM_MODE_CONNECTOR_DVIA, "DVI-A", 0 },
- { DRM_MODE_CONNECTOR_Composite, "Composite", 0 },
- { DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO", 0 },
- { DRM_MODE_CONNECTOR_LVDS, "LVDS", 0 },
- { DRM_MODE_CONNECTOR_Component, "Component", 0 },
- { DRM_MODE_CONNECTOR_9PinDIN, "9-pin DIN", 0 },
- { DRM_MODE_CONNECTOR_DisplayPort, "DisplayPort", 0 },
- { DRM_MODE_CONNECTOR_HDMIA, "HDMI Type A", 0 },
- { DRM_MODE_CONNECTOR_HDMIB, "HDMI Type B", 0 },
- { DRM_MODE_CONNECTOR_TV, "TV", 0 },
-};
-
-static struct drm_prop_enum_list drm_encoder_enum_list[] =
-{ { DRM_MODE_ENCODER_NONE, "None" },
- { DRM_MODE_ENCODER_DAC, "DAC" },
- { DRM_MODE_ENCODER_TMDS, "TMDS" },
- { DRM_MODE_ENCODER_LVDS, "LVDS" },
- { DRM_MODE_ENCODER_TVDAC, "TV" },
-};
-
-char *drm_get_encoder_name(struct drm_encoder *encoder)
-{
- static char buf[32];
-
- snprintf(buf, 32, "%s-%d",
- drm_encoder_enum_list[encoder->encoder_type].name,
- encoder->base.id);
- return buf;
-}
-EXPORT_SYMBOL(drm_get_encoder_name);
-
-char *drm_get_connector_name(struct drm_connector *connector)
-{
- static char buf[32];
-
- snprintf(buf, 32, "%s-%d",
- drm_connector_enum_list[connector->connector_type].name,
- connector->connector_type_id);
- return buf;
-}
-EXPORT_SYMBOL(drm_get_connector_name);
-
-char *drm_get_connector_status_name(enum drm_connector_status status)
-{
- if (status == connector_status_connected)
- return "connected";
- else if (status == connector_status_disconnected)
- return "disconnected";
- else
- return "unknown";
-}
-
-/**
- * drm_mode_object_get - allocate a new identifier
- * @dev: DRM device
- * @ptr: object pointer, used to generate unique ID
- * @type: object type
- *
- * LOCKING:
- *
- * Create a unique identifier based on @ptr in @dev's identifier space. Used
- * for tracking modes, CRTCs and connectors.
- *
- * RETURNS:
- * New unique (relative to other objects in @dev) integer identifier for the
- * object.
- */
-static int drm_mode_object_get(struct drm_device *dev,
- struct drm_mode_object *obj, uint32_t obj_type)
-{
- int new_id = 0;
- int ret;
-
-again:
- if (idr_pre_get(&dev->mode_config.crtc_idr, GFP_KERNEL) == 0) {
- DRM_ERROR("Ran out memory getting a mode number\n");
- return -EINVAL;
- }
-
- mutex_lock(&dev->mode_config.idr_mutex);
- ret = idr_get_new_above(&dev->mode_config.crtc_idr, obj, 1, &new_id);
- mutex_unlock(&dev->mode_config.idr_mutex);
- if (ret == -EAGAIN)
- goto again;
-
- obj->id = new_id;
- obj->type = obj_type;
- return 0;
-}
-
-/**
- * drm_mode_object_put - free an identifier
- * @dev: DRM device
- * @id: ID to free
- *
- * LOCKING:
- * Caller must hold DRM mode_config lock.
- *
- * Free @id from @dev's unique identifier pool.
- */
-static void drm_mode_object_put(struct drm_device *dev,
- struct drm_mode_object *object)
-{
- mutex_lock(&dev->mode_config.idr_mutex);
- idr_remove(&dev->mode_config.crtc_idr, object->id);
- mutex_unlock(&dev->mode_config.idr_mutex);
-}
-
-struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
- uint32_t id, uint32_t type)
-{
- struct drm_mode_object *obj = NULL;
-
- mutex_lock(&dev->mode_config.idr_mutex);
- obj = idr_find(&dev->mode_config.crtc_idr, id);
- if (!obj || (obj->type != type) || (obj->id != id))
- obj = NULL;
- mutex_unlock(&dev->mode_config.idr_mutex);
-
- return obj;
-}
-EXPORT_SYMBOL(drm_mode_object_find);
-
-/**
- * drm_framebuffer_init - initialize a framebuffer
- * @dev: DRM device
- *
- * LOCKING:
- * Caller must hold mode config lock.
- *
- * Allocates an ID for the framebuffer object, sets its functions and owning
- * device, and adds it to the device's framebuffer list.
- *
- * RETURNS:
- * Zero on success, error code on failure.
- */
-int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
- const struct drm_framebuffer_funcs *funcs)
-{
- int ret;
-
- ret = drm_mode_object_get(dev, &fb->base, DRM_MODE_OBJECT_FB);
- if (ret) {
- return ret;
- }
-
- fb->dev = dev;
- fb->funcs = funcs;
- dev->mode_config.num_fb++;
- list_add(&fb->head, &dev->mode_config.fb_list);
-
- return 0;
-}
-EXPORT_SYMBOL(drm_framebuffer_init);
-
-/**
- * drm_framebuffer_cleanup - remove a framebuffer object
- * @fb: framebuffer to remove
- *
- * LOCKING:
- * Caller must hold mode config lock.
- *
- * Scans all the CRTCs on @fb's device; any CRTC still using @fb has its
- * configuration reset to disable it before the framebuffer is removed.
- */
-void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
-{
- struct drm_device *dev = fb->dev;
- struct drm_crtc *crtc;
- struct drm_mode_set set;
- int ret;
-
- /* remove from any CRTC */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- if (crtc->fb == fb) {
- /* should turn off the crtc */
- memset(&set, 0, sizeof(struct drm_mode_set));
- set.crtc = crtc;
- set.fb = NULL;
- ret = crtc->funcs->set_config(&set);
- if (ret)
- DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc);
- }
- }
-
- drm_mode_object_put(dev, &fb->base);
- list_del(&fb->head);
- dev->mode_config.num_fb--;
-}
-EXPORT_SYMBOL(drm_framebuffer_cleanup);
-
-/**
- * drm_crtc_init - Initialise a new CRTC object
- * @dev: DRM device
- * @crtc: CRTC object to init
- * @funcs: callbacks for the new CRTC
- *
- * LOCKING:
- * Caller must hold mode config lock.
- *
- * Initialises the base CRTC object embedded in a driver's CRTC object.
- */
-void drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
- const struct drm_crtc_funcs *funcs)
-{
- crtc->dev = dev;
- crtc->funcs = funcs;
-
- mutex_lock(&dev->mode_config.mutex);
- drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC);
-
- list_add_tail(&crtc->head, &dev->mode_config.crtc_list);
- dev->mode_config.num_crtc++;
- mutex_unlock(&dev->mode_config.mutex);
-}
-EXPORT_SYMBOL(drm_crtc_init);
-
-/**
- * drm_crtc_cleanup - Cleans up the core crtc usage.
- * @crtc: CRTC to cleanup
- *
- * LOCKING:
- * Caller must hold mode config lock.
- *
- * Cleans up @crtc and removes it from the DRM mode setting core. Does NOT
- * free the object itself; the caller does that.
- */
-void drm_crtc_cleanup(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
-
- if (crtc->gamma_store) {
- kfree(crtc->gamma_store);
- crtc->gamma_store = NULL;
- }
-
- drm_mode_object_put(dev, &crtc->base);
- list_del(&crtc->head);
- dev->mode_config.num_crtc--;
-}
-EXPORT_SYMBOL(drm_crtc_cleanup);
-
-/**
- * drm_mode_probed_add - add a mode to a connector's probed mode list
- * @connector: connector the new mode is to be added to
- * @mode: mode data
- *
- * LOCKING:
- * Caller must hold mode config lock.
- *
- * Add @mode to @connector's mode list for later use.
- */
-void drm_mode_probed_add(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- list_add(&mode->head, &connector->probed_modes);
-}
-EXPORT_SYMBOL(drm_mode_probed_add);
-
-/**
- * drm_mode_remove - remove and free a mode
- * @connector: connector list to modify
- * @mode: mode to remove
- *
- * LOCKING:
- * Caller must hold mode config lock.
- *
- * Remove @mode from @connector's mode list, then free it.
- */
-void drm_mode_remove(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- list_del(&mode->head);
- kfree(mode);
-}
-EXPORT_SYMBOL(drm_mode_remove);
-
-/**
- * drm_connector_init - Init a preallocated connector
- * @dev: DRM device
- * @connector: the connector to init
- * @funcs: callbacks for this connector
- * @connector_type: user visible type of the connector
- *
- * LOCKING:
- * Caller must hold @dev's mode_config lock.
- *
- * Initialises a preallocated connector. Connectors should be
- * subclassed as part of driver connector objects.
- */
-void drm_connector_init(struct drm_device *dev,
- struct drm_connector *connector,
- const struct drm_connector_funcs *funcs,
- int connector_type)
-{
- mutex_lock(&dev->mode_config.mutex);
-
- connector->dev = dev;
- connector->funcs = funcs;
- drm_mode_object_get(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR);
- connector->connector_type = connector_type;
- connector->connector_type_id =
- ++drm_connector_enum_list[connector_type].count; /* TODO */
- INIT_LIST_HEAD(&connector->user_modes);
- INIT_LIST_HEAD(&connector->probed_modes);
- INIT_LIST_HEAD(&connector->modes);
- connector->edid_blob_ptr = NULL;
-
- list_add_tail(&connector->head, &dev->mode_config.connector_list);
- dev->mode_config.num_connector++;
-
- drm_connector_attach_property(connector,
- dev->mode_config.edid_property, 0);
-
- drm_connector_attach_property(connector,
- dev->mode_config.dpms_property, 0);
-
- mutex_unlock(&dev->mode_config.mutex);
-}
-EXPORT_SYMBOL(drm_connector_init);
-
-/**
- * drm_connector_cleanup - cleans up an initialised connector
- * @connector: connector to cleanup
- *
- * LOCKING:
- * Caller must hold @dev's mode_config lock.
- *
- * Cleans up the connector but doesn't free the object.
- */
-void drm_connector_cleanup(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct drm_display_mode *mode, *t;
-
- list_for_each_entry_safe(mode, t, &connector->probed_modes, head)
- drm_mode_remove(connector, mode);
-
- list_for_each_entry_safe(mode, t, &connector->modes, head)
- drm_mode_remove(connector, mode);
-
- list_for_each_entry_safe(mode, t, &connector->user_modes, head)
- drm_mode_remove(connector, mode);
-
- kfree(connector->fb_helper_private);
- mutex_lock(&dev->mode_config.mutex);
- drm_mode_object_put(dev, &connector->base);
- list_del(&connector->head);
- mutex_unlock(&dev->mode_config.mutex);
-}
-EXPORT_SYMBOL(drm_connector_cleanup);
-
-void drm_encoder_init(struct drm_device *dev,
- struct drm_encoder *encoder,
- const struct drm_encoder_funcs *funcs,
- int encoder_type)
-{
- mutex_lock(&dev->mode_config.mutex);
-
- encoder->dev = dev;
-
- drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER);
- encoder->encoder_type = encoder_type;
- encoder->funcs = funcs;
-
- list_add_tail(&encoder->head, &dev->mode_config.encoder_list);
- dev->mode_config.num_encoder++;
-
- mutex_unlock(&dev->mode_config.mutex);
-}
-EXPORT_SYMBOL(drm_encoder_init);
-
-void drm_encoder_cleanup(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- mutex_lock(&dev->mode_config.mutex);
- drm_mode_object_put(dev, &encoder->base);
- list_del(&encoder->head);
- mutex_unlock(&dev->mode_config.mutex);
-}
-EXPORT_SYMBOL(drm_encoder_cleanup);
-
-/**
- * drm_mode_create - create a new display mode
- * @dev: DRM device
- *
- * LOCKING:
- * Caller must hold DRM mode_config lock.
- *
- * Create a new drm_display_mode, give it an ID, and return it.
- *
- * RETURNS:
- * Pointer to new mode on success, NULL on error.
- */
-struct drm_display_mode *drm_mode_create(struct drm_device *dev)
-{
- struct drm_display_mode *nmode;
-
- nmode = kzalloc(sizeof(struct drm_display_mode), GFP_KERNEL);
- if (!nmode)
- return NULL;
-
- drm_mode_object_get(dev, &nmode->base, DRM_MODE_OBJECT_MODE);
- return nmode;
-}
-EXPORT_SYMBOL(drm_mode_create);
-
-/**
- * drm_mode_destroy - remove a mode
- * @dev: DRM device
- * @mode: mode to remove
- *
- * LOCKING:
- * Caller must hold mode config lock.
- *
- * Free @mode's unique identifier, then free it.
- */
-void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode)
-{
- drm_mode_object_put(dev, &mode->base);
-
- kfree(mode);
-}
-EXPORT_SYMBOL(drm_mode_destroy);
-
-static int drm_mode_create_standard_connector_properties(struct drm_device *dev)
-{
- struct drm_property *edid;
- struct drm_property *dpms;
- int i;
-
- /*
- * Standard properties (apply to all connectors)
- */
- edid = drm_property_create(dev, DRM_MODE_PROP_BLOB |
- DRM_MODE_PROP_IMMUTABLE,
- "EDID", 0);
- dev->mode_config.edid_property = edid;
-
- dpms = drm_property_create(dev, DRM_MODE_PROP_ENUM,
- "DPMS", ARRAY_SIZE(drm_dpms_enum_list));
- for (i = 0; i < ARRAY_SIZE(drm_dpms_enum_list); i++)
- drm_property_add_enum(dpms, i, drm_dpms_enum_list[i].type,
- drm_dpms_enum_list[i].name);
- dev->mode_config.dpms_property = dpms;
-
- return 0;
-}
-
-/**
- * drm_mode_create_dvi_i_properties - create DVI-I specific connector properties
- * @dev: DRM device
- *
- * Called by a driver the first time a DVI-I connector is made.
- */
-int drm_mode_create_dvi_i_properties(struct drm_device *dev)
-{
- struct drm_property *dvi_i_selector;
- struct drm_property *dvi_i_subconnector;
- int i;
-
- if (dev->mode_config.dvi_i_select_subconnector_property)
- return 0;
-
- dvi_i_selector =
- drm_property_create(dev, DRM_MODE_PROP_ENUM,
- "select subconnector",
- ARRAY_SIZE(drm_dvi_i_select_enum_list));
- for (i = 0; i < ARRAY_SIZE(drm_dvi_i_select_enum_list); i++)
- drm_property_add_enum(dvi_i_selector, i,
- drm_dvi_i_select_enum_list[i].type,
- drm_dvi_i_select_enum_list[i].name);
- dev->mode_config.dvi_i_select_subconnector_property = dvi_i_selector;
-
- dvi_i_subconnector =
- drm_property_create(dev, DRM_MODE_PROP_ENUM |
- DRM_MODE_PROP_IMMUTABLE,
- "subconnector",
- ARRAY_SIZE(drm_dvi_i_subconnector_enum_list));
- for (i = 0; i < ARRAY_SIZE(drm_dvi_i_subconnector_enum_list); i++)
- drm_property_add_enum(dvi_i_subconnector, i,
- drm_dvi_i_subconnector_enum_list[i].type,
- drm_dvi_i_subconnector_enum_list[i].name);
- dev->mode_config.dvi_i_subconnector_property = dvi_i_subconnector;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_mode_create_dvi_i_properties);
-
-/**
- * drm_create_tv_properties - create TV specific connector properties
- * @dev: DRM device
- * @num_modes: number of different TV formats (modes) supported
- * @modes: array of pointers to strings containing name of each format
- *
- * Called by a driver's TV initialization routine, this function creates
- * the TV specific connector properties for a given device. Caller is
- * responsible for allocating a list of format names and passing them to
- * this routine; an illustrative call is sketched after this function.
- */
-int drm_mode_create_tv_properties(struct drm_device *dev, int num_modes,
- char *modes[])
-{
- struct drm_property *tv_selector;
- struct drm_property *tv_subconnector;
- int i;
-
- if (dev->mode_config.tv_select_subconnector_property)
- return 0;
-
- /*
- * Basic connector properties
- */
- tv_selector = drm_property_create(dev, DRM_MODE_PROP_ENUM,
- "select subconnector",
- ARRAY_SIZE(drm_tv_select_enum_list));
- for (i = 0; i < ARRAY_SIZE(drm_tv_select_enum_list); i++)
- drm_property_add_enum(tv_selector, i,
- drm_tv_select_enum_list[i].type,
- drm_tv_select_enum_list[i].name);
- dev->mode_config.tv_select_subconnector_property = tv_selector;
-
- tv_subconnector =
- drm_property_create(dev, DRM_MODE_PROP_ENUM |
- DRM_MODE_PROP_IMMUTABLE, "subconnector",
- ARRAY_SIZE(drm_tv_subconnector_enum_list));
- for (i = 0; i < ARRAY_SIZE(drm_tv_subconnector_enum_list); i++)
- drm_property_add_enum(tv_subconnector, i,
- drm_tv_subconnector_enum_list[i].type,
- drm_tv_subconnector_enum_list[i].name);
- dev->mode_config.tv_subconnector_property = tv_subconnector;
-
- /*
- * Other, TV specific properties: margins & TV modes.
- */
- dev->mode_config.tv_left_margin_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "left margin", 2);
- dev->mode_config.tv_left_margin_property->values[0] = 0;
- dev->mode_config.tv_left_margin_property->values[1] = 100;
-
- dev->mode_config.tv_right_margin_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "right margin", 2);
- dev->mode_config.tv_right_margin_property->values[0] = 0;
- dev->mode_config.tv_right_margin_property->values[1] = 100;
-
- dev->mode_config.tv_top_margin_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "top margin", 2);
- dev->mode_config.tv_top_margin_property->values[0] = 0;
- dev->mode_config.tv_top_margin_property->values[1] = 100;
-
- dev->mode_config.tv_bottom_margin_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "bottom margin", 2);
- dev->mode_config.tv_bottom_margin_property->values[0] = 0;
- dev->mode_config.tv_bottom_margin_property->values[1] = 100;
-
- dev->mode_config.tv_mode_property =
- drm_property_create(dev, DRM_MODE_PROP_ENUM,
- "mode", num_modes);
- for (i = 0; i < num_modes; i++)
- drm_property_add_enum(dev->mode_config.tv_mode_property, i,
- i, modes[i]);
-
- dev->mode_config.tv_brightness_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "brightness", 2);
- dev->mode_config.tv_brightness_property->values[0] = 0;
- dev->mode_config.tv_brightness_property->values[1] = 100;
-
- dev->mode_config.tv_contrast_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "contrast", 2);
- dev->mode_config.tv_contrast_property->values[0] = 0;
- dev->mode_config.tv_contrast_property->values[1] = 100;
-
- dev->mode_config.tv_flicker_reduction_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "flicker reduction", 2);
- dev->mode_config.tv_flicker_reduction_property->values[0] = 0;
- dev->mode_config.tv_flicker_reduction_property->values[1] = 100;
-
- dev->mode_config.tv_overscan_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "overscan", 2);
- dev->mode_config.tv_overscan_property->values[0] = 0;
- dev->mode_config.tv_overscan_property->values[1] = 100;
-
- dev->mode_config.tv_saturation_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "saturation", 2);
- dev->mode_config.tv_saturation_property->values[0] = 0;
- dev->mode_config.tv_saturation_property->values[1] = 100;
-
- dev->mode_config.tv_hue_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "hue", 2);
- dev->mode_config.tv_hue_property->values[0] = 0;
- dev->mode_config.tv_hue_property->values[1] = 100;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_mode_create_tv_properties);
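-
-/*
- * Illustrative usage sketch (editorial addition, not part of the original
- * source): a driver's TV-out initialisation might create and attach these
- * properties roughly as follows. The format-name array is an assumed
- * example rather than one taken from a particular driver:
- *
- *	static char *tv_format_names[] = { "NTSC_M", "PAL_B" };
- *
- *	drm_mode_create_tv_properties(dev, ARRAY_SIZE(tv_format_names),
- *				      tv_format_names);
- *	drm_connector_attach_property(connector,
- *				      dev->mode_config.tv_mode_property, 0);
- */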
-
-/**
- * drm_mode_create_scaling_mode_property - create scaling mode property
- * @dev: DRM device
- *
- * Called by a driver the first time it's needed; the resulting property must
- * then be attached to the desired connectors.
- */
-int drm_mode_create_scaling_mode_property(struct drm_device *dev)
-{
- struct drm_property *scaling_mode;
- int i;
-
- if (dev->mode_config.scaling_mode_property)
- return 0;
-
- scaling_mode =
- drm_property_create(dev, DRM_MODE_PROP_ENUM, "scaling mode",
- ARRAY_SIZE(drm_scaling_mode_enum_list));
- for (i = 0; i < ARRAY_SIZE(drm_scaling_mode_enum_list); i++)
- drm_property_add_enum(scaling_mode, i,
- drm_scaling_mode_enum_list[i].type,
- drm_scaling_mode_enum_list[i].name);
-
- dev->mode_config.scaling_mode_property = scaling_mode;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_mode_create_scaling_mode_property);
-
-/**
- * drm_mode_create_dithering_property - create dithering property
- * @dev: DRM device
- *
- * Called by a driver the first time it's needed; the resulting property must
- * then be attached to the desired connectors.
- */
-int drm_mode_create_dithering_property(struct drm_device *dev)
-{
- struct drm_property *dithering_mode;
- int i;
-
- if (dev->mode_config.dithering_mode_property)
- return 0;
-
- dithering_mode =
- drm_property_create(dev, DRM_MODE_PROP_ENUM, "dithering",
- ARRAY_SIZE(drm_dithering_mode_enum_list));
- for (i = 0; i < ARRAY_SIZE(drm_dithering_mode_enum_list); i++)
- drm_property_add_enum(dithering_mode, i,
- drm_dithering_mode_enum_list[i].type,
- drm_dithering_mode_enum_list[i].name);
- dev->mode_config.dithering_mode_property = dithering_mode;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_mode_create_dithering_property);
-
-/**
- * drm_mode_create_dirty_info_property - create dirty info property
- * @dev: DRM device
- *
- * Called by a driver the first time it's needed; the resulting property must
- * then be attached to the desired connectors.
- */
-int drm_mode_create_dirty_info_property(struct drm_device *dev)
-{
- struct drm_property *dirty_info;
- int i;
-
- if (dev->mode_config.dirty_info_property)
- return 0;
-
- dirty_info =
- drm_property_create(dev, DRM_MODE_PROP_ENUM |
- DRM_MODE_PROP_IMMUTABLE,
- "dirty",
- ARRAY_SIZE(drm_dirty_info_enum_list));
- for (i = 0; i < ARRAY_SIZE(drm_dirty_info_enum_list); i++)
- drm_property_add_enum(dirty_info, i,
- drm_dirty_info_enum_list[i].type,
- drm_dirty_info_enum_list[i].name);
- dev->mode_config.dirty_info_property = dirty_info;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_mode_create_dirty_info_property);
-
-/**
- * drm_mode_config_init - initialize DRM mode_configuration structure
- * @dev: DRM device
- *
- * LOCKING:
- * None, should happen single threaded at init time.
- *
- * Initialize @dev's mode_config structure, used for tracking the graphics
- * configuration of @dev.
- */
-void drm_mode_config_init(struct drm_device *dev)
-{
- mutex_init(&dev->mode_config.mutex);
- mutex_init(&dev->mode_config.idr_mutex);
- INIT_LIST_HEAD(&dev->mode_config.fb_list);
- INIT_LIST_HEAD(&dev->mode_config.fb_kernel_list);
- INIT_LIST_HEAD(&dev->mode_config.crtc_list);
- INIT_LIST_HEAD(&dev->mode_config.connector_list);
- INIT_LIST_HEAD(&dev->mode_config.encoder_list);
- INIT_LIST_HEAD(&dev->mode_config.property_list);
- INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
- idr_init(&dev->mode_config.crtc_idr);
-
- mutex_lock(&dev->mode_config.mutex);
- drm_mode_create_standard_connector_properties(dev);
- mutex_unlock(&dev->mode_config.mutex);
-
- /* Just to be sure */
- dev->mode_config.num_fb = 0;
- dev->mode_config.num_connector = 0;
- dev->mode_config.num_crtc = 0;
- dev->mode_config.num_encoder = 0;
-}
-EXPORT_SYMBOL(drm_mode_config_init);
-
-int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group)
-{
- uint32_t total_objects = 0;
-
- total_objects += dev->mode_config.num_crtc;
- total_objects += dev->mode_config.num_connector;
- total_objects += dev->mode_config.num_encoder;
-
- if (total_objects == 0)
- return -EINVAL;
-
- group->id_list = kzalloc(total_objects * sizeof(uint32_t), GFP_KERNEL);
- if (!group->id_list)
- return -ENOMEM;
-
- group->num_crtcs = 0;
- group->num_connectors = 0;
- group->num_encoders = 0;
- return 0;
-}
-
-int drm_mode_group_init_legacy_group(struct drm_device *dev,
- struct drm_mode_group *group)
-{
- struct drm_crtc *crtc;
- struct drm_encoder *encoder;
- struct drm_connector *connector;
- int ret;
-
- if ((ret = drm_mode_group_init(dev, group)))
- return ret;
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
- group->id_list[group->num_crtcs++] = crtc->base.id;
-
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
- group->id_list[group->num_crtcs + group->num_encoders++] =
- encoder->base.id;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head)
- group->id_list[group->num_crtcs + group->num_encoders +
- group->num_connectors++] = connector->base.id;
-
- return 0;
-}
-
-/**
- * drm_mode_config_cleanup - free up DRM mode_config info
- * @dev: DRM device
- *
- * LOCKING:
- * Caller must hold mode config lock.
- *
- * Free up all the connectors and CRTCs associated with this DRM device, then
- * free up the framebuffers and associated buffer objects.
- *
- * FIXME: cleanup any dangling user buffer objects too
- */
-void drm_mode_config_cleanup(struct drm_device *dev)
-{
- struct drm_connector *connector, *ot;
- struct drm_crtc *crtc, *ct;
- struct drm_encoder *encoder, *enct;
- struct drm_framebuffer *fb, *fbt;
- struct drm_property *property, *pt;
-
- list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list,
- head) {
- encoder->funcs->destroy(encoder);
- }
-
- list_for_each_entry_safe(connector, ot,
- &dev->mode_config.connector_list, head) {
- connector->funcs->destroy(connector);
- }
-
- list_for_each_entry_safe(property, pt, &dev->mode_config.property_list,
- head) {
- drm_property_destroy(dev, property);
- }
-
- list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
- fb->funcs->destroy(fb);
- }
-
- list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
- crtc->funcs->destroy(crtc);
- }
-
-}
-EXPORT_SYMBOL(drm_mode_config_cleanup);
-
-/**
- * drm_crtc_convert_to_umode - convert a drm_display_mode into a modeinfo
- * @out: drm_mode_modeinfo struct to return to the user
- * @in: drm_display_mode to use
- *
- * LOCKING:
- * None.
- *
- * Convert a drm_display_mode into a drm_mode_modeinfo structure to return to
- * the user.
- */
-void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out,
- struct drm_display_mode *in)
-{
- out->clock = in->clock;
- out->hdisplay = in->hdisplay;
- out->hsync_start = in->hsync_start;
- out->hsync_end = in->hsync_end;
- out->htotal = in->htotal;
- out->hskew = in->hskew;
- out->vdisplay = in->vdisplay;
- out->vsync_start = in->vsync_start;
- out->vsync_end = in->vsync_end;
- out->vtotal = in->vtotal;
- out->vscan = in->vscan;
- out->vrefresh = in->vrefresh;
- out->flags = in->flags;
- out->type = in->type;
- strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
- out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
-}
-
-/**
- * drm_crtc_convert_umode - convert a modeinfo into a drm_display_mode
- * @out: drm_display_mode to return to the user
- * @in: drm_mode_modeinfo to use
- *
- * LOCKING:
- * None.
- *
- * Convert a drm_mode_modeinfo into a drm_display_mode structure to return to
- * the caller.
- */
-void drm_crtc_convert_umode(struct drm_display_mode *out,
- struct drm_mode_modeinfo *in)
-{
- out->clock = in->clock;
- out->hdisplay = in->hdisplay;
- out->hsync_start = in->hsync_start;
- out->hsync_end = in->hsync_end;
- out->htotal = in->htotal;
- out->hskew = in->hskew;
- out->vdisplay = in->vdisplay;
- out->vsync_start = in->vsync_start;
- out->vsync_end = in->vsync_end;
- out->vtotal = in->vtotal;
- out->vscan = in->vscan;
- out->vrefresh = in->vrefresh;
- out->flags = in->flags;
- out->type = in->type;
- strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
- out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
-}
-
-/**
- * drm_mode_getresources - get graphics configuration
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
- *
- * LOCKING:
- * Takes mode config lock.
- *
- * Construct a set of configuration description structures and return
- * them to the user, including CRTC, connector and framebuffer configuration.
- *
- * Called by the user via ioctl.
- *
- * RETURNS:
- * Zero on success, errno on failure.
- */
-int drm_mode_getresources(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_mode_card_res *card_res = data;
- struct list_head *lh;
- struct drm_framebuffer *fb;
- struct drm_connector *connector;
- struct drm_crtc *crtc;
- struct drm_encoder *encoder;
- int ret = 0;
- int connector_count = 0;
- int crtc_count = 0;
- int fb_count = 0;
- int encoder_count = 0;
- int copied = 0, i;
- uint32_t __user *fb_id;
- uint32_t __user *crtc_id;
- uint32_t __user *connector_id;
- uint32_t __user *encoder_id;
- struct drm_mode_group *mode_group;
-
- mutex_lock(&dev->mode_config.mutex);
-
- /*
- * For the non-control nodes we need to limit the list of resources
- * by IDs in the group list for this node
- */
- list_for_each(lh, &file_priv->fbs)
- fb_count++;
-
- mode_group = &file_priv->master->minor->mode_group;
- if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
-
- list_for_each(lh, &dev->mode_config.crtc_list)
- crtc_count++;
-
- list_for_each(lh, &dev->mode_config.connector_list)
- connector_count++;
-
- list_for_each(lh, &dev->mode_config.encoder_list)
- encoder_count++;
- } else {
-
- crtc_count = mode_group->num_crtcs;
- connector_count = mode_group->num_connectors;
- encoder_count = mode_group->num_encoders;
- }
-
- card_res->max_height = dev->mode_config.max_height;
- card_res->min_height = dev->mode_config.min_height;
- card_res->max_width = dev->mode_config.max_width;
- card_res->min_width = dev->mode_config.min_width;
-
- /* handle this in 4 parts */
- /* FBs */
- if (card_res->count_fbs >= fb_count) {
- copied = 0;
- fb_id = (uint32_t __user *)(unsigned long)card_res->fb_id_ptr;
- list_for_each_entry(fb, &file_priv->fbs, head) {
- if (put_user(fb->base.id, fb_id + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
- }
- card_res->count_fbs = fb_count;
-
- /* CRTCs */
- if (card_res->count_crtcs >= crtc_count) {
- copied = 0;
- crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr;
- if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
- list_for_each_entry(crtc, &dev->mode_config.crtc_list,
- head) {
- DRM_DEBUG_KMS("CRTC ID is %d\n", crtc->base.id);
- if (put_user(crtc->base.id, crtc_id + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
- } else {
- for (i = 0; i < mode_group->num_crtcs; i++) {
- if (put_user(mode_group->id_list[i],
- crtc_id + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
- }
- }
- card_res->count_crtcs = crtc_count;
-
- /* Encoders */
- if (card_res->count_encoders >= encoder_count) {
- copied = 0;
- encoder_id = (uint32_t __user *)(unsigned long)card_res->encoder_id_ptr;
- if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
- list_for_each_entry(encoder,
- &dev->mode_config.encoder_list,
- head) {
- DRM_DEBUG_KMS("ENCODER ID is %d\n",
- encoder->base.id);
- if (put_user(encoder->base.id, encoder_id +
- copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
- } else {
- for (i = mode_group->num_crtcs; i < mode_group->num_crtcs + mode_group->num_encoders; i++) {
- if (put_user(mode_group->id_list[i],
- encoder_id + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
-
- }
- }
- card_res->count_encoders = encoder_count;
-
- /* Connectors */
- if (card_res->count_connectors >= connector_count) {
- copied = 0;
- connector_id = (uint32_t __user *)(unsigned long)card_res->connector_id_ptr;
- if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
- list_for_each_entry(connector,
- &dev->mode_config.connector_list,
- head) {
- DRM_DEBUG_KMS("CONNECTOR ID is %d\n",
- connector->base.id);
- if (put_user(connector->base.id,
- connector_id + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
- } else {
- int start = mode_group->num_crtcs +
- mode_group->num_encoders;
- for (i = start; i < start + mode_group->num_connectors; i++) {
- if (put_user(mode_group->id_list[i],
- connector_id + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
- }
- }
- card_res->count_connectors = connector_count;
-
- DRM_DEBUG_KMS("Counted %d %d %d\n", card_res->count_crtcs,
- card_res->count_connectors, card_res->count_encoders);
-
-out:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-}
-
-/**
- * drm_mode_getcrtc - get CRTC configuration
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
- *
- * LOCKING:
- * Caller? (FIXME)
- *
- * Construct a CRTC configuration structure to return to the user.
- *
- * Called by the user via ioctl.
- *
- * RETURNS:
- * Zero on success, errno on failure.
- */
-int drm_mode_getcrtc(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_crtc *crtc_resp = data;
- struct drm_crtc *crtc;
- struct drm_mode_object *obj;
- int ret = 0;
-
- mutex_lock(&dev->mode_config.mutex);
-
- obj = drm_mode_object_find(dev, crtc_resp->crtc_id,
- DRM_MODE_OBJECT_CRTC);
- if (!obj) {
- ret = -EINVAL;
- goto out;
- }
- crtc = obj_to_crtc(obj);
-
- crtc_resp->x = crtc->x;
- crtc_resp->y = crtc->y;
- crtc_resp->gamma_size = crtc->gamma_size;
- if (crtc->fb)
- crtc_resp->fb_id = crtc->fb->base.id;
- else
- crtc_resp->fb_id = 0;
-
- if (crtc->enabled) {
-
- drm_crtc_convert_to_umode(&crtc_resp->mode, &crtc->mode);
- crtc_resp->mode_valid = 1;
-
- } else {
- crtc_resp->mode_valid = 0;
- }
-
-out:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-}
-
-/**
- * drm_mode_getconnector - get connector configuration
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
- *
- * LOCKING:
- * Caller? (FIXME)
- *
- * Construct a connector configuration structure to return to the user.
- *
- * Called by the user via ioctl.
- *
- * RETURNS:
- * Zero on success, errno on failure.
- */
-int drm_mode_getconnector(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_mode_get_connector *out_resp = data;
- struct drm_mode_object *obj;
- struct drm_connector *connector;
- struct drm_display_mode *mode;
- int mode_count = 0;
- int props_count = 0;
- int encoders_count = 0;
- int ret = 0;
- int copied = 0;
- int i;
- struct drm_mode_modeinfo u_mode;
- struct drm_mode_modeinfo __user *mode_ptr;
- uint32_t __user *prop_ptr;
- uint64_t __user *prop_values;
- uint32_t __user *encoder_ptr;
-
- memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo));
-
- DRM_DEBUG_KMS("connector id %d:\n", out_resp->connector_id);
-
- mutex_lock(&dev->mode_config.mutex);
-
- obj = drm_mode_object_find(dev, out_resp->connector_id,
- DRM_MODE_OBJECT_CONNECTOR);
- if (!obj) {
- ret = -EINVAL;
- goto out;
- }
- connector = obj_to_connector(obj);
-
- for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
- if (connector->property_ids[i] != 0) {
- props_count++;
- }
- }
-
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] != 0) {
- encoders_count++;
- }
- }
-
- if (out_resp->count_modes == 0) {
- connector->funcs->fill_modes(connector,
- dev->mode_config.max_width,
- dev->mode_config.max_height);
- }
-
- /* delayed so we get modes regardless of pre-fill_modes state */
- list_for_each_entry(mode, &connector->modes, head)
- mode_count++;
-
- out_resp->connector_id = connector->base.id;
- out_resp->connector_type = connector->connector_type;
- out_resp->connector_type_id = connector->connector_type_id;
- out_resp->mm_width = connector->display_info.width_mm;
- out_resp->mm_height = connector->display_info.height_mm;
- out_resp->subpixel = connector->display_info.subpixel_order;
- out_resp->connection = connector->status;
- if (connector->encoder)
- out_resp->encoder_id = connector->encoder->base.id;
- else
- out_resp->encoder_id = 0;
-
- /*
- * This ioctl is called twice, once to determine how much space is
- * needed, and a second time to fill it (see the illustrative sketch
- * after this function).
- */
- if ((out_resp->count_modes >= mode_count) && mode_count) {
- copied = 0;
- mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
- list_for_each_entry(mode, &connector->modes, head) {
- drm_crtc_convert_to_umode(&u_mode, mode);
- if (copy_to_user(mode_ptr + copied,
- &u_mode, sizeof(u_mode))) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
- }
- out_resp->count_modes = mode_count;
-
- if ((out_resp->count_props >= props_count) && props_count) {
- copied = 0;
- prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
- prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
- for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
- if (connector->property_ids[i] != 0) {
- if (put_user(connector->property_ids[i],
- prop_ptr + copied)) {
- ret = -EFAULT;
- goto out;
- }
-
- if (put_user(connector->property_values[i],
- prop_values + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
- }
- }
- out_resp->count_props = props_count;
-
- if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
- copied = 0;
- encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] != 0) {
- if (put_user(connector->encoder_ids[i],
- encoder_ptr + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
- }
- }
- out_resp->count_encoders = encoders_count;
-
-out:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-}
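-
-/*
- * Illustrative userspace sketch (editorial addition, not part of the
- * original source): the "called twice" pattern noted above, first to learn
- * the array sizes and then to fetch the data. Only the mode list is fetched
- * here; error handling is omitted:
- *
- *	struct drm_mode_get_connector conn;
- *
- *	memset(&conn, 0, sizeof(conn));
- *	conn.connector_id = connector_id;
- *	ioctl(fd, DRM_IOCTL_MODE_GETCONNECTOR, &conn);   pass 1: counts only
- *
- *	conn.modes_ptr = (uint64_t)(unsigned long)
- *		calloc(conn.count_modes, sizeof(struct drm_mode_modeinfo));
- *	conn.count_props = 0;
- *	conn.count_encoders = 0;
- *	ioctl(fd, DRM_IOCTL_MODE_GETCONNECTOR, &conn);   pass 2: fill mode array
- */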
-
-int drm_mode_getencoder(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_mode_get_encoder *enc_resp = data;
- struct drm_mode_object *obj;
- struct drm_encoder *encoder;
- int ret = 0;
-
- mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, enc_resp->encoder_id,
- DRM_MODE_OBJECT_ENCODER);
- if (!obj) {
- ret = -EINVAL;
- goto out;
- }
- encoder = obj_to_encoder(obj);
-
- if (encoder->crtc)
- enc_resp->crtc_id = encoder->crtc->base.id;
- else
- enc_resp->crtc_id = 0;
- enc_resp->encoder_type = encoder->encoder_type;
- enc_resp->encoder_id = encoder->base.id;
- enc_resp->possible_crtcs = encoder->possible_crtcs;
- enc_resp->possible_clones = encoder->possible_clones;
-
-out:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-}
-
-/**
- * drm_mode_setcrtc - set CRTC configuration
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
- *
- * LOCKING:
- * Caller? (FIXME)
- *
- * Build a new CRTC configuration based on user request.
- *
- * Called by the user via ioctl.
- *
- * RETURNS:
- * Zero on success, errno on failure.
- */
-int drm_mode_setcrtc(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_mode_config *config = &dev->mode_config;
- struct drm_mode_crtc *crtc_req = data;
- struct drm_mode_object *obj;
- struct drm_crtc *crtc, *crtcfb;
- struct drm_connector **connector_set = NULL, *connector;
- struct drm_framebuffer *fb = NULL;
- struct drm_display_mode *mode = NULL;
- struct drm_mode_set set;
- uint32_t __user *set_connectors_ptr;
- int ret = 0;
- int i;
-
- mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, crtc_req->crtc_id,
- DRM_MODE_OBJECT_CRTC);
- if (!obj) {
- DRM_DEBUG_KMS("Unknown CRTC ID %d\n", crtc_req->crtc_id);
- ret = -EINVAL;
- goto out;
- }
- crtc = obj_to_crtc(obj);
-
- if (crtc_req->mode_valid) {
- /* If we have a mode we need a framebuffer. */
- /* If we pass -1, set the mode with the currently bound fb */
- if (crtc_req->fb_id == -1) {
- list_for_each_entry(crtcfb,
- &dev->mode_config.crtc_list, head) {
- if (crtcfb == crtc) {
- DRM_DEBUG_KMS("Using current fb for "
- "setmode\n");
- fb = crtc->fb;
- }
- }
- } else {
- obj = drm_mode_object_find(dev, crtc_req->fb_id,
- DRM_MODE_OBJECT_FB);
- if (!obj) {
- DRM_DEBUG_KMS("Unknown FB ID%d\n",
- crtc_req->fb_id);
- ret = -EINVAL;
- goto out;
- }
- fb = obj_to_fb(obj);
- }
-
- mode = drm_mode_create(dev);
- drm_crtc_convert_umode(mode, &crtc_req->mode);
- drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
- }
-
- if (crtc_req->count_connectors == 0 && mode) {
- DRM_DEBUG_KMS("Count connectors is 0 but mode set\n");
- ret = -EINVAL;
- goto out;
- }
-
- if (crtc_req->count_connectors > 0 && (!mode || !fb)) {
- DRM_DEBUG_KMS("Count connectors is %d but no mode or fb set\n",
- crtc_req->count_connectors);
- ret = -EINVAL;
- goto out;
- }
-
- if (crtc_req->count_connectors > 0) {
- u32 out_id;
-
- /* Avoid unbounded kernel memory allocation */
- if (crtc_req->count_connectors > config->num_connector) {
- ret = -EINVAL;
- goto out;
- }
-
- connector_set = kmalloc(crtc_req->count_connectors *
- sizeof(struct drm_connector *),
- GFP_KERNEL);
- if (!connector_set) {
- ret = -ENOMEM;
- goto out;
- }
-
- for (i = 0; i < crtc_req->count_connectors; i++) {
- set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
- if (get_user(out_id, &set_connectors_ptr[i])) {
- ret = -EFAULT;
- goto out;
- }
-
- obj = drm_mode_object_find(dev, out_id,
- DRM_MODE_OBJECT_CONNECTOR);
- if (!obj) {
- DRM_DEBUG_KMS("Connector id %d unknown\n",
- out_id);
- ret = -EINVAL;
- goto out;
- }
- connector = obj_to_connector(obj);
-
- connector_set[i] = connector;
- }
- }
-
- set.crtc = crtc;
- set.x = crtc_req->x;
- set.y = crtc_req->y;
- set.mode = mode;
- set.connectors = connector_set;
- set.num_connectors = crtc_req->count_connectors;
- set.fb = fb;
- ret = crtc->funcs->set_config(&set);
-
-out:
- kfree(connector_set);
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-}
-
-int drm_mode_cursor_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_cursor *req = data;
- struct drm_mode_object *obj;
- struct drm_crtc *crtc;
- int ret = 0;
-
- if (!req->flags) {
- DRM_ERROR("no operation set\n");
- return -EINVAL;
- }
-
- mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC);
- if (!obj) {
- DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id);
- ret = -EINVAL;
- goto out;
- }
- crtc = obj_to_crtc(obj);
-
- if (req->flags & DRM_MODE_CURSOR_BO) {
- if (!crtc->funcs->cursor_set) {
- DRM_ERROR("crtc does not support cursor\n");
- ret = -ENXIO;
- goto out;
- }
- /* Turns off the cursor if handle is 0 */
- ret = crtc->funcs->cursor_set(crtc, file_priv, req->handle,
- req->width, req->height);
- }
-
- if (req->flags & DRM_MODE_CURSOR_MOVE) {
- if (crtc->funcs->cursor_move) {
- ret = crtc->funcs->cursor_move(crtc, req->x, req->y);
- } else {
- DRM_ERROR("crtc does not support cursor\n");
- ret = -EFAULT;
- goto out;
- }
- }
-out:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-}
-
-/**
- * drm_mode_addfb - add an FB to the graphics configuration
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
- *
- * LOCKING:
- * Takes mode config lock.
- *
- * Add a new FB to the specified CRTC, given a user request.
- *
- * Called by the user via ioctl.
- *
- * RETURNS:
- * Zero on success, errno on failure.
- */
-int drm_mode_addfb(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_fb_cmd *r = data;
- struct drm_mode_config *config = &dev->mode_config;
- struct drm_framebuffer *fb;
- int ret = 0;
-
- if ((config->min_width > r->width) || (r->width > config->max_width)) {
- DRM_ERROR("mode new framebuffer width not within limits\n");
- return -EINVAL;
- }
- if ((config->min_height > r->height) || (r->height > config->max_height)) {
- DRM_ERROR("mode new framebuffer height not within limits\n");
- return -EINVAL;
- }
-
- mutex_lock(&dev->mode_config.mutex);
-
- /* TODO check buffer is sufficiently large */
- /* TODO setup destructor callback */
-
- fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
- if (!fb) {
- DRM_ERROR("could not create framebuffer\n");
- ret = -EINVAL;
- goto out;
- }
-
- r->fb_id = fb->base.id;
- list_add(&fb->filp_head, &file_priv->fbs);
-
-out:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-}
-
-/**
- * drm_mode_rmfb - remove an FB from the configuration
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
- *
- * LOCKING:
- * Takes mode config lock.
- *
- * Remove the FB specified by the user.
- *
- * Called by the user via ioctl.
- *
- * RETURNS:
- * Zero on success, errno on failure.
- */
-int drm_mode_rmfb(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_object *obj;
- struct drm_framebuffer *fb = NULL;
- struct drm_framebuffer *fbl = NULL;
- uint32_t *id = data;
- int ret = 0;
- int found = 0;
-
- mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, *id, DRM_MODE_OBJECT_FB);
- /* TODO check that we really get a framebuffer back. */
- if (!obj) {
- DRM_ERROR("mode invalid framebuffer id\n");
- ret = -EINVAL;
- goto out;
- }
- fb = obj_to_fb(obj);
-
- list_for_each_entry(fbl, &file_priv->fbs, filp_head)
- if (fb == fbl)
- found = 1;
-
- if (!found) {
- DRM_ERROR("tried to remove a fb that we didn't own\n");
- ret = -EINVAL;
- goto out;
- }
-
- /* TODO release all CRTCs connected to the framebuffer */
- /* TODO unhook the destructor from the buffer object */
-
- list_del(&fb->filp_head);
- fb->funcs->destroy(fb);
-
-out:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-}
-
-/**
- * drm_mode_getfb - get FB info
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
- *
- * LOCKING:
- * Caller? (FIXME)
- *
- * Lookup the FB given its ID and return info about it.
- *
- * Called by the user via ioctl.
- *
- * RETURNS:
- * Zero on success, errno on failure.
- */
-int drm_mode_getfb(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_fb_cmd *r = data;
- struct drm_mode_object *obj;
- struct drm_framebuffer *fb;
- int ret = 0;
-
- mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
- if (!obj) {
- DRM_ERROR("invalid framebuffer id\n");
- ret = -EINVAL;
- goto out;
- }
- fb = obj_to_fb(obj);
-
- r->height = fb->height;
- r->width = fb->width;
- r->depth = fb->depth;
- r->bpp = fb->bits_per_pixel;
- r->pitch = fb->pitch;
- fb->funcs->create_handle(fb, file_priv, &r->handle);
-
-out:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-}
-
-int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_clip_rect __user *clips_ptr;
- struct drm_clip_rect *clips = NULL;
- struct drm_mode_fb_dirty_cmd *r = data;
- struct drm_mode_object *obj;
- struct drm_framebuffer *fb;
- unsigned flags;
- int num_clips;
- int ret = 0;
-
- mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
- if (!obj) {
- DRM_ERROR("invalid framebuffer id\n");
- ret = -EINVAL;
- goto out_err1;
- }
- fb = obj_to_fb(obj);
-
- num_clips = r->num_clips;
- clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
-
- if (!num_clips != !clips_ptr) {
- ret = -EINVAL;
- goto out_err1;
- }
-
- flags = DRM_MODE_FB_DIRTY_FLAGS & r->flags;
-
- /* If userspace annotates copy, clips must come in pairs */
- if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY && (num_clips % 2)) {
- ret = -EINVAL;
- goto out_err1;
- }
-
- if (num_clips && clips_ptr) {
- clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
- if (!clips) {
- ret = -ENOMEM;
- goto out_err1;
- }
-
- ret = copy_from_user(clips, clips_ptr,
- num_clips * sizeof(*clips));
- if (ret)
- goto out_err2;
- }
-
- if (fb->funcs->dirty) {
- ret = fb->funcs->dirty(fb, flags, r->color, clips, num_clips);
- } else {
- ret = -ENOSYS;
- goto out_err2;
- }
-
-out_err2:
- kfree(clips);
-out_err1:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-}
-
-
-/**
- * drm_fb_release - remove and free the FBs on this file
- * @priv: DRM file whose framebuffers are to be released
- *
- * LOCKING:
- * Takes mode config lock.
- *
- * Destroy all the FBs associated with @priv.
- *
- * Called when the DRM file is released; returns nothing.
- */
-void drm_fb_release(struct drm_file *priv)
-{
- struct drm_device *dev = priv->minor->dev;
- struct drm_framebuffer *fb, *tfb;
-
- mutex_lock(&dev->mode_config.mutex);
- list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
- list_del(&fb->filp_head);
- fb->funcs->destroy(fb);
- }
- mutex_unlock(&dev->mode_config.mutex);
-}
-
-/**
- * drm_mode_attachmode - add a mode to the user mode list
- * @dev: DRM device
- * @connector: connector to add the mode to
- * @mode: mode to add
- *
- * Add @mode to @connector's user mode list.
- */
-static int drm_mode_attachmode(struct drm_device *dev,
- struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- int ret = 0;
-
- list_add_tail(&mode->head, &connector->user_modes);
- return ret;
-}
-
-int drm_mode_attachmode_crtc(struct drm_device *dev, struct drm_crtc *crtc,
- struct drm_display_mode *mode)
-{
- struct drm_connector *connector;
- int ret = 0;
- struct drm_display_mode *dup_mode;
- int need_dup = 0;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (!connector->encoder)
- break;
- if (connector->encoder->crtc == crtc) {
- if (need_dup)
- dup_mode = drm_mode_duplicate(dev, mode);
- else
- dup_mode = mode;
- ret = drm_mode_attachmode(dev, connector, dup_mode);
- if (ret)
- return ret;
- need_dup = 1;
- }
- }
- return 0;
-}
-EXPORT_SYMBOL(drm_mode_attachmode_crtc);
-
-static int drm_mode_detachmode(struct drm_device *dev,
- struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- int found = 0;
- int ret = 0;
- struct drm_display_mode *match_mode, *t;
-
- list_for_each_entry_safe(match_mode, t, &connector->user_modes, head) {
- if (drm_mode_equal(match_mode, mode)) {
- list_del(&match_mode->head);
- drm_mode_destroy(dev, match_mode);
- found = 1;
- break;
- }
- }
-
- if (!found)
- ret = -EINVAL;
-
- return ret;
-}
-
-int drm_mode_detachmode_crtc(struct drm_device *dev, struct drm_display_mode *mode)
-{
- struct drm_connector *connector;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- drm_mode_detachmode(dev, connector, mode);
- }
- return 0;
-}
-EXPORT_SYMBOL(drm_mode_detachmode_crtc);
-
-/**
- * drm_mode_attachmode_ioctl - Attach a user mode to a connector
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
- *
- * This attaches a user-specified mode to a connector.
- * Called by the user via ioctl.
- *
- * RETURNS:
- * Zero on success, errno on failure.
- */
-int drm_mode_attachmode_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_mode_cmd *mode_cmd = data;
- struct drm_connector *connector;
- struct drm_display_mode *mode;
- struct drm_mode_object *obj;
- struct drm_mode_modeinfo *umode = &mode_cmd->mode;
- int ret = 0;
-
- mutex_lock(&dev->mode_config.mutex);
-
- obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR);
- if (!obj) {
- ret = -EINVAL;
- goto out;
- }
- connector = obj_to_connector(obj);
-
- mode = drm_mode_create(dev);
- if (!mode) {
- ret = -ENOMEM;
- goto out;
- }
-
- drm_crtc_convert_umode(mode, umode);
-
- ret = drm_mode_attachmode(dev, connector, mode);
-out:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-}
-
-
-/**
- * drm_mode_detachmode_ioctl - Detach a user-specified mode from a connector
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
- *
- * Called by the user via ioctl.
- *
- * RETURNS:
- * Zero on success, errno on failure.
- */
-int drm_mode_detachmode_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_object *obj;
- struct drm_mode_mode_cmd *mode_cmd = data;
- struct drm_connector *connector;
- struct drm_display_mode mode;
- struct drm_mode_modeinfo *umode = &mode_cmd->mode;
- int ret = 0;
-
- mutex_lock(&dev->mode_config.mutex);
-
- obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR);
- if (!obj) {
- ret = -EINVAL;
- goto out;
- }
- connector = obj_to_connector(obj);
-
- drm_crtc_convert_umode(&mode, umode);
- ret = drm_mode_detachmode(dev, connector, &mode);
-out:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-}
-
-struct drm_property *drm_property_create(struct drm_device *dev, int flags,
- const char *name, int num_values)
-{
- struct drm_property *property = NULL;
-
- property = kzalloc(sizeof(struct drm_property), GFP_KERNEL);
- if (!property)
- return NULL;
-
- if (num_values) {
- property->values = kzalloc(sizeof(uint64_t)*num_values, GFP_KERNEL);
- if (!property->values)
- goto fail;
- }
-
- drm_mode_object_get(dev, &property->base, DRM_MODE_OBJECT_PROPERTY);
- property->flags = flags;
- property->num_values = num_values;
- INIT_LIST_HEAD(&property->enum_blob_list);
-
- if (name)
- strncpy(property->name, name, DRM_PROP_NAME_LEN);
-
- list_add_tail(&property->head, &dev->mode_config.property_list);
- return property;
-fail:
- kfree(property);
- return NULL;
-}
-EXPORT_SYMBOL(drm_property_create);
-
-int drm_property_add_enum(struct drm_property *property, int index,
- uint64_t value, const char *name)
-{
- struct drm_property_enum *prop_enum;
-
- if (!(property->flags & DRM_MODE_PROP_ENUM))
- return -EINVAL;
-
- if (!list_empty(&property->enum_blob_list)) {
- list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
- if (prop_enum->value == value) {
- strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
- prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
- return 0;
- }
- }
- }
-
- prop_enum = kzalloc(sizeof(struct drm_property_enum), GFP_KERNEL);
- if (!prop_enum)
- return -ENOMEM;
-
- strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
- prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
- prop_enum->value = value;
-
- property->values[index] = value;
- list_add_tail(&prop_enum->head, &property->enum_blob_list);
- return 0;
-}
-EXPORT_SYMBOL(drm_property_add_enum);
-
-void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
-{
- struct drm_property_enum *prop_enum, *pt;
-
- list_for_each_entry_safe(prop_enum, pt, &property->enum_blob_list, head) {
- list_del(&prop_enum->head);
- kfree(prop_enum);
- }
-
- if (property->num_values)
- kfree(property->values);
- drm_mode_object_put(dev, &property->base);
- list_del(&property->head);
- kfree(property);
-}
-EXPORT_SYMBOL(drm_property_destroy);
-
-int drm_connector_attach_property(struct drm_connector *connector,
- struct drm_property *property, uint64_t init_val)
-{
- int i;
-
- for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
- if (connector->property_ids[i] == 0) {
- connector->property_ids[i] = property->base.id;
- connector->property_values[i] = init_val;
- break;
- }
- }
-
- if (i == DRM_CONNECTOR_MAX_PROPERTY)
- return -EINVAL;
- return 0;
-}
-EXPORT_SYMBOL(drm_connector_attach_property);
-
-int drm_connector_property_set_value(struct drm_connector *connector,
- struct drm_property *property, uint64_t value)
-{
- int i;
-
- for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
- if (connector->property_ids[i] == property->base.id) {
- connector->property_values[i] = value;
- break;
- }
- }
-
- if (i == DRM_CONNECTOR_MAX_PROPERTY)
- return -EINVAL;
- return 0;
-}
-EXPORT_SYMBOL(drm_connector_property_set_value);
-
-int drm_connector_property_get_value(struct drm_connector *connector,
- struct drm_property *property, uint64_t *val)
-{
- int i;
-
- for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
- if (connector->property_ids[i] == property->base.id) {
- *val = connector->property_values[i];
- break;
- }
- }
-
- if (i == DRM_CONNECTOR_MAX_PROPERTY)
- return -EINVAL;
- return 0;
-}
-EXPORT_SYMBOL(drm_connector_property_get_value);
-
-int drm_mode_getproperty_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_object *obj;
- struct drm_mode_get_property *out_resp = data;
- struct drm_property *property;
- int enum_count = 0;
- int blob_count = 0;
- int value_count = 0;
- int ret = 0, i;
- int copied;
- struct drm_property_enum *prop_enum;
- struct drm_mode_property_enum __user *enum_ptr;
- struct drm_property_blob *prop_blob;
- uint32_t *blob_id_ptr;
- uint64_t __user *values_ptr;
- uint32_t __user *blob_length_ptr;
-
- mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
- if (!obj) {
- ret = -EINVAL;
- goto done;
- }
- property = obj_to_property(obj);
-
- if (property->flags & DRM_MODE_PROP_ENUM) {
- list_for_each_entry(prop_enum, &property->enum_blob_list, head)
- enum_count++;
- } else if (property->flags & DRM_MODE_PROP_BLOB) {
- list_for_each_entry(prop_blob, &property->enum_blob_list, head)
- blob_count++;
- }
-
- value_count = property->num_values;
-
- strncpy(out_resp->name, property->name, DRM_PROP_NAME_LEN);
- out_resp->name[DRM_PROP_NAME_LEN-1] = 0;
- out_resp->flags = property->flags;
-
- if ((out_resp->count_values >= value_count) && value_count) {
- values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
- for (i = 0; i < value_count; i++) {
- if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
- ret = -EFAULT;
- goto done;
- }
- }
- }
- out_resp->count_values = value_count;
-
- if (property->flags & DRM_MODE_PROP_ENUM) {
- if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
- copied = 0;
- enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
- list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
-
- if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
- ret = -EFAULT;
- goto done;
- }
-
- if (copy_to_user(&enum_ptr[copied].name,
- &prop_enum->name, DRM_PROP_NAME_LEN)) {
- ret = -EFAULT;
- goto done;
- }
- copied++;
- }
- }
- out_resp->count_enum_blobs = enum_count;
- }
-
- if (property->flags & DRM_MODE_PROP_BLOB) {
- if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
- copied = 0;
- blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
- blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
-
- list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
- if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
- ret = -EFAULT;
- goto done;
- }
-
- if (put_user(prop_blob->length, blob_length_ptr + copied)) {
- ret = -EFAULT;
- goto done;
- }
-
- copied++;
- }
- }
- out_resp->count_enum_blobs = blob_count;
- }
-done:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-}
-
-static struct drm_property_blob *drm_property_create_blob(struct drm_device *dev, int length,
- void *data)
-{
- struct drm_property_blob *blob;
-
- if (!length || !data)
- return NULL;
-
- blob = kzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL);
- if (!blob)
- return NULL;
-
- blob->data = (void *)((char *)blob + sizeof(struct drm_property_blob));
- blob->length = length;
-
- memcpy(blob->data, data, length);
-
- drm_mode_object_get(dev, &blob->base, DRM_MODE_OBJECT_BLOB);
-
- list_add_tail(&blob->head, &dev->mode_config.property_blob_list);
- return blob;
-}
-
-static void drm_property_destroy_blob(struct drm_device *dev,
- struct drm_property_blob *blob)
-{
- drm_mode_object_put(dev, &blob->base);
- list_del(&blob->head);
- kfree(blob);
-}
-
-int drm_mode_getblob_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_object *obj;
- struct drm_mode_get_blob *out_resp = data;
- struct drm_property_blob *blob;
- int ret = 0;
- void *blob_ptr;
-
- mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, out_resp->blob_id, DRM_MODE_OBJECT_BLOB);
- if (!obj) {
- ret = -EINVAL;
- goto done;
- }
- blob = obj_to_blob(obj);
-
- if (out_resp->length == blob->length) {
- blob_ptr = (void *)(unsigned long)out_resp->data;
- if (copy_to_user(blob_ptr, blob->data, blob->length)){
- ret = -EFAULT;
- goto done;
- }
- }
- out_resp->length = blob->length;
-
-done:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-}
-
-int drm_mode_connector_update_edid_property(struct drm_connector *connector,
- struct edid *edid)
-{
- struct drm_device *dev = connector->dev;
- int ret = 0;
-
- if (connector->edid_blob_ptr)
- drm_property_destroy_blob(dev, connector->edid_blob_ptr);
-
- /* Delete the EDID property value when there is no EDID. */
- if (!edid) {
- connector->edid_blob_ptr = NULL;
- ret = drm_connector_property_set_value(connector, dev->mode_config.edid_property, 0);
- return ret;
- }
-
- connector->edid_blob_ptr = drm_property_create_blob(connector->dev, 128, edid);
-
- ret = drm_connector_property_set_value(connector,
- dev->mode_config.edid_property,
- connector->edid_blob_ptr->base.id);
-
- return ret;
-}
-EXPORT_SYMBOL(drm_mode_connector_update_edid_property);
-
-int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_connector_set_property *out_resp = data;
- struct drm_mode_object *obj;
- struct drm_property *property;
- struct drm_connector *connector;
- int ret = -EINVAL;
- int i;
-
- mutex_lock(&dev->mode_config.mutex);
-
- obj = drm_mode_object_find(dev, out_resp->connector_id, DRM_MODE_OBJECT_CONNECTOR);
- if (!obj) {
- goto out;
- }
- connector = obj_to_connector(obj);
-
- for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
- if (connector->property_ids[i] == out_resp->prop_id)
- break;
- }
-
- if (i == DRM_CONNECTOR_MAX_PROPERTY) {
- goto out;
- }
-
- obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
- if (!obj) {
- goto out;
- }
- property = obj_to_property(obj);
-
- if (property->flags & DRM_MODE_PROP_IMMUTABLE)
- goto out;
-
- if (property->flags & DRM_MODE_PROP_RANGE) {
- if (out_resp->value < property->values[0])
- goto out;
-
- if (out_resp->value > property->values[1])
- goto out;
- } else {
- int found = 0;
- for (i = 0; i < property->num_values; i++) {
- if (property->values[i] == out_resp->value) {
- found = 1;
- break;
- }
- }
- if (!found) {
- goto out;
- }
- }
-
- /* Do DPMS ourselves */
- if (property == connector->dev->mode_config.dpms_property) {
- if (connector->funcs->dpms)
- (*connector->funcs->dpms)(connector, (int) out_resp->value);
- ret = 0;
- } else if (connector->funcs->set_property)
- ret = connector->funcs->set_property(connector, property, out_resp->value);
-
- /* store the property value if successful */
- if (!ret)
- drm_connector_property_set_value(connector, property, out_resp->value);
-out:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-}
-
-int drm_mode_connector_attach_encoder(struct drm_connector *connector,
- struct drm_encoder *encoder)
-{
- int i;
-
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == 0) {
- connector->encoder_ids[i] = encoder->base.id;
- return 0;
- }
- }
- return -ENOMEM;
-}
-EXPORT_SYMBOL(drm_mode_connector_attach_encoder);
-
-void drm_mode_connector_detach_encoder(struct drm_connector *connector,
- struct drm_encoder *encoder)
-{
- int i;
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == encoder->base.id) {
- connector->encoder_ids[i] = 0;
- if (connector->encoder == encoder)
- connector->encoder = NULL;
- break;
- }
- }
-}
-EXPORT_SYMBOL(drm_mode_connector_detach_encoder);
-
-bool drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
- int gamma_size)
-{
- crtc->gamma_size = gamma_size;
-
- crtc->gamma_store = kzalloc(gamma_size * sizeof(uint16_t) * 3, GFP_KERNEL);
- if (!crtc->gamma_store) {
- crtc->gamma_size = 0;
- return false;
- }
-
- return true;
-}
-EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size);
-
-int drm_mode_gamma_set_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_crtc_lut *crtc_lut = data;
- struct drm_mode_object *obj;
- struct drm_crtc *crtc;
- void *r_base, *g_base, *b_base;
- int size;
- int ret = 0;
-
- mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
- if (!obj) {
- ret = -EINVAL;
- goto out;
- }
- crtc = obj_to_crtc(obj);
-
- /* memcpy into gamma store */
- if (crtc_lut->gamma_size != crtc->gamma_size) {
- ret = -EINVAL;
- goto out;
- }
-
- size = crtc_lut->gamma_size * (sizeof(uint16_t));
- r_base = crtc->gamma_store;
- if (copy_from_user(r_base, (void __user *)(unsigned long)crtc_lut->red, size)) {
- ret = -EFAULT;
- goto out;
- }
-
- g_base = r_base + size;
- if (copy_from_user(g_base, (void __user *)(unsigned long)crtc_lut->green, size)) {
- ret = -EFAULT;
- goto out;
- }
-
- b_base = g_base + size;
- if (copy_from_user(b_base, (void __user *)(unsigned long)crtc_lut->blue, size)) {
- ret = -EFAULT;
- goto out;
- }
-
- crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, crtc->gamma_size);
-
-out:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-
-}
-
-int drm_mode_gamma_get_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_crtc_lut *crtc_lut = data;
- struct drm_mode_object *obj;
- struct drm_crtc *crtc;
- void *r_base, *g_base, *b_base;
- int size;
- int ret = 0;
-
- mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
- if (!obj) {
- ret = -EINVAL;
- goto out;
- }
- crtc = obj_to_crtc(obj);
-
- /* copy the gamma store out to userspace */
- if (crtc_lut->gamma_size != crtc->gamma_size) {
- ret = -EINVAL;
- goto out;
- }
-
- size = crtc_lut->gamma_size * (sizeof(uint16_t));
- r_base = crtc->gamma_store;
- if (copy_to_user((void __user *)(unsigned long)crtc_lut->red, r_base, size)) {
- ret = -EFAULT;
- goto out;
- }
-
- g_base = r_base + size;
- if (copy_to_user((void __user *)(unsigned long)crtc_lut->green, g_base, size)) {
- ret = -EFAULT;
- goto out;
- }
-
- b_base = g_base + size;
- if (copy_to_user((void __user *)(unsigned long)crtc_lut->blue, b_base, size)) {
- ret = -EFAULT;
- goto out;
- }
-out:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-}
-
-int drm_mode_page_flip_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_crtc_page_flip *page_flip = data;
- struct drm_mode_object *obj;
- struct drm_crtc *crtc;
- struct drm_framebuffer *fb;
- struct drm_pending_vblank_event *e = NULL;
- unsigned long flags;
- int ret = -EINVAL;
-
- if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
- page_flip->reserved != 0)
- return -EINVAL;
-
- mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC);
- if (!obj)
- goto out;
- crtc = obj_to_crtc(obj);
-
- if (crtc->funcs->page_flip == NULL)
- goto out;
-
- obj = drm_mode_object_find(dev, page_flip->fb_id, DRM_MODE_OBJECT_FB);
- if (!obj)
- goto out;
- fb = obj_to_fb(obj);
-
- if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
- ret = -ENOMEM;
- spin_lock_irqsave(&dev->event_lock, flags);
- if (file_priv->event_space < sizeof e->event) {
- spin_unlock_irqrestore(&dev->event_lock, flags);
- goto out;
- }
- file_priv->event_space -= sizeof e->event;
- spin_unlock_irqrestore(&dev->event_lock, flags);
-
- e = kzalloc(sizeof *e, GFP_KERNEL);
- if (e == NULL) {
- spin_lock_irqsave(&dev->event_lock, flags);
- file_priv->event_space += sizeof e->event;
- spin_unlock_irqrestore(&dev->event_lock, flags);
- goto out;
- }
-
- e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
- e->event.base.length = sizeof e->event;
- e->event.user_data = page_flip->user_data;
- e->base.event = &e->event.base;
- e->base.file_priv = file_priv;
- e->base.destroy =
- (void (*) (struct drm_pending_event *)) kfree;
- }
-
- ret = crtc->funcs->page_flip(crtc, fb, e);
- if (ret && (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT)) {
- spin_lock_irqsave(&dev->event_lock, flags);
- file_priv->event_space += sizeof e->event;
- spin_unlock_irqrestore(&dev->event_lock, flags);
- kfree(e);
- }
-
-out:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-}
+++ /dev/null
-/**
- * \file drm_debugfs.c
- * debugfs support for DRM
- *
- * \author Ben Gamari <bgamari@gmail.com>
- */
-
-/*
- * Created: Sun Dec 21 13:08:50 2008 by bgamari@gmail.com
- *
- * Copyright 2008 Ben Gamari <bgamari@gmail.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include "drmP.h"
-
-#if defined(CONFIG_DEBUG_FS)
-
-/***************************************************
- * Initialization, etc.
- **************************************************/
-
-static struct drm_info_list drm_debugfs_list[] = {
- {"name", drm_name_info, 0},
- {"vm", drm_vm_info, 0},
- {"clients", drm_clients_info, 0},
- {"queues", drm_queues_info, 0},
- {"bufs", drm_bufs_info, 0},
- {"gem_names", drm_gem_name_info, DRIVER_GEM},
- {"gem_objects", drm_gem_object_info, DRIVER_GEM},
-#if DRM_DEBUG_CODE
- {"vma", drm_vma_info, 0},
-#endif
-};
-#define DRM_DEBUGFS_ENTRIES ARRAY_SIZE(drm_debugfs_list)
-
-
-static int drm_debugfs_open(struct inode *inode, struct file *file)
-{
- struct drm_info_node *node = inode->i_private;
-
- return single_open(file, node->info_ent->show, node);
-}
-
-
-static const struct file_operations drm_debugfs_fops = {
- .owner = THIS_MODULE,
- .open = drm_debugfs_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-
-/**
- * Initialize a given set of debugfs files for a device
- *
- * \param files The array of files to create
- * \param count The number of files given
- * \param root DRI debugfs dir entry.
- * \param minor device minor number
- * \return Zero on success, non-zero on failure
- *
- * Create a given set of debugfs files represented by an array of
- * drm_debugfs_lists in the given root directory.
- */
-int drm_debugfs_create_files(struct drm_info_list *files, int count,
- struct dentry *root, struct drm_minor *minor)
-{
- struct drm_device *dev = minor->dev;
- struct dentry *ent;
- struct drm_info_node *tmp;
- int i, ret;
-
- for (i = 0; i < count; i++) {
- u32 features = files[i].driver_features;
-
- if (features != 0 &&
- (dev->driver->driver_features & features) != features)
- continue;
-
- tmp = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
- if (tmp == NULL) {
- ret = -1;
- goto fail;
- }
- ent = debugfs_create_file(files[i].name, S_IFREG | S_IRUGO,
- root, tmp, &drm_debugfs_fops);
- if (!ent) {
- DRM_ERROR("Cannot create /sys/kernel/debug/dri/%s/%s\n",
- name, files[i].name);
- kfree(tmp);
- ret = -1;
- goto fail;
- }
-
- tmp->minor = minor;
- tmp->dent = ent;
- tmp->info_ent = &files[i];
- list_add(&(tmp->list), &(minor->debugfs_nodes.list));
- }
- return 0;
-
-fail:
- drm_debugfs_remove_files(files, count, minor);
- return ret;
-}
-EXPORT_SYMBOL(drm_debugfs_create_files);
-
-/**
- * Initialize the DRI debugfs filesystem for a device
- *
- * \param minor DRM minor to initialize
- * \param minor_id minor number
- * \param root DRI debugfs dir entry.
- *
- * Create the DRI debugfs root entry "/sys/kernel/debug/dri", the device debugfs root entry
- * "/sys/kernel/debug/dri/%minor%/", and each entry in debugfs_list as
- * "/sys/kernel/debug/dri/%minor%/%name%".
- */
-int drm_debugfs_init(struct drm_minor *minor, int minor_id,
- struct dentry *root)
-{
- struct drm_device *dev = minor->dev;
- char name[64];
- int ret;
-
- INIT_LIST_HEAD(&minor->debugfs_nodes.list);
- sprintf(name, "%d", minor_id);
- minor->debugfs_root = debugfs_create_dir(name, root);
- if (!minor->debugfs_root) {
- DRM_ERROR("Cannot create /sys/kernel/debug/dri/%s\n", name);
- return -1;
- }
-
- ret = drm_debugfs_create_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES,
- minor->debugfs_root, minor);
- if (ret) {
- debugfs_remove(minor->debugfs_root);
- minor->debugfs_root = NULL;
- DRM_ERROR("Failed to create core drm debugfs files\n");
- return ret;
- }
-
- if (dev->driver->debugfs_init) {
- ret = dev->driver->debugfs_init(minor);
- if (ret) {
- DRM_ERROR("DRM: Driver failed to initialize "
- "/sys/kernel/debug/dri.\n");
- return ret;
- }
- }
- return 0;
-}
-
-
-/**
- * Remove a list of debugfs files
- *
- * \param files The list of files
- * \param count The number of files
- * \param minor The minor of which we should remove the files
- * \return always zero.
- *
- * Remove all debugfs entries created by debugfs_init().
- */
-int drm_debugfs_remove_files(struct drm_info_list *files, int count,
- struct drm_minor *minor)
-{
- struct list_head *pos, *q;
- struct drm_info_node *tmp;
- int i;
-
- for (i = 0; i < count; i++) {
- list_for_each_safe(pos, q, &minor->debugfs_nodes.list) {
- tmp = list_entry(pos, struct drm_info_node, list);
- if (tmp->info_ent == &files[i]) {
- debugfs_remove(tmp->dent);
- list_del(pos);
- kfree(tmp);
- }
- }
- }
- return 0;
-}
-EXPORT_SYMBOL(drm_debugfs_remove_files);
-
-/**
- * Cleanup the debugfs filesystem resources.
- *
- * \param minor device minor number.
- * \return always zero.
- *
- * Remove all debugfs entries created by debugfs_init().
- */
-int drm_debugfs_cleanup(struct drm_minor *minor)
-{
- struct drm_device *dev = minor->dev;
-
- if (!minor->debugfs_root)
- return 0;
-
- if (dev->driver->debugfs_cleanup)
- dev->driver->debugfs_cleanup(minor);
-
- drm_debugfs_remove_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES, minor);
-
- debugfs_remove(minor->debugfs_root);
- minor->debugfs_root = NULL;
-
- return 0;
-}
-
-#endif /* CONFIG_DEBUG_FS */
-
+++ /dev/null
-/**
- * \file drm_dma.c
- * DMA IOCTL and function support
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
- *
- * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "drmP.h"
-
-/**
- * Initialize the DMA data.
- *
- * \param dev DRM device.
- * \return zero on success or a negative value on failure.
- *
- * Allocate and initialize a drm_device_dma structure.
- */
-int drm_dma_setup(struct drm_device *dev)
-{
- dev->dma = kzalloc(sizeof(*dev->dma), GFP_KERNEL);
- if (!dev->dma)
- return -ENOMEM;
-
- return 0;
-}
-
-/**
- * Cleanup the DMA resources.
- *
- * \param dev DRM device.
- *
- * Free all pages associated with DMA buffers, the buffers and pages lists, and
- * finally the drm_device::dma structure itself.
- */
-void drm_dma_takedown(struct drm_device *dev)
-{
- struct drm_device_dma *dma = dev->dma;
- int i, j;
-
- if (!dma)
- return;
-
- /* Clear dma buffers */
- for (i = 0; i <= DRM_MAX_ORDER; i++) {
- if (dma->bufs[i].seg_count) {
- DRM_DEBUG("order %d: buf_count = %d,"
- " seg_count = %d\n",
- i,
- dma->bufs[i].buf_count,
- dma->bufs[i].seg_count);
- for (j = 0; j < dma->bufs[i].seg_count; j++) {
- if (dma->bufs[i].seglist[j]) {
- drm_pci_free(dev, dma->bufs[i].seglist[j]);
- }
- }
- kfree(dma->bufs[i].seglist);
- }
- if (dma->bufs[i].buf_count) {
- for (j = 0; j < dma->bufs[i].buf_count; j++) {
- kfree(dma->bufs[i].buflist[j].dev_private);
- }
- kfree(dma->bufs[i].buflist);
- }
- }
-
- kfree(dma->buflist);
- kfree(dma->pagelist);
- kfree(dev->dma);
- dev->dma = NULL;
-}
-
-/**
- * Free a buffer.
- *
- * \param dev DRM device.
- * \param buf buffer to free.
- *
- * Resets the fields of \p buf.
- */
-void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf)
-{
- if (!buf)
- return;
-
- buf->waiting = 0;
- buf->pending = 0;
- buf->file_priv = NULL;
- buf->used = 0;
-
- if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE)
- && waitqueue_active(&buf->dma_wait)) {
- wake_up_interruptible(&buf->dma_wait);
- }
-}
-
-/**
- * Reclaim the buffers.
- *
- * \param file_priv DRM file private.
- *
- * Frees each buffer associated with \p file_priv not already on the hardware.
- */
-void drm_core_reclaim_buffers(struct drm_device *dev,
- struct drm_file *file_priv)
-{
- struct drm_device_dma *dma = dev->dma;
- int i;
-
- if (!dma)
- return;
- for (i = 0; i < dma->buf_count; i++) {
- if (dma->buflist[i]->file_priv == file_priv) {
- switch (dma->buflist[i]->list) {
- case DRM_LIST_NONE:
- drm_free_buffer(dev, dma->buflist[i]);
- break;
- case DRM_LIST_WAIT:
- dma->buflist[i]->list = DRM_LIST_RECLAIM;
- break;
- default:
- /* Buffer already on hardware. */
- break;
- }
- }
- }
-}
-
-EXPORT_SYMBOL(drm_core_reclaim_buffers);
+++ /dev/null
-/**
- * \file drm_drawable.c
- * IOCTLs for drawables
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- * \author Michel Dänzer <michel@tungstengraphics.com>
- */
-
-/*
- * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, North Dakota.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "drmP.h"
-
-/**
- * Allocate drawable ID and memory to store information about it.
- */
-int drm_adddraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- unsigned long irqflags;
- struct drm_draw *draw = data;
- int new_id = 0;
- int ret;
-
-again:
- if (idr_pre_get(&dev->drw_idr, GFP_KERNEL) == 0) {
- DRM_ERROR("Out of memory expanding drawable idr\n");
- return -ENOMEM;
- }
-
- spin_lock_irqsave(&dev->drw_lock, irqflags);
- ret = idr_get_new_above(&dev->drw_idr, NULL, 1, &new_id);
- if (ret == -EAGAIN) {
- spin_unlock_irqrestore(&dev->drw_lock, irqflags);
- goto again;
- }
-
- spin_unlock_irqrestore(&dev->drw_lock, irqflags);
-
- draw->handle = new_id;
-
- DRM_DEBUG("%d\n", draw->handle);
-
- return 0;
-}
-
-/**
- * Free drawable ID and memory to store information about it.
- */
-int drm_rmdraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- struct drm_draw *draw = data;
- unsigned long irqflags;
- struct drm_drawable_info *info;
-
- spin_lock_irqsave(&dev->drw_lock, irqflags);
-
- info = drm_get_drawable_info(dev, draw->handle);
- if (info == NULL) {
- spin_unlock_irqrestore(&dev->drw_lock, irqflags);
- return -EINVAL;
- }
- kfree(info->rects);
- kfree(info);
-
- idr_remove(&dev->drw_idr, draw->handle);
-
- spin_unlock_irqrestore(&dev->drw_lock, irqflags);
- DRM_DEBUG("%d\n", draw->handle);
- return 0;
-}
-
-int drm_update_drawable_info(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- struct drm_update_draw *update = data;
- unsigned long irqflags;
- struct drm_clip_rect *rects;
- struct drm_drawable_info *info;
- int err;
-
- info = idr_find(&dev->drw_idr, update->handle);
- if (!info) {
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
- return -ENOMEM;
- if (IS_ERR(idr_replace(&dev->drw_idr, info, update->handle))) {
- DRM_ERROR("No such drawable %d\n", update->handle);
- kfree(info);
- return -EINVAL;
- }
- }
-
- switch (update->type) {
- case DRM_DRAWABLE_CLIPRECTS:
- if (update->num == 0)
- rects = NULL;
- else if (update->num != info->num_rects) {
- rects = kmalloc(update->num *
- sizeof(struct drm_clip_rect),
- GFP_KERNEL);
- } else
- rects = info->rects;
-
- if (update->num && !rects) {
- DRM_ERROR("Failed to allocate cliprect memory\n");
- err = -ENOMEM;
- goto error;
- }
-
- if (update->num && DRM_COPY_FROM_USER(rects,
- (struct drm_clip_rect __user *)
- (unsigned long)update->data,
- update->num *
- sizeof(*rects))) {
- DRM_ERROR("Failed to copy cliprects from userspace\n");
- err = -EFAULT;
- goto error;
- }
-
- spin_lock_irqsave(&dev->drw_lock, irqflags);
-
- if (rects != info->rects) {
- kfree(info->rects);
- }
-
- info->rects = rects;
- info->num_rects = update->num;
-
- spin_unlock_irqrestore(&dev->drw_lock, irqflags);
-
- DRM_DEBUG("Updated %d cliprects for drawable %d\n",
- info->num_rects, update->handle);
- break;
- default:
- DRM_ERROR("Invalid update type %d\n", update->type);
- return -EINVAL;
- }
-
- return 0;
-
-error:
- if (rects != info->rects)
- kfree(rects);
-
- return err;
-}
-
-/**
- * Caller must hold the drawable spinlock!
- */
-struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev, drm_drawable_t id)
-{
- return idr_find(&dev->drw_idr, id);
-}
-EXPORT_SYMBOL(drm_get_drawable_info);
-
-static int drm_drawable_free(int idr, void *p, void *data)
-{
- struct drm_drawable_info *info = p;
-
- if (info) {
- kfree(info->rects);
- kfree(info);
- }
-
- return 0;
-}
-
-void drm_drawable_free_all(struct drm_device *dev)
-{
- idr_for_each(&dev->drw_idr, drm_drawable_free, NULL);
- idr_remove_all(&dev->drw_idr);
-}
+++ /dev/null
-/**
- * \file drm_drv.c
- * Generic driver template
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- *
- * To use this template, you must at least define the following (samples
- * given for the MGA driver):
- *
- * \code
- * #define DRIVER_AUTHOR "VA Linux Systems, Inc."
- *
- * #define DRIVER_NAME "mga"
- * #define DRIVER_DESC "Matrox G200/G400"
- * #define DRIVER_DATE "20001127"
- *
- * #define drm_x mga_##x
- * \endcode
- */
-
-/*
- * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
- *
- * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/debugfs.h>
-#include "drmP.h"
-#include "drm_core.h"
-
-
-static int drm_version(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-
-/** Ioctl table */
-static struct drm_ioctl_desc drm_ioctls[] = {
- DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
-
- DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER),
-
- DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),
-
- DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH),
-
- DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY),
-
- DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH),
-
- DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-
- DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),
-
- DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),
-
- DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
- /* The DRM_IOCTL_DMA ioctl should be defined by the driver. */
- DRM_IOCTL_DEF(DRM_IOCTL_DMA, NULL, DRM_AUTH),
-
- DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-
-#if __OS_HAS_AGP
- DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-#endif
-
- DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-
- DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0),
-
- DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
-
- DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-
- DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH),
-
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW)
-};
-
-#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
-
-/**
- * Take down the DRM device.
- *
- * \param dev DRM device structure.
- *
- * Frees every resource in \p dev.
- *
- * \sa drm_device
- */
-int drm_lastclose(struct drm_device * dev)
-{
- struct drm_vma_entry *vma, *vma_temp;
- int i;
-
- DRM_DEBUG("\n");
-
- if (dev->driver->lastclose)
- dev->driver->lastclose(dev);
- DRM_DEBUG("driver lastclose completed\n");
-
- if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET))
- drm_irq_uninstall(dev);
-
- mutex_lock(&dev->struct_mutex);
-
- /* Free drawable information memory */
- drm_drawable_free_all(dev);
- del_timer(&dev->timer);
-
- /* Clear AGP information */
- if (drm_core_has_AGP(dev) && dev->agp &&
- !drm_core_check_feature(dev, DRIVER_MODESET)) {
- struct drm_agp_mem *entry, *tempe;
-
- /* Remove AGP resources, but leave dev->agp
- intact until drv_cleanup is called. */
- list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
- if (entry->bound)
- drm_unbind_agp(entry->memory);
- drm_free_agp(entry->memory, entry->pages);
- kfree(entry);
- }
- INIT_LIST_HEAD(&dev->agp->memory);
-
- if (dev->agp->acquired)
- drm_agp_release(dev);
-
- dev->agp->acquired = 0;
- dev->agp->enabled = 0;
- }
- if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg &&
- !drm_core_check_feature(dev, DRIVER_MODESET)) {
- drm_sg_cleanup(dev->sg);
- dev->sg = NULL;
- }
-
- /* Clear vma list (only built for debugging) */
- list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
- list_del(&vma->head);
- kfree(vma);
- }
-
- if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist) {
- for (i = 0; i < dev->queue_count; i++) {
- kfree(dev->queuelist[i]);
- dev->queuelist[i] = NULL;
- }
- kfree(dev->queuelist);
- dev->queuelist = NULL;
- }
- dev->queue_count = 0;
-
- if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
- !drm_core_check_feature(dev, DRIVER_MODESET))
- drm_dma_takedown(dev);
-
- dev->dev_mapping = NULL;
- mutex_unlock(&dev->struct_mutex);
-
- DRM_DEBUG("lastclose completed\n");
- return 0;
-}
-
-/**
- * Module initialization. Called via init_module at module load time, or via
- * linux/init/main.c (this is not currently supported).
- *
- * \return zero on success or a negative number on failure.
- *
- * Initializes an array of drm_device structures, and attempts to
- * initialize all available devices, using consecutive minors, registering the
- * stubs and initializing the AGP device.
- *
- * Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
- * after the initialization for driver customization.
- */
-int drm_init(struct drm_driver *driver)
-{
- struct pci_dev *pdev = NULL;
- const struct pci_device_id *pid;
- int i;
-
- DRM_DEBUG("\n");
-
- INIT_LIST_HEAD(&driver->device_list);
-
- if (driver->driver_features & DRIVER_MODESET)
- return pci_register_driver(&driver->pci_driver);
-
- /* If not using KMS, fall back to stealth mode manual scanning. */
- for (i = 0; driver->pci_driver.id_table[i].vendor != 0; i++) {
- pid = &driver->pci_driver.id_table[i];
-
- /* Loop around setting up a DRM device for each PCI device
- * matching our ID and device class. If we had the internal
- * function that pci_get_subsys and pci_get_class used, we'd
- * be able to just pass pid in instead of doing a two-stage
- * thing.
- */
- pdev = NULL;
- while ((pdev =
- pci_get_subsys(pid->vendor, pid->device, pid->subvendor,
- pid->subdevice, pdev)) != NULL) {
- if ((pdev->class & pid->class_mask) != pid->class)
- continue;
-
- /* stealth mode requires a manual probe */
- pci_dev_get(pdev);
- drm_get_dev(pdev, pid, driver);
- }
- }
- return 0;
-}
-
-EXPORT_SYMBOL(drm_init);
-
-void drm_exit(struct drm_driver *driver)
-{
- struct drm_device *dev, *tmp;
- DRM_DEBUG("\n");
-
- if (driver->driver_features & DRIVER_MODESET) {
- pci_unregister_driver(&driver->pci_driver);
- } else {
- list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
- drm_put_dev(dev);
- }
-
- DRM_INFO("Module unloaded\n");
-}
-
-EXPORT_SYMBOL(drm_exit);
-
-/** File operations structure */
-static const struct file_operations drm_stub_fops = {
- .owner = THIS_MODULE,
- .open = drm_stub_open
-};
-
-static int __init drm_core_init(void)
-{
- int ret = -ENOMEM;
-
- idr_init(&drm_minors_idr);
-
- if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
- goto err_p1;
-
- drm_class = drm_sysfs_create(THIS_MODULE, "drm");
- if (IS_ERR(drm_class)) {
- printk(KERN_ERR "DRM: Error creating drm class.\n");
- ret = PTR_ERR(drm_class);
- goto err_p2;
- }
-
- drm_proc_root = proc_mkdir("dri", NULL);
- if (!drm_proc_root) {
- DRM_ERROR("Cannot create /proc/dri\n");
- ret = -1;
- goto err_p3;
- }
-
- drm_debugfs_root = debugfs_create_dir("dri", NULL);
- if (!drm_debugfs_root) {
- DRM_ERROR("Cannot create /sys/kernel/debug/dri\n");
- ret = -1;
- goto err_p3;
- }
-
- DRM_INFO("Initialized %s %d.%d.%d %s\n",
- CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
- return 0;
-err_p3:
- drm_sysfs_destroy();
-err_p2:
- unregister_chrdev(DRM_MAJOR, "drm");
-
- idr_destroy(&drm_minors_idr);
-err_p1:
- return ret;
-}
-
-static void __exit drm_core_exit(void)
-{
- remove_proc_entry("dri", NULL);
- debugfs_remove(drm_debugfs_root);
- drm_sysfs_destroy();
-
- unregister_chrdev(DRM_MAJOR, "drm");
-
- idr_destroy(&drm_minors_idr);
-}
-
-module_init(drm_core_init);
-module_exit(drm_core_exit);
-
-/**
- * Copy an IOCTL return string to user space
- */
-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
-{
- int len;
-
- /* don't overflow userbuf */
- len = strlen(value);
- if (len > *buf_len)
- len = *buf_len;
-
- /* let userspace know exact length of driver value (which could be
- * larger than the userspace-supplied buffer) */
- *buf_len = strlen(value);
-
- /* finally, try filling in the userbuf */
- if (len && buf)
- if (copy_to_user(buf, value, len))
- return -EFAULT;
- return 0;
-}
-
-/**
- * Get version information
- *
- * \param inode device inode.
- * \param filp file pointer.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_version structure.
- * \return zero on success or negative number on failure.
- *
- * Fills in the version information in \p arg.
- */
-static int drm_version(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_version *version = data;
- int err;
-
- version->version_major = dev->driver->major;
- version->version_minor = dev->driver->minor;
- version->version_patchlevel = dev->driver->patchlevel;
- err = drm_copy_field(version->name, &version->name_len,
- dev->driver->name);
- if (!err)
- err = drm_copy_field(version->date, &version->date_len,
- dev->driver->date);
- if (!err)
- err = drm_copy_field(version->desc, &version->desc_len,
- dev->driver->desc);
-
- return err;
-}
-
-/**
- * Called whenever a process performs an ioctl on /dev/drm.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument.
- * \return zero on success or negative number on failure.
- *
- * Looks up the ioctl function in the ::ioctls table, checking for root
- * privileges if so required, and dispatches to the respective function.
- */
-int drm_ioctl(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
-{
- struct drm_file *file_priv = filp->private_data;
- struct drm_device *dev = file_priv->minor->dev;
- struct drm_ioctl_desc *ioctl;
- drm_ioctl_t *func;
- unsigned int nr = DRM_IOCTL_NR(cmd);
- int retcode = -EINVAL;
- char stack_kdata[128];
- char *kdata = NULL;
-
- atomic_inc(&dev->ioctl_count);
- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
- ++file_priv->ioctl_count;
-
- DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
- task_pid_nr(current), cmd, nr,
- (long)old_encode_dev(file_priv->minor->device),
- file_priv->authenticated);
-
- if ((nr >= DRM_CORE_IOCTL_COUNT) &&
- ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
- goto err_i1;
- if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) &&
- (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls))
- ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
- else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
- ioctl = &drm_ioctls[nr];
- cmd = ioctl->cmd;
- } else
- goto err_i1;
-
- /* Do not trust userspace, use our own definition */
- func = ioctl->func;
- /* is there a local override? */
- if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl)
- func = dev->driver->dma_ioctl;
-
- if (!func) {
- DRM_DEBUG("no function\n");
- retcode = -EINVAL;
- } else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) ||
- ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) ||
- ((ioctl->flags & DRM_MASTER) && !file_priv->is_master) ||
- (!(ioctl->flags & DRM_CONTROL_ALLOW) && (file_priv->minor->type == DRM_MINOR_CONTROL))) {
- retcode = -EACCES;
- } else {
- if (cmd & (IOC_IN | IOC_OUT)) {
- if (_IOC_SIZE(cmd) <= sizeof(stack_kdata)) {
- kdata = stack_kdata;
- } else {
- kdata = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL);
- if (!kdata) {
- retcode = -ENOMEM;
- goto err_i1;
- }
- }
- }
-
- if (cmd & IOC_IN) {
- if (copy_from_user(kdata, (void __user *)arg,
- _IOC_SIZE(cmd)) != 0) {
- retcode = -EFAULT;
- goto err_i1;
- }
- }
- retcode = func(dev, kdata, file_priv);
-
- if (cmd & IOC_OUT) {
- if (copy_to_user((void __user *)arg, kdata,
- _IOC_SIZE(cmd)) != 0)
- retcode = -EFAULT;
- }
- }
-
- err_i1:
- if (kdata != stack_kdata)
- kfree(kdata);
- atomic_dec(&dev->ioctl_count);
- if (retcode)
- DRM_DEBUG("ret = %x\n", retcode);
- return retcode;
-}
-
-EXPORT_SYMBOL(drm_ioctl);
-
-struct drm_local_map *drm_getsarea(struct drm_device *dev)
-{
- struct drm_map_list *entry;
-
- list_for_each_entry(entry, &dev->maplist, head) {
- if (entry->map && entry->map->type == _DRM_SHM &&
- (entry->map->flags & _DRM_CONTAINS_LOCK)) {
- return entry->map;
- }
- }
- return NULL;
-}
-EXPORT_SYMBOL(drm_getsarea);
+++ /dev/null
-/*
- * Copyright (c) 2006 Luc Verhaegen (quirks list)
- * Copyright (c) 2007-2008 Intel Corporation
- * Jesse Barnes <jesse.barnes@intel.com>
- *
- * DDC probing routines (drm_ddc_read & drm_do_probe_ddc_edid) originally from
- * FB layer.
- * Copyright (C) 2006 Dennis Munsie <dmunsie@cecropia.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-#include <linux/kernel.h>
-#include <linux/i2c.h>
-#include <linux/i2c-algo-bit.h>
-#include "drmP.h"
-#include "drm_edid.h"
-
-/*
- * TODO:
- * - support EDID 1.4 (incl. CE blocks)
- */
-
-/*
- * EDID blocks out in the wild have a variety of bugs, try to collect
- * them here (note that userspace may work around broken monitors first,
- * but fixes should make their way here so that the kernel "just works"
- * on as many displays as possible).
- */
-
-/* First detailed mode wrong, use largest 60Hz mode */
-#define EDID_QUIRK_PREFER_LARGE_60 (1 << 0)
-/* Reported 135MHz pixel clock is too high, needs adjustment */
-#define EDID_QUIRK_135_CLOCK_TOO_HIGH (1 << 1)
-/* Prefer the largest mode at 75 Hz */
-#define EDID_QUIRK_PREFER_LARGE_75 (1 << 2)
-/* Detailed timing is in cm, not mm */
-#define EDID_QUIRK_DETAILED_IN_CM (1 << 3)
-/* Detailed timing descriptors have bogus size values, so just take the
- * maximum size and use that.
- */
-#define EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE (1 << 4)
-/* Monitor forgot to set the "first detailed timing is preferred" bit. */
-#define EDID_QUIRK_FIRST_DETAILED_PREFERRED (1 << 5)
-/* use +hsync +vsync for detailed mode */
-#define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6)
-/* maximum number of EDID extension blocks */
-#define MAX_EDID_EXT_NUM 4
-
-#define LEVEL_DMT 0
-#define LEVEL_GTF 1
-#define LEVEL_CVT 2
-
-static struct edid_quirk {
- char *vendor;
- int product_id;
- u32 quirks;
-} edid_quirk_list[] = {
- /* Acer AL1706 */
- { "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 },
- /* Acer F51 */
- { "API", 0x7602, EDID_QUIRK_PREFER_LARGE_60 },
- /* Unknown Acer */
- { "ACR", 2423, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
-
- /* Belinea 10 15 55 */
- { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
- { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },
-
- /* Envision Peripherals, Inc. EN-7100e */
- { "EPI", 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH },
-
- /* Funai Electronics PM36B */
- { "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 |
- EDID_QUIRK_DETAILED_IN_CM },
-
- /* LG Philips LCD LP154W01-A5 */
- { "LPL", 0, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
- { "LPL", 0x2a00, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
-
- /* Philips 107p5 CRT */
- { "PHL", 57364, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
-
- /* Proview AY765C */
- { "PTS", 765, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
-
- /* Samsung SyncMaster 205BW. Note: irony */
- { "SAM", 541, EDID_QUIRK_DETAILED_SYNC_PP },
- /* Samsung SyncMaster 22[5-6]BW */
- { "SAM", 596, EDID_QUIRK_PREFER_LARGE_60 },
- { "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 },
-};
-
-
-/* Valid EDID header has these bytes */
-static const u8 edid_header[] = {
- 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
-};
-
-/**
- * edid_is_valid - sanity check EDID data
- * @edid: EDID data
- *
- * Sanity check the EDID block by looking at the header, the version number
- * and the checksum. Return 0 if the EDID doesn't check out, or 1 if it's
- * valid.
- */
-static bool edid_is_valid(struct edid *edid)
-{
- int i, score = 0;
- u8 csum = 0;
- u8 *raw_edid = (u8 *)edid;
-
- for (i = 0; i < sizeof(edid_header); i++)
- if (raw_edid[i] == edid_header[i])
- score++;
-
- if (score < 6)
- goto bad;
- if (score < 8) {
- DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
- memcpy(raw_edid, edid_header, sizeof(edid_header));
- }
-
- for (i = 0; i < EDID_LENGTH; i++)
- csum += raw_edid[i];
- if (csum) {
- DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum);
- goto bad;
- }
-
- if (edid->version != 1) {
- DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
- goto bad;
- }
-
- if (edid->revision > 4)
- DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n");
-
- return 1;
-
-bad:
- if (raw_edid) {
- DRM_ERROR("Raw EDID:\n");
- print_hex_dump_bytes(KERN_ERR, DUMP_PREFIX_NONE, raw_edid, EDID_LENGTH);
- printk("\n");
- }
- return 0;
-}
-
-/**
- * edid_vendor - match a string against EDID's obfuscated vendor field
- * @edid: EDID to match
- * @vendor: vendor string
- *
- * Returns true if @vendor is in @edid, false otherwise
- */
-static bool edid_vendor(struct edid *edid, char *vendor)
-{
- char edid_vendor[3];
-
- edid_vendor[0] = ((edid->mfg_id[0] & 0x7c) >> 2) + '@';
- edid_vendor[1] = (((edid->mfg_id[0] & 0x3) << 3) |
- ((edid->mfg_id[1] & 0xe0) >> 5)) + '@';
- edid_vendor[2] = (edid->mfg_id[1] & 0x1f) + '@';
-
- return !strncmp(edid_vendor, vendor, 3);
-}
-
-/**
- * edid_get_quirks - return quirk flags for a given EDID
- * @edid: EDID to process
- *
- * This tells subsequent routines what fixes they need to apply.
- */
-static u32 edid_get_quirks(struct edid *edid)
-{
- struct edid_quirk *quirk;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(edid_quirk_list); i++) {
- quirk = &edid_quirk_list[i];
-
- if (edid_vendor(edid, quirk->vendor) &&
- (EDID_PRODUCT_ID(edid) == quirk->product_id))
- return quirk->quirks;
- }
-
- return 0;
-}
-
-#define MODE_SIZE(m) ((m)->hdisplay * (m)->vdisplay)
-#define MODE_REFRESH_DIFF(m, r) (abs((m)->vrefresh - (r)))
-
-
-/**
- * edid_fixup_preferred - set preferred modes based on quirk list
- * @connector: has mode list to fix up
- * @quirks: quirks list
- *
- * Walk the mode list for @connector, clearing the preferred status
- * on existing modes and setting it anew for the right mode ala @quirks.
- */
-static void edid_fixup_preferred(struct drm_connector *connector,
- u32 quirks)
-{
- struct drm_display_mode *t, *cur_mode, *preferred_mode;
- int target_refresh = 0;
-
- if (list_empty(&connector->probed_modes))
- return;
-
- if (quirks & EDID_QUIRK_PREFER_LARGE_60)
- target_refresh = 60;
- if (quirks & EDID_QUIRK_PREFER_LARGE_75)
- target_refresh = 75;
-
- preferred_mode = list_first_entry(&connector->probed_modes,
- struct drm_display_mode, head);
-
- list_for_each_entry_safe(cur_mode, t, &connector->probed_modes, head) {
- cur_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
-
- if (cur_mode == preferred_mode)
- continue;
-
- /* Largest mode is preferred */
- if (MODE_SIZE(cur_mode) > MODE_SIZE(preferred_mode))
- preferred_mode = cur_mode;
-
- /* At a given size, try to get closest to target refresh */
- if ((MODE_SIZE(cur_mode) == MODE_SIZE(preferred_mode)) &&
- MODE_REFRESH_DIFF(cur_mode, target_refresh) <
- MODE_REFRESH_DIFF(preferred_mode, target_refresh)) {
- preferred_mode = cur_mode;
- }
- }
-
- preferred_mode->type |= DRM_MODE_TYPE_PREFERRED;
-}
-
-/*
- * Autogenerated from the DMT spec.
- * This table is copied from xfree86/modes/xf86EdidModes.c,
- * with the reduced-blanking modes removed.
- */
-static struct drm_display_mode drm_dmt_modes[] = {
- /* 640x350@85Hz */
- { DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
- 736, 832, 0, 350, 382, 385, 445, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 640x400@85Hz */
- { DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
- 736, 832, 0, 400, 401, 404, 445, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 720x400@85Hz */
- { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 756,
- 828, 936, 0, 400, 401, 404, 446, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 640x480@60Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
- 752, 800, 0, 480, 489, 492, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 640x480@72Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
- 704, 832, 0, 480, 489, 492, 520, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 640x480@75Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
- 720, 840, 0, 480, 481, 484, 500, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 640x480@85Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 36000, 640, 696,
- 752, 832, 0, 480, 481, 484, 509, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 800x600@56Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
- 896, 1024, 0, 600, 601, 603, 625, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 800x600@60Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
- 968, 1056, 0, 600, 601, 605, 628, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 800x600@72Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
- 976, 1040, 0, 600, 637, 643, 666, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 800x600@75Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
- 896, 1056, 0, 600, 601, 604, 625, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 800x600@85Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 56250, 800, 832,
- 896, 1048, 0, 600, 601, 604, 631, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 848x480@60Hz */
- { DRM_MODE("848x480", DRM_MODE_TYPE_DRIVER, 33750, 848, 864,
- 976, 1088, 0, 480, 486, 494, 517, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1024x768@43Hz, interlace */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
- 1208, 1264, 0, 768, 768, 772, 817, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 1024x768@60Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
- 1184, 1344, 0, 768, 771, 777, 806, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1024x768@70Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
- 1184, 1328, 0, 768, 771, 777, 806, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1024x768@75Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78750, 1024, 1040,
- 1136, 1312, 0, 768, 769, 772, 800, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1024x768@85Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072,
- 1168, 1376, 0, 768, 769, 772, 808, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1152x864@75Hz */
- { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
- 1344, 1600, 0, 864, 865, 868, 900, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x768@60Hz */
- { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
- 1472, 1664, 0, 768, 771, 778, 798, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x768@75Hz */
- { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 102250, 1280, 1360,
- 1488, 1696, 0, 768, 771, 778, 805, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1280x768@85Hz */
- { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 117500, 1280, 1360,
- 1496, 1712, 0, 768, 771, 778, 809, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x800@60Hz */
- { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
- 1480, 1680, 0, 800, 803, 809, 831, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1280x800@75Hz */
- { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 106500, 1280, 1360,
- 1488, 1696, 0, 800, 803, 809, 838, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x800@85Hz */
- { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 122500, 1280, 1360,
- 1496, 1712, 0, 800, 803, 809, 843, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x960@60Hz */
- { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
- 1488, 1800, 0, 960, 961, 964, 1000, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x960@85Hz */
- { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1344,
- 1504, 1728, 0, 960, 961, 964, 1011, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x1024@60Hz */
- { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
- 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x1024@75Hz */
- { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
- 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x1024@85Hz */
- { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 157500, 1280, 1344,
- 1504, 1728, 0, 1024, 1025, 1028, 1072, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1360x768@60Hz */
- { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
- 1536, 1792, 0, 768, 771, 777, 795, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
-	/* 1400x1050@60Hz */
- { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
- 1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
-	/* 1400x1050@75Hz */
- { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 156000, 1400, 1504,
- 1648, 1896, 0, 1050, 1053, 1057, 1099, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
-	/* 1400x1050@85Hz */
- { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 179500, 1400, 1504,
- 1656, 1912, 0, 1050, 1053, 1057, 1105, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1440x900@60Hz */
- { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
- 1672, 1904, 0, 900, 903, 909, 934, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1440x900@75Hz */
- { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 136750, 1440, 1536,
- 1688, 1936, 0, 900, 903, 909, 942, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1440x900@85Hz */
- { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 157000, 1440, 1544,
- 1696, 1952, 0, 900, 903, 909, 948, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1600x1200@60Hz */
- { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
- 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1600x1200@65Hz */
- { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 175500, 1600, 1664,
- 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1600x1200@70Hz */
- { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 189000, 1600, 1664,
- 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1600x1200@75Hz */
-	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 202500, 1600, 1664,
- 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1600x1200@85Hz */
- { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 229500, 1600, 1664,
- 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1680x1050@60Hz */
- { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
- 1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1680x1050@75Hz */
- { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 187000, 1680, 1800,
- 1976, 2272, 0, 1050, 1053, 1059, 1099, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1680x1050@85Hz */
- { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 214750, 1680, 1808,
- 1984, 2288, 0, 1050, 1053, 1059, 1105, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1792x1344@60Hz */
- { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
- 2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
-	/* 1792x1344@75Hz */
- { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 261000, 1792, 1888,
- 2104, 2456, 0, 1344, 1345, 1348, 1417, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
-	/* 1856x1392@60Hz */
- { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
- 2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1856x1392@75Hz */
- { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 288000, 1856, 1984,
- 2208, 2560, 0, 1392, 1395, 1399, 1500, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1200@60Hz */
- { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
- 2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1200@75Hz */
- { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 245250, 1920, 2056,
- 2264, 2608, 0, 1200, 1203, 1209, 1255, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1200@85Hz */
- { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 281250, 1920, 2064,
- 2272, 2624, 0, 1200, 1203, 1209, 1262, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1440@60Hz */
- { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
- 2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1440@75Hz */
- { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2064,
- 2288, 2640, 0, 1440, 1441, 1444, 1500, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 2560x1600@60Hz */
- { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
- 3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
-	/* 2560x1600@75Hz */
- { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 443250, 2560, 2768,
- 3048, 3536, 0, 1600, 1603, 1609, 1672, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
-	/* 2560x1600@85Hz */
- { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 505250, 2560, 2768,
- 3048, 3536, 0, 1600, 1603, 1609, 1682, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
-};
-static const int drm_num_dmt_modes =
- sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
-
-static struct drm_display_mode *drm_find_dmt(struct drm_device *dev,
- int hsize, int vsize, int fresh)
-{
- int i;
- struct drm_display_mode *ptr, *mode;
-
- mode = NULL;
- for (i = 0; i < drm_num_dmt_modes; i++) {
- ptr = &drm_dmt_modes[i];
- if (hsize == ptr->hdisplay &&
- vsize == ptr->vdisplay &&
- fresh == drm_mode_vrefresh(ptr)) {
- /* get the expected default mode */
- mode = drm_mode_duplicate(dev, ptr);
- break;
- }
- }
- return mode;
-}
-
-/*
- * 0 is reserved. The spec says 0x01 fill for unused timings. Some old
- * monitors fill with ascii space (0x20) instead.
- */
-static int
-bad_std_timing(u8 a, u8 b)
-{
- return (a == 0x00 && b == 0x00) ||
- (a == 0x01 && b == 0x01) ||
- (a == 0x20 && b == 0x20);
-}
-
-/**
- * drm_mode_std - convert standard mode info (width, height, refresh) into mode
- * @t: standard timing params
- * @timing_level: standard timing level
- *
- * Take the standard timing params (in this case width, aspect, and refresh)
- * and convert them into a real mode using CVT/GTF/DMT.
- *
- * Punts for now, but should eventually use the FB layer's CVT based mode
- * generation code.
- */
-struct drm_display_mode *drm_mode_std(struct drm_device *dev,
- struct std_timing *t,
- int revision,
- int timing_level)
-{
- struct drm_display_mode *mode;
- int hsize, vsize;
- int vrefresh_rate;
- unsigned aspect_ratio = (t->vfreq_aspect & EDID_TIMING_ASPECT_MASK)
- >> EDID_TIMING_ASPECT_SHIFT;
- unsigned vfreq = (t->vfreq_aspect & EDID_TIMING_VFREQ_MASK)
- >> EDID_TIMING_VFREQ_SHIFT;
-
- if (bad_std_timing(t->hsize, t->vfreq_aspect))
- return NULL;
-
- /* According to the EDID spec, the hdisplay = hsize * 8 + 248 */
- hsize = t->hsize * 8 + 248;
- /* vrefresh_rate = vfreq + 60 */
- vrefresh_rate = vfreq + 60;
- /* the vdisplay is calculated based on the aspect ratio */
- if (aspect_ratio == 0) {
- if (revision < 3)
- vsize = hsize;
- else
- vsize = (hsize * 10) / 16;
- } else if (aspect_ratio == 1)
- vsize = (hsize * 3) / 4;
- else if (aspect_ratio == 2)
- vsize = (hsize * 4) / 5;
- else
- vsize = (hsize * 9) / 16;
- /* HDTV hack */
- if (hsize == 1360 && vsize == 765 && vrefresh_rate == 60) {
- mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0,
- false);
- mode->hdisplay = 1366;
- mode->vsync_start = mode->vsync_start - 1;
- mode->vsync_end = mode->vsync_end - 1;
- return mode;
- }
- mode = NULL;
- /* check whether it can be found in default mode table */
- mode = drm_find_dmt(dev, hsize, vsize, vrefresh_rate);
- if (mode)
- return mode;
-
- switch (timing_level) {
- case LEVEL_DMT:
- break;
- case LEVEL_GTF:
- mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
- break;
- case LEVEL_CVT:
- mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0,
- false);
- break;
- }
- return mode;
-}
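
A minimal standalone sketch of the standard-timing arithmetic above, for reference. It decodes one two-byte descriptor the same way drm_mode_std() does, assuming the EDID 1.3+ aspect-ratio mapping; the byte values are made up for illustration and it builds as ordinary userspace C:

#include <stdio.h>

int main(void)
{
	/* Hypothetical standard timing descriptor bytes. */
	unsigned char hsize_byte = 0x81;	/* (0x81 * 8) + 248 = 1280 */
	unsigned char vfreq_aspect = 0x00;	/* vfreq 0 -> 60 Hz, aspect code 0 */

	int hsize = hsize_byte * 8 + 248;
	int vrefresh = (vfreq_aspect & 0x3f) + 60;
	int aspect = (vfreq_aspect >> 6) & 0x3;
	int vsize;

	switch (aspect) {
	case 0:  vsize = (hsize * 10) / 16; break;	/* 16:10 (EDID >= 1.3) */
	case 1:  vsize = (hsize * 3) / 4;   break;	/* 4:3 */
	case 2:  vsize = (hsize * 4) / 5;   break;	/* 5:4 */
	default: vsize = (hsize * 9) / 16;  break;	/* 16:9 */
	}

	printf("%dx%d@%dHz\n", hsize, vsize, vrefresh);	/* 1280x800@60Hz */
	return 0;
}
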
-
-/**
- * drm_mode_detailed - create a new mode from an EDID detailed timing section
- * @dev: DRM device (needed to create new mode)
- * @edid: EDID block
- * @timing: EDID detailed timing info
- * @quirks: quirks to apply
- *
- * An EDID detailed timing block contains enough info for us to create and
- * return a new struct drm_display_mode.
- */
-static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
- struct edid *edid,
- struct detailed_timing *timing,
- u32 quirks)
-{
- struct drm_display_mode *mode;
- struct detailed_pixel_timing *pt = &timing->data.pixel_data;
- unsigned hactive = (pt->hactive_hblank_hi & 0xf0) << 4 | pt->hactive_lo;
- unsigned vactive = (pt->vactive_vblank_hi & 0xf0) << 4 | pt->vactive_lo;
- unsigned hblank = (pt->hactive_hblank_hi & 0xf) << 8 | pt->hblank_lo;
- unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo;
- unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) << 2 | pt->hsync_offset_lo;
- unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) << 4 | pt->hsync_pulse_width_lo;
- unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) >> 2 | pt->vsync_offset_pulse_width_lo >> 4;
- unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 4 | (pt->vsync_offset_pulse_width_lo & 0xf);
-
- /* ignore tiny modes */
- if (hactive < 64 || vactive < 64)
- return NULL;
-
- if (pt->misc & DRM_EDID_PT_STEREO) {
- printk(KERN_WARNING "stereo mode not supported\n");
- return NULL;
- }
- if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) {
- printk(KERN_WARNING "integrated sync not supported\n");
- return NULL;
- }
-
- /* it is incorrect if hsync/vsync width is zero */
- if (!hsync_pulse_width || !vsync_pulse_width) {
- DRM_DEBUG_KMS("Incorrect Detailed timing. "
- "Wrong Hsync/Vsync pulse width\n");
- return NULL;
- }
- mode = drm_mode_create(dev);
- if (!mode)
- return NULL;
-
- mode->type = DRM_MODE_TYPE_DRIVER;
-
- if (quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH)
- timing->pixel_clock = cpu_to_le16(1088);
-
- mode->clock = le16_to_cpu(timing->pixel_clock) * 10;
-
- mode->hdisplay = hactive;
- mode->hsync_start = mode->hdisplay + hsync_offset;
- mode->hsync_end = mode->hsync_start + hsync_pulse_width;
- mode->htotal = mode->hdisplay + hblank;
-
- mode->vdisplay = vactive;
- mode->vsync_start = mode->vdisplay + vsync_offset;
- mode->vsync_end = mode->vsync_start + vsync_pulse_width;
- mode->vtotal = mode->vdisplay + vblank;
-
- /* perform the basic check for the detailed timing */
- if (mode->hsync_end > mode->htotal ||
- mode->vsync_end > mode->vtotal) {
- drm_mode_destroy(dev, mode);
- DRM_DEBUG_KMS("Incorrect detailed timing. "
- "Sync is beyond the blank.\n");
- return NULL;
- }
-
- /* Some EDIDs have bogus h/vtotal values */
- if (mode->hsync_end > mode->htotal)
- mode->htotal = mode->hsync_end + 1;
- if (mode->vsync_end > mode->vtotal)
- mode->vtotal = mode->vsync_end + 1;
-
- drm_mode_set_name(mode);
-
- if (pt->misc & DRM_EDID_PT_INTERLACED)
- mode->flags |= DRM_MODE_FLAG_INTERLACE;
-
- if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
- pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
- }
-
- mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ?
- DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
- mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
- DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
-
- mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4;
- mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf) << 8;
-
- if (quirks & EDID_QUIRK_DETAILED_IN_CM) {
- mode->width_mm *= 10;
- mode->height_mm *= 10;
- }
-
- if (quirks & EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE) {
- mode->width_mm = edid->width_cm * 10;
- mode->height_mm = edid->height_cm * 10;
- }
-
- return mode;
-}
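
The same unpacking can be exercised outside the kernel. The sketch below mirrors the bit extraction in drm_mode_detailed() for the first bytes of an 18-byte detailed timing descriptor; the descriptor contents are fabricated (roughly a 1024x768@60 timing), not read from real hardware:

#include <stdio.h>

int main(void)
{
	/* First 12 bytes of a hypothetical detailed timing descriptor:
	 * pixel clock (little-endian, units of 10 kHz), then the packed
	 * active/blank and sync fields, unpacked as drm_mode_detailed() does.
	 */
	unsigned char d[12] = {
		0x64, 0x19,			/* pixel clock: 0x1964 * 10 = 65000 kHz */
		0x00, 0x40, 0x41,		/* hactive 1024, hblank 320 */
		0x00, 0x26, 0x30,		/* vactive 768, vblank 38 */
		0x18, 0x88, 0x36, 0x00		/* sync offsets / pulse widths */
	};

	int clock = (d[0] | (d[1] << 8)) * 10;
	int hactive = ((d[4] & 0xf0) << 4) | d[2];
	int hblank  = ((d[4] & 0x0f) << 8) | d[3];
	int vactive = ((d[7] & 0xf0) << 4) | d[5];
	int vblank  = ((d[7] & 0x0f) << 8) | d[6];
	int hsync_off   = ((d[11] & 0xc0) << 2) | d[8];
	int hsync_width = ((d[11] & 0x30) << 4) | d[9];

	printf("%dx%d, clock %d kHz, hblank %d, vblank %d, "
	       "hsync offset %d width %d\n",
	       hactive, vactive, clock, hblank, vblank,
	       hsync_off, hsync_width);
	return 0;
}
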
-
-/*
- * Detailed mode info for the EDID "established modes" data to use.
- */
-static struct drm_display_mode edid_est_modes[] = {
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
- 968, 1056, 0, 600, 601, 605, 628, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@60Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
- 896, 1024, 0, 600, 601, 603, 625, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@56Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
- 720, 840, 0, 480, 481, 484, 500, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
- 704, 832, 0, 480, 489, 491, 520, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704,
- 768, 864, 0, 480, 483, 486, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@67Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25200, 640, 656,
- 752, 800, 0, 480, 490, 492, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@60Hz */
- { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738,
- 846, 900, 0, 400, 421, 423, 449, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 720x400@88Hz */
- { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 28320, 720, 738,
- 846, 900, 0, 400, 412, 414, 449, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 720x400@70Hz */
- { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
- 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@75Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78800, 1024, 1040,
- 1136, 1312, 0, 768, 769, 772, 800, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@75Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
- 1184, 1328, 0, 768, 771, 777, 806, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@70Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
- 1184, 1344, 0, 768, 771, 777, 806, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@60Hz */
-	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
- 1208, 1264, 0, 768, 768, 776, 817, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 1024x768@43Hz */
- { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 57284, 832, 864,
- 928, 1152, 0, 624, 625, 628, 667, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 832x624@75Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
- 896, 1056, 0, 600, 601, 604, 625, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@75Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
- 976, 1040, 0, 600, 637, 643, 666, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@72Hz */
- { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
- 1344, 1600, 0, 864, 865, 868, 900, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */
-};
-
-#define EDID_EST_TIMINGS 16
-#define EDID_STD_TIMINGS 8
-#define EDID_DETAILED_TIMINGS 4
-
-/**
- * add_established_modes - get est. modes from EDID and add them
- * @edid: EDID block to scan
- *
- * Each EDID block contains a bitmap of the supported "established modes" list
- * (defined above). Tease them out and add them to the global modes list.
- */
-static int add_established_modes(struct drm_connector *connector, struct edid *edid)
-{
- struct drm_device *dev = connector->dev;
- unsigned long est_bits = edid->established_timings.t1 |
- (edid->established_timings.t2 << 8) |
- ((edid->established_timings.mfg_rsvd & 0x80) << 9);
- int i, modes = 0;
-
- for (i = 0; i <= EDID_EST_TIMINGS; i++)
- if (est_bits & (1<<i)) {
- struct drm_display_mode *newmode;
- newmode = drm_mode_duplicate(dev, &edid_est_modes[i]);
- if (newmode) {
- drm_mode_probed_add(connector, newmode);
- modes++;
- }
- }
-
- return modes;
-}
-/**
- * standard_timing_level - get std. timing level (CVT/GTF/DMT)
- * @edid: EDID block to scan
- */
-static int standard_timing_level(struct edid *edid)
-{
- if (edid->revision >= 2) {
- if (edid->revision >= 4 && (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF))
- return LEVEL_CVT;
- return LEVEL_GTF;
- }
- return LEVEL_DMT;
-}
-
-/**
- * add_standard_modes - get std. modes from EDID and add them
- * @edid: EDID block to scan
- *
- * Standard modes can be calculated using the CVT standard. Grab them from
- * @edid, calculate them, and add them to the list.
- */
-static int add_standard_modes(struct drm_connector *connector, struct edid *edid)
-{
- struct drm_device *dev = connector->dev;
- int i, modes = 0;
- int timing_level;
-
- timing_level = standard_timing_level(edid);
-
- for (i = 0; i < EDID_STD_TIMINGS; i++) {
- struct std_timing *t = &edid->standard_timings[i];
- struct drm_display_mode *newmode;
-
- /* If std timings bytes are 1, 1 it's empty */
- if (t->hsize == 1 && t->vfreq_aspect == 1)
- continue;
-
- newmode = drm_mode_std(dev, &edid->standard_timings[i],
- edid->revision, timing_level);
- if (newmode) {
- drm_mode_probed_add(connector, newmode);
- modes++;
- }
- }
-
- return modes;
-}
-
-/*
- * XXX fix this for:
- * - GTF secondary curve formula
- * - EDID 1.4 range offsets
- * - CVT extended bits
- */
-static bool
-mode_in_range(struct drm_display_mode *mode, struct detailed_timing *timing)
-{
- struct detailed_data_monitor_range *range;
- int hsync, vrefresh;
-
- range = &timing->data.other_data.data.range;
-
- hsync = drm_mode_hsync(mode);
- vrefresh = drm_mode_vrefresh(mode);
-
- if (hsync < range->min_hfreq_khz || hsync > range->max_hfreq_khz)
- return false;
-
- if (vrefresh < range->min_vfreq || vrefresh > range->max_vfreq)
- return false;
-
- if (range->pixel_clock_mhz && range->pixel_clock_mhz != 0xff) {
- /* be forgiving since it's in units of 10MHz */
- int max_clock = range->pixel_clock_mhz * 10 + 9;
- max_clock *= 1000;
- if (mode->clock > max_clock)
- return false;
- }
-
- return true;
-}
-
-/*
- * XXX If drm_dmt_modes ever regrows the CVT-R modes (and it will) this will
- * need to account for them.
- */
-static int drm_gtf_modes_for_range(struct drm_connector *connector,
- struct detailed_timing *timing)
-{
- int i, modes = 0;
- struct drm_display_mode *newmode;
- struct drm_device *dev = connector->dev;
-
- for (i = 0; i < drm_num_dmt_modes; i++) {
- if (mode_in_range(drm_dmt_modes + i, timing)) {
- newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
- if (newmode) {
- drm_mode_probed_add(connector, newmode);
- modes++;
- }
- }
- }
-
- return modes;
-}
-
-static int drm_cvt_modes(struct drm_connector *connector,
- struct detailed_timing *timing)
-{
- int i, j, modes = 0;
- struct drm_display_mode *newmode;
- struct drm_device *dev = connector->dev;
- struct cvt_timing *cvt;
- const int rates[] = { 60, 85, 75, 60, 50 };
-
- for (i = 0; i < 4; i++) {
- int width, height;
- cvt = &(timing->data.other_data.data.cvt[i]);
-
- height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 8) + 1) * 2;
- switch (cvt->code[1] & 0xc0) {
- case 0x00:
- width = height * 4 / 3;
- break;
- case 0x40:
- width = height * 16 / 9;
- break;
- case 0x80:
- width = height * 16 / 10;
- break;
- case 0xc0:
- width = height * 15 / 9;
- break;
- }
-
- for (j = 1; j < 5; j++) {
- if (cvt->code[2] & (1 << j)) {
- newmode = drm_cvt_mode(dev, width, height,
- rates[j], j == 0,
- false, false);
- if (newmode) {
- drm_mode_probed_add(connector, newmode);
- modes++;
- }
- }
- }
- }
-
- return modes;
-}
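
For reference, a small userspace sketch of the CVT 3-byte decode performed above. The descriptor bytes are invented; the example keeps the extra height nibble zero so the decode matches the driver code verbatim, and the supported-rate walk is the same j = 1..4 loop over the bitmask byte:

#include <stdio.h>

int main(void)
{
	/* Hypothetical CVT 3-byte code: byte 0 (plus the top nibble of
	 * byte 1) encodes (vactive / 2) - 1, bits 7:6 of byte 1 pick the
	 * aspect ratio, byte 2 is a bitmask of supported refresh rates.
	 */
	unsigned char code[3] = { 0xef, 0x00, 0x18 };
	static const int rates[] = { 60, 85, 75, 60, 50 };

	int height = (code[0] + ((code[1] & 0xf0) << 8) + 1) * 2;
	int width, j;

	switch (code[1] & 0xc0) {
	case 0x00: width = height * 4 / 3;   break;
	case 0x40: width = height * 16 / 9;  break;
	case 0x80: width = height * 16 / 10; break;
	default:   width = height * 15 / 9;  break;
	}

	printf("%dx%d, rates:", width, height);	/* 640x480 */
	for (j = 1; j < 5; j++)
		if (code[2] & (1 << j))
			printf(" %d", rates[j]);
	printf("\n");
	return 0;
}
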
-
-static int add_detailed_modes(struct drm_connector *connector,
- struct detailed_timing *timing,
- struct edid *edid, u32 quirks, int preferred)
-{
- int i, modes = 0;
- struct detailed_non_pixel *data = &timing->data.other_data;
- int timing_level = standard_timing_level(edid);
- int gtf = (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF);
- struct drm_display_mode *newmode;
- struct drm_device *dev = connector->dev;
-
- if (timing->pixel_clock) {
- newmode = drm_mode_detailed(dev, edid, timing, quirks);
- if (!newmode)
- return 0;
-
- if (preferred)
- newmode->type |= DRM_MODE_TYPE_PREFERRED;
-
- drm_mode_probed_add(connector, newmode);
- return 1;
- }
-
- /* other timing types */
- switch (data->type) {
- case EDID_DETAIL_MONITOR_RANGE:
- if (gtf)
- modes += drm_gtf_modes_for_range(connector, timing);
- break;
- case EDID_DETAIL_STD_MODES:
- /* Six modes per detailed section */
- for (i = 0; i < 6; i++) {
- struct std_timing *std;
- struct drm_display_mode *newmode;
-
- std = &data->data.timings[i];
- newmode = drm_mode_std(dev, std, edid->revision,
- timing_level);
- if (newmode) {
- drm_mode_probed_add(connector, newmode);
- modes++;
- }
- }
- break;
- case EDID_DETAIL_CVT_3BYTE:
- modes += drm_cvt_modes(connector, timing);
- break;
- default:
- break;
- }
-
- return modes;
-}
-
-/**
- * add_detailed_info - get detailed mode info from EDID data
- * @connector: attached connector
- * @edid: EDID block to scan
- * @quirks: quirks to apply
- *
- * Some of the detailed timing sections may contain mode information. Grab
- * it and add it to the list.
- */
-static int add_detailed_info(struct drm_connector *connector,
- struct edid *edid, u32 quirks)
-{
- int i, modes = 0;
-
- for (i = 0; i < EDID_DETAILED_TIMINGS; i++) {
- struct detailed_timing *timing = &edid->detailed_timings[i];
- int preferred = (i == 0) && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);
-
- /* In 1.0, only timings are allowed */
- if (!timing->pixel_clock && edid->version == 1 &&
- edid->revision == 0)
- continue;
-
- modes += add_detailed_modes(connector, timing, edid, quirks,
- preferred);
- }
-
- return modes;
-}
-
-/**
- * add_detailed_info_eedid - get detailed mode info from additional timing
- *			      EDID block
- * @connector: attached connector
- * @edid: EDID block to scan (used only to reach the additional timing EDID block)
- * @quirks: quirks to apply
- *
- * Some of the detailed timing sections may contain mode information. Grab
- * it and add it to the list.
- */
-static int add_detailed_info_eedid(struct drm_connector *connector,
- struct edid *edid, u32 quirks)
-{
- int i, modes = 0;
- char *edid_ext = NULL;
- struct detailed_timing *timing;
- int edid_ext_num;
- int start_offset, end_offset;
- int timing_level;
-
- if (edid->version == 1 && edid->revision < 3) {
- /* If the EDID version is less than 1.3, there is no
- * extension EDID.
- */
- return 0;
- }
- if (!edid->extensions) {
- /* if there is no extension EDID, it is unnecessary to
- * parse the E-EDID to get detailed info
- */
- return 0;
- }
-
-	/* Choose real EDID extension number */
- edid_ext_num = edid->extensions > MAX_EDID_EXT_NUM ?
- MAX_EDID_EXT_NUM : edid->extensions;
-
- /* Find CEA extension */
- for (i = 0; i < edid_ext_num; i++) {
- edid_ext = (char *)edid + EDID_LENGTH * (i + 1);
- /* This block is CEA extension */
- if (edid_ext[0] == 0x02)
- break;
- }
-
- if (i == edid_ext_num) {
- /* if there is no additional timing EDID block, return */
- return 0;
- }
-
- /* Get the start offset of detailed timing block */
- start_offset = edid_ext[2];
- if (start_offset == 0) {
- /* If the start_offset is zero, it means that neither detailed
- * info nor data block exist. In such case it is also
- * unnecessary to parse the detailed timing info.
- */
- return 0;
- }
-
- timing_level = standard_timing_level(edid);
- end_offset = EDID_LENGTH;
- end_offset -= sizeof(struct detailed_timing);
- for (i = start_offset; i < end_offset;
- i += sizeof(struct detailed_timing)) {
- timing = (struct detailed_timing *)(edid_ext + i);
- modes += add_detailed_modes(connector, timing, edid, quirks, 0);
- }
-
- return modes;
-}
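
A compact illustration of the same CEA-extension walk, written as plain C over a raw EDID byte buffer. The MAX_EDID_EXT_NUM clamping is omitted and the function name cea_dtd_offset() is only a placeholder for this sketch:

#define EDID_LENGTH 128

/* Return the absolute offset of the first detailed timing descriptor in
 * the first CEA-861 extension block, or -1 if none is present.  This is
 * the same test add_detailed_info_eedid() performs on the tag byte (0x02)
 * and on the DTD-offset byte at index 2 of the extension.
 */
int cea_dtd_offset(const unsigned char *edid, int num_ext)
{
	int i;

	for (i = 0; i < num_ext; i++) {
		const unsigned char *ext = edid + EDID_LENGTH * (i + 1);

		if (ext[0] != 0x02)	/* not a CEA extension */
			continue;
		if (ext[2] == 0)	/* no DTDs and no data blocks */
			return -1;
		return EDID_LENGTH * (i + 1) + ext[2];
	}
	return -1;
}
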
-
-#define DDC_ADDR 0x50
-/**
- * Get EDID information via I2C.
- *
- * \param adapter : i2c device adaptor
- * \param buf : EDID data buffer to be filled
- * \param len : EDID data buffer length
- * \return 0 on success or -1 on failure.
- *
- * Try to fetch EDID information by calling i2c driver function.
- */
-int drm_do_probe_ddc_edid(struct i2c_adapter *adapter,
- unsigned char *buf, int len)
-{
- unsigned char start = 0x0;
- struct i2c_msg msgs[] = {
- {
- .addr = DDC_ADDR,
- .flags = 0,
- .len = 1,
- .buf = &start,
- }, {
- .addr = DDC_ADDR,
- .flags = I2C_M_RD,
- .len = len,
- .buf = buf,
- }
- };
-
- if (i2c_transfer(adapter, msgs, 2) == 2)
- return 0;
-
- return -1;
-}
-EXPORT_SYMBOL(drm_do_probe_ddc_edid);
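
The same two-message transfer (write the 0x00 segment offset, then read one block from address 0x50) can be reproduced from userspace through the i2c-dev interface. A rough sketch, assuming the adapter behind /dev/i2c-1 is the one wired to the panel's DDC lines (that device path is a placeholder):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

#define DDC_ADDR	0x50
#define EDID_LENGTH	128

int main(void)
{
	unsigned char start = 0x00, buf[EDID_LENGTH];
	struct i2c_msg msgs[2] = {
		{ .addr = DDC_ADDR, .flags = 0,        .len = 1,           .buf = &start },
		{ .addr = DDC_ADDR, .flags = I2C_M_RD, .len = sizeof(buf), .buf = buf },
	};
	struct i2c_rdwr_ioctl_data xfer = { .msgs = msgs, .nmsgs = 2 };
	int fd = open("/dev/i2c-1", O_RDWR);	/* placeholder adapter */

	if (fd < 0 || ioctl(fd, I2C_RDWR, &xfer) < 0) {
		perror("EDID read");
		return 1;
	}
	printf("EDID header: %02x %02x ... checksum byte %02x\n",
	       buf[0], buf[1], buf[EDID_LENGTH - 1]);
	close(fd);
	return 0;
}
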
-
-static int drm_ddc_read_edid(struct drm_connector *connector,
- struct i2c_adapter *adapter,
- char *buf, int len)
-{
- int i;
-
- for (i = 0; i < 4; i++) {
- if (drm_do_probe_ddc_edid(adapter, buf, len))
- return -1;
- if (edid_is_valid((struct edid *)buf))
- return 0;
- }
-
- /* repeated checksum failures; warn, but carry on */
- dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
- drm_get_connector_name(connector));
- return -1;
-}
-
-/**
- * drm_get_edid - get EDID data, if available
- * @connector: connector we're probing
- * @adapter: i2c adapter to use for DDC
- *
- * Poke the given connector's i2c channel to grab EDID data if possible.
- *
- * Return edid data or NULL if we couldn't find any.
- */
-struct edid *drm_get_edid(struct drm_connector *connector,
- struct i2c_adapter *adapter)
-{
- int ret;
- struct edid *edid;
-
- edid = kmalloc(EDID_LENGTH * (MAX_EDID_EXT_NUM + 1),
- GFP_KERNEL);
- if (edid == NULL) {
- dev_warn(&connector->dev->pdev->dev,
- "Failed to allocate EDID\n");
- goto end;
- }
-
- /* Read first EDID block */
- ret = drm_ddc_read_edid(connector, adapter,
- (unsigned char *)edid, EDID_LENGTH);
- if (ret != 0)
- goto clean_up;
-
- /* There are EDID extensions to be read */
- if (edid->extensions != 0) {
- int edid_ext_num = edid->extensions;
-
- if (edid_ext_num > MAX_EDID_EXT_NUM) {
- dev_warn(&connector->dev->pdev->dev,
- "The number of extension(%d) is "
- "over max (%d), actually read number (%d)\n",
- edid_ext_num, MAX_EDID_EXT_NUM,
- MAX_EDID_EXT_NUM);
- /* Reset EDID extension number to be read */
- edid_ext_num = MAX_EDID_EXT_NUM;
- }
- /* Read EDID including extensions too */
- ret = drm_ddc_read_edid(connector, adapter, (char *)edid,
- EDID_LENGTH * (edid_ext_num + 1));
- if (ret != 0)
- goto clean_up;
-
- }
-
- connector->display_info.raw_edid = (char *)edid;
- goto end;
-
-clean_up:
- kfree(edid);
- edid = NULL;
-end:
- return edid;
-
-}
-EXPORT_SYMBOL(drm_get_edid);
-
-#define HDMI_IDENTIFIER 0x000C03
-#define VENDOR_BLOCK 0x03
-/**
- * drm_detect_hdmi_monitor - detect whether monitor is HDMI.
- * @edid: monitor EDID information
- *
- * Parse the CEA extension according to CEA-861-B.
- * Return true if HDMI, false if not or unknown.
- */
-bool drm_detect_hdmi_monitor(struct edid *edid)
-{
- char *edid_ext = NULL;
- int i, hdmi_id, edid_ext_num;
- int start_offset, end_offset;
- bool is_hdmi = false;
-
- /* No EDID or EDID extensions */
- if (edid == NULL || edid->extensions == 0)
- goto end;
-
-	/* Choose real EDID extension number */
- edid_ext_num = edid->extensions > MAX_EDID_EXT_NUM ?
- MAX_EDID_EXT_NUM : edid->extensions;
-
- /* Find CEA extension */
- for (i = 0; i < edid_ext_num; i++) {
- edid_ext = (char *)edid + EDID_LENGTH * (i + 1);
- /* This block is CEA extension */
- if (edid_ext[0] == 0x02)
- break;
- }
-
- if (i == edid_ext_num)
- goto end;
-
- /* Data block offset in CEA extension block */
- start_offset = 4;
- end_offset = edid_ext[2];
-
- /*
- * Because HDMI identifier is in Vendor Specific Block,
- * search it from all data blocks of CEA extension.
- */
- for (i = start_offset; i < end_offset;
- /* Increased by data block len */
- i += ((edid_ext[i] & 0x1f) + 1)) {
- /* Find vendor specific block */
- if ((edid_ext[i] >> 5) == VENDOR_BLOCK) {
- hdmi_id = edid_ext[i + 1] | (edid_ext[i + 2] << 8) |
- edid_ext[i + 3] << 16;
- /* Find HDMI identifier */
- if (hdmi_id == HDMI_IDENTIFIER)
- is_hdmi = true;
- break;
- }
- }
-
-end:
- return is_hdmi;
-}
-EXPORT_SYMBOL(drm_detect_hdmi_monitor);
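
The OUI check itself is easy to exercise on a fabricated data-block region. The bytes below are invented (a single vendor-specific block carrying the HDMI OUI 00-0C-03, stored little-endian), and the loop steps by (block length + 1) exactly as above:

#include <stdio.h>

#define HDMI_IDENTIFIER 0x000C03
#define VENDOR_BLOCK    0x03

int main(void)
{
	/* Fabricated CEA extension prefix: header, then one vendor-specific
	 * data block with tag 3, length 5, payload starting with the OUI.
	 */
	unsigned char ext[] = {
		0x02, 0x03, 0x0c, 0x00,			/* CEA header, DTDs at 0x0c */
		0x65, 0x03, 0x0c, 0x00, 0x10, 0x00,	/* vendor-specific block */
		0x00, 0x00,
	};
	int start = 4, end = ext[2];
	int i, hdmi_id;

	for (i = start; i < end; i += (ext[i] & 0x1f) + 1) {
		if ((ext[i] >> 5) != VENDOR_BLOCK)
			continue;
		hdmi_id = ext[i + 1] | (ext[i + 2] << 8) | (ext[i + 3] << 16);
		printf("vendor block OUI 0x%06x -> %s\n", hdmi_id,
		       hdmi_id == HDMI_IDENTIFIER ? "HDMI" : "not HDMI");
		break;
	}
	return 0;
}
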
-
-/**
- * drm_add_edid_modes - add modes from EDID data, if available
- * @connector: connector we're probing
- * @edid: edid data
- *
- * Add the specified modes to the connector's mode list.
- *
- * Return number of modes added or 0 if we couldn't find any.
- */
-int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
-{
- int num_modes = 0;
- u32 quirks;
-
- if (edid == NULL) {
- return 0;
- }
- if (!edid_is_valid(edid)) {
- dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
- drm_get_connector_name(connector));
- return 0;
- }
-
- quirks = edid_get_quirks(edid);
-
- num_modes += add_established_modes(connector, edid);
- num_modes += add_standard_modes(connector, edid);
- num_modes += add_detailed_info(connector, edid, quirks);
- num_modes += add_detailed_info_eedid(connector, edid, quirks);
-
- if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
- edid_fixup_preferred(connector, quirks);
-
- connector->display_info.serration_vsync = (edid->input & DRM_EDID_INPUT_SERRATION_VSYNC) ? 1 : 0;
- connector->display_info.sync_on_green = (edid->input & DRM_EDID_INPUT_SYNC_ON_GREEN) ? 1 : 0;
- connector->display_info.composite_sync = (edid->input & DRM_EDID_INPUT_COMPOSITE_SYNC) ? 1 : 0;
- connector->display_info.separate_syncs = (edid->input & DRM_EDID_INPUT_SEPARATE_SYNCS) ? 1 : 0;
- connector->display_info.blank_to_black = (edid->input & DRM_EDID_INPUT_BLANK_TO_BLACK) ? 1 : 0;
- connector->display_info.video_level = (edid->input & DRM_EDID_INPUT_VIDEO_LEVEL) >> 5;
- connector->display_info.digital = (edid->input & DRM_EDID_INPUT_DIGITAL) ? 1 : 0;
- connector->display_info.width_mm = edid->width_cm * 10;
- connector->display_info.height_mm = edid->height_cm * 10;
- connector->display_info.gamma = edid->gamma;
- connector->display_info.gtf_supported = (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF) ? 1 : 0;
- connector->display_info.standard_color = (edid->features & DRM_EDID_FEATURE_STANDARD_COLOR) ? 1 : 0;
- connector->display_info.display_type = (edid->features & DRM_EDID_FEATURE_DISPLAY_TYPE) >> 3;
- connector->display_info.active_off_supported = (edid->features & DRM_EDID_FEATURE_PM_ACTIVE_OFF) ? 1 : 0;
- connector->display_info.suspend_supported = (edid->features & DRM_EDID_FEATURE_PM_SUSPEND) ? 1 : 0;
- connector->display_info.standby_supported = (edid->features & DRM_EDID_FEATURE_PM_STANDBY) ? 1 : 0;
- connector->display_info.gamma = edid->gamma;
-
- return num_modes;
-}
-EXPORT_SYMBOL(drm_add_edid_modes);
-
-/**
- * drm_add_modes_noedid - add modes for the connectors without EDID
- * @connector: connector we're probing
- * @hdisplay: the horizontal display limit
- * @vdisplay: the vertical display limit
- *
- * Add the specified modes to the connector's mode list. A mode is added
- * only when its hdisplay/vdisplay do not exceed the given limits.
- *
- * Return number of modes added or 0 if we couldn't find any.
- */
-int drm_add_modes_noedid(struct drm_connector *connector,
- int hdisplay, int vdisplay)
-{
- int i, count, num_modes = 0;
- struct drm_display_mode *mode, *ptr;
- struct drm_device *dev = connector->dev;
-
- count = sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
- if (hdisplay < 0)
- hdisplay = 0;
- if (vdisplay < 0)
- vdisplay = 0;
-
- for (i = 0; i < count; i++) {
- ptr = &drm_dmt_modes[i];
- if (hdisplay && vdisplay) {
- /*
- * Only when two are valid, they will be used to check
- * whether the mode should be added to the mode list of
- * the connector.
- */
- if (ptr->hdisplay > hdisplay ||
- ptr->vdisplay > vdisplay)
- continue;
- }
- if (drm_mode_vrefresh(ptr) > 61)
- continue;
- mode = drm_mode_duplicate(dev, ptr);
- if (mode) {
- drm_mode_probed_add(connector, mode);
- num_modes++;
- }
- }
- return num_modes;
-}
-EXPORT_SYMBOL(drm_add_modes_noedid);
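
As a usage sketch only (not taken from this patch set), a KMS connector's get_modes hook could fall back to this helper when no EDID is readable. The example_* names, connector_to_priv() and the ddc_adapter field are hypothetical:

static int example_connector_get_modes(struct drm_connector *connector)
{
	struct example_priv *priv = connector_to_priv(connector);	/* hypothetical */
	struct edid *edid = drm_get_edid(connector, priv->ddc_adapter);

	if (edid) {
		int count = drm_add_edid_modes(connector, edid);

		connector->display_info.raw_edid = NULL;
		kfree(edid);
		return count;
	}

	/* No EDID: offer DMT modes no larger than 1024x768 (and <= 61 Hz). */
	return drm_add_modes_noedid(connector, 1024, 768);
}
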
+++ /dev/null
-/*
- * Copyright (C) 2009 Francisco Jerez.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "drm_encoder_slave.h"
-
-/**
- * drm_i2c_encoder_init - Initialize an I2C slave encoder
- * @dev: DRM device.
- * @encoder: Encoder to be attached to the I2C device. You aren't
- * required to have called drm_encoder_init() before.
- * @adap: I2C adapter that will be used to communicate with
- * the device.
- * @info: Information that will be used to create the I2C device.
- * Required fields are @addr and @type.
- *
- * Create an I2C device on the specified bus (the module containing its
- * driver is transparently loaded) and attach it to the specified
- * &drm_encoder_slave. The @slave_funcs field will be initialized with
- * the hooks provided by the slave driver.
- *
- * Returns 0 on success or a negative errno on failure, in particular,
- * -ENODEV is returned when no matching driver is found.
- */
-int drm_i2c_encoder_init(struct drm_device *dev,
- struct drm_encoder_slave *encoder,
- struct i2c_adapter *adap,
- const struct i2c_board_info *info)
-{
- char modalias[sizeof(I2C_MODULE_PREFIX)
- + I2C_NAME_SIZE];
- struct module *module = NULL;
- struct i2c_client *client;
- struct drm_i2c_encoder_driver *encoder_drv;
- int err = 0;
-
- snprintf(modalias, sizeof(modalias),
- "%s%s", I2C_MODULE_PREFIX, info->type);
- request_module(modalias);
-
- client = i2c_new_device(adap, info);
- if (!client) {
- err = -ENOMEM;
- goto fail;
- }
-
- if (!client->driver) {
- err = -ENODEV;
- goto fail_unregister;
- }
-
- module = client->driver->driver.owner;
- if (!try_module_get(module)) {
- err = -ENODEV;
- goto fail_unregister;
- }
-
- encoder->bus_priv = client;
-
- encoder_drv = to_drm_i2c_encoder_driver(client->driver);
-
- err = encoder_drv->encoder_init(client, dev, encoder);
- if (err)
- goto fail_unregister;
-
- return 0;
-
-fail_unregister:
- i2c_unregister_device(client);
- module_put(module);
-fail:
- return err;
-}
-EXPORT_SYMBOL(drm_i2c_encoder_init);
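
A driver-side usage sketch (not part of this patch): registering a slave encoder chip through this helper during output setup. The chip name, I2C address and surrounding function are hypothetical:

static int example_setup_tv_encoder(struct drm_device *dev,
				    struct drm_encoder_slave *slave,
				    struct i2c_adapter *ddc_bus)
{
	/* "ch7006" at 0x75 is only an illustrative board info entry. */
	struct i2c_board_info info = {
		I2C_BOARD_INFO("ch7006", 0x75),
	};

	/* Loads the matching i2c module if needed, binds the client and
	 * lets the slave driver's encoder_init() fill slave->slave_funcs.
	 */
	return drm_i2c_encoder_init(dev, slave, ddc_bus, &info);
}
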
-
-/**
- * drm_i2c_encoder_destroy - Unregister the I2C device backing an encoder
- * @drm_encoder: Encoder to be unregistered.
- *
- * This should be called from the @destroy method of an I2C slave
- * encoder driver once I2C access is no longer needed.
- */
-void drm_i2c_encoder_destroy(struct drm_encoder *drm_encoder)
-{
- struct drm_encoder_slave *encoder = to_encoder_slave(drm_encoder);
- struct i2c_client *client = drm_i2c_encoder_get_client(drm_encoder);
- struct module *module = client->driver->driver.owner;
-
- i2c_unregister_device(client);
- encoder->bus_priv = NULL;
-
- module_put(module);
-}
-EXPORT_SYMBOL(drm_i2c_encoder_destroy);
+++ /dev/null
-/**
- * \file drm_fops.c
- * File operations for DRM
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Daryll Strauss <daryll@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "drmP.h"
-#include <linux/poll.h>
-#include <linux/smp_lock.h>
-
-static int drm_open_helper(struct inode *inode, struct file *filp,
- struct drm_device * dev);
-
-static int drm_setup(struct drm_device * dev)
-{
- int i;
- int ret;
-
- if (dev->driver->firstopen) {
- ret = dev->driver->firstopen(dev);
- if (ret != 0)
- return ret;
- }
-
- atomic_set(&dev->ioctl_count, 0);
- atomic_set(&dev->vma_count, 0);
-
- if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
- !drm_core_check_feature(dev, DRIVER_MODESET)) {
- dev->buf_use = 0;
- atomic_set(&dev->buf_alloc, 0);
-
- i = drm_dma_setup(dev);
- if (i < 0)
- return i;
- }
-
- for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
- atomic_set(&dev->counts[i], 0);
-
- dev->sigdata.lock = NULL;
-
- dev->queue_count = 0;
- dev->queue_reserved = 0;
- dev->queue_slots = 0;
- dev->queuelist = NULL;
- dev->context_flag = 0;
- dev->interrupt_flag = 0;
- dev->dma_flag = 0;
- dev->last_context = 0;
- dev->last_switch = 0;
- dev->last_checked = 0;
- init_waitqueue_head(&dev->context_wait);
- dev->if_version = 0;
-
- dev->ctx_start = 0;
- dev->lck_start = 0;
-
- dev->buf_async = NULL;
- init_waitqueue_head(&dev->buf_readers);
- init_waitqueue_head(&dev->buf_writers);
-
- DRM_DEBUG("\n");
-
- /*
- * The kernel's context could be created here, but is now created
- * in drm_dma_enqueue. This is more resource-efficient for
- * hardware that does not do DMA, but may mean that
- * drm_select_queue fails between the time the interrupt is
- * initialized and the time the queues are initialized.
- */
-
- return 0;
-}
-
-/**
- * Open file.
- *
- * \param inode device inode
- * \param filp file pointer.
- * \return zero on success or a negative number on failure.
- *
- * Searches the DRM device with the same minor number, calls open_helper(), and
- * increments the device open count. If the open count was previously zero,
- * i.e., this is the first time the device has been opened, setup() is then called.
- */
-int drm_open(struct inode *inode, struct file *filp)
-{
- struct drm_device *dev = NULL;
- int minor_id = iminor(inode);
- struct drm_minor *minor;
- int retcode = 0;
-
- minor = idr_find(&drm_minors_idr, minor_id);
- if (!minor)
- return -ENODEV;
-
- if (!(dev = minor->dev))
- return -ENODEV;
-
- retcode = drm_open_helper(inode, filp, dev);
- if (!retcode) {
- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
- spin_lock(&dev->count_lock);
- if (!dev->open_count++) {
- spin_unlock(&dev->count_lock);
- retcode = drm_setup(dev);
- goto out;
- }
- spin_unlock(&dev->count_lock);
- }
-out:
- mutex_lock(&dev->struct_mutex);
- if (minor->type == DRM_MINOR_LEGACY) {
- BUG_ON((dev->dev_mapping != NULL) &&
- (dev->dev_mapping != inode->i_mapping));
- if (dev->dev_mapping == NULL)
- dev->dev_mapping = inode->i_mapping;
- }
- mutex_unlock(&dev->struct_mutex);
-
- return retcode;
-}
-EXPORT_SYMBOL(drm_open);
-
-/**
- * File \c open operation.
- *
- * \param inode device inode.
- * \param filp file pointer.
- *
- * Puts the dev->fops corresponding to the device minor number into
- * \p filp, calls the \c open method, and restores the file operations.
- */
-int drm_stub_open(struct inode *inode, struct file *filp)
-{
- struct drm_device *dev = NULL;
- struct drm_minor *minor;
- int minor_id = iminor(inode);
- int err = -ENODEV;
- const struct file_operations *old_fops;
-
- DRM_DEBUG("\n");
-
- /* BKL pushdown: note that nothing else serializes idr_find() */
- lock_kernel();
- minor = idr_find(&drm_minors_idr, minor_id);
- if (!minor)
- goto out;
-
- if (!(dev = minor->dev))
- goto out;
-
- old_fops = filp->f_op;
- filp->f_op = fops_get(&dev->driver->fops);
- if (filp->f_op == NULL) {
- filp->f_op = old_fops;
- goto out;
- }
- if (filp->f_op->open && (err = filp->f_op->open(inode, filp))) {
- fops_put(filp->f_op);
- filp->f_op = fops_get(old_fops);
- }
- fops_put(old_fops);
-
-out:
- unlock_kernel();
- return err;
-}
-
-/**
- * Check whether DRI will run on this CPU.
- *
- * \return non-zero if the DRI will run on this CPU, or zero otherwise.
- */
-static int drm_cpu_valid(void)
-{
-#if defined(__i386__)
- if (boot_cpu_data.x86 == 3)
- return 0; /* No cmpxchg on a 386 */
-#endif
-#if defined(__sparc__) && !defined(__sparc_v9__)
- return 0; /* No cmpxchg before v9 sparc. */
-#endif
- return 1;
-}
-
-/**
- * Called whenever a process opens /dev/drm.
- *
- * \param inode device inode.
- * \param filp file pointer.
- * \param dev device.
- * \return zero on success or a negative number on failure.
- *
- * Creates and initializes a drm_file structure for the file private data in \p
- * filp and adds it to the doubly linked list in \p dev.
- */
-static int drm_open_helper(struct inode *inode, struct file *filp,
- struct drm_device * dev)
-{
- int minor_id = iminor(inode);
- struct drm_file *priv;
- int ret;
-
- if (filp->f_flags & O_EXCL)
- return -EBUSY; /* No exclusive opens */
- if (!drm_cpu_valid())
- return -EINVAL;
-
- DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id);
-
- priv = kmalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- memset(priv, 0, sizeof(*priv));
- filp->private_data = priv;
- priv->filp = filp;
- priv->uid = current_euid();
- priv->pid = task_pid_nr(current);
- priv->minor = idr_find(&drm_minors_idr, minor_id);
- priv->ioctl_count = 0;
- /* for compatibility root is always authenticated */
- priv->authenticated = capable(CAP_SYS_ADMIN);
- priv->lock_count = 0;
-
- INIT_LIST_HEAD(&priv->lhead);
- INIT_LIST_HEAD(&priv->fbs);
- INIT_LIST_HEAD(&priv->event_list);
- init_waitqueue_head(&priv->event_wait);
- priv->event_space = 4096; /* set aside 4k for event buffer */
-
- if (dev->driver->driver_features & DRIVER_GEM)
- drm_gem_open(dev, priv);
-
- if (dev->driver->open) {
- ret = dev->driver->open(dev, priv);
- if (ret < 0)
- goto out_free;
- }
-
-
- /* if there is no current master make this fd it */
- mutex_lock(&dev->struct_mutex);
- if (!priv->minor->master) {
- /* create a new master */
- priv->minor->master = drm_master_create(priv->minor);
- if (!priv->minor->master) {
- mutex_unlock(&dev->struct_mutex);
- ret = -ENOMEM;
- goto out_free;
- }
-
- priv->is_master = 1;
- /* take another reference for the copy in the local file priv */
- priv->master = drm_master_get(priv->minor->master);
-
- priv->authenticated = 1;
-
- mutex_unlock(&dev->struct_mutex);
- if (dev->driver->master_create) {
- ret = dev->driver->master_create(dev, priv->master);
- if (ret) {
- mutex_lock(&dev->struct_mutex);
- /* drop both references if this fails */
- drm_master_put(&priv->minor->master);
- drm_master_put(&priv->master);
- mutex_unlock(&dev->struct_mutex);
- goto out_free;
- }
- }
- mutex_lock(&dev->struct_mutex);
- if (dev->driver->master_set) {
- ret = dev->driver->master_set(dev, priv, true);
- if (ret) {
- /* drop both references if this fails */
- drm_master_put(&priv->minor->master);
- drm_master_put(&priv->master);
- mutex_unlock(&dev->struct_mutex);
- goto out_free;
- }
- }
- mutex_unlock(&dev->struct_mutex);
- } else {
- /* get a reference to the master */
- priv->master = drm_master_get(priv->minor->master);
- mutex_unlock(&dev->struct_mutex);
- }
-
- mutex_lock(&dev->struct_mutex);
- list_add(&priv->lhead, &dev->filelist);
- mutex_unlock(&dev->struct_mutex);
-
-#ifdef __alpha__
- /*
- * Default the hose
- */
- if (!dev->hose) {
- struct pci_dev *pci_dev;
- pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
- if (pci_dev) {
- dev->hose = pci_dev->sysdata;
- pci_dev_put(pci_dev);
- }
- if (!dev->hose) {
- struct pci_bus *b = pci_bus_b(pci_root_buses.next);
- if (b)
- dev->hose = b->sysdata;
- }
- }
-#endif
-
- return 0;
- out_free:
- kfree(priv);
- filp->private_data = NULL;
- return ret;
-}
-
-/** No-op. */
-int drm_fasync(int fd, struct file *filp, int on)
-{
- struct drm_file *priv = filp->private_data;
- struct drm_device *dev = priv->minor->dev;
-
- DRM_DEBUG("fd = %d, device = 0x%lx\n", fd,
- (long)old_encode_dev(priv->minor->device));
- return fasync_helper(fd, filp, on, &dev->buf_async);
-}
-EXPORT_SYMBOL(drm_fasync);
-
-/*
- * Reclaim locked buffers; note that this may be a bad idea if the current
- * context doesn't have the hw lock...
- */
-static void drm_reclaim_locked_buffers(struct drm_device *dev, struct file *f)
-{
- struct drm_file *file_priv = f->private_data;
-
- if (drm_i_have_hw_lock(dev, file_priv)) {
- dev->driver->reclaim_buffers_locked(dev, file_priv);
- } else {
- unsigned long _end = jiffies + 3 * DRM_HZ;
- int locked = 0;
-
- drm_idlelock_take(&file_priv->master->lock);
-
- /*
- * Wait for a while.
- */
- do {
- spin_lock_bh(&file_priv->master->lock.spinlock);
- locked = file_priv->master->lock.idle_has_lock;
- spin_unlock_bh(&file_priv->master->lock.spinlock);
- if (locked)
- break;
- schedule();
- } while (!time_after_eq(jiffies, _end));
-
- if (!locked) {
- DRM_ERROR("reclaim_buffers_locked() deadlock. Please rework this\n"
- "\tdriver to use reclaim_buffers_idlelocked() instead.\n"
- "\tI will go on reclaiming the buffers anyway.\n");
- }
-
- dev->driver->reclaim_buffers_locked(dev, file_priv);
- drm_idlelock_release(&file_priv->master->lock);
- }
-}
-
-static void drm_master_release(struct drm_device *dev, struct file *filp)
-{
- struct drm_file *file_priv = filp->private_data;
-
- if (dev->driver->reclaim_buffers_locked &&
- file_priv->master->lock.hw_lock)
- drm_reclaim_locked_buffers(dev, filp);
-
- if (dev->driver->reclaim_buffers_idlelocked &&
- file_priv->master->lock.hw_lock) {
- drm_idlelock_take(&file_priv->master->lock);
- dev->driver->reclaim_buffers_idlelocked(dev, file_priv);
- drm_idlelock_release(&file_priv->master->lock);
- }
-
-
- if (drm_i_have_hw_lock(dev, file_priv)) {
- DRM_DEBUG("File %p released, freeing lock for context %d\n",
- filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
- drm_lock_free(&file_priv->master->lock,
- _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
- }
-
- if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
- !dev->driver->reclaim_buffers_locked) {
- dev->driver->reclaim_buffers(dev, file_priv);
- }
-}
-
-static void drm_events_release(struct drm_file *file_priv)
-{
- struct drm_device *dev = file_priv->minor->dev;
- struct drm_pending_event *e, *et;
- struct drm_pending_vblank_event *v, *vt;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->event_lock, flags);
-
- /* Remove pending flips */
- list_for_each_entry_safe(v, vt, &dev->vblank_event_list, base.link)
- if (v->base.file_priv == file_priv) {
- list_del(&v->base.link);
- drm_vblank_put(dev, v->pipe);
- v->base.destroy(&v->base);
- }
-
- /* Remove unconsumed events */
- list_for_each_entry_safe(e, et, &file_priv->event_list, link)
- e->destroy(e);
-
- spin_unlock_irqrestore(&dev->event_lock, flags);
-}
-
-/**
- * Release file.
- *
- * \param inode device inode
- * \param file_priv DRM file private.
- * \return zero on success or a negative number on failure.
- *
- * If the hardware lock is held then free it, and take it again for the kernel
- * context since it's necessary to reclaim buffers. Unlink the file private
- * data from its list and free it. Decreases the open count and if it reaches
- * zero calls drm_lastclose().
- */
-int drm_release(struct inode *inode, struct file *filp)
-{
- struct drm_file *file_priv = filp->private_data;
- struct drm_device *dev = file_priv->minor->dev;
- int retcode = 0;
-
- lock_kernel();
-
- DRM_DEBUG("open_count = %d\n", dev->open_count);
-
- if (dev->driver->preclose)
- dev->driver->preclose(dev, file_priv);
-
- /* ========================================================
- * Begin inline drm_release
- */
-
- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
- task_pid_nr(current),
- (long)old_encode_dev(file_priv->minor->device),
- dev->open_count);
-
- /* if the master has gone away we can't do anything with the lock */
- if (file_priv->minor->master)
- drm_master_release(dev, filp);
-
- drm_events_release(file_priv);
-
- if (dev->driver->driver_features & DRIVER_GEM)
- drm_gem_release(dev, file_priv);
-
- if (dev->driver->driver_features & DRIVER_MODESET)
- drm_fb_release(file_priv);
-
- mutex_lock(&dev->ctxlist_mutex);
- if (!list_empty(&dev->ctxlist)) {
- struct drm_ctx_list *pos, *n;
-
- list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
- if (pos->tag == file_priv &&
- pos->handle != DRM_KERNEL_CONTEXT) {
- if (dev->driver->context_dtor)
- dev->driver->context_dtor(dev,
- pos->handle);
-
- drm_ctxbitmap_free(dev, pos->handle);
-
- list_del(&pos->head);
- kfree(pos);
- --dev->ctx_count;
- }
- }
- }
- mutex_unlock(&dev->ctxlist_mutex);
-
- mutex_lock(&dev->struct_mutex);
-
- if (file_priv->is_master) {
- struct drm_master *master = file_priv->master;
- struct drm_file *temp;
- list_for_each_entry(temp, &dev->filelist, lhead) {
- if ((temp->master == file_priv->master) &&
- (temp != file_priv))
- temp->authenticated = 0;
- }
-
-		/*
-		 * Since the master is going away, the possibility
-		 * of taking the lock goes with it.
-		 */
-
- if (master->lock.hw_lock) {
- if (dev->sigdata.lock == master->lock.hw_lock)
- dev->sigdata.lock = NULL;
- master->lock.hw_lock = NULL;
- master->lock.file_priv = NULL;
- wake_up_interruptible_all(&master->lock.lock_queue);
- }
-
- if (file_priv->minor->master == file_priv->master) {
-			/* drop the reference held by the minor */
- if (dev->driver->master_drop)
- dev->driver->master_drop(dev, file_priv, true);
- drm_master_put(&file_priv->minor->master);
- }
- }
-
-	/* drop the reference held by the file priv */
- drm_master_put(&file_priv->master);
- file_priv->is_master = 0;
- list_del(&file_priv->lhead);
- mutex_unlock(&dev->struct_mutex);
-
- if (dev->driver->postclose)
- dev->driver->postclose(dev, file_priv);
- kfree(file_priv);
-
- /* ========================================================
- * End inline drm_release
- */
-
- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
- spin_lock(&dev->count_lock);
- if (!--dev->open_count) {
- if (atomic_read(&dev->ioctl_count)) {
- DRM_ERROR("Device busy: %d\n",
- atomic_read(&dev->ioctl_count));
- spin_unlock(&dev->count_lock);
- unlock_kernel();
- return -EBUSY;
- }
- spin_unlock(&dev->count_lock);
- unlock_kernel();
- return drm_lastclose(dev);
- }
- spin_unlock(&dev->count_lock);
-
- unlock_kernel();
-
- return retcode;
-}
-EXPORT_SYMBOL(drm_release);
-
-static bool
-drm_dequeue_event(struct drm_file *file_priv,
- size_t total, size_t max, struct drm_pending_event **out)
-{
- struct drm_device *dev = file_priv->minor->dev;
- struct drm_pending_event *e;
- unsigned long flags;
- bool ret = false;
-
- spin_lock_irqsave(&dev->event_lock, flags);
-
- *out = NULL;
- if (list_empty(&file_priv->event_list))
- goto out;
- e = list_first_entry(&file_priv->event_list,
- struct drm_pending_event, link);
- if (e->event->length + total > max)
- goto out;
-
- file_priv->event_space += e->event->length;
- list_del(&e->link);
- *out = e;
- ret = true;
-
-out:
- spin_unlock_irqrestore(&dev->event_lock, flags);
- return ret;
-}
-
-ssize_t drm_read(struct file *filp, char __user *buffer,
- size_t count, loff_t *offset)
-{
- struct drm_file *file_priv = filp->private_data;
- struct drm_pending_event *e;
- size_t total;
- ssize_t ret;
-
- ret = wait_event_interruptible(file_priv->event_wait,
- !list_empty(&file_priv->event_list));
- if (ret < 0)
- return ret;
-
- total = 0;
- while (drm_dequeue_event(file_priv, total, count, &e)) {
- if (copy_to_user(buffer + total,
- e->event, e->event->length)) {
- total = -EFAULT;
- break;
- }
-
- total += e->event->length;
- e->destroy(e);
- }
-
- return total;
-}
-EXPORT_SYMBOL(drm_read);
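
From userspace, the event stream produced here is consumed by read()ing the DRM file descriptor once poll() reports POLLIN (in practice an event such as a vblank or page flip must have been requested first). A rough sketch; the local struct merely mirrors the generic event header (type, length) used by drm_read(), and /dev/dri/card0 is a placeholder path:

#include <fcntl.h>
#include <poll.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Generic DRM event header: every event starts with a type and a total
 * length, which is how the loop steps through the packed buffer.
 */
struct generic_drm_event {
	uint32_t type;
	uint32_t length;
};

int main(void)
{
	char buf[1024];
	struct pollfd pfd;
	ssize_t n, off;
	int fd = open("/dev/dri/card0", O_RDWR);	/* placeholder node */

	if (fd < 0)
		return 1;

	pfd.fd = fd;
	pfd.events = POLLIN;

	/* Block until a previously requested event arrives. */
	if (poll(&pfd, 1, -1) <= 0)
		return 1;

	n = read(fd, buf, sizeof(buf));
	for (off = 0; off + (ssize_t)sizeof(struct generic_drm_event) <= n; ) {
		struct generic_drm_event e;

		memcpy(&e, buf + off, sizeof(e));
		if (e.length < sizeof(e))
			break;
		printf("event type %u, length %u\n", e.type, e.length);
		off += e.length;
	}
	close(fd);
	return 0;
}
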
-
-unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)
-{
- struct drm_file *file_priv = filp->private_data;
- unsigned int mask = 0;
-
- poll_wait(filp, &file_priv->event_wait, wait);
-
- if (!list_empty(&file_priv->event_list))
- mask |= POLLIN | POLLRDNORM;
-
- return mask;
-}
-EXPORT_SYMBOL(drm_poll);
+++ /dev/null
-/*
- * Copyright © 2008 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- * Eric Anholt <eric@anholt.net>
- *
- */
-
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/uaccess.h>
-#include <linux/fs.h>
-#include <linux/file.h>
-#include <linux/module.h>
-#include <linux/mman.h>
-#include <linux/pagemap.h>
-#include "drmP.h"
-
-/** @file drm_gem.c
- *
- * This file provides some of the base ioctls and library routines for
- * the graphics memory manager implemented by each device driver.
- *
- * Because various devices have different requirements in terms of
- * synchronization and migration strategies, implementing that is left up to
- * the driver, and all that the general API provides should be generic --
- * allocating objects, reading/writing data with the cpu, freeing objects.
- * Even there, platform-dependent optimizations for reading/writing data with
- * the CPU mean we'll likely hook those out to driver-specific calls. However,
- * the DRI2 implementation wants to have at least allocate/mmap be generic.
- *
- * The goal was to have swap-backed object allocation managed through
- * struct file. However, file descriptors as handles to a struct file have
- * two major failings:
- * - Process limits prevent more than 1024 or so being used at a time by
- * default.
- * - Inability to allocate high fds will aggravate the X Server's select()
- * handling, and likely that of many GL client applications as well.
- *
- * This led to a plan of using our own integer IDs (called handles, following
- * DRM terminology) to mimic fds, and implement the fd syscalls we need as
- * ioctls. The objects themselves will still include the struct file so
- * that we can transition to fds if the required kernel infrastructure shows
- * up at a later date, and as our interface with shmfs for memory allocation.
- */
-
-/*
- * We make up offsets for buffer objects so we can recognize them at
- * mmap time.
- */
-#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
-#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
-
-/**
- * Initialize the GEM device fields
- */
-
-int
-drm_gem_init(struct drm_device *dev)
-{
- struct drm_gem_mm *mm;
-
- spin_lock_init(&dev->object_name_lock);
- idr_init(&dev->object_name_idr);
- atomic_set(&dev->object_count, 0);
- atomic_set(&dev->object_memory, 0);
- atomic_set(&dev->pin_count, 0);
- atomic_set(&dev->pin_memory, 0);
- atomic_set(&dev->gtt_count, 0);
- atomic_set(&dev->gtt_memory, 0);
-
- mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
- if (!mm) {
- DRM_ERROR("out of memory\n");
- return -ENOMEM;
- }
-
- dev->mm_private = mm;
-
- if (drm_ht_create(&mm->offset_hash, 19)) {
- kfree(mm);
- return -ENOMEM;
- }
-
- if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
- DRM_FILE_PAGE_OFFSET_SIZE)) {
- drm_ht_remove(&mm->offset_hash);
- kfree(mm);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-void
-drm_gem_destroy(struct drm_device *dev)
-{
- struct drm_gem_mm *mm = dev->mm_private;
-
- drm_mm_takedown(&mm->offset_manager);
- drm_ht_remove(&mm->offset_hash);
- kfree(mm);
- dev->mm_private = NULL;
-}
-
-/**
- * Allocate a GEM object of the specified size with shmfs backing store
- */
-struct drm_gem_object *
-drm_gem_object_alloc(struct drm_device *dev, size_t size)
-{
- struct drm_gem_object *obj;
-
- BUG_ON((size & (PAGE_SIZE - 1)) != 0);
-
- obj = kzalloc(sizeof(*obj), GFP_KERNEL);
- if (!obj)
- goto free;
-
- obj->dev = dev;
- obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
- if (IS_ERR(obj->filp))
- goto free;
-
- /* Basically we want to disable the OOM killer and handle ENOMEM
- * ourselves by sacrificing pages from cached buffers.
- * XXX shmem_file_[gs]et_gfp_mask()
- */
- mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping,
- GFP_HIGHUSER |
- __GFP_COLD |
- __GFP_FS |
- __GFP_RECLAIMABLE |
- __GFP_NORETRY |
- __GFP_NOWARN |
- __GFP_NOMEMALLOC);
-
- kref_init(&obj->refcount);
- kref_init(&obj->handlecount);
- obj->size = size;
- if (dev->driver->gem_init_object != NULL &&
- dev->driver->gem_init_object(obj) != 0) {
- goto fput;
- }
- atomic_inc(&dev->object_count);
- atomic_add(obj->size, &dev->object_memory);
- return obj;
-fput:
- fput(obj->filp);
-free:
- kfree(obj);
- return NULL;
-}
-EXPORT_SYMBOL(drm_gem_object_alloc);
-
-/**
- * Removes the mapping from handle to filp for this object.
- */
-static int
-drm_gem_handle_delete(struct drm_file *filp, u32 handle)
-{
- struct drm_device *dev;
- struct drm_gem_object *obj;
-
- /* This is gross. The idr system doesn't let us try a delete and
- * return an error code. It just spews if you fail at deleting.
- * So, we have to grab a lock around finding the object and then
- * doing the delete on it and dropping the refcount, or the user
- * could race us to double-decrement the refcount and cause a
- * use-after-free later. Given the frequency of our handle lookups,
- * we may want to use ida for number allocation and a hash table
- * for the pointers, anyway.
- */
- spin_lock(&filp->table_lock);
-
- /* Check if we currently have a reference on the object */
- obj = idr_find(&filp->object_idr, handle);
- if (obj == NULL) {
- spin_unlock(&filp->table_lock);
- return -EINVAL;
- }
- dev = obj->dev;
-
- /* Release reference and decrement refcount. */
- idr_remove(&filp->object_idr, handle);
- spin_unlock(&filp->table_lock);
-
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_handle_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
-/**
- * Create a handle for this object. This adds a handle reference
- * to the object, which includes a regular reference count. Callers
- * will likely want to drop their reference to the object afterwards.
- */
-int
-drm_gem_handle_create(struct drm_file *file_priv,
- struct drm_gem_object *obj,
- u32 *handlep)
-{
- int ret;
-
- /*
- * Get the user-visible handle using idr.
- */
-again:
- /* ensure there is space available to allocate a handle */
- if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
- return -ENOMEM;
-
- /* do the allocation under our spinlock */
- spin_lock(&file_priv->table_lock);
- ret = idr_get_new_above(&file_priv->object_idr, obj, 1, (int *)handlep);
- spin_unlock(&file_priv->table_lock);
- if (ret == -EAGAIN)
- goto again;
-
- if (ret != 0)
- return ret;
-
- drm_gem_object_handle_reference(obj);
- return 0;
-}
-EXPORT_SYMBOL(drm_gem_handle_create);
-
-/** Returns a reference to the object named by the handle. */
-struct drm_gem_object *
-drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
- u32 handle)
-{
- struct drm_gem_object *obj;
-
- spin_lock(&filp->table_lock);
-
- /* Check if we currently have a reference on the object */
- obj = idr_find(&filp->object_idr, handle);
- if (obj == NULL) {
- spin_unlock(&filp->table_lock);
- return NULL;
- }
-
- drm_gem_object_reference(obj);
-
- spin_unlock(&filp->table_lock);
-
- return obj;
-}
-EXPORT_SYMBOL(drm_gem_object_lookup);
-
-/**
- * Releases the handle to an mm object.
- */
-int
-drm_gem_close_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_gem_close *args = data;
- int ret;
-
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
- ret = drm_gem_handle_delete(file_priv, args->handle);
-
- return ret;
-}
-
-/**
- * Create a global name for an object, returning the name.
- *
- * Note that the name does not hold a reference; when the object
- * is freed, the name goes away.
- */
-int
-drm_gem_flink_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_gem_flink *args = data;
- struct drm_gem_object *obj;
- int ret;
-
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL)
- return -EBADF;
-
-again:
- if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) {
- ret = -ENOMEM;
- goto err;
- }
-
- spin_lock(&dev->object_name_lock);
- if (!obj->name) {
- ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
- &obj->name);
- args->name = (uint64_t) obj->name;
- spin_unlock(&dev->object_name_lock);
-
- if (ret == -EAGAIN)
- goto again;
-
- if (ret != 0)
- goto err;
-
- /* Allocate a reference for the name table. */
- drm_gem_object_reference(obj);
- } else {
- args->name = (uint64_t) obj->name;
- spin_unlock(&dev->object_name_lock);
- ret = 0;
- }
-
-err:
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- return ret;
-}
-
-/**
- * Open an object using the global name, returning a handle and the size.
- *
- * This handle (of course) holds a reference to the object, so the object
- * will not go away until the handle is deleted.
- */
-int
-drm_gem_open_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_gem_open *args = data;
- struct drm_gem_object *obj;
- int ret;
- u32 handle;
-
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
- spin_lock(&dev->object_name_lock);
- obj = idr_find(&dev->object_name_idr, (int) args->name);
- if (obj)
- drm_gem_object_reference(obj);
- spin_unlock(&dev->object_name_lock);
- if (!obj)
- return -ENOENT;
-
- ret = drm_gem_handle_create(file_priv, obj, &handle);
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- if (ret)
- return ret;
-
- args->handle = handle;
- args->size = obj->size;
-
- return 0;
-}
-
-/**
- * Called at device open time, sets up the structure for handling refcounting
- * of mm objects.
- */
-void
-drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
-{
- idr_init(&file_private->object_idr);
- spin_lock_init(&file_private->table_lock);
-}
-
-/**
- * Called at device close to release the file's
- * handle references on objects.
- */
-static int
-drm_gem_object_release_handle(int id, void *ptr, void *data)
-{
- struct drm_gem_object *obj = ptr;
-
- drm_gem_object_handle_unreference(obj);
-
- return 0;
-}
-
-/**
- * Called at close time when the filp is going away.
- *
- * Releases any remaining references on objects by this filp.
- */
-void
-drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
-{
- mutex_lock(&dev->struct_mutex);
- idr_for_each(&file_private->object_idr,
- &drm_gem_object_release_handle, NULL);
-
- idr_destroy(&file_private->object_idr);
- mutex_unlock(&dev->struct_mutex);
-}
-
-/**
- * Called after the last reference to the object has been lost.
- *
- * Frees the object
- */
-void
-drm_gem_object_free(struct kref *kref)
-{
- struct drm_gem_object *obj = (struct drm_gem_object *) kref;
- struct drm_device *dev = obj->dev;
-
- BUG_ON(!mutex_is_locked(&dev->struct_mutex));
-
- if (dev->driver->gem_free_object != NULL)
- dev->driver->gem_free_object(obj);
-
- fput(obj->filp);
- atomic_dec(&dev->object_count);
- atomic_sub(obj->size, &dev->object_memory);
- kfree(obj);
-}
-EXPORT_SYMBOL(drm_gem_object_free);
-
-/**
- * Called after the last handle to the object has been closed
- *
- * Removes any name for the object. Note that this must be
- * called before drm_gem_object_free or we'll be touching
- * freed memory
- */
-void
-drm_gem_object_handle_free(struct kref *kref)
-{
- struct drm_gem_object *obj = container_of(kref,
- struct drm_gem_object,
- handlecount);
- struct drm_device *dev = obj->dev;
-
- /* Remove any name for this object */
- spin_lock(&dev->object_name_lock);
- if (obj->name) {
- idr_remove(&dev->object_name_idr, obj->name);
- obj->name = 0;
- spin_unlock(&dev->object_name_lock);
- /*
- * The object name held a reference to this object, drop
- * that now.
- */
- drm_gem_object_unreference(obj);
- } else
- spin_unlock(&dev->object_name_lock);
-
-}
-EXPORT_SYMBOL(drm_gem_object_handle_free);
-
-void drm_gem_vm_open(struct vm_area_struct *vma)
-{
- struct drm_gem_object *obj = vma->vm_private_data;
-
- drm_gem_object_reference(obj);
-}
-EXPORT_SYMBOL(drm_gem_vm_open);
-
-void drm_gem_vm_close(struct vm_area_struct *vma)
-{
- struct drm_gem_object *obj = vma->vm_private_data;
- struct drm_device *dev = obj->dev;
-
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
-}
-EXPORT_SYMBOL(drm_gem_vm_close);
-
-
-/**
- * drm_gem_mmap - memory map routine for GEM objects
- * @filp: DRM file pointer
- * @vma: VMA for the area to be mapped
- *
- * If a driver supports GEM object mapping, mmap calls on the DRM file
- * descriptor will end up here.
- *
- * If we find the object based on the offset passed in (vma->vm_pgoff will
- * contain the fake offset we created when the GTT map ioctl was called on
- * the object), we set up the driver fault handler so that any accesses
- * to the object can be trapped, to perform migration, GTT binding, surface
- * register allocation, or performance monitoring.
- */
-int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- struct drm_file *priv = filp->private_data;
- struct drm_device *dev = priv->minor->dev;
- struct drm_gem_mm *mm = dev->mm_private;
- struct drm_local_map *map = NULL;
- struct drm_gem_object *obj;
- struct drm_hash_item *hash;
- int ret = 0;
-
- mutex_lock(&dev->struct_mutex);
-
- if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
- mutex_unlock(&dev->struct_mutex);
- return drm_mmap(filp, vma);
- }
-
- map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
- if (!map ||
- ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
- ret = -EPERM;
- goto out_unlock;
- }
-
- /* Check for valid size. */
- if (map->size < vma->vm_end - vma->vm_start) {
- ret = -EINVAL;
- goto out_unlock;
- }
-
- obj = map->handle;
- if (!obj->dev->driver->gem_vm_ops) {
- ret = -EINVAL;
- goto out_unlock;
- }
-
- vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
- vma->vm_ops = obj->dev->driver->gem_vm_ops;
- vma->vm_private_data = map->handle;
- vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
-
- /* Take a ref for this mapping of the object, so that the fault
- * handler can dereference the mmap offset's pointer to the object.
- * This reference is cleaned up by the corresponding vm_close
- * (which should happen whether the vma was created by this call, or
- * by a vm_open due to mremap or partial unmap or whatever).
- */
- drm_gem_object_reference(obj);
-
- vma->vm_file = filp; /* Needed for drm_vm_open() */
- drm_vm_open_locked(vma);
-
-out_unlock:
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
-}
-EXPORT_SYMBOL(drm_gem_mmap);
+++ /dev/null
-/**************************************************************************
- *
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- *
- **************************************************************************/
-/*
- * Simple open hash tab implementation.
- *
- * Authors:
- * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- */
-
-#include "drmP.h"
-#include "drm_hashtab.h"
-#include <linux/hash.h>
-
-int drm_ht_create(struct drm_open_hash *ht, unsigned int order)
-{
- unsigned int i;
-
- ht->size = 1 << order;
- ht->order = order;
- ht->fill = 0;
- ht->table = NULL;
- ht->use_vmalloc = ((ht->size * sizeof(*ht->table)) > PAGE_SIZE);
- if (!ht->use_vmalloc) {
- ht->table = kcalloc(ht->size, sizeof(*ht->table), GFP_KERNEL);
- }
- if (!ht->table) {
- ht->use_vmalloc = 1;
- ht->table = vmalloc(ht->size*sizeof(*ht->table));
- }
- if (!ht->table) {
- DRM_ERROR("Out of memory for hash table\n");
- return -ENOMEM;
- }
- for (i=0; i< ht->size; ++i) {
- INIT_HLIST_HEAD(&ht->table[i]);
- }
- return 0;
-}
-EXPORT_SYMBOL(drm_ht_create);
-
-void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
-{
- struct drm_hash_item *entry;
- struct hlist_head *h_list;
- struct hlist_node *list;
- unsigned int hashed_key;
- int count = 0;
-
- hashed_key = hash_long(key, ht->order);
- DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key);
- h_list = &ht->table[hashed_key];
- hlist_for_each(list, h_list) {
- entry = hlist_entry(list, struct drm_hash_item, head);
- DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key);
- }
-}
-
-static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
- unsigned long key)
-{
- struct drm_hash_item *entry;
- struct hlist_head *h_list;
- struct hlist_node *list;
- unsigned int hashed_key;
-
- hashed_key = hash_long(key, ht->order);
- h_list = &ht->table[hashed_key];
- hlist_for_each(list, h_list) {
- entry = hlist_entry(list, struct drm_hash_item, head);
- if (entry->key == key)
- return list;
- if (entry->key > key)
- break;
- }
- return NULL;
-}
-
-
-int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
-{
- struct drm_hash_item *entry;
- struct hlist_head *h_list;
- struct hlist_node *list, *parent;
- unsigned int hashed_key;
- unsigned long key = item->key;
-
- hashed_key = hash_long(key, ht->order);
- h_list = &ht->table[hashed_key];
- parent = NULL;
- hlist_for_each(list, h_list) {
- entry = hlist_entry(list, struct drm_hash_item, head);
- if (entry->key == key)
- return -EINVAL;
- if (entry->key > key)
- break;
- parent = list;
- }
- if (parent) {
- hlist_add_after(parent, &item->head);
- } else {
- hlist_add_head(&item->head, h_list);
- }
- return 0;
-}
-EXPORT_SYMBOL(drm_ht_insert_item);
-
-/*
- * Just insert an item and return any "bits" bit key that hasn't been
- * used before.
- */
-int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
- unsigned long seed, int bits, int shift,
- unsigned long add)
-{
- int ret;
- unsigned long mask = (1 << bits) - 1;
- unsigned long first, unshifted_key;
-
- unshifted_key = hash_long(seed, bits);
- first = unshifted_key;
- do {
- item->key = (unshifted_key << shift) + add;
- ret = drm_ht_insert_item(ht, item);
- if (ret)
- unshifted_key = (unshifted_key + 1) & mask;
- } while(ret && (unshifted_key != first));
-
- if (ret) {
- DRM_ERROR("Available key bit space exhausted\n");
- return -EINVAL;
- }
- return 0;
-}
-EXPORT_SYMBOL(drm_ht_just_insert_please);
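A usage illustration for drm_ht_just_insert_please() (hypothetical caller, not taken from this patch): a structure embedding a struct drm_hash_item can request a fresh, previously unused key hashed from its own pointer; on success the chosen key is left in item->key.

#include "drmP.h"
#include "drm_hashtab.h"

struct token_node {
        struct drm_hash_item hash;
        void *payload;
};

/* Pick an unused 24-bit token for 'node', seeded from its address.
 * The 24/0/0 choices (key bits, shift, base offset) are illustrative. */
static int token_register(struct drm_open_hash *ht, struct token_node *node)
{
        return drm_ht_just_insert_please(ht, &node->hash,
                                         (unsigned long)node, 24, 0, 0);
}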
-
-int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
- struct drm_hash_item **item)
-{
- struct hlist_node *list;
-
- list = drm_ht_find_key(ht, key);
- if (!list)
- return -EINVAL;
-
- *item = hlist_entry(list, struct drm_hash_item, head);
- return 0;
-}
-EXPORT_SYMBOL(drm_ht_find_item);
-
-int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
-{
- struct hlist_node *list;
-
- list = drm_ht_find_key(ht, key);
- if (list) {
- hlist_del_init(list);
- ht->fill--;
- return 0;
- }
- return -EINVAL;
-}
-
-int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
-{
- hlist_del_init(&item->head);
- ht->fill--;
- return 0;
-}
-EXPORT_SYMBOL(drm_ht_remove_item);
-
-void drm_ht_remove(struct drm_open_hash *ht)
-{
- if (ht->table) {
- if (ht->use_vmalloc)
- vfree(ht->table);
- else
- kfree(ht->table);
- ht->table = NULL;
- }
-}
-EXPORT_SYMBOL(drm_ht_remove);
+++ /dev/null
-/**
- * \file drm_info.c
- * DRM info file implementations
- *
- * \author Ben Gamari <bgamari@gmail.com>
- */
-
-/*
- * Created: Sun Dec 21 13:09:50 2008 by bgamari@gmail.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * Copyright 2008 Ben Gamari <bgamari@gmail.com>
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/seq_file.h>
-#include "drmP.h"
-
-/**
- * Called when "/proc/dri/.../name" is read.
- *
- * Prints the device name together with the bus id if available.
- */
-int drm_name_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_minor *minor = node->minor;
- struct drm_device *dev = minor->dev;
- struct drm_master *master = minor->master;
-
- if (!master)
- return 0;
-
- if (master->unique) {
- seq_printf(m, "%s %s %s\n",
- dev->driver->pci_driver.name,
- pci_name(dev->pdev), master->unique);
- } else {
- seq_printf(m, "%s %s\n", dev->driver->pci_driver.name,
- pci_name(dev->pdev));
- }
-
- return 0;
-}
-
-/**
- * Called when "/proc/dri/.../vm" is read.
- *
- * Prints information about all mappings in drm_device::maplist.
- */
-int drm_vm_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_local_map *map;
- struct drm_map_list *r_list;
-
- /* Hardcoded from _DRM_FRAME_BUFFER,
-    _DRM_REGISTERS, _DRM_SHM, _DRM_AGP,
- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
- const char *type;
- int i;
-
- mutex_lock(&dev->struct_mutex);
- seq_printf(m, "slot offset size type flags address mtrr\n\n");
- i = 0;
- list_for_each_entry(r_list, &dev->maplist, head) {
- map = r_list->map;
- if (!map)
- continue;
- if (map->type < 0 || map->type > 5)
- type = "??";
- else
- type = types[map->type];
-
- seq_printf(m, "%4d 0x%016llx 0x%08lx %4.4s 0x%02x 0x%08lx ",
- i,
- (unsigned long long)map->offset,
- map->size, type, map->flags,
- (unsigned long) r_list->user_token);
- if (map->mtrr < 0)
- seq_printf(m, "none\n");
- else
- seq_printf(m, "%4d\n", map->mtrr);
- i++;
- }
- mutex_unlock(&dev->struct_mutex);
- return 0;
-}
-
-/**
- * Called when "/proc/dri/.../queues" is read.
- */
-int drm_queues_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- int i;
- struct drm_queue *q;
-
- mutex_lock(&dev->struct_mutex);
- seq_printf(m, " ctx/flags use fin"
- " blk/rw/rwf wait flushed queued"
- " locks\n\n");
- for (i = 0; i < dev->queue_count; i++) {
- q = dev->queuelist[i];
- atomic_inc(&q->use_count);
- seq_printf(m, "%5d/0x%03x %5d %5d"
- " %5d/%c%c/%c%c%c %5Zd\n",
- i,
- q->flags,
- atomic_read(&q->use_count),
- atomic_read(&q->finalization),
- atomic_read(&q->block_count),
- atomic_read(&q->block_read) ? 'r' : '-',
- atomic_read(&q->block_write) ? 'w' : '-',
- waitqueue_active(&q->read_queue) ? 'r' : '-',
- waitqueue_active(&q->write_queue) ? 'w' : '-',
- waitqueue_active(&q->flush_queue) ? 'f' : '-',
- DRM_BUFCOUNT(&q->waitlist));
- atomic_dec(&q->use_count);
- }
- mutex_unlock(&dev->struct_mutex);
- return 0;
-}
-
-/**
- * Called when "/proc/dri/.../bufs" is read.
- */
-int drm_bufs_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_device_dma *dma;
- int i, seg_pages;
-
- mutex_lock(&dev->struct_mutex);
- dma = dev->dma;
- if (!dma) {
- mutex_unlock(&dev->struct_mutex);
- return 0;
- }
-
- seq_printf(m, " o size count free segs pages kB\n\n");
- for (i = 0; i <= DRM_MAX_ORDER; i++) {
- if (dma->bufs[i].buf_count) {
- seg_pages = dma->bufs[i].seg_count * (1 << dma->bufs[i].page_order);
- seq_printf(m, "%2d %8d %5d %5d %5d %5d %5ld\n",
- i,
- dma->bufs[i].buf_size,
- dma->bufs[i].buf_count,
- atomic_read(&dma->bufs[i].freelist.count),
- dma->bufs[i].seg_count,
- seg_pages,
- seg_pages * PAGE_SIZE / 1024);
- }
- }
- seq_printf(m, "\n");
- for (i = 0; i < dma->buf_count; i++) {
- if (i && !(i % 32))
- seq_printf(m, "\n");
- seq_printf(m, " %d", dma->buflist[i]->list);
- }
- seq_printf(m, "\n");
- mutex_unlock(&dev->struct_mutex);
- return 0;
-}
-
-/**
- * Called when "/proc/dri/.../vblank" is read.
- */
-int drm_vblank_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- int crtc;
-
- mutex_lock(&dev->struct_mutex);
- for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
- seq_printf(m, "CRTC %d enable: %d\n",
- crtc, atomic_read(&dev->vblank_refcount[crtc]));
- seq_printf(m, "CRTC %d counter: %d\n",
- crtc, drm_vblank_count(dev, crtc));
- seq_printf(m, "CRTC %d last wait: %d\n",
- crtc, dev->last_vblank_wait[crtc]);
- seq_printf(m, "CRTC %d in modeset: %d\n",
- crtc, dev->vblank_inmodeset[crtc]);
- }
- mutex_unlock(&dev->struct_mutex);
- return 0;
-}
-
-/**
- * Called when "/proc/dri/.../clients" is read.
- *
- */
-int drm_clients_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_file *priv;
-
- mutex_lock(&dev->struct_mutex);
- seq_printf(m, "a dev pid uid magic ioctls\n\n");
- list_for_each_entry(priv, &dev->filelist, lhead) {
- seq_printf(m, "%c %3d %5d %5d %10u %10lu\n",
- priv->authenticated ? 'y' : 'n',
- priv->minor->index,
- priv->pid,
- priv->uid, priv->magic, priv->ioctl_count);
- }
- mutex_unlock(&dev->struct_mutex);
- return 0;
-}
-
-
-int drm_gem_one_name_info(int id, void *ptr, void *data)
-{
- struct drm_gem_object *obj = ptr;
- struct seq_file *m = data;
-
- seq_printf(m, "name %d size %zd\n", obj->name, obj->size);
-
- seq_printf(m, "%6d %8zd %7d %8d\n",
- obj->name, obj->size,
- atomic_read(&obj->handlecount.refcount),
- atomic_read(&obj->refcount.refcount));
- return 0;
-}
-
-int drm_gem_name_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
-
- seq_printf(m, " name size handles refcount\n");
- idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, m);
- return 0;
-}
-
-int drm_gem_object_info(struct seq_file *m, void* data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
-
- seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
- seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
- seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
- seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
- seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
- seq_printf(m, "%d gtt total\n", dev->gtt_total);
- return 0;
-}
-
-#if DRM_DEBUG_CODE
-
-int drm_vma_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_vma_entry *pt;
- struct vm_area_struct *vma;
-#if defined(__i386__)
- unsigned int pgprot;
-#endif
-
- mutex_lock(&dev->struct_mutex);
- seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
- atomic_read(&dev->vma_count),
- high_memory, (u64)virt_to_phys(high_memory));
-
- list_for_each_entry(pt, &dev->vmalist, head) {
- vma = pt->vma;
- if (!vma)
- continue;
- seq_printf(m,
- "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
- pt->pid, vma->vm_start, vma->vm_end,
- vma->vm_flags & VM_READ ? 'r' : '-',
- vma->vm_flags & VM_WRITE ? 'w' : '-',
- vma->vm_flags & VM_EXEC ? 'x' : '-',
- vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
- vma->vm_flags & VM_LOCKED ? 'l' : '-',
- vma->vm_flags & VM_IO ? 'i' : '-',
- vma->vm_pgoff);
-
-#if defined(__i386__)
- pgprot = pgprot_val(vma->vm_page_prot);
- seq_printf(m, " %c%c%c%c%c%c%c%c%c",
- pgprot & _PAGE_PRESENT ? 'p' : '-',
- pgprot & _PAGE_RW ? 'w' : 'r',
- pgprot & _PAGE_USER ? 'u' : 's',
- pgprot & _PAGE_PWT ? 't' : 'b',
- pgprot & _PAGE_PCD ? 'u' : 'c',
- pgprot & _PAGE_ACCESSED ? 'a' : '-',
- pgprot & _PAGE_DIRTY ? 'd' : '-',
- pgprot & _PAGE_PSE ? 'm' : 'k',
- pgprot & _PAGE_GLOBAL ? 'g' : 'l');
-#endif
- seq_printf(m, "\n");
- }
- mutex_unlock(&dev->struct_mutex);
- return 0;
-}
-
-#endif
-
+++ /dev/null
-/**
- * \file drm_ioctl.c
- * IOCTL processing for DRM
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Fri Jan 8 09:01:26 1999 by faith@valinux.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "drmP.h"
-#include "drm_core.h"
-
-#include "linux/pci.h"
-
-/**
- * Get the bus id.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_unique structure.
- * \return zero on success or a negative number on failure.
- *
- * Copies the bus id from drm_device::unique into user space.
- */
-int drm_getunique(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_unique *u = data;
- struct drm_master *master = file_priv->master;
-
- if (u->unique_len >= master->unique_len) {
- if (copy_to_user(u->unique, master->unique, master->unique_len))
- return -EFAULT;
- }
- u->unique_len = master->unique_len;
-
- return 0;
-}
-
-/**
- * Set the bus id.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_unique structure.
- * \return zero on success or a negative number on failure.
- *
- * Copies the bus id from userspace into drm_device::unique, and verifies that
- * it matches the device this DRM is attached to (EINVAL otherwise). Deprecated
- * in interface version 1.1 and will return EBUSY when setversion has requested
- * version 1.1 or greater.
- */
-int drm_setunique(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_unique *u = data;
- struct drm_master *master = file_priv->master;
- int domain, bus, slot, func, ret;
-
- if (master->unique_len || master->unique)
- return -EBUSY;
-
- if (!u->unique_len || u->unique_len > 1024)
- return -EINVAL;
-
- master->unique_len = u->unique_len;
- master->unique_size = u->unique_len + 1;
- master->unique = kmalloc(master->unique_size, GFP_KERNEL);
- if (!master->unique)
- return -ENOMEM;
- if (copy_from_user(master->unique, u->unique, master->unique_len))
- return -EFAULT;
-
- master->unique[master->unique_len] = '\0';
-
- dev->devname = kmalloc(strlen(dev->driver->pci_driver.name) +
- strlen(master->unique) + 2, GFP_KERNEL);
- if (!dev->devname)
- return -ENOMEM;
-
- sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name,
- master->unique);
-
- /* Return error if the busid submitted doesn't match the device's actual
- * busid.
- */
- ret = sscanf(master->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
- if (ret != 3)
- return -EINVAL;
- domain = bus >> 8;
- bus &= 0xff;
-
- if ((domain != drm_get_pci_domain(dev)) ||
- (bus != dev->pdev->bus->number) ||
- (slot != PCI_SLOT(dev->pdev->devfn)) ||
- (func != PCI_FUNC(dev->pdev->devfn)))
- return -EINVAL;
-
- return 0;
-}
-
-static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
-{
- struct drm_master *master = file_priv->master;
- int len;
-
- if (master->unique != NULL)
- return -EBUSY;
-
- master->unique_len = 40;
- master->unique_size = master->unique_len;
- master->unique = kmalloc(master->unique_size, GFP_KERNEL);
- if (master->unique == NULL)
- return -ENOMEM;
-
- len = snprintf(master->unique, master->unique_len, "pci:%04x:%02x:%02x.%d",
- drm_get_pci_domain(dev),
- dev->pdev->bus->number,
- PCI_SLOT(dev->pdev->devfn),
- PCI_FUNC(dev->pdev->devfn));
- if (len >= master->unique_len)
- DRM_ERROR("buffer overflow");
- else
- master->unique_len = len;
-
- dev->devname = kmalloc(strlen(dev->driver->pci_driver.name) +
- master->unique_len + 2, GFP_KERNEL);
- if (dev->devname == NULL)
- return -ENOMEM;
-
- sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name,
- master->unique);
-
- return 0;
-}
-
-/**
- * Get mapping information.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_map structure.
- *
- * \return zero on success or a negative number on failure.
- *
- * Searches for the mapping with the specified offset and copies its information
- * into userspace
- */
-int drm_getmap(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_map *map = data;
- struct drm_map_list *r_list = NULL;
- struct list_head *list;
- int idx;
- int i;
-
- idx = map->offset;
-
- mutex_lock(&dev->struct_mutex);
- if (idx < 0) {
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
- }
-
- i = 0;
- list_for_each(list, &dev->maplist) {
- if (i == idx) {
- r_list = list_entry(list, struct drm_map_list, head);
- break;
- }
- i++;
- }
- if (!r_list || !r_list->map) {
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
- }
-
- map->offset = r_list->map->offset;
- map->size = r_list->map->size;
- map->type = r_list->map->type;
- map->flags = r_list->map->flags;
- map->handle = (void *)(unsigned long) r_list->user_token;
- map->mtrr = r_list->map->mtrr;
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
-/**
- * Get client information.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_client structure.
- *
- * \return zero on success or a negative number on failure.
- *
- * Searches for the client with the specified index and copies its information
- * into userspace
- */
-int drm_getclient(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_client *client = data;
- struct drm_file *pt;
- int idx;
- int i;
-
- idx = client->idx;
- mutex_lock(&dev->struct_mutex);
-
- i = 0;
- list_for_each_entry(pt, &dev->filelist, lhead) {
- if (i++ >= idx) {
- client->auth = pt->authenticated;
- client->pid = pt->pid;
- client->uid = pt->uid;
- client->magic = pt->magic;
- client->iocs = pt->ioctl_count;
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
- }
- }
- mutex_unlock(&dev->struct_mutex);
-
- return -EINVAL;
-}
-
-/**
- * Get statistics information.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_stats structure.
- *
- * \return zero on success or a negative number on failure.
- */
-int drm_getstats(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_stats *stats = data;
- int i;
-
- memset(stats, 0, sizeof(*stats));
-
- mutex_lock(&dev->struct_mutex);
-
- for (i = 0; i < dev->counters; i++) {
- if (dev->types[i] == _DRM_STAT_LOCK)
- stats->data[i].value =
- (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
- else
- stats->data[i].value = atomic_read(&dev->counts[i]);
- stats->data[i].type = dev->types[i];
- }
-
- stats->count = dev->counters;
-
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
-/**
- * Setversion ioctl.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_lock structure.
- * \return zero on success or negative number on failure.
- *
- * Sets the requested interface version
- */
-int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- struct drm_set_version *sv = data;
- int if_version, retcode = 0;
-
- if (sv->drm_di_major != -1) {
- if (sv->drm_di_major != DRM_IF_MAJOR ||
- sv->drm_di_minor < 0 || sv->drm_di_minor > DRM_IF_MINOR) {
- retcode = -EINVAL;
- goto done;
- }
- if_version = DRM_IF_VERSION(sv->drm_di_major,
- sv->drm_di_minor);
- dev->if_version = max(if_version, dev->if_version);
- if (sv->drm_di_minor >= 1) {
- /*
- * Version 1.1 includes tying of DRM to specific device
- */
- drm_set_busid(dev, file_priv);
- }
- }
-
- if (sv->drm_dd_major != -1) {
- if (sv->drm_dd_major != dev->driver->major ||
- sv->drm_dd_minor < 0 || sv->drm_dd_minor >
- dev->driver->minor) {
- retcode = -EINVAL;
- goto done;
- }
-
- if (dev->driver->set_version)
- dev->driver->set_version(dev, sv);
- }
-
-done:
- sv->drm_di_major = DRM_IF_MAJOR;
- sv->drm_di_minor = DRM_IF_MINOR;
- sv->drm_dd_major = dev->driver->major;
- sv->drm_dd_minor = dev->driver->minor;
-
- return retcode;
-}
-
-/** No-op ioctl. */
-int drm_noop(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- DRM_DEBUG("\n");
- return 0;
-}
+++ /dev/null
-/**
- * \file drm_irq.c
- * IRQ support
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
- *
- * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "drmP.h"
-
-#include <linux/interrupt.h> /* For task queue support */
-
-#include <linux/vgaarb.h>
-/**
- * Get interrupt from bus id.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_irq_busid structure.
- * \return zero on success or a negative number on failure.
- *
- * Finds the PCI device with the specified bus id and gets its IRQ number.
- * This IOCTL is deprecated, and will now return EINVAL for any busid not equal
- * to that of the device this DRM instance is attached to.
- */
-int drm_irq_by_busid(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_irq_busid *p = data;
-
- if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
- return -EINVAL;
-
- if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
- (p->busnum & 0xff) != dev->pdev->bus->number ||
- p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
- return -EINVAL;
-
- p->irq = dev->pdev->irq;
-
- DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
- p->irq);
-
- return 0;
-}
-
-static void vblank_disable_fn(unsigned long arg)
-{
- struct drm_device *dev = (struct drm_device *)arg;
- unsigned long irqflags;
- int i;
-
- if (!dev->vblank_disable_allowed)
- return;
-
- for (i = 0; i < dev->num_crtcs; i++) {
- spin_lock_irqsave(&dev->vbl_lock, irqflags);
- if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
- dev->vblank_enabled[i]) {
- DRM_DEBUG("disabling vblank on crtc %d\n", i);
- dev->last_vblank[i] =
- dev->driver->get_vblank_counter(dev, i);
- dev->driver->disable_vblank(dev, i);
- dev->vblank_enabled[i] = 0;
- }
- spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
- }
-}
-
-void drm_vblank_cleanup(struct drm_device *dev)
-{
- /* Bail if the driver didn't call drm_vblank_init() */
- if (dev->num_crtcs == 0)
- return;
-
- del_timer(&dev->vblank_disable_timer);
-
- vblank_disable_fn((unsigned long)dev);
-
- kfree(dev->vbl_queue);
- kfree(dev->_vblank_count);
- kfree(dev->vblank_refcount);
- kfree(dev->vblank_enabled);
- kfree(dev->last_vblank);
- kfree(dev->last_vblank_wait);
- kfree(dev->vblank_inmodeset);
-
- dev->num_crtcs = 0;
-}
-
-int drm_vblank_init(struct drm_device *dev, int num_crtcs)
-{
- int i, ret = -ENOMEM;
-
- setup_timer(&dev->vblank_disable_timer, vblank_disable_fn,
- (unsigned long)dev);
- spin_lock_init(&dev->vbl_lock);
- dev->num_crtcs = num_crtcs;
-
- dev->vbl_queue = kmalloc(sizeof(wait_queue_head_t) * num_crtcs,
- GFP_KERNEL);
- if (!dev->vbl_queue)
- goto err;
-
- dev->_vblank_count = kmalloc(sizeof(atomic_t) * num_crtcs, GFP_KERNEL);
- if (!dev->_vblank_count)
- goto err;
-
- dev->vblank_refcount = kmalloc(sizeof(atomic_t) * num_crtcs,
- GFP_KERNEL);
- if (!dev->vblank_refcount)
- goto err;
-
- dev->vblank_enabled = kcalloc(num_crtcs, sizeof(int), GFP_KERNEL);
- if (!dev->vblank_enabled)
- goto err;
-
- dev->last_vblank = kcalloc(num_crtcs, sizeof(u32), GFP_KERNEL);
- if (!dev->last_vblank)
- goto err;
-
- dev->last_vblank_wait = kcalloc(num_crtcs, sizeof(u32), GFP_KERNEL);
- if (!dev->last_vblank_wait)
- goto err;
-
- dev->vblank_inmodeset = kcalloc(num_crtcs, sizeof(int), GFP_KERNEL);
- if (!dev->vblank_inmodeset)
- goto err;
-
- /* Zero per-crtc vblank stuff */
- for (i = 0; i < num_crtcs; i++) {
- init_waitqueue_head(&dev->vbl_queue[i]);
- atomic_set(&dev->_vblank_count[i], 0);
- atomic_set(&dev->vblank_refcount[i], 0);
- }
-
- dev->vblank_disable_allowed = 0;
-
- return 0;
-
-err:
- drm_vblank_cleanup(dev);
- return ret;
-}
-EXPORT_SYMBOL(drm_vblank_init);
-
-static void drm_irq_vgaarb_nokms(void *cookie, bool state)
-{
- struct drm_device *dev = cookie;
-
- if (dev->driver->vgaarb_irq) {
- dev->driver->vgaarb_irq(dev, state);
- return;
- }
-
- if (!dev->irq_enabled)
- return;
-
- if (state)
- dev->driver->irq_uninstall(dev);
- else {
- dev->driver->irq_preinstall(dev);
- dev->driver->irq_postinstall(dev);
- }
-}
-
-/**
- * Install IRQ handler.
- *
- * \param dev DRM device.
- *
- * Initializes the IRQ related data. Installs the handler, calling the driver
- * \c drm_driver_irq_preinstall() and \c drm_driver_irq_postinstall() functions
- * before and after the installation.
- */
-int drm_irq_install(struct drm_device *dev)
-{
- int ret = 0;
- unsigned long sh_flags = 0;
- char *irqname;
-
- if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
- return -EINVAL;
-
- if (dev->pdev->irq == 0)
- return -EINVAL;
-
- mutex_lock(&dev->struct_mutex);
-
- /* Driver must have been initialized */
- if (!dev->dev_private) {
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
- }
-
- if (dev->irq_enabled) {
- mutex_unlock(&dev->struct_mutex);
- return -EBUSY;
- }
- dev->irq_enabled = 1;
- mutex_unlock(&dev->struct_mutex);
-
- DRM_DEBUG("irq=%d\n", dev->pdev->irq);
-
- /* Before installing handler */
- dev->driver->irq_preinstall(dev);
-
- /* Install handler */
- if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
- sh_flags = IRQF_SHARED;
-
- if (dev->devname)
- irqname = dev->devname;
- else
- irqname = dev->driver->name;
-
- ret = request_irq(drm_dev_to_irq(dev), dev->driver->irq_handler,
- sh_flags, irqname, dev);
-
- if (ret < 0) {
- mutex_lock(&dev->struct_mutex);
- dev->irq_enabled = 0;
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
-
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- vga_client_register(dev->pdev, (void *)dev, drm_irq_vgaarb_nokms, NULL);
-
- /* After installing handler */
- ret = dev->driver->irq_postinstall(dev);
- if (ret < 0) {
- mutex_lock(&dev->struct_mutex);
- dev->irq_enabled = 0;
- mutex_unlock(&dev->struct_mutex);
- }
-
- return ret;
-}
-EXPORT_SYMBOL(drm_irq_install);
-
-/**
- * Uninstall the IRQ handler.
- *
- * \param dev DRM device.
- *
- * Calls the driver's \c drm_driver_irq_uninstall() function, and stops the irq.
- */
-int drm_irq_uninstall(struct drm_device * dev)
-{
- unsigned long irqflags;
- int irq_enabled, i;
-
- if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
- return -EINVAL;
-
- mutex_lock(&dev->struct_mutex);
- irq_enabled = dev->irq_enabled;
- dev->irq_enabled = 0;
- mutex_unlock(&dev->struct_mutex);
-
- /*
- * Wake up any waiters so they don't hang.
- */
- spin_lock_irqsave(&dev->vbl_lock, irqflags);
- for (i = 0; i < dev->num_crtcs; i++) {
- DRM_WAKEUP(&dev->vbl_queue[i]);
- dev->vblank_enabled[i] = 0;
- dev->last_vblank[i] = dev->driver->get_vblank_counter(dev, i);
- }
- spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
-
- if (!irq_enabled)
- return -EINVAL;
-
- DRM_DEBUG("irq=%d\n", dev->pdev->irq);
-
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- vga_client_register(dev->pdev, NULL, NULL, NULL);
-
- dev->driver->irq_uninstall(dev);
-
- free_irq(dev->pdev->irq, dev);
-
- return 0;
-}
-EXPORT_SYMBOL(drm_irq_uninstall);
-
-/**
- * IRQ control ioctl.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_control structure.
- * \return zero on success or a negative number on failure.
- *
- * Calls irq_install() or irq_uninstall() according to \p arg.
- */
-int drm_control(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_control *ctl = data;
-
- /* If we don't have an IRQ, fall back for compatibility reasons - this used to be a separate function in drm_dma.h */
-
-
- switch (ctl->func) {
- case DRM_INST_HANDLER:
- if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
- return 0;
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return 0;
- if (dev->if_version < DRM_IF_VERSION(1, 2) &&
- ctl->irq != dev->pdev->irq)
- return -EINVAL;
- return drm_irq_install(dev);
- case DRM_UNINST_HANDLER:
- if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
- return 0;
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return 0;
- return drm_irq_uninstall(dev);
- default:
- return -EINVAL;
- }
-}
-
-/**
- * drm_vblank_count - retrieve "cooked" vblank counter value
- * @dev: DRM device
- * @crtc: which counter to retrieve
- *
- * Fetches the "cooked" vblank count value that represents the number of
- * vblank events since the system was booted, including lost events due to
- * modesetting activity.
- */
-u32 drm_vblank_count(struct drm_device *dev, int crtc)
-{
- return atomic_read(&dev->_vblank_count[crtc]);
-}
-EXPORT_SYMBOL(drm_vblank_count);
-
-/**
- * drm_update_vblank_count - update the master vblank counter
- * @dev: DRM device
- * @crtc: counter to update
- *
- * Call back into the driver to update the appropriate vblank counter
- * (specified by @crtc). Deal with wraparound, if it occurred, and
- * update the last read value so we can deal with wraparound on the next
- * call if necessary.
- *
- * Only necessary when going from off->on, to account for frames we
- * didn't get an interrupt for.
- *
- * Note: caller must hold dev->vbl_lock since this reads & writes
- * device vblank fields.
- */
-static void drm_update_vblank_count(struct drm_device *dev, int crtc)
-{
- u32 cur_vblank, diff;
-
- /*
- * Interrupts were disabled prior to this call, so deal with counter
- * wrap if needed.
- * NOTE! It's possible we lost a full dev->max_vblank_count events
- * here if the register is small or we had vblank interrupts off for
- * a long time.
- */
- cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
- diff = cur_vblank - dev->last_vblank[crtc];
- if (cur_vblank < dev->last_vblank[crtc]) {
- diff += dev->max_vblank_count;
-
- DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
- crtc, dev->last_vblank[crtc], cur_vblank, diff);
- }
-
- DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
- crtc, diff);
-
- atomic_add(diff, &dev->_vblank_count[crtc]);
-}
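A worked example of the wrap handling above, using made-up values and assuming the driver set max_vblank_count to the hardware counter's wrap modulus (here 1 << 24):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t max_vblank = 1u << 24;         /* counter runs 0x000000..0xFFFFFF */
        uint32_t last = 0x00FFFFF0;             /* value cached when vblanks were disabled */
        uint32_t cur = 0x00000010;              /* counter has wrapped since then  */
        uint32_t diff = cur - last;             /* 0xFF000020 after unsigned wraparound */

        if (cur < last)
                diff += max_vblank;             /* 0x00000020 */
        printf("missed %u vblanks\n", diff);    /* prints "missed 32 vblanks" */
        return 0;
}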
-
-/**
- * drm_vblank_get - get a reference count on vblank events
- * @dev: DRM device
- * @crtc: which CRTC to own
- *
- * Acquire a reference count on vblank events to avoid having them disabled
- * while in use.
- *
- * RETURNS
- * Zero on success, nonzero on failure.
- */
-int drm_vblank_get(struct drm_device *dev, int crtc)
-{
- unsigned long irqflags;
- int ret = 0;
-
- spin_lock_irqsave(&dev->vbl_lock, irqflags);
- /* Going from 0->1 means we have to enable interrupts again */
- if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) {
- if (!dev->vblank_enabled[crtc]) {
- ret = dev->driver->enable_vblank(dev, crtc);
- DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
- if (ret)
- atomic_dec(&dev->vblank_refcount[crtc]);
- else {
- dev->vblank_enabled[crtc] = 1;
- drm_update_vblank_count(dev, crtc);
- }
- }
- } else {
- if (!dev->vblank_enabled[crtc]) {
- atomic_dec(&dev->vblank_refcount[crtc]);
- ret = -EINVAL;
- }
- }
- spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
-
- return ret;
-}
-EXPORT_SYMBOL(drm_vblank_get);
-
-/**
- * drm_vblank_put - give up ownership of vblank events
- * @dev: DRM device
- * @crtc: which counter to give up
- *
- * Release ownership of a given vblank counter, turning off interrupts
- * if possible.
- */
-void drm_vblank_put(struct drm_device *dev, int crtc)
-{
- BUG_ON (atomic_read (&dev->vblank_refcount[crtc]) == 0);
-
- /* Last user schedules interrupt disable */
- if (atomic_dec_and_test(&dev->vblank_refcount[crtc]))
- mod_timer(&dev->vblank_disable_timer, jiffies + 5*DRM_HZ);
-}
-EXPORT_SYMBOL(drm_vblank_put);
-
-void drm_vblank_off(struct drm_device *dev, int crtc)
-{
- unsigned long irqflags;
-
- spin_lock_irqsave(&dev->vbl_lock, irqflags);
- DRM_WAKEUP(&dev->vbl_queue[crtc]);
- dev->vblank_enabled[crtc] = 0;
- dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
- spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
-}
-EXPORT_SYMBOL(drm_vblank_off);
-
-/**
- * drm_vblank_pre_modeset - account for vblanks across mode sets
- * @dev: DRM device
- * @crtc: CRTC in question
- *
- * Account for vblank events across mode setting events, which will likely
- * reset the hardware frame counter.
- */
-void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
-{
- /*
- * To avoid all the problems that might happen if interrupts
- * were enabled/disabled around or between these calls, we just
- * have the kernel take a reference on the CRTC (just once though
- * to avoid corrupting the count if multiple, mismatched calls occur),
- * so that interrupts remain enabled in the interim.
- */
- if (!dev->vblank_inmodeset[crtc]) {
- dev->vblank_inmodeset[crtc] = 0x1;
- if (drm_vblank_get(dev, crtc) == 0)
- dev->vblank_inmodeset[crtc] |= 0x2;
- }
-}
-EXPORT_SYMBOL(drm_vblank_pre_modeset);
-
-void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
-{
- unsigned long irqflags;
-
- if (dev->vblank_inmodeset[crtc]) {
- spin_lock_irqsave(&dev->vbl_lock, irqflags);
- dev->vblank_disable_allowed = 1;
- spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
-
- if (dev->vblank_inmodeset[crtc] & 0x2)
- drm_vblank_put(dev, crtc);
-
- dev->vblank_inmodeset[crtc] = 0;
- }
-}
-EXPORT_SYMBOL(drm_vblank_post_modeset);
-
-/**
- * drm_modeset_ctl - handle vblank event counter changes across mode switch
- * @DRM_IOCTL_ARGS: standard ioctl arguments
- *
- * Applications should call the %_DRM_PRE_MODESET and %_DRM_POST_MODESET
- * ioctls around modesetting so that any lost vblank events are accounted for.
- *
- * Generally the counter will reset across mode sets. If interrupts are
- * enabled around this call, we don't have to do anything since the counter
- * will have already been incremented.
- */
-int drm_modeset_ctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_modeset_ctl *modeset = data;
- int crtc, ret = 0;
-
- /* If drm_vblank_init() hasn't been called yet, just no-op */
- if (!dev->num_crtcs)
- goto out;
-
- crtc = modeset->crtc;
- if (crtc >= dev->num_crtcs) {
- ret = -EINVAL;
- goto out;
- }
-
- switch (modeset->cmd) {
- case _DRM_PRE_MODESET:
- drm_vblank_pre_modeset(dev, crtc);
- break;
- case _DRM_POST_MODESET:
- drm_vblank_post_modeset(dev, crtc);
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
-out:
- return ret;
-}
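As the comment above notes, userspace is expected to bracket its mode switch with this ioctl. A minimal sketch of that pattern (hypothetical wrapper name; the actual mode programming is driver-specific and elided):

#include <sys/ioctl.h>
#include <drm/drm.h>

static void set_mode_with_vblank_bookkeeping(int fd, unsigned int crtc)
{
        struct drm_modeset_ctl ctl = { .crtc = crtc, .cmd = _DRM_PRE_MODESET };

        ioctl(fd, DRM_IOCTL_MODESET_CTL, &ctl); /* kernel takes a vblank reference */
        /* ... driver-specific mode programming goes here ... */
        ctl.cmd = _DRM_POST_MODESET;
        ioctl(fd, DRM_IOCTL_MODESET_CTL, &ctl); /* reference dropped, counter accounted for */
}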
-
-static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
- union drm_wait_vblank *vblwait,
- struct drm_file *file_priv)
-{
- struct drm_pending_vblank_event *e;
- struct timeval now;
- unsigned long flags;
- unsigned int seq;
-
- e = kzalloc(sizeof *e, GFP_KERNEL);
- if (e == NULL)
- return -ENOMEM;
-
- e->pipe = pipe;
- e->event.base.type = DRM_EVENT_VBLANK;
- e->event.base.length = sizeof e->event;
- e->event.user_data = vblwait->request.signal;
- e->base.event = &e->event.base;
- e->base.file_priv = file_priv;
- e->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
-
- do_gettimeofday(&now);
- spin_lock_irqsave(&dev->event_lock, flags);
-
- if (file_priv->event_space < sizeof e->event) {
- spin_unlock_irqrestore(&dev->event_lock, flags);
- kfree(e);
- return -ENOMEM;
- }
-
- file_priv->event_space -= sizeof e->event;
- seq = drm_vblank_count(dev, pipe);
- if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) &&
- (seq - vblwait->request.sequence) <= (1 << 23)) {
- vblwait->request.sequence = seq + 1;
- vblwait->reply.sequence = vblwait->request.sequence;
- }
-
- DRM_DEBUG("event on vblank count %d, current %d, crtc %d\n",
- vblwait->request.sequence, seq, pipe);
-
- e->event.sequence = vblwait->request.sequence;
- if ((seq - vblwait->request.sequence) <= (1 << 23)) {
- e->event.tv_sec = now.tv_sec;
- e->event.tv_usec = now.tv_usec;
- drm_vblank_put(dev, e->pipe);
- list_add_tail(&e->base.link, &e->base.file_priv->event_list);
- wake_up_interruptible(&e->base.file_priv->event_wait);
- } else {
- list_add_tail(&e->base.link, &dev->vblank_event_list);
- }
-
- spin_unlock_irqrestore(&dev->event_lock, flags);
-
- return 0;
-}
-
-/**
- * Wait for VBLANK.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param data user argument, pointing to a drm_wait_vblank structure.
- * \return zero on success or a negative number on failure.
- *
- * This function enables the vblank interrupt on the pipe requested, then
- * sleeps waiting for the requested sequence number to occur, and drops
- * the vblank interrupt refcount afterwards. (The vblank IRQ is disabled after
- * a timeout if no further vblank waits have been scheduled.)
- */
-int drm_wait_vblank(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- union drm_wait_vblank *vblwait = data;
- int ret = 0;
- unsigned int flags, seq, crtc;
-
- if ((!dev->pdev->irq) || (!dev->irq_enabled))
- return -EINVAL;
-
- if (vblwait->request.type & _DRM_VBLANK_SIGNAL)
- return -EINVAL;
-
- if (vblwait->request.type &
- ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) {
- DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n",
- vblwait->request.type,
- (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK));
- return -EINVAL;
- }
-
- flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
- crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
-
- if (crtc >= dev->num_crtcs)
- return -EINVAL;
-
- ret = drm_vblank_get(dev, crtc);
- if (ret) {
- DRM_DEBUG("failed to acquire vblank counter, %d\n", ret);
- return ret;
- }
- seq = drm_vblank_count(dev, crtc);
-
- switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
- case _DRM_VBLANK_RELATIVE:
- vblwait->request.sequence += seq;
- vblwait->request.type &= ~_DRM_VBLANK_RELATIVE;
- case _DRM_VBLANK_ABSOLUTE:
- break;
- default:
- ret = -EINVAL;
- goto done;
- }
-
- if (flags & _DRM_VBLANK_EVENT)
- return drm_queue_vblank_event(dev, crtc, vblwait, file_priv);
-
- if ((flags & _DRM_VBLANK_NEXTONMISS) &&
- (seq - vblwait->request.sequence) <= (1<<23)) {
- vblwait->request.sequence = seq + 1;
- }
-
- DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
- vblwait->request.sequence, crtc);
- dev->last_vblank_wait[crtc] = vblwait->request.sequence;
- DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
- (((drm_vblank_count(dev, crtc) -
- vblwait->request.sequence) <= (1 << 23)) ||
- !dev->irq_enabled));
-
- if (ret != -EINTR) {
- struct timeval now;
-
- do_gettimeofday(&now);
-
- vblwait->reply.tval_sec = now.tv_sec;
- vblwait->reply.tval_usec = now.tv_usec;
- vblwait->reply.sequence = drm_vblank_count(dev, crtc);
- DRM_DEBUG("returning %d to client\n",
- vblwait->reply.sequence);
- } else {
- DRM_DEBUG("vblank wait interrupted by signal\n");
- }
-
-done:
- drm_vblank_put(dev, crtc);
- return ret;
-}
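-
-/*
- * Example (illustrative sketch only, not part of the driver sources): user
- * space normally reaches this path via DRM_IOCTL_WAIT_VBLANK, e.g. to sleep
- * until the next vblank on the primary CRTC ("fd" is assumed to be an open
- * DRM device node):
- *
- *    union drm_wait_vblank vbl;
- *
- *    memset(&vbl, 0, sizeof(vbl));
- *    vbl.request.type = _DRM_VBLANK_RELATIVE;
- *    vbl.request.sequence = 1;
- *    if (ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl) == 0)
- *        printf("woke at vblank %u\n", vbl.reply.sequence);
- */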
-
-void drm_handle_vblank_events(struct drm_device *dev, int crtc)
-{
- struct drm_pending_vblank_event *e, *t;
- struct timeval now;
- unsigned long flags;
- unsigned int seq;
-
- do_gettimeofday(&now);
- seq = drm_vblank_count(dev, crtc);
-
- spin_lock_irqsave(&dev->event_lock, flags);
-
- list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
- if (e->pipe != crtc)
- continue;
- if ((seq - e->event.sequence) > (1<<23))
- continue;
-
- DRM_DEBUG("vblank event on %d, current %d\n",
- e->event.sequence, seq);
-
- e->event.sequence = seq;
- e->event.tv_sec = now.tv_sec;
- e->event.tv_usec = now.tv_usec;
- drm_vblank_put(dev, e->pipe);
- list_move_tail(&e->base.link, &e->base.file_priv->event_list);
- wake_up_interruptible(&e->base.file_priv->event_wait);
- }
-
- spin_unlock_irqrestore(&dev->event_lock, flags);
-}
-
-/**
- * drm_handle_vblank - handle a vblank event
- * @dev: DRM device
- * @crtc: where this event occurred
- *
- * Drivers should call this routine in their vblank interrupt handlers to
- * update the vblank counter and send any signals that may be pending.
- */
-void drm_handle_vblank(struct drm_device *dev, int crtc)
-{
- if (!dev->num_crtcs)
- return;
-
- atomic_inc(&dev->_vblank_count[crtc]);
- DRM_WAKEUP(&dev->vbl_queue[crtc]);
- drm_handle_vblank_events(dev, crtc);
-}
-EXPORT_SYMBOL(drm_handle_vblank);
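-
-/*
- * Example (illustrative sketch only): as the comment above says, a driver
- * calls drm_handle_vblank() from its interrupt handler once per vblank.
- * MYDRV_IRQ_VBLANK and mydrv_read_irq_status() are hypothetical:
- *
- *    static irqreturn_t mydrv_irq_handler(int irq, void *arg)
- *    {
- *        struct drm_device *dev = arg;
- *        u32 status = mydrv_read_irq_status(dev);
- *
- *        if (status & MYDRV_IRQ_VBLANK)
- *            drm_handle_vblank(dev, 0);
- *        return IRQ_HANDLED;
- *    }
- */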
+++ /dev/null
-/**
- * \file drm_lock.c
- * IOCTLs for locking
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "drmP.h"
-
-static int drm_notifier(void *priv);
-
-/**
- * Lock ioctl.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_lock structure.
- * \return zero on success or negative number on failure.
- *
- * Add the current task to the lock wait queue, and attempt to take the lock.
- */
-int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- DECLARE_WAITQUEUE(entry, current);
- struct drm_lock *lock = data;
- struct drm_master *master = file_priv->master;
- int ret = 0;
-
- ++file_priv->lock_count;
-
- if (lock->context == DRM_KERNEL_CONTEXT) {
- DRM_ERROR("Process %d using kernel context %d\n",
- task_pid_nr(current), lock->context);
- return -EINVAL;
- }
-
- DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
- lock->context, task_pid_nr(current),
- master->lock.hw_lock->lock, lock->flags);
-
- if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE))
- if (lock->context < 0)
- return -EINVAL;
-
- add_wait_queue(&master->lock.lock_queue, &entry);
- spin_lock_bh(&master->lock.spinlock);
- master->lock.user_waiters++;
- spin_unlock_bh(&master->lock.spinlock);
-
- for (;;) {
- __set_current_state(TASK_INTERRUPTIBLE);
- if (!master->lock.hw_lock) {
- /* Device has been unregistered */
- send_sig(SIGTERM, current, 0);
- ret = -EINTR;
- break;
- }
- if (drm_lock_take(&master->lock, lock->context)) {
- master->lock.file_priv = file_priv;
- master->lock.lock_time = jiffies;
- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
- break; /* Got lock */
- }
-
- /* Contention */
- schedule();
- if (signal_pending(current)) {
- ret = -EINTR;
- break;
- }
- }
- spin_lock_bh(&master->lock.spinlock);
- master->lock.user_waiters--;
- spin_unlock_bh(&master->lock.spinlock);
- __set_current_state(TASK_RUNNING);
- remove_wait_queue(&master->lock.lock_queue, &entry);
-
- DRM_DEBUG("%d %s\n", lock->context,
- ret ? "interrupted" : "has lock");
- if (ret) return ret;
-
- /* Don't block all signals on the master process for now;
- * probably not the correct answer, but it lets us debug the xkb
- * X server for now */
- if (!file_priv->is_master) {
- sigemptyset(&dev->sigmask);
- sigaddset(&dev->sigmask, SIGSTOP);
- sigaddset(&dev->sigmask, SIGTSTP);
- sigaddset(&dev->sigmask, SIGTTIN);
- sigaddset(&dev->sigmask, SIGTTOU);
- dev->sigdata.context = lock->context;
- dev->sigdata.lock = master->lock.hw_lock;
- block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);
- }
-
- if (dev->driver->dma_ready && (lock->flags & _DRM_LOCK_READY))
- dev->driver->dma_ready(dev);
-
- if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT))
- {
- if (dev->driver->dma_quiescent(dev)) {
- DRM_DEBUG("%d waiting for DMA quiescent\n",
- lock->context);
- return -EBUSY;
- }
- }
-
- if (dev->driver->kernel_context_switch &&
- dev->last_context != lock->context) {
- dev->driver->kernel_context_switch(dev, dev->last_context,
- lock->context);
- }
-
- return 0;
-}
-
-/**
- * Unlock ioctl.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_lock structure.
- * \return zero on success or negative number on failure.
- *
- * Transfer and free the lock.
- */
-int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- struct drm_lock *lock = data;
- struct drm_master *master = file_priv->master;
-
- if (lock->context == DRM_KERNEL_CONTEXT) {
- DRM_ERROR("Process %d using kernel context %d\n",
- task_pid_nr(current), lock->context);
- return -EINVAL;
- }
-
- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
-
- /* kernel_context_switch isn't used by any of the x86 drm
- * modules but is required by the Sparc driver.
- */
- if (dev->driver->kernel_context_switch_unlock)
- dev->driver->kernel_context_switch_unlock(dev);
- else {
- if (drm_lock_free(&master->lock, lock->context)) {
- /* FIXME: Should really bail out here. */
- }
- }
-
- unblock_all_signals();
- return 0;
-}
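-
-/*
- * Example (illustrative sketch only): legacy user space brackets direct
- * hardware access with these two ioctls, using the context handle the DRM
- * gave it ("fd" and "ctx" are assumed to exist):
- *
- *    struct drm_lock lk = { .context = ctx, .flags = _DRM_LOCK_READY };
- *
- *    ioctl(fd, DRM_IOCTL_LOCK, &lk);
- *    ... program the hardware ...
- *    ioctl(fd, DRM_IOCTL_UNLOCK, &lk);
- */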
-
-/**
- * Take the heavyweight lock.
- *
- * \param lock lock pointer.
- * \param context locking context.
- * \return one if the lock is held, or zero otherwise.
- *
- * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
- */
-int drm_lock_take(struct drm_lock_data *lock_data,
- unsigned int context)
-{
- unsigned int old, new, prev;
- volatile unsigned int *lock = &lock_data->hw_lock->lock;
-
- spin_lock_bh(&lock_data->spinlock);
- do {
- old = *lock;
- if (old & _DRM_LOCK_HELD)
- new = old | _DRM_LOCK_CONT;
- else {
- new = context | _DRM_LOCK_HELD |
- ((lock_data->user_waiters + lock_data->kernel_waiters > 1) ?
- _DRM_LOCK_CONT : 0);
- }
- prev = cmpxchg(lock, old, new);
- } while (prev != old);
- spin_unlock_bh(&lock_data->spinlock);
-
- if (_DRM_LOCKING_CONTEXT(old) == context) {
- if (old & _DRM_LOCK_HELD) {
- if (context != DRM_KERNEL_CONTEXT) {
- DRM_ERROR("%d holds heavyweight lock\n",
- context);
- }
- return 0;
- }
- }
-
- if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) {
- /* Have lock */
- return 1;
- }
- return 0;
-}
-EXPORT_SYMBOL(drm_lock_take);
-
-/**
- * This takes a lock forcibly and hands it to context. Should ONLY be used
- * inside *_unlock to give lock to kernel before calling *_dma_schedule.
- *
- * \param dev DRM device.
- * \param lock lock pointer.
- * \param context locking context.
- * \return always one.
- *
- * Resets the lock file pointer.
- * Marks the lock as held by the given context, via the \p cmpxchg instruction.
- */
-static int drm_lock_transfer(struct drm_lock_data *lock_data,
- unsigned int context)
-{
- unsigned int old, new, prev;
- volatile unsigned int *lock = &lock_data->hw_lock->lock;
-
- lock_data->file_priv = NULL;
- do {
- old = *lock;
- new = context | _DRM_LOCK_HELD;
- prev = cmpxchg(lock, old, new);
- } while (prev != old);
- return 1;
-}
-
-/**
- * Free lock.
- *
- * \param dev DRM device.
- * \param lock lock.
- * \param context context.
- *
- * Resets the lock file pointer.
- * Marks the lock as not held, via the \p cmpxchg instruction. Wakes any task
- * waiting on the lock queue.
- */
-int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context)
-{
- unsigned int old, new, prev;
- volatile unsigned int *lock = &lock_data->hw_lock->lock;
-
- spin_lock_bh(&lock_data->spinlock);
- if (lock_data->kernel_waiters != 0) {
- drm_lock_transfer(lock_data, 0);
- lock_data->idle_has_lock = 1;
- spin_unlock_bh(&lock_data->spinlock);
- return 1;
- }
- spin_unlock_bh(&lock_data->spinlock);
-
- do {
- old = *lock;
- new = _DRM_LOCKING_CONTEXT(old);
- prev = cmpxchg(lock, old, new);
- } while (prev != old);
-
- if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
- DRM_ERROR("%d freed heavyweight lock held by %d\n",
- context, _DRM_LOCKING_CONTEXT(old));
- return 1;
- }
- wake_up_interruptible(&lock_data->lock_queue);
- return 0;
-}
-EXPORT_SYMBOL(drm_lock_free);
-
-/**
- * If we get here, it means that the process has called DRM_IOCTL_LOCK
- * without calling DRM_IOCTL_UNLOCK.
- *
- * If the lock is not held, then let the signal proceed as usual. If the lock
- * is held, then set the contended flag and keep the signal blocked.
- *
- * \param priv pointer to a drm_sigdata structure.
- * \return one if the signal should be delivered normally, or zero if the
- * signal should be blocked.
- */
-static int drm_notifier(void *priv)
-{
- struct drm_sigdata *s = (struct drm_sigdata *) priv;
- unsigned int old, new, prev;
-
- /* Allow signal delivery if lock isn't held */
- if (!s->lock || !_DRM_LOCK_IS_HELD(s->lock->lock)
- || _DRM_LOCKING_CONTEXT(s->lock->lock) != s->context)
- return 1;
-
- /* Otherwise, set flag to force call to drmUnlock */
- do {
- old = s->lock->lock;
- new = old | _DRM_LOCK_CONT;
- prev = cmpxchg(&s->lock->lock, old, new);
- } while (prev != old);
- return 0;
-}
-
-/**
- * This function returns immediately and takes the hw lock
- * with the kernel context if it is free, otherwise it gets the highest priority when and if
- * it is eventually released.
- *
- * This guarantees that the kernel will _eventually_ have the lock _unless_ it is held
- * by a blocked process. (In the latter case an explicit wait for the hardware lock would cause
- * a deadlock, which is why the "idlelock" was invented).
- *
- * This should be sufficient to wait for GPU idle without
- * having to worry about starvation.
- */
-
-void drm_idlelock_take(struct drm_lock_data *lock_data)
-{
- int ret = 0;
-
- spin_lock_bh(&lock_data->spinlock);
- lock_data->kernel_waiters++;
- if (!lock_data->idle_has_lock) {
-
- spin_unlock_bh(&lock_data->spinlock);
- ret = drm_lock_take(lock_data, DRM_KERNEL_CONTEXT);
- spin_lock_bh(&lock_data->spinlock);
-
- if (ret == 1)
- lock_data->idle_has_lock = 1;
- }
- spin_unlock_bh(&lock_data->spinlock);
-}
-EXPORT_SYMBOL(drm_idlelock_take);
-
-void drm_idlelock_release(struct drm_lock_data *lock_data)
-{
- unsigned int old, prev;
- volatile unsigned int *lock = &lock_data->hw_lock->lock;
-
- spin_lock_bh(&lock_data->spinlock);
- if (--lock_data->kernel_waiters == 0) {
- if (lock_data->idle_has_lock) {
- do {
- old = *lock;
- prev = cmpxchg(lock, old, DRM_KERNEL_CONTEXT);
- } while (prev != old);
- wake_up_interruptible(&lock_data->lock_queue);
- lock_data->idle_has_lock = 0;
- }
- }
- spin_unlock_bh(&lock_data->spinlock);
-}
-EXPORT_SYMBOL(drm_idlelock_release);
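-
-/*
- * Example (illustrative sketch only): as described above, a driver that
- * needs the hardware idle without risking a deadlock against a blocked lock
- * holder brackets the wait with the idlelock; mydrv_wait_gpu_idle() is a
- * hypothetical helper:
- *
- *    drm_idlelock_take(&master->lock);
- *    mydrv_wait_gpu_idle(dev);
- *    drm_idlelock_release(&master->lock);
- */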
-
-
-int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
-{
- struct drm_master *master = file_priv->master;
- return (file_priv->lock_count && master->lock.hw_lock &&
- _DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) &&
- master->lock.file_priv == file_priv);
-}
-
-EXPORT_SYMBOL(drm_i_have_hw_lock);
+++ /dev/null
-/**
- * \file drm_memory.c
- * Memory management wrappers for DRM
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Thu Feb 4 14:00:34 1999 by faith@valinux.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/highmem.h>
-#include "drmP.h"
-
-/**
- * Called when "/proc/dri/%dev%/mem" is read.
- *
- * \param buf output buffer.
- * \param start start of output data.
- * \param offset requested start offset.
- * \param len requested number of bytes.
- * \param eof whether there is no more data to return.
- * \param data private data.
- * \return number of written bytes.
- *
- * No-op.
- */
-int drm_mem_info(char *buf, char **start, off_t offset,
- int len, int *eof, void *data)
-{
- return 0;
-}
-
-#if __OS_HAS_AGP
-static void *agp_remap(unsigned long offset, unsigned long size,
- struct drm_device * dev)
-{
- unsigned long i, num_pages =
- PAGE_ALIGN(size) / PAGE_SIZE;
- struct drm_agp_mem *agpmem;
- struct page **page_map;
- struct page **phys_page_map;
- void *addr;
-
- size = PAGE_ALIGN(size);
-
-#ifdef __alpha__
- offset -= dev->hose->mem_space->start;
-#endif
-
- list_for_each_entry(agpmem, &dev->agp->memory, head)
- if (agpmem->bound <= offset
- && (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >=
- (offset + size))
- break;
- if (!agpmem)
- return NULL;
-
- /*
- * OK, we're mapping AGP space on a chipset/platform on which memory accesses by
- * the CPU do not get remapped by the GART. We fix this by using the kernel's
- * page-table instead (that's probably faster anyhow...).
- */
- /* note: use vmalloc() because num_pages could be large... */
- page_map = vmalloc(num_pages * sizeof(struct page *));
- if (!page_map)
- return NULL;
-
- phys_page_map = (agpmem->memory->pages + (offset - agpmem->bound) / PAGE_SIZE);
- for (i = 0; i < num_pages; ++i)
- page_map[i] = phys_page_map[i];
- addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP);
- vfree(page_map);
-
- return addr;
-}
-
-/** Wrapper around agp_allocate_memory() */
-DRM_AGP_MEM *drm_alloc_agp(struct drm_device * dev, int pages, u32 type)
-{
- return drm_agp_allocate_memory(dev->agp->bridge, pages, type);
-}
-
-/** Wrapper around agp_free_memory() */
-int drm_free_agp(DRM_AGP_MEM * handle, int pages)
-{
- return drm_agp_free_memory(handle) ? 0 : -EINVAL;
-}
-EXPORT_SYMBOL(drm_free_agp);
-
-/** Wrapper around agp_bind_memory() */
-int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
-{
- return drm_agp_bind_memory(handle, start);
-}
-
-/** Wrapper around agp_unbind_memory() */
-int drm_unbind_agp(DRM_AGP_MEM * handle)
-{
- return drm_agp_unbind_memory(handle);
-}
-EXPORT_SYMBOL(drm_unbind_agp);
-
-#else /* __OS_HAS_AGP */
-static inline void *agp_remap(unsigned long offset, unsigned long size,
- struct drm_device * dev)
-{
- return NULL;
-}
-
-#endif /* agp */
-
-void drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev)
-{
- if (drm_core_has_AGP(dev) &&
- dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
- map->handle = agp_remap(map->offset, map->size, dev);
- else
- map->handle = ioremap(map->offset, map->size);
-}
-EXPORT_SYMBOL(drm_core_ioremap);
-
-void drm_core_ioremap_wc(struct drm_local_map *map, struct drm_device *dev)
-{
- if (drm_core_has_AGP(dev) &&
- dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
- map->handle = agp_remap(map->offset, map->size, dev);
- else
- map->handle = ioremap_wc(map->offset, map->size);
-}
-EXPORT_SYMBOL(drm_core_ioremap_wc);
-
-void drm_core_ioremapfree(struct drm_local_map *map, struct drm_device *dev)
-{
- if (!map->handle || !map->size)
- return;
-
- if (drm_core_has_AGP(dev) &&
- dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
- vunmap(map->handle);
- else
- iounmap(map->handle);
-}
-EXPORT_SYMBOL(drm_core_ioremapfree);
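-
-/*
- * Example (illustrative sketch only): a driver maps and unmaps a register
- * range through these wrappers so the AGP-aperture special case above is
- * handled transparently ("map" is assumed to be a drm_local_map with
- * offset, size and type already filled in):
- *
- *    drm_core_ioremap(map, dev);
- *    if (!map->handle)
- *        return -ENOMEM;
- *    ...
- *    drm_core_ioremapfree(map, dev);
- */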
+++ /dev/null
-/**************************************************************************
- *
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- *
- **************************************************************************/
-
-/*
- * Generic simple memory manager implementation. Intended to be used as a base
- * class implementation for more advanced memory managers.
- *
- * Note that the algorithm used is quite simple and there might be substantial
- * performance gains if a smarter free list is implemented. Currently it is just an
- * unordered stack of free regions. This could easily be improved by using an
- * RB-tree instead, at least if heavy fragmentation is expected.
- *
- * Aligned allocations can also see improvement.
- *
- * Authors:
- * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- */
-
-#include "drmP.h"
-#include "drm_mm.h"
-#include <linux/slab.h>
-#include <linux/seq_file.h>
-
-#define MM_UNUSED_TARGET 4
-
-unsigned long drm_mm_tail_space(struct drm_mm *mm)
-{
- struct list_head *tail_node;
- struct drm_mm_node *entry;
-
- tail_node = mm->ml_entry.prev;
- entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
- if (!entry->free)
- return 0;
-
- return entry->size;
-}
-
-int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
-{
- struct list_head *tail_node;
- struct drm_mm_node *entry;
-
- tail_node = mm->ml_entry.prev;
- entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
- if (!entry->free)
- return -ENOMEM;
-
- if (entry->size <= size)
- return -ENOMEM;
-
- entry->size -= size;
- return 0;
-}
-
-static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
-{
- struct drm_mm_node *child;
-
- if (atomic)
- child = kmalloc(sizeof(*child), GFP_ATOMIC);
- else
- child = kmalloc(sizeof(*child), GFP_KERNEL);
-
- if (unlikely(child == NULL)) {
- spin_lock(&mm->unused_lock);
- if (list_empty(&mm->unused_nodes))
- child = NULL;
- else {
- child =
- list_entry(mm->unused_nodes.next,
- struct drm_mm_node, fl_entry);
- list_del(&child->fl_entry);
- --mm->num_unused;
- }
- spin_unlock(&mm->unused_lock);
- }
- return child;
-}
-
-/* drm_mm_pre_get() - pre-allocate drm_mm_node structures
- * drm_mm: memory manager struct we are pre-allocating for
- *
- * Returns 0 on success or -ENOMEM if allocation fails.
- */
-int drm_mm_pre_get(struct drm_mm *mm)
-{
- struct drm_mm_node *node;
-
- spin_lock(&mm->unused_lock);
- while (mm->num_unused < MM_UNUSED_TARGET) {
- spin_unlock(&mm->unused_lock);
- node = kmalloc(sizeof(*node), GFP_KERNEL);
- spin_lock(&mm->unused_lock);
-
- if (unlikely(node == NULL)) {
- int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
- spin_unlock(&mm->unused_lock);
- return ret;
- }
- ++mm->num_unused;
- list_add_tail(&node->fl_entry, &mm->unused_nodes);
- }
- spin_unlock(&mm->unused_lock);
- return 0;
-}
-EXPORT_SYMBOL(drm_mm_pre_get);
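-
-/*
- * Example (illustrative sketch only): drm_mm_pre_get() lets a caller top up
- * the node cache before entering a context where GFP_KERNEL allocations are
- * not allowed, and then allocate atomically. "mydrv_lock" is a hypothetical
- * driver spinlock and drm_mm_get_block_atomic() is the wrapper from
- * drm_mm.h:
- *
- *    ret = drm_mm_pre_get(&mm);
- *    if (ret)
- *        return ret;
- *    spin_lock(&mydrv_lock);
- *    node = drm_mm_search_free(&mm, size, alignment, 0);
- *    if (node)
- *        node = drm_mm_get_block_atomic(node, size, alignment);
- *    spin_unlock(&mydrv_lock);
- */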
-
-static int drm_mm_create_tail_node(struct drm_mm *mm,
- unsigned long start,
- unsigned long size, int atomic)
-{
- struct drm_mm_node *child;
-
- child = drm_mm_kmalloc(mm, atomic);
- if (unlikely(child == NULL))
- return -ENOMEM;
-
- child->free = 1;
- child->size = size;
- child->start = start;
- child->mm = mm;
-
- list_add_tail(&child->ml_entry, &mm->ml_entry);
- list_add_tail(&child->fl_entry, &mm->fl_entry);
-
- return 0;
-}
-
-int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size, int atomic)
-{
- struct list_head *tail_node;
- struct drm_mm_node *entry;
-
- tail_node = mm->ml_entry.prev;
- entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
- if (!entry->free) {
- return drm_mm_create_tail_node(mm, entry->start + entry->size,
- size, atomic);
- }
- entry->size += size;
- return 0;
-}
-
-static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
- unsigned long size,
- int atomic)
-{
- struct drm_mm_node *child;
-
- child = drm_mm_kmalloc(parent->mm, atomic);
- if (unlikely(child == NULL))
- return NULL;
-
- INIT_LIST_HEAD(&child->fl_entry);
-
- child->free = 0;
- child->size = size;
- child->start = parent->start;
- child->mm = parent->mm;
-
- list_add_tail(&child->ml_entry, &parent->ml_entry);
- INIT_LIST_HEAD(&child->fl_entry);
-
- parent->size -= size;
- parent->start += size;
- return child;
-}
-
-
-struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
- unsigned long size,
- unsigned alignment,
- int atomic)
-{
-
- struct drm_mm_node *align_splitoff = NULL;
- unsigned tmp = 0;
-
- if (alignment)
- tmp = node->start % alignment;
-
- if (tmp) {
- align_splitoff =
- drm_mm_split_at_start(node, alignment - tmp, atomic);
- if (unlikely(align_splitoff == NULL))
- return NULL;
- }
-
- if (node->size == size) {
- list_del_init(&node->fl_entry);
- node->free = 0;
- } else {
- node = drm_mm_split_at_start(node, size, atomic);
- }
-
- if (align_splitoff)
- drm_mm_put_block(align_splitoff);
-
- return node;
-}
-EXPORT_SYMBOL(drm_mm_get_block_generic);
-
-struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node,
- unsigned long size,
- unsigned alignment,
- unsigned long start,
- unsigned long end,
- int atomic)
-{
- struct drm_mm_node *align_splitoff = NULL;
- unsigned tmp = 0;
- unsigned wasted = 0;
-
- if (node->start < start)
- wasted += start - node->start;
- if (alignment)
- tmp = ((node->start + wasted) % alignment);
-
- if (tmp)
- wasted += alignment - tmp;
- if (wasted) {
- align_splitoff = drm_mm_split_at_start(node, wasted, atomic);
- if (unlikely(align_splitoff == NULL))
- return NULL;
- }
-
- if (node->size == size) {
- list_del_init(&node->fl_entry);
- node->free = 0;
- } else {
- node = drm_mm_split_at_start(node, size, atomic);
- }
-
- if (align_splitoff)
- drm_mm_put_block(align_splitoff);
-
- return node;
-}
-EXPORT_SYMBOL(drm_mm_get_block_range_generic);
-
-/*
- * Put a block. Merge with the previous and / or next block if they are free.
- * Otherwise add to the free stack.
- */
-
-void drm_mm_put_block(struct drm_mm_node *cur)
-{
-
- struct drm_mm *mm = cur->mm;
- struct list_head *cur_head = &cur->ml_entry;
- struct list_head *root_head = &mm->ml_entry;
- struct drm_mm_node *prev_node = NULL;
- struct drm_mm_node *next_node;
-
- int merged = 0;
-
- if (cur_head->prev != root_head) {
- prev_node =
- list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
- if (prev_node->free) {
- prev_node->size += cur->size;
- merged = 1;
- }
- }
- if (cur_head->next != root_head) {
- next_node =
- list_entry(cur_head->next, struct drm_mm_node, ml_entry);
- if (next_node->free) {
- if (merged) {
- prev_node->size += next_node->size;
- list_del(&next_node->ml_entry);
- list_del(&next_node->fl_entry);
- spin_lock(&mm->unused_lock);
- if (mm->num_unused < MM_UNUSED_TARGET) {
- list_add(&next_node->fl_entry,
- &mm->unused_nodes);
- ++mm->num_unused;
- } else
- kfree(next_node);
- spin_unlock(&mm->unused_lock);
- } else {
- next_node->size += cur->size;
- next_node->start = cur->start;
- merged = 1;
- }
- }
- }
- if (!merged) {
- cur->free = 1;
- list_add(&cur->fl_entry, &mm->fl_entry);
- } else {
- list_del(&cur->ml_entry);
- spin_lock(&mm->unused_lock);
- if (mm->num_unused < MM_UNUSED_TARGET) {
- list_add(&cur->fl_entry, &mm->unused_nodes);
- ++mm->num_unused;
- } else
- kfree(cur);
- spin_unlock(&mm->unused_lock);
- }
-}
-
-EXPORT_SYMBOL(drm_mm_put_block);
-
-struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
- unsigned long size,
- unsigned alignment, int best_match)
-{
- struct list_head *list;
- const struct list_head *free_stack = &mm->fl_entry;
- struct drm_mm_node *entry;
- struct drm_mm_node *best;
- unsigned long best_size;
- unsigned wasted;
-
- best = NULL;
- best_size = ~0UL;
-
- list_for_each(list, free_stack) {
- entry = list_entry(list, struct drm_mm_node, fl_entry);
- wasted = 0;
-
- if (entry->size < size)
- continue;
-
- if (alignment) {
- register unsigned tmp = entry->start % alignment;
- if (tmp)
- wasted += alignment - tmp;
- }
-
- if (entry->size >= size + wasted) {
- if (!best_match)
- return entry;
- if (entry->size < best_size) {
- best = entry;
- best_size = entry->size;
- }
- }
- }
-
- return best;
-}
-EXPORT_SYMBOL(drm_mm_search_free);
-
-struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
- unsigned long size,
- unsigned alignment,
- unsigned long start,
- unsigned long end,
- int best_match)
-{
- struct list_head *list;
- const struct list_head *free_stack = &mm->fl_entry;
- struct drm_mm_node *entry;
- struct drm_mm_node *best;
- unsigned long best_size;
- unsigned wasted;
-
- best = NULL;
- best_size = ~0UL;
-
- list_for_each(list, free_stack) {
- entry = list_entry(list, struct drm_mm_node, fl_entry);
- wasted = 0;
-
- if (entry->size < size)
- continue;
-
- if (entry->start > end || (entry->start+entry->size) < start)
- continue;
-
- if (entry->start < start)
- wasted += start - entry->start;
-
- if (alignment) {
- register unsigned tmp = (entry->start + wasted) % alignment;
- if (tmp)
- wasted += alignment - tmp;
- }
-
- if (entry->size >= size + wasted) {
- if (!best_match)
- return entry;
- if (entry->size < best_size) {
- best = entry;
- best_size = entry->size;
- }
- }
- }
-
- return best;
-}
-EXPORT_SYMBOL(drm_mm_search_free_in_range);
-
-int drm_mm_clean(struct drm_mm * mm)
-{
- struct list_head *head = &mm->ml_entry;
-
- return (head->next->next == head);
-}
-EXPORT_SYMBOL(drm_mm_clean);
-
-int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
-{
- INIT_LIST_HEAD(&mm->ml_entry);
- INIT_LIST_HEAD(&mm->fl_entry);
- INIT_LIST_HEAD(&mm->unused_nodes);
- mm->num_unused = 0;
- spin_lock_init(&mm->unused_lock);
-
- return drm_mm_create_tail_node(mm, start, size, 0);
-}
-EXPORT_SYMBOL(drm_mm_init);
-
-void drm_mm_takedown(struct drm_mm * mm)
-{
- struct list_head *bnode = mm->fl_entry.next;
- struct drm_mm_node *entry;
- struct drm_mm_node *next;
-
- entry = list_entry(bnode, struct drm_mm_node, fl_entry);
-
- if (entry->ml_entry.next != &mm->ml_entry ||
- entry->fl_entry.next != &mm->fl_entry) {
- DRM_ERROR("Memory manager not clean. Delaying takedown\n");
- return;
- }
-
- list_del(&entry->fl_entry);
- list_del(&entry->ml_entry);
- kfree(entry);
-
- spin_lock(&mm->unused_lock);
- list_for_each_entry_safe(entry, next, &mm->unused_nodes, fl_entry) {
- list_del(&entry->fl_entry);
- kfree(entry);
- --mm->num_unused;
- }
- spin_unlock(&mm->unused_lock);
-
- BUG_ON(mm->num_unused != 0);
-}
-EXPORT_SYMBOL(drm_mm_takedown);
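-
-/*
- * Example (illustrative sketch only): typical lifecycle of a manager and a
- * block, assuming a 16 MB range starting at offset 0 and the
- * drm_mm_get_block() wrapper from drm_mm.h:
- *
- *    struct drm_mm mm;
- *    struct drm_mm_node *node;
- *
- *    drm_mm_init(&mm, 0, 16 * 1024 * 1024);
- *    node = drm_mm_search_free(&mm, 4096, 0, 0);
- *    if (node)
- *        node = drm_mm_get_block(node, 4096, 0);
- *    ...
- *    drm_mm_put_block(node);
- *    drm_mm_takedown(&mm);
- */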
-
-void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
-{
- struct drm_mm_node *entry;
- int total_used = 0, total_free = 0, total = 0;
-
- list_for_each_entry(entry, &mm->ml_entry, ml_entry) {
- printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8ld: %s\n",
- prefix, entry->start, entry->start + entry->size,
- entry->size, entry->free ? "free" : "used");
- total += entry->size;
- if (entry->free)
- total_free += entry->size;
- else
- total_used += entry->size;
- }
- printk(KERN_DEBUG "%s total: %d, used %d free %d\n", prefix, total,
- total_used, total_free);
-}
-EXPORT_SYMBOL(drm_mm_debug_table);
-
-#if defined(CONFIG_DEBUG_FS)
-int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
-{
- struct drm_mm_node *entry;
- int total_used = 0, total_free = 0, total = 0;
-
- list_for_each_entry(entry, &mm->ml_entry, ml_entry) {
- seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: %s\n", entry->start, entry->start + entry->size, entry->size, entry->free ? "free" : "used");
- total += entry->size;
- if (entry->free)
- total_free += entry->size;
- else
- total_used += entry->size;
- }
- seq_printf(m, "total: %d, used %d free %d\n", total, total_used, total_free);
- return 0;
-}
-EXPORT_SYMBOL(drm_mm_dump_table);
-#endif
+++ /dev/null
-/*
- * The list_sort function is (presumably) licensed under the GPL (see the
- * top level "COPYING" file for details).
- *
- * The remainder of this file is:
- *
- * Copyright © 1997-2003 by The XFree86 Project, Inc.
- * Copyright © 2007 Dave Airlie
- * Copyright © 2007-2008 Intel Corporation
- * Jesse Barnes <jesse.barnes@intel.com>
- * Copyright 2005-2006 Luc Verhaegen
- * Copyright (c) 2001, Andy Ritger aritger@nvidia.com
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Except as contained in this notice, the name of the copyright holder(s)
- * and author(s) shall not be used in advertising or otherwise to promote
- * the sale, use or other dealings in this Software without prior written
- * authorization from the copyright holder(s) and author(s).
- */
-
-#include <linux/list.h>
-#include "drmP.h"
-#include "drm.h"
-#include "drm_crtc.h"
-
-/**
- * drm_mode_debug_printmodeline - debug print a mode
- * @mode: mode to print
- *
- * LOCKING:
- * None.
- *
- * Describe @mode using DRM_DEBUG.
- */
-void drm_mode_debug_printmodeline(struct drm_display_mode *mode)
-{
- DRM_DEBUG_KMS("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d "
- "0x%x 0x%x\n",
- mode->base.id, mode->name, mode->vrefresh, mode->clock,
- mode->hdisplay, mode->hsync_start,
- mode->hsync_end, mode->htotal,
- mode->vdisplay, mode->vsync_start,
- mode->vsync_end, mode->vtotal, mode->type, mode->flags);
-}
-EXPORT_SYMBOL(drm_mode_debug_printmodeline);
-
-/**
- * drm_cvt_mode - create a modeline based on the CVT algorithm
- * @dev: DRM device
- * @hdisplay: hdisplay size
- * @vdisplay: vdisplay size
- * @vrefresh: vrefresh rate
- * @reduced: whether to use CVT reduced blanking
- * @interlaced: whether the interlace is supported
- * @margins: whether to add top/bottom and left/right margins
- *
- * LOCKING:
- * none.
- *
- * return the modeline based on CVT algorithm
- *
- * This function generates a modeline based on the CVT algorithm from the
- * given hdisplay, vdisplay and vrefresh values.
- * It is based on the VESA(TM) Coordinated Video Timing Generator by
- * Graham Loveridge, April 9, 2003, available at
- * http://www.vesa.org/public/CVT/CVTd6r1.xls
- *
- * It is adapted from xf86CVTmode in xserver/hw/xfree86/modes/xf86cvt.c,
- * translated to use integer arithmetic.
- */
-#define HV_FACTOR 1000
-struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
- int vdisplay, int vrefresh,
- bool reduced, bool interlaced, bool margins)
-{
- /* 1) top/bottom margin size (% of height) - default: 1.8, */
-#define CVT_MARGIN_PERCENTAGE 18
- /* 2) character cell horizontal granularity (pixels) - default 8 */
-#define CVT_H_GRANULARITY 8
- /* 3) Minimum vertical porch (lines) - default 3 */
-#define CVT_MIN_V_PORCH 3
- /* 4) Minimum number of vertical back porch lines - default 6 */
-#define CVT_MIN_V_BPORCH 6
- /* Pixel Clock step (kHz) */
-#define CVT_CLOCK_STEP 250
- struct drm_display_mode *drm_mode;
- unsigned int vfieldrate, hperiod;
- int hdisplay_rnd, hmargin, vdisplay_rnd, vmargin, vsync;
- int interlace;
-
- /* allocate the drm_display_mode structure. On failure, return
- * directly
- */
- drm_mode = drm_mode_create(dev);
- if (!drm_mode)
- return NULL;
-
- /* the CVT default refresh rate is 60Hz */
- if (!vrefresh)
- vrefresh = 60;
-
- /* the required field refresh rate */
- if (interlaced)
- vfieldrate = vrefresh * 2;
- else
- vfieldrate = vrefresh;
-
- /* horizontal pixels */
- hdisplay_rnd = hdisplay - (hdisplay % CVT_H_GRANULARITY);
-
- /* determine the left&right borders */
- hmargin = 0;
- if (margins) {
- hmargin = hdisplay_rnd * CVT_MARGIN_PERCENTAGE / 1000;
- hmargin -= hmargin % CVT_H_GRANULARITY;
- }
- /* find the total active pixels */
- drm_mode->hdisplay = hdisplay_rnd + 2 * hmargin;
-
- /* find the number of lines per field */
- if (interlaced)
- vdisplay_rnd = vdisplay / 2;
- else
- vdisplay_rnd = vdisplay;
-
- /* find the top & bottom borders */
- vmargin = 0;
- if (margins)
- vmargin = vdisplay_rnd * CVT_MARGIN_PERCENTAGE / 1000;
-
- drm_mode->vdisplay = vdisplay + 2 * vmargin;
-
- /* Interlaced */
- if (interlaced)
- interlace = 1;
- else
- interlace = 0;
-
- /* Determine VSync Width from aspect ratio */
- if (!(vdisplay % 3) && ((vdisplay * 4 / 3) == hdisplay))
- vsync = 4;
- else if (!(vdisplay % 9) && ((vdisplay * 16 / 9) == hdisplay))
- vsync = 5;
- else if (!(vdisplay % 10) && ((vdisplay * 16 / 10) == hdisplay))
- vsync = 6;
- else if (!(vdisplay % 4) && ((vdisplay * 5 / 4) == hdisplay))
- vsync = 7;
- else if (!(vdisplay % 9) && ((vdisplay * 15 / 9) == hdisplay))
- vsync = 7;
- else /* custom */
- vsync = 10;
-
- if (!reduced) {
- /* simplify the GTF calculation */
- /* 4) Minimum time of vertical sync + back porch interval (µs)
- * default 550.0
- */
- int tmp1, tmp2;
-#define CVT_MIN_VSYNC_BP 550
- /* 3) Nominal HSync width (% of line period) - default 8 */
-#define CVT_HSYNC_PERCENTAGE 8
- unsigned int hblank_percentage;
- int vsyncandback_porch, vback_porch, hblank;
-
- /* estimated the horizontal period */
- tmp1 = HV_FACTOR * 1000000 -
- CVT_MIN_VSYNC_BP * HV_FACTOR * vfieldrate;
- tmp2 = (vdisplay_rnd + 2 * vmargin + CVT_MIN_V_PORCH) * 2 +
- interlace;
- hperiod = tmp1 * 2 / (tmp2 * vfieldrate);
-
- tmp1 = CVT_MIN_VSYNC_BP * HV_FACTOR / hperiod + 1;
- /* 9. Find number of lines in sync + backporch */
- if (tmp1 < (vsync + CVT_MIN_V_PORCH))
- vsyncandback_porch = vsync + CVT_MIN_V_PORCH;
- else
- vsyncandback_porch = tmp1;
- /* 10. Find number of lines in back porch */
- vback_porch = vsyncandback_porch - vsync;
- drm_mode->vtotal = vdisplay_rnd + 2 * vmargin +
- vsyncandback_porch + CVT_MIN_V_PORCH;
- /* 5) Definition of Horizontal blanking time limitation */
- /* Gradient (%/kHz) - default 600 */
-#define CVT_M_FACTOR 600
- /* Offset (%) - default 40 */
-#define CVT_C_FACTOR 40
- /* Blanking time scaling factor - default 128 */
-#define CVT_K_FACTOR 128
- /* Scaling factor weighting - default 20 */
-#define CVT_J_FACTOR 20
-#define CVT_M_PRIME (CVT_M_FACTOR * CVT_K_FACTOR / 256)
-#define CVT_C_PRIME ((CVT_C_FACTOR - CVT_J_FACTOR) * CVT_K_FACTOR / 256 + \
- CVT_J_FACTOR)
- /* 12. Find ideal blanking duty cycle from formula */
- hblank_percentage = CVT_C_PRIME * HV_FACTOR - CVT_M_PRIME *
- hperiod / 1000;
- /* 13. Blanking time */
- if (hblank_percentage < 20 * HV_FACTOR)
- hblank_percentage = 20 * HV_FACTOR;
- hblank = drm_mode->hdisplay * hblank_percentage /
- (100 * HV_FACTOR - hblank_percentage);
- hblank -= hblank % (2 * CVT_H_GRANULARITY);
- /* 14. find the total pixels per line */
- drm_mode->htotal = drm_mode->hdisplay + hblank;
- drm_mode->hsync_end = drm_mode->hdisplay + hblank / 2;
- drm_mode->hsync_start = drm_mode->hsync_end -
- (drm_mode->htotal * CVT_HSYNC_PERCENTAGE) / 100;
- drm_mode->hsync_start += CVT_H_GRANULARITY -
- drm_mode->hsync_start % CVT_H_GRANULARITY;
- /* fill the Vsync values */
- drm_mode->vsync_start = drm_mode->vdisplay + CVT_MIN_V_PORCH;
- drm_mode->vsync_end = drm_mode->vsync_start + vsync;
- } else {
- /* Reduced blanking */
- /* Minimum vertical blanking interval time (µs)- default 460 */
-#define CVT_RB_MIN_VBLANK 460
- /* Fixed number of clocks for horizontal sync */
-#define CVT_RB_H_SYNC 32
- /* Fixed number of clocks for horizontal blanking */
-#define CVT_RB_H_BLANK 160
- /* Fixed number of lines for vertical front porch - default 3*/
-#define CVT_RB_VFPORCH 3
- int vbilines;
- int tmp1, tmp2;
- /* 8. Estimate Horizontal period. */
- tmp1 = HV_FACTOR * 1000000 -
- CVT_RB_MIN_VBLANK * HV_FACTOR * vfieldrate;
- tmp2 = vdisplay_rnd + 2 * vmargin;
- hperiod = tmp1 / (tmp2 * vfieldrate);
- /* 9. Find number of lines in vertical blanking */
- vbilines = CVT_RB_MIN_VBLANK * HV_FACTOR / hperiod + 1;
- /* 10. Check if vertical blanking is sufficient */
- if (vbilines < (CVT_RB_VFPORCH + vsync + CVT_MIN_V_BPORCH))
- vbilines = CVT_RB_VFPORCH + vsync + CVT_MIN_V_BPORCH;
- /* 11. Find total number of lines in vertical field */
- drm_mode->vtotal = vdisplay_rnd + 2 * vmargin + vbilines;
- /* 12. Find total number of pixels in a line */
- drm_mode->htotal = drm_mode->hdisplay + CVT_RB_H_BLANK;
- /* Fill in HSync values */
- drm_mode->hsync_end = drm_mode->hdisplay + CVT_RB_H_BLANK / 2;
- drm_mode->hsync_start = drm_mode->hsync_end - CVT_RB_H_SYNC;
- }
- /* 15/13. Find pixel clock frequency (kHz for xf86) */
- drm_mode->clock = drm_mode->htotal * HV_FACTOR * 1000 / hperiod;
- drm_mode->clock -= drm_mode->clock % CVT_CLOCK_STEP;
- /* 18/16. Find actual vertical frame frequency */
- /* ignore - just set the mode flag for interlaced */
- if (interlaced)
- drm_mode->vtotal *= 2;
- /* Fill the mode line name */
- drm_mode_set_name(drm_mode);
- if (reduced)
- drm_mode->flags |= (DRM_MODE_FLAG_PHSYNC |
- DRM_MODE_FLAG_NVSYNC);
- else
- drm_mode->flags |= (DRM_MODE_FLAG_PVSYNC |
- DRM_MODE_FLAG_NHSYNC);
- if (interlaced)
- drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
-
- return drm_mode;
-}
-EXPORT_SYMBOL(drm_cvt_mode);
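-
-/*
- * Example (illustrative sketch only): a connector's mode-probing code can
- * synthesize a 1920x1080@60 mode with full CVT blanking and add it to the
- * probed-mode list:
- *
- *    mode = drm_cvt_mode(dev, 1920, 1080, 60, false, false, false);
- *    if (mode)
- *        drm_mode_probed_add(connector, mode);
- *
- * drm_gtf_mode() below is used the same way when GTF timings are wanted.
- */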
-
-/**
- * drm_gtf_mode - create the modeline based on GTF algorithm
- *
- * @dev: drm device
- * @hdisplay: hdisplay size
- * @vdisplay: vdisplay size
- * @vrefresh: vrefresh rate
- * @interlaced: whether the interlace is supported
- * @margins: whether the margin is supported
- *
- * LOCKING:
- * none.
- *
- * return the modeline based on GTF algorithm
- *
- * This function is to create the modeline based on the GTF algorithm.
- * Generalized Timing Formula is derived from:
- * GTF Spreadsheet by Andy Morrish (1/5/97)
- * available at http://www.vesa.org
- *
- * It is adapted from xserver/hw/xfree86/modes/xf86gtf.c, translated to use
- * integer arithmetic. It also refers to the fb_get_mode function in
- * drivers/video/fbmon.c.
- */
-struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, int hdisplay,
- int vdisplay, int vrefresh,
- bool interlaced, int margins)
-{
- /* 1) top/bottom margin size (% of height) - default: 1.8, */
-#define GTF_MARGIN_PERCENTAGE 18
- /* 2) character cell horizontal granularity (pixels) - default 8 */
-#define GTF_CELL_GRAN 8
- /* 3) Minimum vertical porch (lines) - default 3 */
-#define GTF_MIN_V_PORCH 1
- /* width of vsync in lines */
-#define V_SYNC_RQD 3
- /* width of hsync as % of total line */
-#define H_SYNC_PERCENT 8
- /* min time of vsync + back porch (microsec) */
-#define MIN_VSYNC_PLUS_BP 550
- /* blanking formula gradient */
-#define GTF_M 600
- /* blanking formula offset */
-#define GTF_C 40
- /* blanking formula scaling factor */
-#define GTF_K 128
- /* blanking formula scaling factor weighting */
-#define GTF_J 20
- /* C' and M' are part of the Blanking Duty Cycle computation */
-#define GTF_C_PRIME (((GTF_C - GTF_J) * GTF_K / 256) + GTF_J)
-#define GTF_M_PRIME (GTF_K * GTF_M / 256)
- struct drm_display_mode *drm_mode;
- unsigned int hdisplay_rnd, vdisplay_rnd, vfieldrate_rqd;
- int top_margin, bottom_margin;
- int interlace;
- unsigned int hfreq_est;
- int vsync_plus_bp, vback_porch;
- unsigned int vtotal_lines, vfieldrate_est, hperiod;
- unsigned int vfield_rate, vframe_rate;
- int left_margin, right_margin;
- unsigned int total_active_pixels, ideal_duty_cycle;
- unsigned int hblank, total_pixels, pixel_freq;
- int hsync, hfront_porch, vodd_front_porch_lines;
- unsigned int tmp1, tmp2;
-
- drm_mode = drm_mode_create(dev);
- if (!drm_mode)
- return NULL;
-
- /* 1. In order to give correct results, the number of horizontal
- * pixels requested is first processed to ensure that it is divisible
- * by the character size, by rounding it to the nearest character
- * cell boundary:
- */
- hdisplay_rnd = (hdisplay + GTF_CELL_GRAN / 2) / GTF_CELL_GRAN;
- hdisplay_rnd = hdisplay_rnd * GTF_CELL_GRAN;
-
- /* 2. If interlace is requested, the number of vertical lines assumed
- * by the calculation must be halved, as the computation calculates
- * the number of vertical lines per field.
- */
- if (interlaced)
- vdisplay_rnd = vdisplay / 2;
- else
- vdisplay_rnd = vdisplay;
-
- /* 3. Find the frame rate required: */
- if (interlaced)
- vfieldrate_rqd = vrefresh * 2;
- else
- vfieldrate_rqd = vrefresh;
-
- /* 4. Find number of lines in Top margin: */
- top_margin = 0;
- if (margins)
- top_margin = (vdisplay_rnd * GTF_MARGIN_PERCENTAGE + 500) /
- 1000;
- /* 5. Find number of lines in bottom margin: */
- bottom_margin = top_margin;
-
- /* 6. If interlace is required, then set variable interlace: */
- if (interlaced)
- interlace = 1;
- else
- interlace = 0;
-
- /* 7. Estimate the Horizontal frequency */
- {
- tmp1 = (1000000 - MIN_VSYNC_PLUS_BP * vfieldrate_rqd) / 500;
- tmp2 = (vdisplay_rnd + 2 * top_margin + GTF_MIN_V_PORCH) *
- 2 + interlace;
- hfreq_est = (tmp2 * 1000 * vfieldrate_rqd) / tmp1;
- }
-
- /* 8. Find the number of lines in V sync + back porch */
- /* [V SYNC+BP] = RINT(([MIN VSYNC+BP] * hfreq_est / 1000000)) */
- vsync_plus_bp = MIN_VSYNC_PLUS_BP * hfreq_est / 1000;
- vsync_plus_bp = (vsync_plus_bp + 500) / 1000;
- /* 9. Find the number of lines in V back porch alone: */
- vback_porch = vsync_plus_bp - V_SYNC_RQD;
- /* 10. Find the total number of lines in Vertical field period: */
- vtotal_lines = vdisplay_rnd + top_margin + bottom_margin +
- vsync_plus_bp + GTF_MIN_V_PORCH;
- /* 11. Estimate the Vertical field frequency: */
- vfieldrate_est = hfreq_est / vtotal_lines;
- /* 12. Find the actual horizontal period: */
- hperiod = 1000000 / (vfieldrate_rqd * vtotal_lines);
-
- /* 13. Find the actual Vertical field frequency: */
- vfield_rate = hfreq_est / vtotal_lines;
- /* 14. Find the Vertical frame frequency: */
- if (interlaced)
- vframe_rate = vfield_rate / 2;
- else
- vframe_rate = vfield_rate;
- /* 15. Find number of pixels in left margin: */
- if (margins)
- left_margin = (hdisplay_rnd * GTF_MARGIN_PERCENTAGE + 500) /
- 1000;
- else
- left_margin = 0;
-
- /* 16. Find number of pixels in right margin: */
- right_margin = left_margin;
- /* 17. Find total number of active pixels in image and left and right */
- total_active_pixels = hdisplay_rnd + left_margin + right_margin;
- /* 18. Find the ideal blanking duty cycle from blanking duty cycle */
- ideal_duty_cycle = GTF_C_PRIME * 1000 -
- (GTF_M_PRIME * 1000000 / hfreq_est);
- /* 19. Find the number of pixels in the blanking time to the nearest
- * double character cell: */
- hblank = total_active_pixels * ideal_duty_cycle /
- (100000 - ideal_duty_cycle);
- hblank = (hblank + GTF_CELL_GRAN) / (2 * GTF_CELL_GRAN);
- hblank = hblank * 2 * GTF_CELL_GRAN;
- /* 20. Find total number of pixels: */
- total_pixels = total_active_pixels + hblank;
- /* 21. Find pixel clock frequency: */
- pixel_freq = total_pixels * hfreq_est / 1000;
- /* Stage 1 computations are now complete; I should really pass
- * the results to another function and do the Stage 2 computations,
- * but I only need a few more values so I'll just append the
- * computations here for now */
- /* 17. Find the number of pixels in the horizontal sync period: */
- hsync = H_SYNC_PERCENT * total_pixels / 100;
- hsync = (hsync + GTF_CELL_GRAN / 2) / GTF_CELL_GRAN;
- hsync = hsync * GTF_CELL_GRAN;
- /* 18. Find the number of pixels in horizontal front porch period */
- hfront_porch = hblank / 2 - hsync;
- /* 36. Find the number of lines in the odd front porch period: */
- vodd_front_porch_lines = GTF_MIN_V_PORCH;
-
- /* finally, pack the results in the mode struct */
- drm_mode->hdisplay = hdisplay_rnd;
- drm_mode->hsync_start = hdisplay_rnd + hfront_porch;
- drm_mode->hsync_end = drm_mode->hsync_start + hsync;
- drm_mode->htotal = total_pixels;
- drm_mode->vdisplay = vdisplay_rnd;
- drm_mode->vsync_start = vdisplay_rnd + vodd_front_porch_lines;
- drm_mode->vsync_end = drm_mode->vsync_start + V_SYNC_RQD;
- drm_mode->vtotal = vtotal_lines;
-
- drm_mode->clock = pixel_freq;
-
- drm_mode_set_name(drm_mode);
- drm_mode->flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC;
-
- if (interlaced) {
- drm_mode->vtotal *= 2;
- drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
- }
-
- return drm_mode;
-}
-EXPORT_SYMBOL(drm_gtf_mode);
-/**
- * drm_mode_set_name - set the name on a mode
- * @mode: name will be set in this mode
- *
- * LOCKING:
- * None.
- *
- * Set the name of @mode to a standard format.
- */
-void drm_mode_set_name(struct drm_display_mode *mode)
-{
- snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d", mode->hdisplay,
- mode->vdisplay);
-}
-EXPORT_SYMBOL(drm_mode_set_name);
-
-/**
- * drm_mode_list_concat - move modes from one list to another
- * @head: source list
- * @new: dst list
- *
- * LOCKING:
- * Caller must ensure both lists are locked.
- *
- * Move all the modes from @head to @new.
- */
-void drm_mode_list_concat(struct list_head *head, struct list_head *new)
-{
-
- struct list_head *entry, *tmp;
-
- list_for_each_safe(entry, tmp, head) {
- list_move_tail(entry, new);
- }
-}
-EXPORT_SYMBOL(drm_mode_list_concat);
-
-/**
- * drm_mode_width - get the width of a mode
- * @mode: mode
- *
- * LOCKING:
- * None.
- *
- * Return @mode's width (hdisplay) value.
- *
- * FIXME: is this needed?
- *
- * RETURNS:
- * @mode->hdisplay
- */
-int drm_mode_width(struct drm_display_mode *mode)
-{
- return mode->hdisplay;
-
-}
-EXPORT_SYMBOL(drm_mode_width);
-
-/**
- * drm_mode_height - get the height of a mode
- * @mode: mode
- *
- * LOCKING:
- * None.
- *
- * Return @mode's height (vdisplay) value.
- *
- * FIXME: is this needed?
- *
- * RETURNS:
- * @mode->vdisplay
- */
-int drm_mode_height(struct drm_display_mode *mode)
-{
- return mode->vdisplay;
-}
-EXPORT_SYMBOL(drm_mode_height);
-
-/** drm_mode_hsync - get the hsync of a mode
- * @mode: mode
- *
- * LOCKING:
- * None.
- *
- * Return @mode's hsync rate in kHz, rounded to the nearest int.
- */
-int drm_mode_hsync(struct drm_display_mode *mode)
-{
- unsigned int calc_val;
-
- if (mode->hsync)
- return mode->hsync;
-
- if (mode->htotal <= 0)
- return 0;
-
- calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */
- calc_val += 500; /* round to 1000Hz */
- calc_val /= 1000; /* truncate to kHz */
-
- return calc_val;
-}
-EXPORT_SYMBOL(drm_mode_hsync);
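-
-/*
- * Example: for CEA 1920x1080@60 timings (clock 148500 kHz, htotal 2200) the
- * calculation above gives 148500000 / 2200 = 67500 Hz, i.e. 67.5 kHz, which
- * is returned as 68.
- */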
-
-/**
- * drm_mode_vrefresh - get the vrefresh of a mode
- * @mode: mode
- *
- * LOCKING:
- * None.
- *
- * Return @mode's vrefresh rate in Hz or calculate it if necessary.
- *
- * FIXME: why is this needed? shouldn't vrefresh be set already?
- *
- * RETURNS:
- * Vertical refresh rate, rounded to the nearest integer: an actual rate of
- * 70.288 returns 70 Hz, and 59.6 returns 60 Hz.
- */
-int drm_mode_vrefresh(struct drm_display_mode *mode)
-{
- int refresh = 0;
- unsigned int calc_val;
-
- if (mode->vrefresh > 0)
- refresh = mode->vrefresh;
- else if (mode->htotal > 0 && mode->vtotal > 0) {
- int vtotal;
- vtotal = mode->vtotal;
- /* work out vrefresh the value will be x1000 */
- calc_val = (mode->clock * 1000);
- calc_val /= mode->htotal;
- refresh = (calc_val + vtotal / 2) / vtotal;
-
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- refresh *= 2;
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
- refresh /= 2;
- if (mode->vscan > 1)
- refresh /= mode->vscan;
- }
- return refresh;
-}
-EXPORT_SYMBOL(drm_mode_vrefresh);
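-
-/*
- * Example: with clock 148500 kHz, htotal 2200 and vtotal 1125 (CEA
- * 1920x1080@60), calc_val = 148500000 / 2200 = 67500, so refresh =
- * (67500 + 562) / 1125 = 60 and 60 Hz is returned.
- */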
-
-/**
- * drm_mode_set_crtcinfo - set CRTC modesetting parameters
- * @p: mode
- * @adjust_flags: unused? (FIXME)
- *
- * LOCKING:
- * None.
- *
- * Setup the CRTC modesetting parameters for @p, adjusting if necessary.
- */
-void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
-{
- if ((p == NULL) || ((p->type & DRM_MODE_TYPE_CRTC_C) == DRM_MODE_TYPE_BUILTIN))
- return;
-
- p->crtc_hdisplay = p->hdisplay;
- p->crtc_hsync_start = p->hsync_start;
- p->crtc_hsync_end = p->hsync_end;
- p->crtc_htotal = p->htotal;
- p->crtc_hskew = p->hskew;
- p->crtc_vdisplay = p->vdisplay;
- p->crtc_vsync_start = p->vsync_start;
- p->crtc_vsync_end = p->vsync_end;
- p->crtc_vtotal = p->vtotal;
-
- if (p->flags & DRM_MODE_FLAG_INTERLACE) {
- if (adjust_flags & CRTC_INTERLACE_HALVE_V) {
- p->crtc_vdisplay /= 2;
- p->crtc_vsync_start /= 2;
- p->crtc_vsync_end /= 2;
- p->crtc_vtotal /= 2;
- }
-
- p->crtc_vtotal |= 1;
- }
-
- if (p->flags & DRM_MODE_FLAG_DBLSCAN) {
- p->crtc_vdisplay *= 2;
- p->crtc_vsync_start *= 2;
- p->crtc_vsync_end *= 2;
- p->crtc_vtotal *= 2;
- }
-
- if (p->vscan > 1) {
- p->crtc_vdisplay *= p->vscan;
- p->crtc_vsync_start *= p->vscan;
- p->crtc_vsync_end *= p->vscan;
- p->crtc_vtotal *= p->vscan;
- }
-
- p->crtc_vblank_start = min(p->crtc_vsync_start, p->crtc_vdisplay);
- p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal);
- p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay);
- p->crtc_hblank_end = max(p->crtc_hsync_end, p->crtc_htotal);
-
- p->crtc_hadjusted = false;
- p->crtc_vadjusted = false;
-}
-EXPORT_SYMBOL(drm_mode_set_crtcinfo);
-
-
-/**
- * drm_mode_duplicate - allocate and duplicate an existing mode
- * @m: mode to duplicate
- *
- * LOCKING:
- * None.
- *
- * Just allocate a new mode, copy the existing mode into it, and return
- * a pointer to it. Used to create new instances of established modes.
- */
-struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
- struct drm_display_mode *mode)
-{
- struct drm_display_mode *nmode;
- int new_id;
-
- nmode = drm_mode_create(dev);
- if (!nmode)
- return NULL;
-
- new_id = nmode->base.id;
- *nmode = *mode;
- nmode->base.id = new_id;
- INIT_LIST_HEAD(&nmode->head);
- return nmode;
-}
-EXPORT_SYMBOL(drm_mode_duplicate);
-
-/**
- * drm_mode_equal - test modes for equality
- * @mode1: first mode
- * @mode2: second mode
- *
- * LOCKING:
- * None.
- *
- * Check to see if @mode1 and @mode2 are equivalent.
- *
- * RETURNS:
- * True if the modes are equal, false otherwise.
- */
-bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2)
-{
- /* do a clock check; convert to PICOS so fb modes get matched
- * the same */
- if (mode1->clock && mode2->clock) {
- if (KHZ2PICOS(mode1->clock) != KHZ2PICOS(mode2->clock))
- return false;
- } else if (mode1->clock != mode2->clock)
- return false;
-
- if (mode1->hdisplay == mode2->hdisplay &&
- mode1->hsync_start == mode2->hsync_start &&
- mode1->hsync_end == mode2->hsync_end &&
- mode1->htotal == mode2->htotal &&
- mode1->hskew == mode2->hskew &&
- mode1->vdisplay == mode2->vdisplay &&
- mode1->vsync_start == mode2->vsync_start &&
- mode1->vsync_end == mode2->vsync_end &&
- mode1->vtotal == mode2->vtotal &&
- mode1->vscan == mode2->vscan &&
- mode1->flags == mode2->flags)
- return true;
-
- return false;
-}
-EXPORT_SYMBOL(drm_mode_equal);
-
-/**
- * drm_mode_validate_size - make sure modes adhere to size constraints
- * @dev: DRM device
- * @mode_list: list of modes to check
- * @maxX: maximum width
- * @maxY: maximum height
- * @maxPitch: max pitch
- *
- * LOCKING:
- * Caller must hold a lock protecting @mode_list.
- *
- * The DRM device (@dev) has size and pitch limits. Here we validate the
- * modes we probed for @dev against those limits and set their status as
- * necessary.
- */
-void drm_mode_validate_size(struct drm_device *dev,
- struct list_head *mode_list,
- int maxX, int maxY, int maxPitch)
-{
- struct drm_display_mode *mode;
-
- list_for_each_entry(mode, mode_list, head) {
- if (maxPitch > 0 && mode->hdisplay > maxPitch)
- mode->status = MODE_BAD_WIDTH;
-
- if (maxX > 0 && mode->hdisplay > maxX)
- mode->status = MODE_VIRTUAL_X;
-
- if (maxY > 0 && mode->vdisplay > maxY)
- mode->status = MODE_VIRTUAL_Y;
- }
-}
-EXPORT_SYMBOL(drm_mode_validate_size);
-
-/**
- * drm_mode_validate_clocks - validate modes against clock limits
- * @dev: DRM device
- * @mode_list: list of modes to check
- * @min: minimum clock rate array
- * @max: maximum clock rate array
- * @n_ranges: number of clock ranges (size of arrays)
- *
- * LOCKING:
- * Caller must hold a lock protecting @mode_list.
- *
- * Some code may need to check a mode list against the clock limits of the
- * device in question. This function walks the mode list, testing to make
- * sure each mode falls within a given range (defined by @min and @max
- * arrays) and sets @mode->status as needed.
- */
-void drm_mode_validate_clocks(struct drm_device *dev,
- struct list_head *mode_list,
- int *min, int *max, int n_ranges)
-{
- struct drm_display_mode *mode;
- int i;
-
- list_for_each_entry(mode, mode_list, head) {
- bool good = false;
- for (i = 0; i < n_ranges; i++) {
- if (mode->clock >= min[i] && mode->clock <= max[i]) {
- good = true;
- break;
- }
- }
- if (!good)
- mode->status = MODE_CLOCK_RANGE;
- }
-}
-EXPORT_SYMBOL(drm_mode_validate_clocks);
-
-/**
- * drm_mode_prune_invalid - remove invalid modes from mode list
- * @dev: DRM device
- * @mode_list: list of modes to check
- * @verbose: be verbose about it
- *
- * LOCKING:
- * Caller must hold a lock protecting @mode_list.
- *
- * Once mode list generation is complete, a caller can use this routine to
- * remove invalid modes from a mode list. If any of the modes have a
- * status other than %MODE_OK, they are removed from @mode_list and freed.
- */
-void drm_mode_prune_invalid(struct drm_device *dev,
- struct list_head *mode_list, bool verbose)
-{
- struct drm_display_mode *mode, *t;
-
- list_for_each_entry_safe(mode, t, mode_list, head) {
- if (mode->status != MODE_OK) {
- list_del(&mode->head);
- if (verbose) {
- drm_mode_debug_printmodeline(mode);
- DRM_DEBUG_KMS("Not using %s mode %d\n",
- mode->name, mode->status);
- }
- drm_mode_destroy(dev, mode);
- }
- }
-}
-EXPORT_SYMBOL(drm_mode_prune_invalid);
-
-/**
- * drm_mode_compare - compare modes for favorability
- * @lh_a: list_head for first mode
- * @lh_b: list_head for second mode
- *
- * LOCKING:
- * None.
- *
- * Compare two modes, given by @lh_a and @lh_b, returning a value indicating
- * which is better.
- *
- * RETURNS:
- * Negative if @lh_a is better than @lh_b, zero if they're equivalent, or
- * positive if @lh_b is better than @lh_a.
- */
-static int drm_mode_compare(struct list_head *lh_a, struct list_head *lh_b)
-{
- struct drm_display_mode *a = list_entry(lh_a, struct drm_display_mode, head);
- struct drm_display_mode *b = list_entry(lh_b, struct drm_display_mode, head);
- int diff;
-
- diff = ((b->type & DRM_MODE_TYPE_PREFERRED) != 0) -
- ((a->type & DRM_MODE_TYPE_PREFERRED) != 0);
- if (diff)
- return diff;
- diff = b->hdisplay * b->vdisplay - a->hdisplay * a->vdisplay;
- if (diff)
- return diff;
- diff = b->clock - a->clock;
- return diff;
-}
-
-/* FIXME: why don't we have a generic list sort function? */
-/* list sort from Mark J Roberts (mjr@znex.org) */
-void list_sort(struct list_head *head,
- int (*cmp)(struct list_head *a, struct list_head *b))
-{
- struct list_head *p, *q, *e, *list, *tail, *oldhead;
- int insize, nmerges, psize, qsize, i;
-
- list = head->next;
- list_del(head);
- insize = 1;
- for (;;) {
- p = oldhead = list;
- list = tail = NULL;
- nmerges = 0;
-
- while (p) {
- nmerges++;
- q = p;
- psize = 0;
- for (i = 0; i < insize; i++) {
- psize++;
- q = q->next == oldhead ? NULL : q->next;
- if (!q)
- break;
- }
-
- qsize = insize;
- while (psize > 0 || (qsize > 0 && q)) {
- if (!psize) {
- e = q;
- q = q->next;
- qsize--;
- if (q == oldhead)
- q = NULL;
- } else if (!qsize || !q) {
- e = p;
- p = p->next;
- psize--;
- if (p == oldhead)
- p = NULL;
- } else if (cmp(p, q) <= 0) {
- e = p;
- p = p->next;
- psize--;
- if (p == oldhead)
- p = NULL;
- } else {
- e = q;
- q = q->next;
- qsize--;
- if (q == oldhead)
- q = NULL;
- }
- if (tail)
- tail->next = e;
- else
- list = e;
- e->prev = tail;
- tail = e;
- }
- p = q;
- }
-
- tail->next = list;
- list->prev = tail;
-
- if (nmerges <= 1)
- break;
-
- insize *= 2;
- }
-
- head->next = list;
- head->prev = list->prev;
- list->prev->next = head;
- list->prev = head;
-}
-
-/**
- * drm_mode_sort - sort mode list
- * @mode_list: list to sort
- *
- * LOCKING:
- * Caller must hold a lock protecting @mode_list.
- *
- * Sort @mode_list by favorability, putting good modes first.
- */
-void drm_mode_sort(struct list_head *mode_list)
-{
- list_sort(mode_list, drm_mode_compare);
-}
-EXPORT_SYMBOL(drm_mode_sort);
-
-/**
- * drm_mode_connector_list_update - update the mode list for the connector
- * @connector: the connector to update
- *
- * LOCKING:
- * Caller must hold a lock protecting the mode lists of @connector.
- *
- * This moves the modes from the @connector probed_modes list
- * to the actual mode list. It compares each probed mode against the current
- * list and only adds modes that are not already present. Any modes left
- * unverified after this point will be removed by the subsequent prune of
- * invalid modes.
- */
-void drm_mode_connector_list_update(struct drm_connector *connector)
-{
- struct drm_display_mode *mode;
- struct drm_display_mode *pmode, *pt;
- int found_it;
-
- list_for_each_entry_safe(pmode, pt, &connector->probed_modes,
- head) {
- found_it = 0;
- /* go through current modes checking for the new probed mode */
- list_for_each_entry(mode, &connector->modes, head) {
- if (drm_mode_equal(pmode, mode)) {
- found_it = 1;
- /* if equal delete the probed mode */
- mode->status = pmode->status;
- /* Merge type bits together */
- mode->type |= pmode->type;
- list_del(&pmode->head);
- drm_mode_destroy(connector->dev, pmode);
- break;
- }
- }
-
- if (!found_it) {
- list_move_tail(&pmode->head, &connector->modes);
- }
- }
-}
-EXPORT_SYMBOL(drm_mode_connector_list_update);
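
For reference, the mode-list helpers removed above were normally chained together during connector probing. A minimal sketch, assuming a connector whose probed_modes list has already been filled in; the function name and size limits are illustrative, not taken from this patch:

static int example_probe_connector_modes(struct drm_connector *connector,
					 int max_width, int max_height)
{
	struct drm_device *dev = connector->dev;
	struct drm_display_mode *mode;
	int count = 0;

	/* Merge probed modes into connector->modes, dropping duplicates. */
	drm_mode_connector_list_update(connector);

	/* Flag modes that exceed the size limits (0 = no pitch limit)... */
	drm_mode_validate_size(dev, &connector->modes,
			       max_width, max_height, 0);

	/* ...then drop every mode whose status is no longer MODE_OK. */
	drm_mode_prune_invalid(dev, &connector->modes, true);

	/* Preferred, larger and faster modes sort towards the head. */
	drm_mode_sort(&connector->modes);

	list_for_each_entry(mode, &connector->modes, head)
		count++;

	return count;
}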
+++ /dev/null
-/* drm_pci.h -- PCI DMA memory management wrappers for DRM -*- linux-c -*- */
-/**
- * \file drm_pci.c
- * \brief Functions and ioctls to manage PCI memory
- *
- * \warning These interfaces aren't stable yet.
- *
- * \todo Implement the remaining ioctl's for the PCI pools.
- * \todo The wrappers here are so thin that they would be better off inlined.
- *
- * \author José Fonseca <jrfonseca@tungstengraphics.com>
- * \author Leif Delgass <ldelgass@retinalburn.net>
- */
-
-/*
- * Copyright 2003 José Fonseca.
- * Copyright 2003 Leif Delgass.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include "drmP.h"
-
-/**********************************************************************/
-/** \name PCI memory */
-/*@{*/
-
-/**
- * \brief Allocate a PCI consistent memory block, for DMA.
- */
-drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align,
- dma_addr_t maxaddr)
-{
- drm_dma_handle_t *dmah;
-#if 1
- unsigned long addr;
- size_t sz;
-#endif
-
- /* pci_alloc_consistent only guarantees alignment to the smallest
- * PAGE_SIZE order which is greater than or equal to the requested size.
- * Return NULL here for now to make sure nobody tries for larger alignment
- */
- if (align > size)
- return NULL;
-
- if (pci_set_dma_mask(dev->pdev, maxaddr) != 0) {
- DRM_ERROR("Setting pci dma mask failed\n");
- return NULL;
- }
-
- dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
- if (!dmah)
- return NULL;
-
- dmah->size = size;
- dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, &dmah->busaddr, GFP_KERNEL | __GFP_COMP);
-
- if (dmah->vaddr == NULL) {
- kfree(dmah);
- return NULL;
- }
-
- memset(dmah->vaddr, 0, size);
-
- /* XXX - Is virt_to_page() legal for consistent mem? */
- /* Reserve */
- for (addr = (unsigned long)dmah->vaddr, sz = size;
- sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
- SetPageReserved(virt_to_page(addr));
- }
-
- return dmah;
-}
-
-EXPORT_SYMBOL(drm_pci_alloc);
-
-/**
- * \brief Free a PCI consistent memory block without freeing its descriptor.
- *
- * This function is for internal use in the Linux-specific DRM core code.
- */
-void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
-{
-#if 1
- unsigned long addr;
- size_t sz;
-#endif
-
- if (dmah->vaddr) {
- /* XXX - Is virt_to_page() legal for consistent mem? */
- /* Unreserve */
- for (addr = (unsigned long)dmah->vaddr, sz = dmah->size;
- sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
- ClearPageReserved(virt_to_page(addr));
- }
- dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
- dmah->busaddr);
- }
-}
-
-/**
- * \brief Free a PCI consistent memory block
- */
-void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
-{
- __drm_pci_free(dev, dmah);
- kfree(dmah);
-}
-
-EXPORT_SYMBOL(drm_pci_free);
-
-/*@}*/
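
As a usage note, the two wrappers above pair up around one DMA-coherent allocation. A minimal sketch, assuming a page-sized buffer and a 32-bit DMA mask (both example values, not taken from this patch):

static int example_alloc_scratch(struct drm_device *dev)
{
	drm_dma_handle_t *dmah;

	/* Page-sized, page-aligned, addressable below 4 GiB. */
	dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffffUL);
	if (!dmah)
		return -ENOMEM;

	/* dmah->vaddr is the CPU mapping, dmah->busaddr the bus address;
	 * drm_pci_alloc() has already zeroed the block. */

	drm_pci_free(dev, dmah);	/* frees both the block and the handle */
	return 0;
}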
+++ /dev/null
-/**
- * \file drm_proc.c
- * /proc support for DRM
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- *
- * \par Acknowledgements:
- * Matthew J Sottek <matthew.j.sottek@intel.com> sent in a patch to fix
- * the problem with the proc files not outputting all their information.
- */
-
-/*
- * Created: Mon Jan 11 09:48:47 1999 by faith@valinux.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/seq_file.h>
-#include "drmP.h"
-
-/***************************************************
- * Initialization, etc.
- **************************************************/
-
-/**
- * Proc file list.
- */
-static struct drm_info_list drm_proc_list[] = {
- {"name", drm_name_info, 0},
- {"vm", drm_vm_info, 0},
- {"clients", drm_clients_info, 0},
- {"queues", drm_queues_info, 0},
- {"bufs", drm_bufs_info, 0},
- {"gem_names", drm_gem_name_info, DRIVER_GEM},
- {"gem_objects", drm_gem_object_info, DRIVER_GEM},
-#if DRM_DEBUG_CODE
- {"vma", drm_vma_info, 0},
-#endif
-};
-#define DRM_PROC_ENTRIES ARRAY_SIZE(drm_proc_list)
-
-static int drm_proc_open(struct inode *inode, struct file *file)
-{
- struct drm_info_node* node = PDE(inode)->data;
-
- return single_open(file, node->info_ent->show, node);
-}
-
-static const struct file_operations drm_proc_fops = {
- .owner = THIS_MODULE,
- .open = drm_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-
-/**
- * Initialize a given set of proc files for a device
- *
- * \param files The array of files to create
- * \param count The number of files given
- * \param root DRI proc dir entry.
- * \param minor DRM minor that owns the entries
- * \return Zero on success, non-zero on failure
- *
- * Create a given set of proc files represented by an array of
- * drm_info_list entries in the given root directory.
- */
-int drm_proc_create_files(struct drm_info_list *files, int count,
- struct proc_dir_entry *root, struct drm_minor *minor)
-{
- struct drm_device *dev = minor->dev;
- struct proc_dir_entry *ent;
- struct drm_info_node *tmp;
- int i, ret;
-
- for (i = 0; i < count; i++) {
- u32 features = files[i].driver_features;
-
- if (features != 0 &&
- (dev->driver->driver_features & features) != features)
- continue;
-
- tmp = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
- if (tmp == NULL) {
- ret = -1;
- goto fail;
- }
- tmp->minor = minor;
- tmp->info_ent = &files[i];
- list_add(&tmp->list, &minor->proc_nodes.list);
-
- ent = proc_create_data(files[i].name, S_IRUGO, root,
- &drm_proc_fops, tmp);
- if (!ent) {
- DRM_ERROR("Cannot create /proc/dri/%s/%s\n",
- name, files[i].name);
- list_del(&tmp->list);
- kfree(tmp);
- ret = -1;
- goto fail;
- }
-
- }
- return 0;
-
-fail:
- for (i = 0; i < count; i++)
- remove_proc_entry(files[i].name, minor->proc_root);
- return ret;
-}
-
-/**
- * Initialize the DRI proc filesystem for a device
- *
- * \param minor DRM minor being initialized
- * \param minor_id minor number
- * \param root DRI proc dir entry.
- * \return zero on success, non-zero on failure.
- *
- * Create the DRI proc root entry "/proc/dri", the device proc root entry
- * "/proc/dri/%minor%/", and each entry in proc_list as
- * "/proc/dri/%minor%/%name%".
- */
-int drm_proc_init(struct drm_minor *minor, int minor_id,
- struct proc_dir_entry *root)
-{
- struct drm_device *dev = minor->dev;
- char name[64];
- int ret;
-
- INIT_LIST_HEAD(&minor->proc_nodes.list);
- sprintf(name, "%d", minor_id);
- minor->proc_root = proc_mkdir(name, root);
- if (!minor->proc_root) {
- DRM_ERROR("Cannot create /proc/dri/%s\n", name);
- return -1;
- }
-
- ret = drm_proc_create_files(drm_proc_list, DRM_PROC_ENTRIES,
- minor->proc_root, minor);
- if (ret) {
- remove_proc_entry(name, root);
- minor->proc_root = NULL;
- DRM_ERROR("Failed to create core drm proc files\n");
- return ret;
- }
-
- if (dev->driver->proc_init) {
- ret = dev->driver->proc_init(minor);
- if (ret) {
- DRM_ERROR("DRM: Driver failed to initialize "
- "/proc/dri.\n");
- return ret;
- }
- }
- return 0;
-}
-
-int drm_proc_remove_files(struct drm_info_list *files, int count,
- struct drm_minor *minor)
-{
- struct list_head *pos, *q;
- struct drm_info_node *tmp;
- int i;
-
- for (i = 0; i < count; i++) {
- list_for_each_safe(pos, q, &minor->proc_nodes.list) {
- tmp = list_entry(pos, struct drm_info_node, list);
- if (tmp->info_ent == &files[i]) {
- remove_proc_entry(files[i].name,
- minor->proc_root);
- list_del(pos);
- kfree(tmp);
- }
- }
- }
- return 0;
-}
-
-/**
- * Cleanup the proc filesystem resources.
- *
- * \param minor DRM minor whose proc entries are removed.
- * \param root DRI proc dir entry.
- * \return always zero.
- *
- * Remove all proc entries created by proc_init().
- */
-int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root)
-{
- struct drm_device *dev = minor->dev;
- char name[64];
-
- if (!root || !minor->proc_root)
- return 0;
-
- if (dev->driver->proc_cleanup)
- dev->driver->proc_cleanup(minor);
-
- drm_proc_remove_files(drm_proc_list, DRM_PROC_ENTRIES, minor);
-
- sprintf(name, "%d", minor->index);
- remove_proc_entry(name, root);
-
- return 0;
-}
-
+++ /dev/null
-/**
- * \file drm_scatter.c
- * IOCTLs to manage scatter/gather memory
- *
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Mon Dec 18 23:20:54 2000 by gareth@valinux.com
- *
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/vmalloc.h>
-#include "drmP.h"
-
-#define DEBUG_SCATTER 0
-
-static inline void *drm_vmalloc_dma(unsigned long size)
-{
-#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
- return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL | _PAGE_NO_CACHE);
-#else
- return vmalloc_32(size);
-#endif
-}
-
-void drm_sg_cleanup(struct drm_sg_mem * entry)
-{
- struct page *page;
- int i;
-
- for (i = 0; i < entry->pages; i++) {
- page = entry->pagelist[i];
- if (page)
- ClearPageReserved(page);
- }
-
- vfree(entry->virtual);
-
- kfree(entry->busaddr);
- kfree(entry->pagelist);
- kfree(entry);
-}
-
-#ifdef _LP64
-# define ScatterHandle(x) (unsigned int)((x >> 32) + (x & ((1L << 32) - 1)))
-#else
-# define ScatterHandle(x) (unsigned int)(x)
-#endif
-
-int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
-{
- struct drm_sg_mem *entry;
- unsigned long pages, i, j;
-
- DRM_DEBUG("\n");
-
- if (!drm_core_check_feature(dev, DRIVER_SG))
- return -EINVAL;
-
- if (dev->sg)
- return -EINVAL;
-
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- return -ENOMEM;
-
- memset(entry, 0, sizeof(*entry));
- pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
- DRM_DEBUG("size=%ld pages=%ld\n", request->size, pages);
-
- entry->pages = pages;
- entry->pagelist = kmalloc(pages * sizeof(*entry->pagelist), GFP_KERNEL);
- if (!entry->pagelist) {
- kfree(entry);
- return -ENOMEM;
- }
-
- memset(entry->pagelist, 0, pages * sizeof(*entry->pagelist));
-
- entry->busaddr = kmalloc(pages * sizeof(*entry->busaddr), GFP_KERNEL);
- if (!entry->busaddr) {
- kfree(entry->pagelist);
- kfree(entry);
- return -ENOMEM;
- }
- memset((void *)entry->busaddr, 0, pages * sizeof(*entry->busaddr));
-
- entry->virtual = drm_vmalloc_dma(pages << PAGE_SHIFT);
- if (!entry->virtual) {
- kfree(entry->busaddr);
- kfree(entry->pagelist);
- kfree(entry);
- return -ENOMEM;
- }
-
- /* This also forces the mapping of COW pages, so our page list
- * will be valid. Please don't remove it...
- */
- memset(entry->virtual, 0, pages << PAGE_SHIFT);
-
- entry->handle = ScatterHandle((unsigned long)entry->virtual);
-
- DRM_DEBUG("handle = %08lx\n", entry->handle);
- DRM_DEBUG("virtual = %p\n", entry->virtual);
-
- for (i = (unsigned long)entry->virtual, j = 0; j < pages;
- i += PAGE_SIZE, j++) {
- entry->pagelist[j] = vmalloc_to_page((void *)i);
- if (!entry->pagelist[j])
- goto failed;
- SetPageReserved(entry->pagelist[j]);
- }
-
- request->handle = entry->handle;
-
- dev->sg = entry;
-
-#if DEBUG_SCATTER
- /* Verify that each page points to its virtual address, and vice
- * versa.
- */
- {
- int error = 0;
-
- for (i = 0; i < pages; i++) {
- unsigned long *tmp;
-
- tmp = page_address(entry->pagelist[i]);
- for (j = 0;
- j < PAGE_SIZE / sizeof(unsigned long);
- j++, tmp++) {
- *tmp = 0xcafebabe;
- }
- tmp = (unsigned long *)((u8 *) entry->virtual +
- (PAGE_SIZE * i));
- for (j = 0;
- j < PAGE_SIZE / sizeof(unsigned long);
- j++, tmp++) {
- if (*tmp != 0xcafebabe && error == 0) {
- error = 1;
- DRM_ERROR("Scatter allocation error, "
- "pagelist does not match "
- "virtual mapping\n");
- }
- }
- tmp = page_address(entry->pagelist[i]);
- for (j = 0;
- j < PAGE_SIZE / sizeof(unsigned long);
- j++, tmp++) {
- *tmp = 0;
- }
- }
- if (error == 0)
- DRM_ERROR("Scatter allocation matches pagelist\n");
- }
-#endif
-
- return 0;
-
- failed:
- drm_sg_cleanup(entry);
- return -ENOMEM;
-}
-EXPORT_SYMBOL(drm_sg_alloc);
-
-
-int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_scatter_gather *request = data;
-
- return drm_sg_alloc(dev, request);
-
-}
-
-int drm_sg_free(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_scatter_gather *request = data;
- struct drm_sg_mem *entry;
-
- if (!drm_core_check_feature(dev, DRIVER_SG))
- return -EINVAL;
-
- entry = dev->sg;
- dev->sg = NULL;
-
- if (!entry || entry->handle != request->handle)
- return -EINVAL;
-
- DRM_DEBUG("virtual = %p\n", entry->virtual);
-
- drm_sg_cleanup(entry);
-
- return 0;
-}
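
After a successful drm_sg_alloc(), the driver finds the scatter/gather entry via dev->sg. A minimal sketch of walking it, assuming the field layout shown above; the function name is illustrative, not taken from this patch:

static void example_walk_sg(struct drm_device *dev)
{
	struct drm_sg_mem *entry = dev->sg;
	int i;

	if (!entry)
		return;

	for (i = 0; i < entry->pages; i++) {
		struct page *page = entry->pagelist[i];

		/* entry->busaddr[i] is left zeroed by drm_sg_alloc() and is
		 * filled in later when the driver maps the page for DMA. */
		if (page)
			DRM_DEBUG("page %d mapped at %p\n",
				  i, page_address(page));
	}
}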
+++ /dev/null
-/**************************************************************************
- *
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck., ND., USA.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- *
- **************************************************************************/
-/*
- * Simple memory manager interface that keeps track of allocated regions on
- * a per-"owner" basis. All regions associated with an "owner" can be
- * released with a single call, typically when the "owner" goes away. The
- * owner is any "unsigned long" identifier, typically a pointer to a file
- * private struct or a context identifier.
- *
- * Authors:
- * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- */
-
-#include "drm_sman.h"
-
-struct drm_owner_item {
- struct drm_hash_item owner_hash;
- struct list_head sman_list;
- struct list_head mem_blocks;
-};
-
-void drm_sman_takedown(struct drm_sman * sman)
-{
- drm_ht_remove(&sman->user_hash_tab);
- drm_ht_remove(&sman->owner_hash_tab);
- kfree(sman->mm);
-}
-
-EXPORT_SYMBOL(drm_sman_takedown);
-
-int
-drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
- unsigned int user_order, unsigned int owner_order)
-{
- int ret = 0;
-
- sman->mm = (struct drm_sman_mm *) kcalloc(num_managers,
- sizeof(*sman->mm),
- GFP_KERNEL);
- if (!sman->mm) {
- ret = -ENOMEM;
- goto out;
- }
- sman->num_managers = num_managers;
- INIT_LIST_HEAD(&sman->owner_items);
- ret = drm_ht_create(&sman->owner_hash_tab, owner_order);
- if (ret)
- goto out1;
- ret = drm_ht_create(&sman->user_hash_tab, user_order);
- if (!ret)
- goto out;
-
- drm_ht_remove(&sman->owner_hash_tab);
-out1:
- kfree(sman->mm);
-out:
- return ret;
-}
-
-EXPORT_SYMBOL(drm_sman_init);
-
-static void *drm_sman_mm_allocate(void *private, unsigned long size,
- unsigned alignment)
-{
- struct drm_mm *mm = (struct drm_mm *) private;
- struct drm_mm_node *tmp;
-
- tmp = drm_mm_search_free(mm, size, alignment, 1);
- if (!tmp) {
- return NULL;
- }
- tmp = drm_mm_get_block(tmp, size, alignment);
- return tmp;
-}
-
-static void drm_sman_mm_free(void *private, void *ref)
-{
- struct drm_mm_node *node = (struct drm_mm_node *) ref;
-
- drm_mm_put_block(node);
-}
-
-static void drm_sman_mm_destroy(void *private)
-{
- struct drm_mm *mm = (struct drm_mm *) private;
- drm_mm_takedown(mm);
- kfree(mm);
-}
-
-static unsigned long drm_sman_mm_offset(void *private, void *ref)
-{
- struct drm_mm_node *node = (struct drm_mm_node *) ref;
- return node->start;
-}
-
-int
-drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
- unsigned long start, unsigned long size)
-{
- struct drm_sman_mm *sman_mm;
- struct drm_mm *mm;
- int ret;
-
- BUG_ON(manager >= sman->num_managers);
-
- sman_mm = &sman->mm[manager];
- mm = kzalloc(sizeof(*mm), GFP_KERNEL);
- if (!mm) {
- return -ENOMEM;
- }
- sman_mm->private = mm;
- ret = drm_mm_init(mm, start, size);
-
- if (ret) {
- kfree(mm);
- return ret;
- }
-
- sman_mm->allocate = drm_sman_mm_allocate;
- sman_mm->free = drm_sman_mm_free;
- sman_mm->destroy = drm_sman_mm_destroy;
- sman_mm->offset = drm_sman_mm_offset;
-
- return 0;
-}
-
-EXPORT_SYMBOL(drm_sman_set_range);
-
-int
-drm_sman_set_manager(struct drm_sman * sman, unsigned int manager,
- struct drm_sman_mm * allocator)
-{
- BUG_ON(manager >= sman->num_managers);
- sman->mm[manager] = *allocator;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_sman_set_manager);
-
-static struct drm_owner_item *drm_sman_get_owner_item(struct drm_sman * sman,
- unsigned long owner)
-{
- int ret;
- struct drm_hash_item *owner_hash_item;
- struct drm_owner_item *owner_item;
-
- ret = drm_ht_find_item(&sman->owner_hash_tab, owner, &owner_hash_item);
- if (!ret) {
- return drm_hash_entry(owner_hash_item, struct drm_owner_item,
- owner_hash);
- }
-
- owner_item = kzalloc(sizeof(*owner_item), GFP_KERNEL);
- if (!owner_item)
- goto out;
-
- INIT_LIST_HEAD(&owner_item->mem_blocks);
- owner_item->owner_hash.key = owner;
- if (drm_ht_insert_item(&sman->owner_hash_tab, &owner_item->owner_hash))
- goto out1;
-
- list_add_tail(&owner_item->sman_list, &sman->owner_items);
- return owner_item;
-
-out1:
- kfree(owner_item);
-out:
- return NULL;
-}
-
-struct drm_memblock_item *drm_sman_alloc(struct drm_sman *sman, unsigned int manager,
- unsigned long size, unsigned alignment,
- unsigned long owner)
-{
- void *tmp;
- struct drm_sman_mm *sman_mm;
- struct drm_owner_item *owner_item;
- struct drm_memblock_item *memblock;
-
- BUG_ON(manager >= sman->num_managers);
-
- sman_mm = &sman->mm[manager];
- tmp = sman_mm->allocate(sman_mm->private, size, alignment);
-
- if (!tmp) {
- return NULL;
- }
-
- memblock = kzalloc(sizeof(*memblock), GFP_KERNEL);
-
- if (!memblock)
- goto out;
-
- memblock->mm_info = tmp;
- memblock->mm = sman_mm;
- memblock->sman = sman;
-
- if (drm_ht_just_insert_please
- (&sman->user_hash_tab, &memblock->user_hash,
- (unsigned long)memblock, 32, 0, 0))
- goto out1;
-
- owner_item = drm_sman_get_owner_item(sman, owner);
- if (!owner_item)
- goto out2;
-
- list_add_tail(&memblock->owner_list, &owner_item->mem_blocks);
-
- return memblock;
-
-out2:
- drm_ht_remove_item(&sman->user_hash_tab, &memblock->user_hash);
-out1:
- kfree(memblock);
-out:
- sman_mm->free(sman_mm->private, tmp);
-
- return NULL;
-}
-
-EXPORT_SYMBOL(drm_sman_alloc);
-
-static void drm_sman_free(struct drm_memblock_item *item)
-{
- struct drm_sman *sman = item->sman;
-
- list_del(&item->owner_list);
- drm_ht_remove_item(&sman->user_hash_tab, &item->user_hash);
- item->mm->free(item->mm->private, item->mm_info);
- kfree(item);
-}
-
-int drm_sman_free_key(struct drm_sman *sman, unsigned int key)
-{
- struct drm_hash_item *hash_item;
- struct drm_memblock_item *memblock_item;
-
- if (drm_ht_find_item(&sman->user_hash_tab, key, &hash_item))
- return -EINVAL;
-
- memblock_item = drm_hash_entry(hash_item, struct drm_memblock_item,
- user_hash);
- drm_sman_free(memblock_item);
- return 0;
-}
-
-EXPORT_SYMBOL(drm_sman_free_key);
-
-static void drm_sman_remove_owner(struct drm_sman *sman,
- struct drm_owner_item *owner_item)
-{
- list_del(&owner_item->sman_list);
- drm_ht_remove_item(&sman->owner_hash_tab, &owner_item->owner_hash);
- kfree(owner_item);
-}
-
-int drm_sman_owner_clean(struct drm_sman *sman, unsigned long owner)
-{
-
- struct drm_hash_item *hash_item;
- struct drm_owner_item *owner_item;
-
- if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) {
- return -1;
- }
-
- owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash);
- if (owner_item->mem_blocks.next == &owner_item->mem_blocks) {
- drm_sman_remove_owner(sman, owner_item);
- return -1;
- }
-
- return 0;
-}
-
-EXPORT_SYMBOL(drm_sman_owner_clean);
-
-static void drm_sman_do_owner_cleanup(struct drm_sman *sman,
- struct drm_owner_item *owner_item)
-{
- struct drm_memblock_item *entry, *next;
-
- list_for_each_entry_safe(entry, next, &owner_item->mem_blocks,
- owner_list) {
- drm_sman_free(entry);
- }
- drm_sman_remove_owner(sman, owner_item);
-}
-
-void drm_sman_owner_cleanup(struct drm_sman *sman, unsigned long owner)
-{
-
- struct drm_hash_item *hash_item;
- struct drm_owner_item *owner_item;
-
- if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) {
-
- return;
- }
-
- owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash);
- drm_sman_do_owner_cleanup(sman, owner_item);
-}
-
-EXPORT_SYMBOL(drm_sman_owner_cleanup);
-
-void drm_sman_cleanup(struct drm_sman *sman)
-{
- struct drm_owner_item *entry, *next;
- unsigned int i;
- struct drm_sman_mm *sman_mm;
-
- list_for_each_entry_safe(entry, next, &sman->owner_items, sman_list) {
- drm_sman_do_owner_cleanup(sman, entry);
- }
- if (sman->mm) {
- for (i = 0; i < sman->num_managers; ++i) {
- sman_mm = &sman->mm[i];
- if (sman_mm->private) {
- sman_mm->destroy(sman_mm->private);
- sman_mm->private = NULL;
- }
- }
- }
-}
-
-EXPORT_SYMBOL(drm_sman_cleanup);
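
The simple memory manager above is driven through a short lifecycle: init, give each manager a range, allocate blocks on behalf of owners, then clean up. A minimal sketch under illustrative parameters (one manager, a 1 MiB range and a fake owner value, none of which come from this patch):

static int example_sman_usage(void)
{
	struct drm_sman sman;
	struct drm_memblock_item *item;
	int ret;

	ret = drm_sman_init(&sman, 1, 12, 8);	/* 1 manager, hash orders */
	if (ret)
		return ret;

	ret = drm_sman_set_range(&sman, 0, 0, 1024 * 1024);
	if (ret)
		goto out;

	/* 4 KiB block, 4 KiB aligned, charged to owner 0x1. */
	item = drm_sman_alloc(&sman, 0, 4096, 4096, 0x1);
	if (item)
		drm_sman_free_key(&sman, item->user_hash.key);

out:
	drm_sman_cleanup(&sman);	/* frees any remaining owners/blocks */
	drm_sman_takedown(&sman);	/* drops the hash tables and managers */
	return ret;
}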
+++ /dev/null
-/**
- * \file drm_stub.h
- * Stub support
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- */
-
-/*
- * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
- *
- * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include "drmP.h"
-#include "drm_core.h"
-
-unsigned int drm_debug = 0; /* 1 to enable debug output */
-EXPORT_SYMBOL(drm_debug);
-
-MODULE_AUTHOR(CORE_AUTHOR);
-MODULE_DESCRIPTION(CORE_DESC);
-MODULE_LICENSE("GPL and additional rights");
-MODULE_PARM_DESC(debug, "Enable debug output");
-
-module_param_named(debug, drm_debug, int, 0600);
-
-struct idr drm_minors_idr;
-
-struct class *drm_class;
-struct proc_dir_entry *drm_proc_root;
-struct dentry *drm_debugfs_root;
-void drm_ut_debug_printk(unsigned int request_level,
- const char *prefix,
- const char *function_name,
- const char *format, ...)
-{
- va_list args;
-
- if (drm_debug & request_level) {
- if (function_name)
- printk(KERN_DEBUG "[%s:%s], ", prefix, function_name);
- va_start(args, format);
- vprintk(format, args);
- va_end(args);
- }
-}
-EXPORT_SYMBOL(drm_ut_debug_printk);
-static int drm_minor_get_id(struct drm_device *dev, int type)
-{
- int new_id;
- int ret;
- int base = 0, limit = 63;
-
- if (type == DRM_MINOR_CONTROL) {
- base += 64;
- limit = base + 127;
- } else if (type == DRM_MINOR_RENDER) {
- base += 128;
- limit = base + 255;
- }
-
-again:
- if (idr_pre_get(&drm_minors_idr, GFP_KERNEL) == 0) {
- DRM_ERROR("Out of memory expanding drawable idr\n");
- return -ENOMEM;
- }
- mutex_lock(&dev->struct_mutex);
- ret = idr_get_new_above(&drm_minors_idr, NULL,
- base, &new_id);
- mutex_unlock(&dev->struct_mutex);
- if (ret == -EAGAIN) {
- goto again;
- } else if (ret) {
- return ret;
- }
-
- if (new_id >= limit) {
- idr_remove(&drm_minors_idr, new_id);
- return -EINVAL;
- }
- return new_id;
-}
-
-struct drm_master *drm_master_create(struct drm_minor *minor)
-{
- struct drm_master *master;
-
- master = kzalloc(sizeof(*master), GFP_KERNEL);
- if (!master)
- return NULL;
-
- kref_init(&master->refcount);
- spin_lock_init(&master->lock.spinlock);
- init_waitqueue_head(&master->lock.lock_queue);
- drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER);
- INIT_LIST_HEAD(&master->magicfree);
- master->minor = minor;
-
- list_add_tail(&master->head, &minor->master_list);
-
- return master;
-}
-
-struct drm_master *drm_master_get(struct drm_master *master)
-{
- kref_get(&master->refcount);
- return master;
-}
-EXPORT_SYMBOL(drm_master_get);
-
-static void drm_master_destroy(struct kref *kref)
-{
- struct drm_master *master = container_of(kref, struct drm_master, refcount);
- struct drm_magic_entry *pt, *next;
- struct drm_device *dev = master->minor->dev;
- struct drm_map_list *r_list, *list_temp;
-
- list_del(&master->head);
-
- if (dev->driver->master_destroy)
- dev->driver->master_destroy(dev, master);
-
- list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
- if (r_list->master == master) {
- drm_rmmap_locked(dev, r_list->map);
- r_list = NULL;
- }
- }
-
- if (master->unique) {
- kfree(master->unique);
- master->unique = NULL;
- master->unique_len = 0;
- }
-
- list_for_each_entry_safe(pt, next, &master->magicfree, head) {
- list_del(&pt->head);
- drm_ht_remove_item(&master->magiclist, &pt->hash_item);
- kfree(pt);
- }
-
- drm_ht_remove(&master->magiclist);
-
- kfree(master);
-}
-
-void drm_master_put(struct drm_master **master)
-{
- kref_put(&(*master)->refcount, drm_master_destroy);
- *master = NULL;
-}
-EXPORT_SYMBOL(drm_master_put);
-
-int drm_setmaster_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- int ret = 0;
-
- if (file_priv->is_master)
- return 0;
-
- if (file_priv->minor->master && file_priv->minor->master != file_priv->master)
- return -EINVAL;
-
- if (!file_priv->master)
- return -EINVAL;
-
- if (!file_priv->minor->master &&
- file_priv->minor->master != file_priv->master) {
- mutex_lock(&dev->struct_mutex);
- file_priv->minor->master = drm_master_get(file_priv->master);
- file_priv->is_master = 1;
- if (dev->driver->master_set) {
- ret = dev->driver->master_set(dev, file_priv, false);
- if (unlikely(ret != 0)) {
- file_priv->is_master = 0;
- drm_master_put(&file_priv->minor->master);
- }
- }
- mutex_unlock(&dev->struct_mutex);
- }
-
- return 0;
-}
-
-int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- if (!file_priv->is_master)
- return -EINVAL;
-
- if (!file_priv->minor->master)
- return -EINVAL;
-
- mutex_lock(&dev->struct_mutex);
- if (dev->driver->master_drop)
- dev->driver->master_drop(dev, file_priv, false);
- drm_master_put(&file_priv->minor->master);
- file_priv->is_master = 0;
- mutex_unlock(&dev->struct_mutex);
- return 0;
-}
-
-static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
- const struct pci_device_id *ent,
- struct drm_driver *driver)
-{
- int retcode;
-
- INIT_LIST_HEAD(&dev->filelist);
- INIT_LIST_HEAD(&dev->ctxlist);
- INIT_LIST_HEAD(&dev->vmalist);
- INIT_LIST_HEAD(&dev->maplist);
- INIT_LIST_HEAD(&dev->vblank_event_list);
-
- spin_lock_init(&dev->count_lock);
- spin_lock_init(&dev->drw_lock);
- spin_lock_init(&dev->event_lock);
- init_timer(&dev->timer);
- mutex_init(&dev->struct_mutex);
- mutex_init(&dev->ctxlist_mutex);
-
- idr_init(&dev->drw_idr);
-
- dev->pdev = pdev;
- dev->pci_device = pdev->device;
- dev->pci_vendor = pdev->vendor;
-
-#ifdef __alpha__
- dev->hose = pdev->sysdata;
-#endif
-
- if (drm_ht_create(&dev->map_hash, 12)) {
- return -ENOMEM;
- }
-
- /* the DRM has 6 basic counters */
- dev->counters = 6;
- dev->types[0] = _DRM_STAT_LOCK;
- dev->types[1] = _DRM_STAT_OPENS;
- dev->types[2] = _DRM_STAT_CLOSES;
- dev->types[3] = _DRM_STAT_IOCTLS;
- dev->types[4] = _DRM_STAT_LOCKS;
- dev->types[5] = _DRM_STAT_UNLOCKS;
-
- dev->driver = driver;
-
- if (drm_core_has_AGP(dev)) {
- if (drm_device_is_agp(dev))
- dev->agp = drm_agp_init(dev);
- if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP)
- && (dev->agp == NULL)) {
- DRM_ERROR("Cannot initialize the agpgart module.\n");
- retcode = -EINVAL;
- goto error_out_unreg;
- }
- if (drm_core_has_MTRR(dev)) {
- if (dev->agp)
- dev->agp->agp_mtrr =
- mtrr_add(dev->agp->agp_info.aper_base,
- dev->agp->agp_info.aper_size *
- 1024 * 1024, MTRR_TYPE_WRCOMB, 1);
- }
- }
-
-
- retcode = drm_ctxbitmap_init(dev);
- if (retcode) {
- DRM_ERROR("Cannot allocate memory for context bitmap.\n");
- goto error_out_unreg;
- }
-
- if (driver->driver_features & DRIVER_GEM) {
- retcode = drm_gem_init(dev);
- if (retcode) {
- DRM_ERROR("Cannot initialize graphics execution "
- "manager (GEM)\n");
- goto error_out_unreg;
- }
- }
-
- return 0;
-
- error_out_unreg:
- drm_lastclose(dev);
- return retcode;
-}
-
-
-/**
- * Get a secondary minor number.
- *
- * \param dev device data structure
- * \param minor structure to hold the assigned minor
- * \return zero on success, negative number on failure.
- *
- * Search for an empty entry, initialize it to the given parameters, and
- * create the proc init entry via proc_init(). This routine assigns
- * minor numbers to secondary heads of multi-headed cards.
- */
-static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
-{
- struct drm_minor *new_minor;
- int ret;
- int minor_id;
-
- DRM_DEBUG("\n");
-
- minor_id = drm_minor_get_id(dev, type);
- if (minor_id < 0)
- return minor_id;
-
- new_minor = kzalloc(sizeof(struct drm_minor), GFP_KERNEL);
- if (!new_minor) {
- ret = -ENOMEM;
- goto err_idr;
- }
-
- new_minor->type = type;
- new_minor->device = MKDEV(DRM_MAJOR, minor_id);
- new_minor->dev = dev;
- new_minor->index = minor_id;
- INIT_LIST_HEAD(&new_minor->master_list);
-
- idr_replace(&drm_minors_idr, new_minor, minor_id);
-
- if (type == DRM_MINOR_LEGACY) {
- ret = drm_proc_init(new_minor, minor_id, drm_proc_root);
- if (ret) {
- DRM_ERROR("DRM: Failed to initialize /proc/dri.\n");
- goto err_mem;
- }
- } else
- new_minor->proc_root = NULL;
-
-#if defined(CONFIG_DEBUG_FS)
- ret = drm_debugfs_init(new_minor, minor_id, drm_debugfs_root);
- if (ret) {
- DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
- goto err_g2;
- }
-#endif
-
- ret = drm_sysfs_device_add(new_minor);
- if (ret) {
- printk(KERN_ERR
- "DRM: Error sysfs_device_add.\n");
- goto err_g2;
- }
- *minor = new_minor;
-
- DRM_DEBUG("new minor assigned %d\n", minor_id);
- return 0;
-
-
-err_g2:
- if (new_minor->type == DRM_MINOR_LEGACY)
- drm_proc_cleanup(new_minor, drm_proc_root);
-err_mem:
- kfree(new_minor);
-err_idr:
- idr_remove(&drm_minors_idr, minor_id);
- *minor = NULL;
- return ret;
-}
-
-/**
- * Register.
- *
- * \param pdev - PCI device structure
- * \param ent entry from the PCI ID table with device type flags
- * \return zero on success or a negative number on failure.
- *
- * Attempt to get inter-module "drm" information. If we are first,
- * register the character device and inter-module information.
- * Try to register; if registration fails, back out the previous work.
- */
-int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
- struct drm_driver *driver)
-{
- struct drm_device *dev;
- int ret;
-
- DRM_DEBUG("\n");
-
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev)
- return -ENOMEM;
-
- ret = pci_enable_device(pdev);
- if (ret)
- goto err_g1;
-
- pci_set_master(pdev);
- if ((ret = drm_fill_in_dev(dev, pdev, ent, driver))) {
- printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
- goto err_g2;
- }
-
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- pci_set_drvdata(pdev, dev);
- ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
- if (ret)
- goto err_g2;
- }
-
- if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY)))
- goto err_g3;
-
- if (dev->driver->load) {
- ret = dev->driver->load(dev, ent->driver_data);
- if (ret)
- goto err_g4;
- }
-
- /* setup the grouping for the legacy output */
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = drm_mode_group_init_legacy_group(dev, &dev->primary->mode_group);
- if (ret)
- goto err_g4;
- }
-
- list_add_tail(&dev->driver_item, &driver->device_list);
-
- DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
- driver->name, driver->major, driver->minor, driver->patchlevel,
- driver->date, pci_name(pdev), dev->primary->index);
-
- return 0;
-
-err_g4:
- drm_put_minor(&dev->primary);
-err_g3:
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- drm_put_minor(&dev->control);
-err_g2:
- pci_disable_device(pdev);
-err_g1:
- kfree(dev);
- return ret;
-}
-EXPORT_SYMBOL(drm_get_dev);
-
-/**
- * Put a secondary minor number.
- *
- * \param minor_p pointer to the minor being released
- * \return always zero
- *
- * Cleans up the proc resources. Not legal for this to be the
- * last minor released.
- *
- */
-int drm_put_minor(struct drm_minor **minor_p)
-{
- struct drm_minor *minor = *minor_p;
-
- DRM_DEBUG("release secondary minor %d\n", minor->index);
-
- if (minor->type == DRM_MINOR_LEGACY)
- drm_proc_cleanup(minor, drm_proc_root);
-#if defined(CONFIG_DEBUG_FS)
- drm_debugfs_cleanup(minor);
-#endif
-
- drm_sysfs_device_remove(minor);
-
- idr_remove(&drm_minors_idr, minor->index);
-
- kfree(minor);
- *minor_p = NULL;
- return 0;
-}
-
-/**
- * Called via drm_exit() at module unload time or when pci device is
- * unplugged.
- *
- * Cleans up the DRM device, calling drm_lastclose().
- *
- * \sa drm_init
- */
-void drm_put_dev(struct drm_device *dev)
-{
- struct drm_driver *driver;
- struct drm_map_list *r_list, *list_temp;
-
- DRM_DEBUG("\n");
-
- if (!dev) {
- DRM_ERROR("cleanup called no dev\n");
- return;
- }
- driver = dev->driver;
-
- drm_vblank_cleanup(dev);
-
- drm_lastclose(dev);
-
- if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) &&
- dev->agp && dev->agp->agp_mtrr >= 0) {
- int retval;
- retval = mtrr_del(dev->agp->agp_mtrr,
- dev->agp->agp_info.aper_base,
- dev->agp->agp_info.aper_size * 1024 * 1024);
- DRM_DEBUG("mtrr_del=%d\n", retval);
- }
-
- if (dev->driver->unload)
- dev->driver->unload(dev);
-
- if (drm_core_has_AGP(dev) && dev->agp) {
- kfree(dev->agp);
- dev->agp = NULL;
- }
-
- list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
- drm_rmmap(dev, r_list->map);
- drm_ht_remove(&dev->map_hash);
-
- drm_ctxbitmap_cleanup(dev);
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- drm_put_minor(&dev->control);
-
- if (driver->driver_features & DRIVER_GEM)
- drm_gem_destroy(dev);
-
- drm_put_minor(&dev->primary);
-
- if (dev->devname) {
- kfree(dev->devname);
- dev->devname = NULL;
- }
- kfree(dev);
-}
-EXPORT_SYMBOL(drm_put_dev);
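
drm_get_dev() above is the entry point a PCI DRM driver calls from its probe callback, with drm_put_dev() as the counterpart at unload. A minimal sketch, where "example_driver" and "example_pci_probe" are hypothetical names, not part of this patch:

static struct drm_driver example_driver;	/* .load, .fops etc. set elsewhere */

static int example_pci_probe(struct pci_dev *pdev,
			     const struct pci_device_id *ent)
{
	/* Allocates the drm_device, enables the PCI device, registers the
	 * minors and finally calls example_driver.load(). */
	return drm_get_dev(pdev, ent, &example_driver);
}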
+++ /dev/null
-
-/*
- * drm_sysfs.c - Modifications to drm_sysfs_class.c to support
- * extra sysfs attribute from DRM. Normal drm_sysfs_class
- * does not allow adding attributes.
- *
- * Copyright (c) 2004 Jon Smirl <jonsmirl@gmail.com>
- * Copyright (c) 2003-2004 Greg Kroah-Hartman <greg@kroah.com>
- * Copyright (c) 2003-2004 IBM Corp.
- *
- * This file is released under the GPLv2
- *
- */
-
-#include <linux/device.h>
-#include <linux/kdev_t.h>
-#include <linux/err.h>
-
-#include "drm_sysfs.h"
-#include "drm_core.h"
-#include "drmP.h"
-
-#define to_drm_minor(d) container_of(d, struct drm_minor, kdev)
-#define to_drm_connector(d) container_of(d, struct drm_connector, kdev)
-
-static struct device_type drm_sysfs_device_minor = {
- .name = "drm_minor"
-};
-
-/**
- * drm_class_suspend - DRM class suspend hook
- * @dev: Linux device to suspend
- * @state: power state to enter
- *
- * Just figures out what the actual struct drm_device associated with
- * @dev is and calls its suspend hook, if present.
- */
-static int drm_class_suspend(struct device *dev, pm_message_t state)
-{
- if (dev->type == &drm_sysfs_device_minor) {
- struct drm_minor *drm_minor = to_drm_minor(dev);
- struct drm_device *drm_dev = drm_minor->dev;
-
- if (drm_minor->type == DRM_MINOR_LEGACY &&
- !drm_core_check_feature(drm_dev, DRIVER_MODESET) &&
- drm_dev->driver->suspend)
- return drm_dev->driver->suspend(drm_dev, state);
- }
- return 0;
-}
-
-/**
- * drm_class_resume - DRM class resume hook
- * @dev: Linux device to resume
- *
- * Just figures out what the actual struct drm_device associated with
- * @dev is and calls its resume hook, if present.
- */
-static int drm_class_resume(struct device *dev)
-{
- if (dev->type == &drm_sysfs_device_minor) {
- struct drm_minor *drm_minor = to_drm_minor(dev);
- struct drm_device *drm_dev = drm_minor->dev;
-
- if (drm_minor->type == DRM_MINOR_LEGACY &&
- !drm_core_check_feature(drm_dev, DRIVER_MODESET) &&
- drm_dev->driver->resume)
- return drm_dev->driver->resume(drm_dev);
- }
- return 0;
-}
-
-/* Display the version of drm_core. This doesn't work right in the current design. */
-static ssize_t version_show(struct class *dev, char *buf)
-{
- return sprintf(buf, "%s %d.%d.%d %s\n", CORE_NAME, CORE_MAJOR,
- CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
-}
-
-static char *drm_devnode(struct device *dev, mode_t *mode)
-{
- return kasprintf(GFP_KERNEL, "dri/%s", dev_name(dev));
-}
-
-static CLASS_ATTR(version, S_IRUGO, version_show, NULL);
-
-/**
- * drm_sysfs_create - create a struct drm_sysfs_class structure
- * @owner: pointer to the module that is to "own" this struct drm_sysfs_class
- * @name: pointer to a string for the name of this class.
- *
- * This is used to create DRM class pointer that can then be used
- * in calls to drm_sysfs_device_add().
- *
- * Note, the pointer created here is to be destroyed when finished by making a
- * call to drm_sysfs_destroy().
- */
-struct class *drm_sysfs_create(struct module *owner, char *name)
-{
- struct class *class;
- int err;
-
- class = class_create(owner, name);
- if (IS_ERR(class)) {
- err = PTR_ERR(class);
- goto err_out;
- }
-
- class->suspend = drm_class_suspend;
- class->resume = drm_class_resume;
-
- err = class_create_file(class, &class_attr_version);
- if (err)
- goto err_out_class;
-
- class->devnode = drm_devnode;
-
- return class;
-
-err_out_class:
- class_destroy(class);
-err_out:
- return ERR_PTR(err);
-}
-
-/**
- * drm_sysfs_destroy - destroys DRM class
- *
- * Destroy the DRM device class.
- */
-void drm_sysfs_destroy(void)
-{
- if ((drm_class == NULL) || (IS_ERR(drm_class)))
- return;
- class_remove_file(drm_class, &class_attr_version);
- class_destroy(drm_class);
-}
-
-/**
- * drm_sysfs_device_release - do nothing
- * @dev: Linux device
- *
- * Normally, this would free the DRM device associated with @dev, along
- * with cleaning up any other stuff. But we do that in the DRM core, so
- * this function can just return and hope that the core does its job.
- */
-static void drm_sysfs_device_release(struct device *dev)
-{
- memset(dev, 0, sizeof(struct device));
- return;
-}
-
-/*
- * Connector properties
- */
-static ssize_t status_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_connector *connector = to_drm_connector(device);
- enum drm_connector_status status;
-
- status = connector->funcs->detect(connector);
- return snprintf(buf, PAGE_SIZE, "%s\n",
- drm_get_connector_status_name(status));
-}
-
-static ssize_t dpms_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_connector *connector = to_drm_connector(device);
- struct drm_device *dev = connector->dev;
- uint64_t dpms_status;
- int ret;
-
- ret = drm_connector_property_get_value(connector,
- dev->mode_config.dpms_property,
- &dpms_status);
- if (ret)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%s\n",
- drm_get_dpms_name((int)dpms_status));
-}
-
-static ssize_t enabled_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_connector *connector = to_drm_connector(device);
-
- return snprintf(buf, PAGE_SIZE, "%s\n", connector->encoder ? "enabled" :
- "disabled");
-}
-
-static ssize_t edid_show(struct kobject *kobj, struct bin_attribute *attr,
- char *buf, loff_t off, size_t count)
-{
- struct device *connector_dev = container_of(kobj, struct device, kobj);
- struct drm_connector *connector = to_drm_connector(connector_dev);
- unsigned char *edid;
- size_t size;
-
- if (!connector->edid_blob_ptr)
- return 0;
-
- edid = connector->edid_blob_ptr->data;
- size = connector->edid_blob_ptr->length;
- if (!edid)
- return 0;
-
- if (off >= size)
- return 0;
-
- if (off + count > size)
- count = size - off;
- memcpy(buf, edid + off, count);
-
- return count;
-}
-
-static ssize_t modes_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_connector *connector = to_drm_connector(device);
- struct drm_display_mode *mode;
- int written = 0;
-
- list_for_each_entry(mode, &connector->modes, head) {
- written += snprintf(buf + written, PAGE_SIZE - written, "%s\n",
- mode->name);
- }
-
- return written;
-}
-
-static ssize_t subconnector_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_connector *connector = to_drm_connector(device);
- struct drm_device *dev = connector->dev;
- struct drm_property *prop = NULL;
- uint64_t subconnector;
- int is_tv = 0;
- int ret;
-
- switch (connector->connector_type) {
- case DRM_MODE_CONNECTOR_DVII:
- prop = dev->mode_config.dvi_i_subconnector_property;
- break;
- case DRM_MODE_CONNECTOR_Composite:
- case DRM_MODE_CONNECTOR_SVIDEO:
- case DRM_MODE_CONNECTOR_Component:
- case DRM_MODE_CONNECTOR_TV:
- prop = dev->mode_config.tv_subconnector_property;
- is_tv = 1;
- break;
- default:
- DRM_ERROR("Wrong connector type for this property\n");
- return 0;
- }
-
- if (!prop) {
- DRM_ERROR("Unable to find subconnector property\n");
- return 0;
- }
-
- ret = drm_connector_property_get_value(connector, prop, &subconnector);
- if (ret)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%s", is_tv ?
- drm_get_tv_subconnector_name((int)subconnector) :
- drm_get_dvi_i_subconnector_name((int)subconnector));
-}
-
-static ssize_t select_subconnector_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_connector *connector = to_drm_connector(device);
- struct drm_device *dev = connector->dev;
- struct drm_property *prop = NULL;
- uint64_t subconnector;
- int is_tv = 0;
- int ret;
-
- switch (connector->connector_type) {
- case DRM_MODE_CONNECTOR_DVII:
- prop = dev->mode_config.dvi_i_select_subconnector_property;
- break;
- case DRM_MODE_CONNECTOR_Composite:
- case DRM_MODE_CONNECTOR_SVIDEO:
- case DRM_MODE_CONNECTOR_Component:
- case DRM_MODE_CONNECTOR_TV:
- prop = dev->mode_config.tv_select_subconnector_property;
- is_tv = 1;
- break;
- default:
- DRM_ERROR("Wrong connector type for this property\n");
- return 0;
- }
-
- if (!prop) {
- DRM_ERROR("Unable to find select subconnector property\n");
- return 0;
- }
-
- ret = drm_connector_property_get_value(connector, prop, &subconnector);
- if (ret)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%s", is_tv ?
- drm_get_tv_select_name((int)subconnector) :
- drm_get_dvi_i_select_name((int)subconnector));
-}
-
-static struct device_attribute connector_attrs[] = {
- __ATTR_RO(status),
- __ATTR_RO(enabled),
- __ATTR_RO(dpms),
- __ATTR_RO(modes),
-};
-
-/* These attributes are for both DVI-I connectors and all types of tv-out. */
-static struct device_attribute connector_attrs_opt1[] = {
- __ATTR_RO(subconnector),
- __ATTR_RO(select_subconnector),
-};
-
-static struct bin_attribute edid_attr = {
- .attr.name = "edid",
- .attr.mode = 0444,
- .size = 128,
- .read = edid_show,
-};
-
-/**
- * drm_sysfs_connector_add - add a connector to sysfs
- * @connector: connector to add
- *
- * Create a connector device in sysfs, along with its associated connector
- * properties (so far, connection status, dpms, mode list & edid) and
- * generate a hotplug event so userspace knows there's a new connector
- * available.
- *
- * Note:
- * This routine should only be called *once* for each connector registered.
- * A second call for an already registered connector will trigger the BUG_ON
- * below.
- */
-int drm_sysfs_connector_add(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- int ret = 0, i, j;
-
- /* We shouldn't get called more than once for the same connector */
- BUG_ON(device_is_registered(&connector->kdev));
-
- connector->kdev.parent = &dev->primary->kdev;
- connector->kdev.class = drm_class;
- connector->kdev.release = drm_sysfs_device_release;
-
- DRM_DEBUG("adding \"%s\" to sysfs\n",
- drm_get_connector_name(connector));
-
- dev_set_name(&connector->kdev, "card%d-%s",
- dev->primary->index, drm_get_connector_name(connector));
- ret = device_register(&connector->kdev);
-
- if (ret) {
- DRM_ERROR("failed to register connector device: %d\n", ret);
- goto out;
- }
-
- /* Standard attributes */
-
- for (i = 0; i < ARRAY_SIZE(connector_attrs); i++) {
- ret = device_create_file(&connector->kdev, &connector_attrs[i]);
- if (ret)
- goto err_out_files;
- }
-
- /* Optional attributes */
- /*
- * In the long run it maybe a good idea to make one set of
- * optionals per connector type.
- */
- switch (connector->connector_type) {
- case DRM_MODE_CONNECTOR_DVII:
- case DRM_MODE_CONNECTOR_Composite:
- case DRM_MODE_CONNECTOR_SVIDEO:
- case DRM_MODE_CONNECTOR_Component:
- case DRM_MODE_CONNECTOR_TV:
- for (i = 0; i < ARRAY_SIZE(connector_attrs_opt1); i++) {
- ret = device_create_file(&connector->kdev, &connector_attrs_opt1[i]);
- if (ret)
- goto err_out_files;
- }
- break;
- default:
- break;
- }
-
- ret = sysfs_create_bin_file(&connector->kdev.kobj, &edid_attr);
- if (ret)
- goto err_out_files;
-
- /* Let userspace know we have a new connector */
- drm_sysfs_hotplug_event(dev);
-
- return 0;
-
-err_out_files:
- if (i > 0)
- for (j = 0; j < i; j++)
- device_remove_file(&connector->kdev,
- &connector_attrs[j]);
- device_unregister(&connector->kdev);
-
-out:
- return ret;
-}
-EXPORT_SYMBOL(drm_sysfs_connector_add);
-
-/**
- * drm_sysfs_connector_remove - remove a connector device from sysfs
- * @connector: connector to remove
- *
- * Remove @connector and its associated attributes from sysfs. Note that
- * the device model core will take care of sending the "remove" uevent
- * at this time, so we don't need to do it.
- *
- * Note:
- * This routine should only be called if the connector was previously
- * successfully registered. If @connector hasn't been registered yet,
- * you'll likely see a panic somewhere deep in sysfs code when called.
- */
-void drm_sysfs_connector_remove(struct drm_connector *connector)
-{
- int i;
-
- DRM_DEBUG("removing \"%s\" from sysfs\n",
- drm_get_connector_name(connector));
-
- for (i = 0; i < ARRAY_SIZE(connector_attrs); i++)
- device_remove_file(&connector->kdev, &connector_attrs[i]);
- sysfs_remove_bin_file(&connector->kdev.kobj, &edid_attr);
- device_unregister(&connector->kdev);
-}
-EXPORT_SYMBOL(drm_sysfs_connector_remove);
-
-/**
- * drm_sysfs_hotplug_event - generate a DRM uevent
- * @dev: DRM device
- *
- * Send a uevent for the DRM device specified by @dev. Currently we only
- * set HOTPLUG=1 in the uevent environment, but this could be expanded to
- * deal with other types of events.
- */
-void drm_sysfs_hotplug_event(struct drm_device *dev)
-{
- char *event_string = "HOTPLUG=1";
- char *envp[] = { event_string, NULL };
-
- DRM_DEBUG("generating hotplug event\n");
-
- kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, envp);
-}
-EXPORT_SYMBOL(drm_sysfs_hotplug_event);
-
-/**
- * drm_sysfs_device_add - adds a class device to sysfs for a character driver
- * @minor: DRM minor to be added
- *
- * Add a DRM minor to the DRM device model class. We use the minor's PCI
- * device as the parent for the Linux device, and make sure it has a file
- * containing the driver we're using (for userspace compatibility).
- */
-int drm_sysfs_device_add(struct drm_minor *minor)
-{
- int err;
- char *minor_str;
-
- minor->kdev.parent = &minor->dev->pdev->dev;
- minor->kdev.class = drm_class;
- minor->kdev.release = drm_sysfs_device_release;
- minor->kdev.devt = minor->device;
- minor->kdev.type = &drm_sysfs_device_minor;
- if (minor->type == DRM_MINOR_CONTROL)
- minor_str = "controlD%d";
- else if (minor->type == DRM_MINOR_RENDER)
- minor_str = "renderD%d";
- else
- minor_str = "card%d";
-
- dev_set_name(&minor->kdev, minor_str, minor->index);
-
- err = device_register(&minor->kdev);
- if (err) {
- DRM_ERROR("device add failed: %d\n", err);
- goto err_out;
- }
-
- return 0;
-
-err_out:
- return err;
-}
-
-/**
- * drm_sysfs_device_remove - remove a DRM minor from sysfs
- * @minor: DRM minor to remove
- *
- * This call unregisters and cleans up a class device that was created with a
- * call to drm_sysfs_device_add()
- */
-void drm_sysfs_device_remove(struct drm_minor *minor)
-{
- device_unregister(&minor->kdev);
-}
-
-
-/**
- * drm_class_device_register - Register a struct device in the drm class.
- *
- * @dev: pointer to struct device to register.
- *
- * @dev should have all relevant members pre-filled with the exception
- * of the class member. In particular, the device_type member must
- * be set.
- */
-
-int drm_class_device_register(struct device *dev)
-{
- dev->class = drm_class;
- return device_register(dev);
-}
-EXPORT_SYMBOL_GPL(drm_class_device_register);
-
-void drm_class_device_unregister(struct device *dev)
-{
- return device_unregister(dev);
-}
-EXPORT_SYMBOL_GPL(drm_class_device_unregister);
+++ /dev/null
-/**
- * \file drm_vm.c
- * Memory mapping for DRM
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "drmP.h"
-#if defined(__ia64__)
-#include <linux/efi.h>
-#endif
-
-static void drm_vm_open(struct vm_area_struct *vma);
-static void drm_vm_close(struct vm_area_struct *vma);
-
-static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
-{
- pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
-
-#if defined(__i386__) || defined(__x86_64__)
- if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
- pgprot_val(tmp) |= _PAGE_PCD;
- pgprot_val(tmp) &= ~_PAGE_PWT;
- }
-#elif defined(__powerpc__)
- pgprot_val(tmp) |= _PAGE_NO_CACHE;
- if (map_type == _DRM_REGISTERS)
- pgprot_val(tmp) |= _PAGE_GUARDED;
-#elif defined(__ia64__)
- if (efi_range_is_wc(vma->vm_start, vma->vm_end -
- vma->vm_start))
- tmp = pgprot_writecombine(tmp);
- else
- tmp = pgprot_noncached(tmp);
-#elif defined(__sparc__)
- tmp = pgprot_noncached(tmp);
-#endif
- return tmp;
-}
-
-static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
-{
- pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
-
-#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
- tmp |= _PAGE_NO_CACHE;
-#endif
- return tmp;
-}
-
-/**
- * \c fault method for AGP virtual memory.
- *
- * \param vma virtual memory area.
- * \param address access address.
- * \return pointer to the page structure.
- *
- * Find the right map and if it's AGP memory find the real physical page to
- * map, get the page, increment the use count and return it.
- */
-#if __OS_HAS_AGP
-static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- struct drm_file *priv = vma->vm_file->private_data;
- struct drm_device *dev = priv->minor->dev;
- struct drm_local_map *map = NULL;
- struct drm_map_list *r_list;
- struct drm_hash_item *hash;
-
- /*
- * Find the right map
- */
- if (!drm_core_has_AGP(dev))
- goto vm_fault_error;
-
- if (!dev->agp || !dev->agp->cant_use_aperture)
- goto vm_fault_error;
-
- if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
- goto vm_fault_error;
-
- r_list = drm_hash_entry(hash, struct drm_map_list, hash);
- map = r_list->map;
-
- if (map && map->type == _DRM_AGP) {
- /*
- * Using vm_pgoff as a selector forces us to use this unusual
- * addressing scheme.
- */
- resource_size_t offset = (unsigned long)vmf->virtual_address -
- vma->vm_start;
- resource_size_t baddr = map->offset + offset;
- struct drm_agp_mem *agpmem;
- struct page *page;
-
-#ifdef __alpha__
- /*
- * Adjust to a bus-relative address
- */
- baddr -= dev->hose->mem_space->start;
-#endif
-
- /*
- * It's AGP memory - find the real physical page to map
- */
- list_for_each_entry(agpmem, &dev->agp->memory, head) {
- if (agpmem->bound <= baddr &&
- agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
- break;
- }
-
- if (!agpmem)
- goto vm_fault_error;
-
- /*
- * Get the page, inc the use count, and return it
- */
- offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
- page = agpmem->memory->pages[offset];
- get_page(page);
- vmf->page = page;
-
- DRM_DEBUG
- ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
- (unsigned long long)baddr,
- agpmem->memory->pages[offset],
- (unsigned long long)offset,
- page_count(page));
- return 0;
- }
-vm_fault_error:
- return VM_FAULT_SIGBUS; /* Disallow mremap */
-}
-#else /* __OS_HAS_AGP */
-static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- return VM_FAULT_SIGBUS;
-}
-#endif /* __OS_HAS_AGP */
-
-/**
- * \c fault method for shared virtual memory.
- *
- * \param vma virtual memory area.
- * \param address access address.
- * \return pointer to the page structure.
- *
- * Get the mapping, find the real physical page to map, get the page, and
- * return it.
- */
-static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- struct drm_local_map *map = vma->vm_private_data;
- unsigned long offset;
- unsigned long i;
- struct page *page;
-
- if (!map)
- return VM_FAULT_SIGBUS; /* Nothing allocated */
-
- offset = (unsigned long)vmf->virtual_address - vma->vm_start;
- i = (unsigned long)map->handle + offset;
- page = vmalloc_to_page((void *)i);
- if (!page)
- return VM_FAULT_SIGBUS;
- get_page(page);
- vmf->page = page;
-
- DRM_DEBUG("shm_fault 0x%lx\n", offset);
- return 0;
-}
-
-/**
- * \c close method for shared virtual memory.
- *
- * \param vma virtual memory area.
- *
- * Deletes map information if we are the last
- * person to close a mapping and it's not in the global maplist.
- */
-static void drm_vm_shm_close(struct vm_area_struct *vma)
-{
- struct drm_file *priv = vma->vm_file->private_data;
- struct drm_device *dev = priv->minor->dev;
- struct drm_vma_entry *pt, *temp;
- struct drm_local_map *map;
- struct drm_map_list *r_list;
- int found_maps = 0;
-
- DRM_DEBUG("0x%08lx,0x%08lx\n",
- vma->vm_start, vma->vm_end - vma->vm_start);
- atomic_dec(&dev->vma_count);
-
- map = vma->vm_private_data;
-
- mutex_lock(&dev->struct_mutex);
- list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
- if (pt->vma->vm_private_data == map)
- found_maps++;
- if (pt->vma == vma) {
- list_del(&pt->head);
- kfree(pt);
- }
- }
-
- /* We were the only map that was found */
- if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
- /* Check to see if we are in the maplist; if we are not, then
- * we delete this mapping's information.
- */
- found_maps = 0;
- list_for_each_entry(r_list, &dev->maplist, head) {
- if (r_list->map == map)
- found_maps++;
- }
-
- if (!found_maps) {
- drm_dma_handle_t dmah;
-
- switch (map->type) {
- case _DRM_REGISTERS:
- case _DRM_FRAME_BUFFER:
- if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
- int retcode;
- retcode = mtrr_del(map->mtrr,
- map->offset,
- map->size);
- DRM_DEBUG("mtrr_del = %d\n", retcode);
- }
- iounmap(map->handle);
- break;
- case _DRM_SHM:
- vfree(map->handle);
- break;
- case _DRM_AGP:
- case _DRM_SCATTER_GATHER:
- break;
- case _DRM_CONSISTENT:
- dmah.vaddr = map->handle;
- dmah.busaddr = map->offset;
- dmah.size = map->size;
- __drm_pci_free(dev, &dmah);
- break;
- case _DRM_GEM:
- DRM_ERROR("tried to rmmap GEM object\n");
- break;
- }
- kfree(map);
- }
- }
- mutex_unlock(&dev->struct_mutex);
-}
-
-/**
- * \c fault method for DMA virtual memory.
- *
- * \param vma virtual memory area.
- * \param address access address.
- * \return pointer to the page structure.
- *
- * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
- */
-static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- struct drm_file *priv = vma->vm_file->private_data;
- struct drm_device *dev = priv->minor->dev;
- struct drm_device_dma *dma = dev->dma;
- unsigned long offset;
- unsigned long page_nr;
- struct page *page;
-
- if (!dma)
- return VM_FAULT_SIGBUS; /* Error */
- if (!dma->pagelist)
- return VM_FAULT_SIGBUS; /* Nothing allocated */
-
- offset = (unsigned long)vmf->virtual_address - vma->vm_start; /* vm_[pg]off[set] should be 0 */
- page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
- page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));
-
- get_page(page);
- vmf->page = page;
-
- DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
- return 0;
-}
-
-/**
- * \c fault method for scatter-gather virtual memory.
- *
- * \param vma virtual memory area.
- * \param address access address.
- * \return pointer to the page structure.
- *
- * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
- */
-static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- struct drm_local_map *map = vma->vm_private_data;
- struct drm_file *priv = vma->vm_file->private_data;
- struct drm_device *dev = priv->minor->dev;
- struct drm_sg_mem *entry = dev->sg;
- unsigned long offset;
- unsigned long map_offset;
- unsigned long page_offset;
- struct page *page;
-
- if (!entry)
- return VM_FAULT_SIGBUS; /* Error */
- if (!entry->pagelist)
- return VM_FAULT_SIGBUS; /* Nothing allocated */
-
- offset = (unsigned long)vmf->virtual_address - vma->vm_start;
- map_offset = map->offset - (unsigned long)dev->sg->virtual;
- page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
- page = entry->pagelist[page_offset];
- get_page(page);
- vmf->page = page;
-
- return 0;
-}
-
-static int drm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- return drm_do_vm_fault(vma, vmf);
-}
-
-static int drm_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- return drm_do_vm_shm_fault(vma, vmf);
-}
-
-static int drm_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- return drm_do_vm_dma_fault(vma, vmf);
-}
-
-static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- return drm_do_vm_sg_fault(vma, vmf);
-}
-
-/** AGP virtual memory operations */
-static const struct vm_operations_struct drm_vm_ops = {
- .fault = drm_vm_fault,
- .open = drm_vm_open,
- .close = drm_vm_close,
-};
-
-/** Shared virtual memory operations */
-static const struct vm_operations_struct drm_vm_shm_ops = {
- .fault = drm_vm_shm_fault,
- .open = drm_vm_open,
- .close = drm_vm_shm_close,
-};
-
-/** DMA virtual memory operations */
-static const struct vm_operations_struct drm_vm_dma_ops = {
- .fault = drm_vm_dma_fault,
- .open = drm_vm_open,
- .close = drm_vm_close,
-};
-
-/** Scatter-gather virtual memory operations */
-static const struct vm_operations_struct drm_vm_sg_ops = {
- .fault = drm_vm_sg_fault,
- .open = drm_vm_open,
- .close = drm_vm_close,
-};
-
-/**
- * \c open method for shared virtual memory.
- *
- * \param vma virtual memory area.
- *
- * Create a new drm_vma_entry structure as the \p vma private data entry and
- * add it to drm_device::vmalist.
- */
-void drm_vm_open_locked(struct vm_area_struct *vma)
-{
- struct drm_file *priv = vma->vm_file->private_data;
- struct drm_device *dev = priv->minor->dev;
- struct drm_vma_entry *vma_entry;
-
- DRM_DEBUG("0x%08lx,0x%08lx\n",
- vma->vm_start, vma->vm_end - vma->vm_start);
- atomic_inc(&dev->vma_count);
-
- vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
- if (vma_entry) {
- vma_entry->vma = vma;
- vma_entry->pid = current->pid;
- list_add(&vma_entry->head, &dev->vmalist);
- }
-}
-
-static void drm_vm_open(struct vm_area_struct *vma)
-{
- struct drm_file *priv = vma->vm_file->private_data;
- struct drm_device *dev = priv->minor->dev;
-
- mutex_lock(&dev->struct_mutex);
- drm_vm_open_locked(vma);
- mutex_unlock(&dev->struct_mutex);
-}
-
-/**
- * \c close method for all virtual memory types.
- *
- * \param vma virtual memory area.
- *
- * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
- * free it.
- */
-static void drm_vm_close(struct vm_area_struct *vma)
-{
- struct drm_file *priv = vma->vm_file->private_data;
- struct drm_device *dev = priv->minor->dev;
- struct drm_vma_entry *pt, *temp;
-
- DRM_DEBUG("0x%08lx,0x%08lx\n",
- vma->vm_start, vma->vm_end - vma->vm_start);
- atomic_dec(&dev->vma_count);
-
- mutex_lock(&dev->struct_mutex);
- list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
- if (pt->vma == vma) {
- list_del(&pt->head);
- kfree(pt);
- break;
- }
- }
- mutex_unlock(&dev->struct_mutex);
-}
-
-/**
- * mmap DMA memory.
- *
- * \param file_priv DRM file private.
- * \param vma virtual memory area.
- * \return zero on success or a negative number on failure.
- *
- * Sets the virtual memory area operations structure to vm_dma_ops, the file
- * pointer, and calls vm_open().
- */
-static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
-{
- struct drm_file *priv = filp->private_data;
- struct drm_device *dev;
- struct drm_device_dma *dma;
- unsigned long length = vma->vm_end - vma->vm_start;
-
- dev = priv->minor->dev;
- dma = dev->dma;
- DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
- vma->vm_start, vma->vm_end, vma->vm_pgoff);
-
- /* Length must match exact page count */
- if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
- return -EINVAL;
- }
-
- if (!capable(CAP_SYS_ADMIN) &&
- (dma->flags & _DRM_DMA_USE_PCI_RO)) {
- vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
-#if defined(__i386__) || defined(__x86_64__)
- pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
-#else
- /* Ye gads this is ugly. With more thought
- we could move this up higher and use
- `protection_map' instead. */
- vma->vm_page_prot =
- __pgprot(pte_val
- (pte_wrprotect
- (__pte(pgprot_val(vma->vm_page_prot)))));
-#endif
- }
-
- vma->vm_ops = &drm_vm_dma_ops;
-
- vma->vm_flags |= VM_RESERVED; /* Don't swap */
- vma->vm_flags |= VM_DONTEXPAND;
-
- vma->vm_file = filp; /* Needed for drm_vm_open() */
- drm_vm_open_locked(vma);
- return 0;
-}
-
-resource_size_t drm_core_get_map_ofs(struct drm_local_map * map)
-{
- return map->offset;
-}
-
-EXPORT_SYMBOL(drm_core_get_map_ofs);
-
-resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
-{
-#ifdef __alpha__
- return dev->hose->dense_mem_base - dev->hose->mem_space->start;
-#else
- return 0;
-#endif
-}
-
-EXPORT_SYMBOL(drm_core_get_reg_ofs);
-
-/**
- * mmap DRM memory.
- *
- * \param file_priv DRM file private.
- * \param vma virtual memory area.
- * \return zero on success or a negative number on failure.
- *
- * If the virtual memory area has no offset associated with it then it's a DMA
- * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist,
- * checks that the restricted flag is not set, sets the virtual memory operations
- * according to the mapping type and remaps the pages. Finally sets the file
- * pointer and calls vm_open().
- */
-int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
-{
- struct drm_file *priv = filp->private_data;
- struct drm_device *dev = priv->minor->dev;
- struct drm_local_map *map = NULL;
- resource_size_t offset = 0;
- struct drm_hash_item *hash;
-
- DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
- vma->vm_start, vma->vm_end, vma->vm_pgoff);
-
- if (!priv->authenticated)
- return -EACCES;
-
- /* We check for "dma". On Apple's UniNorth, it's valid to have
- * the AGP mapped at physical address 0
- * --BenH.
- */
- if (!vma->vm_pgoff
-#if __OS_HAS_AGP
- && (!dev->agp
- || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
-#endif
- )
- return drm_mmap_dma(filp, vma);
-
- if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
- DRM_ERROR("Could not find map\n");
- return -EINVAL;
- }
-
- map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
- if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
- return -EPERM;
-
- /* Check for valid size. */
- if (map->size < vma->vm_end - vma->vm_start)
- return -EINVAL;
-
- if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
- vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
-#if defined(__i386__) || defined(__x86_64__)
- pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
-#else
- /* Ye gads this is ugly. With more thought
- we could move this up higher and use
- `protection_map' instead. */
- vma->vm_page_prot =
- __pgprot(pte_val
- (pte_wrprotect
- (__pte(pgprot_val(vma->vm_page_prot)))));
-#endif
- }
-
- switch (map->type) {
- case _DRM_AGP:
- if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
- /*
- * On some platforms we can't access bus DMA addresses from the CPU, so for
- * memory of type DRM_AGP, we'll deal with sorting out the real physical
- * pages and mappings in fault()
- */
-#if defined(__powerpc__)
- pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
-#endif
- vma->vm_ops = &drm_vm_ops;
- break;
- }
- /* fall through to _DRM_FRAME_BUFFER... */
- case _DRM_FRAME_BUFFER:
- case _DRM_REGISTERS:
- offset = dev->driver->get_reg_ofs(dev);
- vma->vm_flags |= VM_IO; /* not in core dump */
- vma->vm_page_prot = drm_io_prot(map->type, vma);
- if (io_remap_pfn_range(vma, vma->vm_start,
- (map->offset + offset) >> PAGE_SHIFT,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot))
- return -EAGAIN;
- DRM_DEBUG(" Type = %d; start = 0x%lx, end = 0x%lx,"
- " offset = 0x%llx\n",
- map->type,
- vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset));
- vma->vm_ops = &drm_vm_ops;
- break;
- case _DRM_CONSISTENT:
- /* Consistent memory is really like shared memory. But
- * it's allocated in a different way, so avoid fault */
- if (remap_pfn_range(vma, vma->vm_start,
- page_to_pfn(virt_to_page(map->handle)),
- vma->vm_end - vma->vm_start, vma->vm_page_prot))
- return -EAGAIN;
- vma->vm_page_prot = drm_dma_prot(map->type, vma);
- /* fall through to _DRM_SHM */
- case _DRM_SHM:
- vma->vm_ops = &drm_vm_shm_ops;
- vma->vm_private_data = (void *)map;
- /* Don't let this area swap. Change when
- DRM_KERNEL advisory is supported. */
- vma->vm_flags |= VM_RESERVED;
- break;
- case _DRM_SCATTER_GATHER:
- vma->vm_ops = &drm_vm_sg_ops;
- vma->vm_private_data = (void *)map;
- vma->vm_flags |= VM_RESERVED;
- vma->vm_page_prot = drm_dma_prot(map->type, vma);
- break;
- default:
- return -EINVAL; /* This should never happen. */
- }
- vma->vm_flags |= VM_RESERVED; /* Don't swap */
- vma->vm_flags |= VM_DONTEXPAND;
-
- vma->vm_file = filp; /* Needed for drm_vm_open() */
- drm_vm_open_locked(vma);
- return 0;
-}
-
-int drm_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- struct drm_file *priv = filp->private_data;
- struct drm_device *dev = priv->minor->dev;
- int ret;
-
- mutex_lock(&dev->struct_mutex);
- ret = drm_mmap_locked(filp, vma);
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
-}
-EXPORT_SYMBOL(drm_mmap);
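(Illustrative aside, not part of this patch: drm_mmap() is the mmap entry point the DRM core exports, and individual drivers reach it through their file_operations. A minimal sketch of that wiring follows; "example_drm_fops" is a hypothetical name, while the callbacks are the standard DRM core entry points of roughly this kernel generation.)

/* Sketch only: how a DRM driver normally exposes drm_mmap() above; the
 * actual map-type dispatch happens in drm_mmap_locked(). */
static const struct file_operations example_drm_fops = {
	.owner          = THIS_MODULE,
	.open           = drm_open,
	.release        = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap           = drm_mmap,
	.poll           = drm_poll,
	.fasync         = drm_fasync,
};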
/**********************************************************************
*
- * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
#define PVRSRV_BRIDGE_UNMAP_MEMINFO_MEM PVRSRV_IOWR(PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST+3)
#define PVRSRV_BRIDGE_SHAREDMEM_CMD_LAST (PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST+3)
-#define PVRSRV_BRIDGE_SERVICES4_TMP_CMD_FIRST (PVRSRV_BRIDGE_SHAREDMEM_CMD_LAST+1)
-#define PVRSRV_BRIDGE_GETMMU_PD_DEVPADDR PVRSRV_IOWR(PVRSRV_BRIDGE_SERVICES4_TMP_CMD_FIRST+0)
-#define PVRSRV_BRIDGE_SERVICES4_TMP_CMD_LAST (PVRSRV_BRIDGE_SERVICES4_TMP_CMD_FIRST+0)
-
-#define PVRSRV_BRIDGE_INITSRV_CMD_FIRST (PVRSRV_BRIDGE_SERVICES4_TMP_CMD_LAST+1)
+#define PVRSRV_BRIDGE_INITSRV_CMD_FIRST (PVRSRV_BRIDGE_SHAREDMEM_CMD_LAST+1)
#define PVRSRV_BRIDGE_INITSRV_CONNECT PVRSRV_IOWR(PVRSRV_BRIDGE_INITSRV_CMD_FIRST+0)
#define PVRSRV_BRIDGE_INITSRV_DISCONNECT PVRSRV_IOWR(PVRSRV_BRIDGE_INITSRV_CMD_FIRST+1)
#define PVRSRV_BRIDGE_INITSRV_CMD_LAST (PVRSRV_BRIDGE_INITSRV_CMD_FIRST+1)
PVRSRV_ERROR eError;
}PVRSRV_BRIDGE_OUT_UNMAP_MEMINFO_MEM;
-typedef struct PVRSRV_BRIDGE_IN_GETMMU_PD_DEVPADDR_TAG
-{
- IMG_UINT32 ui32BridgeFlags;
-#if defined (SUPPORT_SID_INTERFACE)
- IMG_SID hDevMemContext;
-#else
- IMG_HANDLE hDevMemContext;
-#endif
-}PVRSRV_BRIDGE_IN_GETMMU_PD_DEVPADDR;
-
-typedef struct PVRSRV_BRIDGE_OUT_GETMMU_PD_DEVPADDR_TAG
-{
- IMG_DEV_PHYADDR sPDDevPAddr;
- PVRSRV_ERROR eError;
-}PVRSRV_BRIDGE_OUT_GETMMU_PD_DEVPADDR;
-
typedef struct PVRSRV_BRIDGE_IN_EVENT_OBJECT_WAI_TAG
{
IMG_UINT32 ui32BridgeFlags;
PVRSRV_COMMAND *psCommand);
-
-IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV
-PVRSRVGetMMUContextPDDevPAddr(const PVRSRV_CONNECTION *psConnection,
-#if defined (SUPPORT_SID_INTERFACE)
- IMG_SID hDevMemContext,
-#else
- IMG_HANDLE hDevMemContext,
-#endif
- IMG_DEV_PHYADDR *sPDDevPAddr);
-
IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV
PVRSRVAllocSharedSysMem(const PVRSRV_CONNECTION *psConnection,
IMG_UINT32 ui32Flags,
#define PVRSRV_BRIDGE_SGX_2DQUERYBLTSCOMPLETE PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+9)
-#define PVRSRV_BRIDGE_SGX_GETMMUPDADDR PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+10)
-
#if defined(TRANSFER_QUEUE)
#define PVRSRV_BRIDGE_SGX_SUBMITTRANSFER PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+13)
#endif
#define PVRSRV_BRIDGE_SGX_SCHEDULE_PROCESS_QUEUES PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+28)
-#define PVRSRV_BRIDGE_SGX_READ_HWPERF_CB PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+30)
+#define PVRSRV_BRIDGE_SGX_READ_HWPERF_CB PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+29)
+#define PVRSRV_BRIDGE_SGX_SET_RENDER_CONTEXT_PRIORITY PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+30)
+#define PVRSRV_BRIDGE_SGX_SET_TRANSFER_CONTEXT_PRIORITY PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+31)
#if defined(PDUMP)
-#define PVRSRV_BRIDGE_SGX_PDUMP_BUFFER_ARRAY PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+31)
-#define PVRSRV_BRIDGE_SGX_PDUMP_3D_SIGNATURE_REGISTERS PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+32)
-#define PVRSRV_BRIDGE_SGX_PDUMP_COUNTER_REGISTERS PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+33)
-#define PVRSRV_BRIDGE_SGX_PDUMP_TA_SIGNATURE_REGISTERS PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+34)
-#define PVRSRV_BRIDGE_SGX_PDUMP_HWPERFCB PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+35)
-#define PVRSRV_BRIDGE_SGX_PDUMP_SAVEMEM PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+36)
+#define PVRSRV_BRIDGE_SGX_PDUMP_BUFFER_ARRAY PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+32)
+#define PVRSRV_BRIDGE_SGX_PDUMP_3D_SIGNATURE_REGISTERS PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+33)
+#define PVRSRV_BRIDGE_SGX_PDUMP_COUNTER_REGISTERS PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+34)
+#define PVRSRV_BRIDGE_SGX_PDUMP_TA_SIGNATURE_REGISTERS PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+35)
+#define PVRSRV_BRIDGE_SGX_PDUMP_HWPERFCB PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+36)
+#define PVRSRV_BRIDGE_SGX_PDUMP_SAVEMEM PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+37)
#endif
-#define PVRSRV_BRIDGE_LAST_SGX_CMD (PVRSRV_BRIDGE_SGX_CMD_BASE+36)
+#define PVRSRV_BRIDGE_LAST_SGX_CMD (PVRSRV_BRIDGE_SGX_CMD_BASE+37)
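(Note on the hunk above: it is an ABI renumbering. PVRSRV_BRIDGE_SGX_READ_HWPERF_CB drops from offset 30 to 29, the two new context-priority commands take 30 and 31, each PDUMP command shifts up by one, and PVRSRV_BRIDGE_LAST_SGX_CMD grows to base+37, so user-mode and kernel-mode builds must pick up this header together. The fragment below is a hedged, kernel-side-only sketch of a compile-time guard one could add after such a renumbering; it is not in the original header, and BUILD_BUG_ON is the standard kernel macro.)

/* Hypothetical check: the renumbered commands must remain distinct ioctl
 * numbers; BUILD_BUG_ON() breaks the build if a condition is a non-zero
 * compile-time constant. */
static inline void SGXBridgeCmdLayoutCheck(void)
{
	BUILD_BUG_ON(PVRSRV_BRIDGE_SGX_READ_HWPERF_CB ==
	             PVRSRV_BRIDGE_SGX_SET_RENDER_CONTEXT_PRIORITY);
	BUILD_BUG_ON(PVRSRV_BRIDGE_SGX_SET_RENDER_CONTEXT_PRIORITY ==
	             PVRSRV_BRIDGE_SGX_SET_TRANSFER_CONTEXT_PRIORITY);
}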
typedef struct PVRSRV_BRIDGE_IN_GETPHYSPAGEADDR
}PVRSRV_BRIDGE_OUT_GETPHYSPAGEADDR;
-typedef struct PVRSRV_BRIDGE_IN_SGX_GETMMU_PDADDR_TAG
+typedef struct PVRSRV_BRIDGE_IN_SGX_SET_TRANSFER_CONTEXT_PRIORITY_TAG
+ {
+ IMG_UINT32 ui32BridgeFlags;
+ #if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevCookie;
+ IMG_SID hHWTransferContext;
+ #else
+ IMG_HANDLE hDevCookie;
+ IMG_HANDLE hHWTransferContext;
+ #endif
+ IMG_UINT32 ui32Priority;
+ IMG_UINT32 ui32OffsetOfPriorityField;
+}PVRSRV_BRIDGE_IN_SGX_SET_TRANSFER_CONTEXT_PRIORITY;
+
+
+typedef struct PVRSRV_BRIDGE_IN_SGX_SET_RENDER_CONTEXT_PRIORITY_TAG
{
IMG_UINT32 ui32BridgeFlags;
#if defined (SUPPORT_SID_INTERFACE)
IMG_SID hDevCookie;
- IMG_SID hDevMemContext;
+ IMG_SID hHWRenderContext;
#else
IMG_HANDLE hDevCookie;
- IMG_HANDLE hDevMemContext;
+ IMG_HANDLE hHWRenderContext;
#endif
-}PVRSRV_BRIDGE_IN_SGX_GETMMU_PDADDR;
-
-
-typedef struct PVRSRV_BRIDGE_OUT_SGX_GETMMU_PDADDR_TAG
-{
- IMG_DEV_PHYADDR sPDDevPAddr;
- PVRSRV_ERROR eError;
-}PVRSRV_BRIDGE_OUT_SGX_GETMMU_PDADDR;
+ IMG_UINT32 ui32Priority;
+ IMG_UINT32 ui32OffsetOfPriorityField;
+}PVRSRV_BRIDGE_IN_SGX_SET_RENDER_CONTEXT_PRIORITY;
typedef struct PVRSRV_BRIDGE_IN_GETCLIENTINFO_TAG
#else
IMG_HANDLE hDevCookie;
#endif
- IMG_DEV_VIRTADDR sHWRenderContextDevVAddr;
+ IMG_CPU_VIRTADDR pHWRenderContextCpuVAddr;
+ IMG_UINT32 ui32HWRenderContextSize;
+ IMG_UINT32 ui32OffsetToPDDevPAddr;
+ IMG_HANDLE hDevMemContext;
}PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_RENDER_CONTEXT;
typedef struct PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_RENDER_CONTEXT_TAG
#else
IMG_HANDLE hHWRenderContext;
#endif
+ IMG_DEV_VIRTADDR sHWRenderContextDevVAddr;
}PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_RENDER_CONTEXT;
typedef struct PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_RENDER_CONTEXT_TAG
#else
IMG_HANDLE hDevCookie;
#endif
- IMG_DEV_VIRTADDR sHWTransferContextDevVAddr;
+ IMG_CPU_VIRTADDR pHWTransferContextCpuVAddr;
+ IMG_UINT32 ui32HWTransferContextSize;
+ IMG_UINT32 ui32OffsetToPDDevPAddr;
+ IMG_HANDLE hDevMemContext;
}PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_TRANSFER_CONTEXT;
typedef struct PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_TRANSFER_CONTEXT_TAG
#else
IMG_HANDLE hHWTransferContext;
#endif
+ IMG_DEV_VIRTADDR sHWTransferContextDevVAddr;
}PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_TRANSFER_CONTEXT;
typedef struct PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_TRANSFER_CONTEXT_TAG
#else
IMG_HANDLE hDevCookie;
#endif
- IMG_DEV_VIRTADDR sHW2DContextDevVAddr;
+ IMG_CPU_VIRTADDR pHW2DContextCpuVAddr;
+ IMG_UINT32 ui32HW2DContextSize;
+ IMG_UINT32 ui32OffsetToPDDevPAddr;
+ IMG_HANDLE hDevMemContext;
}PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_2D_CONTEXT;
typedef struct PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_2D_CONTEXT_TAG
#else
IMG_HANDLE hHW2DContext;
#endif
+ IMG_DEV_VIRTADDR sHW2DContextDevVAddr;
}PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_2D_CONTEXT;
typedef struct PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_2D_CONTEXT_TAG
#endif
#endif
-#if defined(FIX_HW_BRN_31542)
+#if defined(FIX_HW_BRN_31542) || defined(FIX_HW_BRN_36513)
#if defined (SUPPORT_SID_INTERFACE)
IMG_SID hKernelClearClipWAVDMStreamMemInfo;
IMG_SID hKernelClearClipWAIndexStreamMemInfo;
#else
IMG_HANDLE hKernelEDMStatusBufferMemInfo;
#endif
-#endif
-#if defined(SGX_FEATURE_OVERLAPPED_SPM)
-#if defined (SUPPORT_SID_INTERFACE)
- IMG_SID hKernelTmpRgnHeaderMemInfo;
-#else
- IMG_HANDLE hKernelTmpRgnHeaderMemInfo;
-#endif
#endif
IMG_UINT32 ui32EDMTaskReg0;
#endif
-#include "srvkm.h"
+#include "srvkm.h"
PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY g_BridgeDispatchTable[BRIDGE_DISPATCH_TABLE_ENTRY_COUNT];
#if defined(DEBUG_BRIDGE_KM)
PVRSRV_ERROR
CopyFromUserWrapper(PVRSRV_PER_PROCESS_DATA *pProcData,
- IMG_UINT32 ui32BridgeID,
- IMG_VOID *pvDest,
- IMG_VOID *pvSrc,
- IMG_UINT32 ui32Size)
+ IMG_UINT32 ui32BridgeID,
+ IMG_VOID *pvDest,
+ IMG_VOID *pvSrc,
+ IMG_UINT32 ui32Size)
{
- g_BridgeDispatchTable[ui32BridgeID].ui32CopyFromUserTotalBytes+=ui32Size;
- g_BridgeGlobalStats.ui32TotalCopyFromUserBytes+=ui32Size;
- return OSCopyFromUser(pProcData, pvDest, pvSrc, ui32Size);
+ g_BridgeDispatchTable[ui32BridgeID].ui32CopyFromUserTotalBytes+=ui32Size;
+ g_BridgeGlobalStats.ui32TotalCopyFromUserBytes+=ui32Size;
+ return OSCopyFromUser(pProcData, pvDest, pvSrc, ui32Size);
}
PVRSRV_ERROR
CopyToUserWrapper(PVRSRV_PER_PROCESS_DATA *pProcData,
- IMG_UINT32 ui32BridgeID,
- IMG_VOID *pvDest,
- IMG_VOID *pvSrc,
- IMG_UINT32 ui32Size)
+ IMG_UINT32 ui32BridgeID,
+ IMG_VOID *pvDest,
+ IMG_VOID *pvSrc,
+ IMG_UINT32 ui32Size)
{
- g_BridgeDispatchTable[ui32BridgeID].ui32CopyToUserTotalBytes+=ui32Size;
- g_BridgeGlobalStats.ui32TotalCopyToUserBytes+=ui32Size;
- return OSCopyToUser(pProcData, pvDest, pvSrc, ui32Size);
+ g_BridgeDispatchTable[ui32BridgeID].ui32CopyToUserTotalBytes+=ui32Size;
+ g_BridgeGlobalStats.ui32TotalCopyToUserBytes+=ui32Size;
+ return OSCopyToUser(pProcData, pvDest, pvSrc, ui32Size);
}
#endif
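(Illustrative aside: when DEBUG_BRIDGE_KM is defined, CopyFromUserWrapper()/CopyToUserWrapper() above account every user/kernel copy against the per-command counters in g_BridgeDispatchTable before falling through to OSCopyFromUser()/OSCopyToUser(). The handler below is hypothetical and not part of the dispatch table; it only shows the calling convention visible in the wrapper signatures.)

static IMG_INT
ExampleBridgeHandlerBW(IMG_UINT32 ui32BridgeID,
                       IMG_VOID *pvUserIn,
                       PVRSRV_BRIDGE_RETURN *psRetOUT,
                       PVRSRV_PER_PROCESS_DATA *psPerProc)
{
	IMG_UINT32 ui32Value;

	/* Pull the caller's input across the boundary through the wrapper so
	 * the per-bridge-command byte counters keep accumulating. */
	psRetOUT->eError = CopyFromUserWrapper(psPerProc, ui32BridgeID,
	                                       &ui32Value, pvUserIn,
	                                       sizeof(ui32Value));
	if (psRetOUT->eError != PVRSRV_OK)
	{
		return 0;
	}

	/* ... act on ui32Value ... */
	return 0;
}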
static IMG_INT
PVRSRVEnumerateDevicesBW(IMG_UINT32 ui32BridgeID,
- IMG_VOID *psBridgeIn,
- PVRSRV_BRIDGE_OUT_ENUMDEVICE *psEnumDeviceOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ IMG_VOID *psBridgeIn,
+ PVRSRV_BRIDGE_OUT_ENUMDEVICE *psEnumDeviceOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ENUM_DEVICES);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ENUM_DEVICES);
- PVR_UNREFERENCED_PARAMETER(psPerProc);
- PVR_UNREFERENCED_PARAMETER(psBridgeIn);
+ PVR_UNREFERENCED_PARAMETER(psPerProc);
+ PVR_UNREFERENCED_PARAMETER(psBridgeIn);
- psEnumDeviceOUT->eError =
- PVRSRVEnumerateDevicesKM(&psEnumDeviceOUT->ui32NumDevices,
- psEnumDeviceOUT->asDeviceIdentifier);
+ psEnumDeviceOUT->eError =
+ PVRSRVEnumerateDevicesKM(&psEnumDeviceOUT->ui32NumDevices,
+ psEnumDeviceOUT->asDeviceIdentifier);
- return 0;
+ return 0;
}
static IMG_INT
PVRSRVAcquireDeviceDataBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_ACQUIRE_DEVICEINFO *psAcquireDevInfoIN,
- PVRSRV_BRIDGE_OUT_ACQUIRE_DEVICEINFO *psAcquireDevInfoOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_ACQUIRE_DEVICEINFO *psAcquireDevInfoIN,
+ PVRSRV_BRIDGE_OUT_ACQUIRE_DEVICEINFO *psAcquireDevInfoOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- IMG_HANDLE hDevCookieInt;
-
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ACQUIRE_DEVICEINFO);
-
- psAcquireDevInfoOUT->eError =
- PVRSRVAcquireDeviceDataKM(psAcquireDevInfoIN->uiDevIndex,
- psAcquireDevInfoIN->eDeviceType,
- &hDevCookieInt);
- if(psAcquireDevInfoOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
-
-
- psAcquireDevInfoOUT->eError =
- PVRSRVAllocHandle(psPerProc->psHandleBase,
- &psAcquireDevInfoOUT->hDevCookie,
- hDevCookieInt,
- PVRSRV_HANDLE_TYPE_DEV_NODE,
- PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
-
- return 0;
+ IMG_HANDLE hDevCookieInt;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ACQUIRE_DEVICEINFO);
+
+ psAcquireDevInfoOUT->eError =
+ PVRSRVAcquireDeviceDataKM(psAcquireDevInfoIN->uiDevIndex,
+ psAcquireDevInfoIN->eDeviceType,
+ &hDevCookieInt);
+ if(psAcquireDevInfoOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+
+ psAcquireDevInfoOUT->eError =
+ PVRSRVAllocHandle(psPerProc->psHandleBase,
+ &psAcquireDevInfoOUT->hDevCookie,
+ hDevCookieInt,
+ PVRSRV_HANDLE_TYPE_DEV_NODE,
+ PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
+
+ return 0;
}
static IMG_INT
PVRSRVCreateDeviceMemContextBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_CREATE_DEVMEMCONTEXT *psCreateDevMemContextIN,
- PVRSRV_BRIDGE_OUT_CREATE_DEVMEMCONTEXT *psCreateDevMemContextOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_CREATE_DEVMEMCONTEXT *psCreateDevMemContextIN,
+ PVRSRV_BRIDGE_OUT_CREATE_DEVMEMCONTEXT *psCreateDevMemContextOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- IMG_HANDLE hDevCookieInt;
- IMG_HANDLE hDevMemContextInt;
- IMG_UINT32 i;
- IMG_BOOL bCreated;
+ IMG_HANDLE hDevCookieInt;
+ IMG_HANDLE hDevMemContextInt;
+ IMG_UINT32 i;
+ IMG_BOOL bCreated;
#if defined (SUPPORT_SID_INTERFACE)
- PVRSRV_HEAP_INFO_KM asHeapInfo[PVRSRV_MAX_CLIENT_HEAPS];
+ PVRSRV_HEAP_INFO_KM asHeapInfo[PVRSRV_MAX_CLIENT_HEAPS];
#endif
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CREATE_DEVMEMCONTEXT);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CREATE_DEVMEMCONTEXT);
-
- NEW_HANDLE_BATCH_OR_ERROR(psCreateDevMemContextOUT->eError, psPerProc, PVRSRV_MAX_CLIENT_HEAPS + 1)
+
+ NEW_HANDLE_BATCH_OR_ERROR(psCreateDevMemContextOUT->eError, psPerProc, PVRSRV_MAX_CLIENT_HEAPS + 1)
- psCreateDevMemContextOUT->eError =
- PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
- psCreateDevMemContextIN->hDevCookie,
- PVRSRV_HANDLE_TYPE_DEV_NODE);
+ psCreateDevMemContextOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
+ psCreateDevMemContextIN->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE);
- if(psCreateDevMemContextOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
+ if(psCreateDevMemContextOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
- psCreateDevMemContextOUT->eError =
- PVRSRVCreateDeviceMemContextKM(hDevCookieInt,
- psPerProc,
- &hDevMemContextInt,
- &psCreateDevMemContextOUT->ui32ClientHeapCount,
+ psCreateDevMemContextOUT->eError =
+ PVRSRVCreateDeviceMemContextKM(hDevCookieInt,
+ psPerProc,
+ &hDevMemContextInt,
+ &psCreateDevMemContextOUT->ui32ClientHeapCount,
#if defined (SUPPORT_SID_INTERFACE)
- &asHeapInfo[0],
+ &asHeapInfo[0],
#else
- &psCreateDevMemContextOUT->sHeapInfo[0],
+ &psCreateDevMemContextOUT->sHeapInfo[0],
#endif
- &bCreated,
- pbSharedDeviceMemHeap);
-
- if(psCreateDevMemContextOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
-
-
- if(bCreated)
- {
- PVRSRVAllocHandleNR(psPerProc->psHandleBase,
- &psCreateDevMemContextOUT->hDevMemContext,
- hDevMemContextInt,
- PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT,
- PVRSRV_HANDLE_ALLOC_FLAG_NONE);
- }
- else
- {
- psCreateDevMemContextOUT->eError =
- PVRSRVFindHandle(psPerProc->psHandleBase,
- &psCreateDevMemContextOUT->hDevMemContext,
- hDevMemContextInt,
- PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
- if(psCreateDevMemContextOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
- }
-
- for(i = 0; i < psCreateDevMemContextOUT->ui32ClientHeapCount; i++)
- {
+ &bCreated,
+ pbSharedDeviceMemHeap);
+
+ if(psCreateDevMemContextOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+
+ if(bCreated)
+ {
+ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
+ &psCreateDevMemContextOUT->hDevMemContext,
+ hDevMemContextInt,
+ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
+ }
+ else
+ {
+ psCreateDevMemContextOUT->eError =
+ PVRSRVFindHandle(psPerProc->psHandleBase,
+ &psCreateDevMemContextOUT->hDevMemContext,
+ hDevMemContextInt,
+ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
+ if(psCreateDevMemContextOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+ }
+
+ for(i = 0; i < psCreateDevMemContextOUT->ui32ClientHeapCount; i++)
+ {
#if defined (SUPPORT_SID_INTERFACE)
- IMG_SID hDevMemHeapExt;
+ IMG_SID hDevMemHeapExt;
#else
- IMG_HANDLE hDevMemHeapExt;
+ IMG_HANDLE hDevMemHeapExt;
#endif
#if defined(PVR_SECURE_HANDLES) || defined (SUPPORT_SID_INTERFACE)
- if(abSharedDeviceMemHeap[i])
+ if(abSharedDeviceMemHeap[i])
#endif
- {
-
+ {
+
#if defined (SUPPORT_SID_INTERFACE)
- PVRSRVAllocHandleNR(psPerProc->psHandleBase,
- &hDevMemHeapExt,
- asHeapInfo[i].hDevMemHeap,
- PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP,
- PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
+ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
+ &hDevMemHeapExt,
+ asHeapInfo[i].hDevMemHeap,
+ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP,
+ PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
#else
- PVRSRVAllocHandleNR(psPerProc->psHandleBase, &hDevMemHeapExt,
- psCreateDevMemContextOUT->sHeapInfo[i].hDevMemHeap,
- PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP,
- PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
+ PVRSRVAllocHandleNR(psPerProc->psHandleBase, &hDevMemHeapExt,
+ psCreateDevMemContextOUT->sHeapInfo[i].hDevMemHeap,
+ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP,
+ PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
#endif
- }
+ }
#if defined(PVR_SECURE_HANDLES) || defined (SUPPORT_SID_INTERFACE)
- else
- {
-
- if(bCreated)
- {
+ else
+ {
+
+ if(bCreated)
+ {
#if defined (SUPPORT_SID_INTERFACE)
- PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
- &hDevMemHeapExt,
- asHeapInfo[i].hDevMemHeap,
- PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP,
- PVRSRV_HANDLE_ALLOC_FLAG_NONE,
- psCreateDevMemContextOUT->hDevMemContext);
+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
+ &hDevMemHeapExt,
+ asHeapInfo[i].hDevMemHeap,
+ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+ psCreateDevMemContextOUT->hDevMemContext);
#else
- PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, &hDevMemHeapExt,
- psCreateDevMemContextOUT->sHeapInfo[i].hDevMemHeap,
- PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP,
- PVRSRV_HANDLE_ALLOC_FLAG_NONE,
- psCreateDevMemContextOUT->hDevMemContext);
+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, &hDevMemHeapExt,
+ psCreateDevMemContextOUT->sHeapInfo[i].hDevMemHeap,
+ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+ psCreateDevMemContextOUT->hDevMemContext);
#endif
- }
- else
- {
- psCreateDevMemContextOUT->eError =
- PVRSRVFindHandle(psPerProc->psHandleBase,
- &hDevMemHeapExt,
+ }
+ else
+ {
+ psCreateDevMemContextOUT->eError =
+ PVRSRVFindHandle(psPerProc->psHandleBase,
+ &hDevMemHeapExt,
#if defined (SUPPORT_SID_INTERFACE)
- asHeapInfo[i].hDevMemHeap,
+ asHeapInfo[i].hDevMemHeap,
#else
- psCreateDevMemContextOUT->sHeapInfo[i].hDevMemHeap,
+ psCreateDevMemContextOUT->sHeapInfo[i].hDevMemHeap,
#endif
- PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP);
- if(psCreateDevMemContextOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
- }
- }
+ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP);
+ if(psCreateDevMemContextOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+ }
+ }
#endif
- psCreateDevMemContextOUT->sHeapInfo[i].hDevMemHeap = hDevMemHeapExt;
+ psCreateDevMemContextOUT->sHeapInfo[i].hDevMemHeap = hDevMemHeapExt;
#if defined (SUPPORT_SID_INTERFACE)
- psCreateDevMemContextOUT->sHeapInfo[i].ui32HeapID = asHeapInfo[i].ui32HeapID;
- psCreateDevMemContextOUT->sHeapInfo[i].sDevVAddrBase = asHeapInfo[i].sDevVAddrBase;
- psCreateDevMemContextOUT->sHeapInfo[i].ui32HeapByteSize = asHeapInfo[i].ui32HeapByteSize;
- psCreateDevMemContextOUT->sHeapInfo[i].ui32Attribs = asHeapInfo[i].ui32Attribs;
- psCreateDevMemContextOUT->sHeapInfo[i].ui32XTileStride = asHeapInfo[i].ui32XTileStride;
+ psCreateDevMemContextOUT->sHeapInfo[i].ui32HeapID = asHeapInfo[i].ui32HeapID;
+ psCreateDevMemContextOUT->sHeapInfo[i].sDevVAddrBase = asHeapInfo[i].sDevVAddrBase;
+ psCreateDevMemContextOUT->sHeapInfo[i].ui32HeapByteSize = asHeapInfo[i].ui32HeapByteSize;
+ psCreateDevMemContextOUT->sHeapInfo[i].ui32Attribs = asHeapInfo[i].ui32Attribs;
+ psCreateDevMemContextOUT->sHeapInfo[i].ui32XTileStride = asHeapInfo[i].ui32XTileStride;
#endif
- }
+ }
- COMMIT_HANDLE_BATCH_OR_ERROR(psCreateDevMemContextOUT->eError, psPerProc)
+ COMMIT_HANDLE_BATCH_OR_ERROR(psCreateDevMemContextOUT->eError, psPerProc)
- return 0;
+ return 0;
}
static IMG_INT
PVRSRVDestroyDeviceMemContextBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_DESTROY_DEVMEMCONTEXT *psDestroyDevMemContextIN,
- PVRSRV_BRIDGE_RETURN *psRetOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_DESTROY_DEVMEMCONTEXT *psDestroyDevMemContextIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- IMG_HANDLE hDevCookieInt;
- IMG_HANDLE hDevMemContextInt;
- IMG_BOOL bDestroyed;
-
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_DESTROY_DEVMEMCONTEXT);
-
- psRetOUT->eError =
- PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
- psDestroyDevMemContextIN->hDevCookie,
- PVRSRV_HANDLE_TYPE_DEV_NODE);
-
- if(psRetOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
-
- psRetOUT->eError =
- PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemContextInt,
- psDestroyDevMemContextIN->hDevMemContext,
- PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
-
- if(psRetOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
-
- psRetOUT->eError =
- PVRSRVDestroyDeviceMemContextKM(hDevCookieInt, hDevMemContextInt, &bDestroyed);
-
- if(psRetOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
-
- if(bDestroyed)
- {
- psRetOUT->eError =
- PVRSRVReleaseHandle(psPerProc->psHandleBase,
- psDestroyDevMemContextIN->hDevMemContext,
- PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
- }
-
- return 0;
+ IMG_HANDLE hDevCookieInt;
+ IMG_HANDLE hDevMemContextInt;
+ IMG_BOOL bDestroyed;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_DESTROY_DEVMEMCONTEXT);
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
+ psDestroyDevMemContextIN->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE);
+
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemContextInt,
+ psDestroyDevMemContextIN->hDevMemContext,
+ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
+
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PVRSRVDestroyDeviceMemContextKM(hDevCookieInt, hDevMemContextInt, &bDestroyed);
+
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ if(bDestroyed)
+ {
+ psRetOUT->eError =
+ PVRSRVReleaseHandle(psPerProc->psHandleBase,
+ psDestroyDevMemContextIN->hDevMemContext,
+ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
+ }
+
+ return 0;
}
static IMG_INT
PVRSRVGetDeviceMemHeapInfoBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_GET_DEVMEM_HEAPINFO *psGetDevMemHeapInfoIN,
- PVRSRV_BRIDGE_OUT_GET_DEVMEM_HEAPINFO *psGetDevMemHeapInfoOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_GET_DEVMEM_HEAPINFO *psGetDevMemHeapInfoIN,
+ PVRSRV_BRIDGE_OUT_GET_DEVMEM_HEAPINFO *psGetDevMemHeapInfoOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- IMG_HANDLE hDevCookieInt;
- IMG_HANDLE hDevMemContextInt;
- IMG_UINT32 i;
+ IMG_HANDLE hDevCookieInt;
+ IMG_HANDLE hDevMemContextInt;
+ IMG_UINT32 i;
#if defined (SUPPORT_SID_INTERFACE)
- PVRSRV_HEAP_INFO_KM asHeapInfo[PVRSRV_MAX_CLIENT_HEAPS];
+ PVRSRV_HEAP_INFO_KM asHeapInfo[PVRSRV_MAX_CLIENT_HEAPS];
#endif
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_DEVMEM_HEAPINFO);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_DEVMEM_HEAPINFO);
- NEW_HANDLE_BATCH_OR_ERROR(psGetDevMemHeapInfoOUT->eError, psPerProc, PVRSRV_MAX_CLIENT_HEAPS)
+ NEW_HANDLE_BATCH_OR_ERROR(psGetDevMemHeapInfoOUT->eError, psPerProc, PVRSRV_MAX_CLIENT_HEAPS)
- psGetDevMemHeapInfoOUT->eError =
- PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
- psGetDevMemHeapInfoIN->hDevCookie,
- PVRSRV_HANDLE_TYPE_DEV_NODE);
+ psGetDevMemHeapInfoOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
+ psGetDevMemHeapInfoIN->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE);
- if(psGetDevMemHeapInfoOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
+ if(psGetDevMemHeapInfoOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
- psGetDevMemHeapInfoOUT->eError =
- PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemContextInt,
- psGetDevMemHeapInfoIN->hDevMemContext,
- PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
+ psGetDevMemHeapInfoOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemContextInt,
+ psGetDevMemHeapInfoIN->hDevMemContext,
+ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
- if(psGetDevMemHeapInfoOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
+ if(psGetDevMemHeapInfoOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
- psGetDevMemHeapInfoOUT->eError =
- PVRSRVGetDeviceMemHeapInfoKM(hDevCookieInt,
- hDevMemContextInt,
- &psGetDevMemHeapInfoOUT->ui32ClientHeapCount,
+ psGetDevMemHeapInfoOUT->eError =
+ PVRSRVGetDeviceMemHeapInfoKM(hDevCookieInt,
+ hDevMemContextInt,
+ &psGetDevMemHeapInfoOUT->ui32ClientHeapCount,
#if defined (SUPPORT_SID_INTERFACE)
&asHeapInfo[0],
#else
- &psGetDevMemHeapInfoOUT->sHeapInfo[0],
+ &psGetDevMemHeapInfoOUT->sHeapInfo[0],
#endif
- pbSharedDeviceMemHeap);
+ pbSharedDeviceMemHeap);
- if(psGetDevMemHeapInfoOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
+ if(psGetDevMemHeapInfoOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
- for(i = 0; i < psGetDevMemHeapInfoOUT->ui32ClientHeapCount; i++)
- {
+ for(i = 0; i < psGetDevMemHeapInfoOUT->ui32ClientHeapCount; i++)
+ {
#if defined (SUPPORT_SID_INTERFACE)
- IMG_SID hDevMemHeapExt;
+ IMG_SID hDevMemHeapExt;
#else
- IMG_HANDLE hDevMemHeapExt;
+ IMG_HANDLE hDevMemHeapExt;
#endif
#if defined(PVR_SECURE_HANDLES) || defined (SUPPORT_SID_INTERFACE)
- if(abSharedDeviceMemHeap[i])
+ if(abSharedDeviceMemHeap[i])
#endif
- {
-
+ {
+
#if defined (SUPPORT_SID_INTERFACE)
- PVRSRVAllocHandleNR(psPerProc->psHandleBase,
- &hDevMemHeapExt,
- asHeapInfo[i].hDevMemHeap,
- PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP,
- PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
+ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
+ &hDevMemHeapExt,
+ asHeapInfo[i].hDevMemHeap,
+ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP,
+ PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
#else
- PVRSRVAllocHandleNR(psPerProc->psHandleBase, &hDevMemHeapExt,
- psGetDevMemHeapInfoOUT->sHeapInfo[i].hDevMemHeap,
- PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP,
- PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
+ PVRSRVAllocHandleNR(psPerProc->psHandleBase, &hDevMemHeapExt,
+ psGetDevMemHeapInfoOUT->sHeapInfo[i].hDevMemHeap,
+ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP,
+ PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
#endif
- }
+ }
#if defined(PVR_SECURE_HANDLES) || defined (SUPPORT_SID_INTERFACE)
- else
- {
-
- psGetDevMemHeapInfoOUT->eError =
- PVRSRVFindHandle(psPerProc->psHandleBase,
- &hDevMemHeapExt,
+ else
+ {
+
+ psGetDevMemHeapInfoOUT->eError =
+ PVRSRVFindHandle(psPerProc->psHandleBase,
+ &hDevMemHeapExt,
#if defined (SUPPORT_SID_INTERFACE)
- asHeapInfo[i].hDevMemHeap,
+ asHeapInfo[i].hDevMemHeap,
#else
- psGetDevMemHeapInfoOUT->sHeapInfo[i].hDevMemHeap,
+ psGetDevMemHeapInfoOUT->sHeapInfo[i].hDevMemHeap,
#endif
- PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP);
- if(psGetDevMemHeapInfoOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
- }
+ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP);
+ if(psGetDevMemHeapInfoOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+ }
#endif
- psGetDevMemHeapInfoOUT->sHeapInfo[i].hDevMemHeap = hDevMemHeapExt;
+ psGetDevMemHeapInfoOUT->sHeapInfo[i].hDevMemHeap = hDevMemHeapExt;
#if defined (SUPPORT_SID_INTERFACE)
- psGetDevMemHeapInfoOUT->sHeapInfo[i].ui32HeapID = asHeapInfo[i].ui32HeapID;
- psGetDevMemHeapInfoOUT->sHeapInfo[i].sDevVAddrBase = asHeapInfo[i].sDevVAddrBase;
- psGetDevMemHeapInfoOUT->sHeapInfo[i].ui32HeapByteSize = asHeapInfo[i].ui32HeapByteSize;
- psGetDevMemHeapInfoOUT->sHeapInfo[i].ui32Attribs = asHeapInfo[i].ui32Attribs;
- psGetDevMemHeapInfoOUT->sHeapInfo[i].ui32XTileStride = asHeapInfo[i].ui32XTileStride;
+ psGetDevMemHeapInfoOUT->sHeapInfo[i].ui32HeapID = asHeapInfo[i].ui32HeapID;
+ psGetDevMemHeapInfoOUT->sHeapInfo[i].sDevVAddrBase = asHeapInfo[i].sDevVAddrBase;
+ psGetDevMemHeapInfoOUT->sHeapInfo[i].ui32HeapByteSize = asHeapInfo[i].ui32HeapByteSize;
+ psGetDevMemHeapInfoOUT->sHeapInfo[i].ui32Attribs = asHeapInfo[i].ui32Attribs;
+ psGetDevMemHeapInfoOUT->sHeapInfo[i].ui32XTileStride = asHeapInfo[i].ui32XTileStride;
#endif
- }
+ }
- COMMIT_HANDLE_BATCH_OR_ERROR(psGetDevMemHeapInfoOUT->eError, psPerProc)
+ COMMIT_HANDLE_BATCH_OR_ERROR(psGetDevMemHeapInfoOUT->eError, psPerProc)
- return 0;
+ return 0;
}
#if defined(OS_PVRSRV_ALLOC_DEVICE_MEM_BW)
IMG_INT
PVRSRVAllocDeviceMemBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_ALLOCDEVICEMEM *psAllocDeviceMemIN,
- PVRSRV_BRIDGE_OUT_ALLOCDEVICEMEM *psAllocDeviceMemOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc);
+ PVRSRV_BRIDGE_IN_ALLOCDEVICEMEM *psAllocDeviceMemIN,
+ PVRSRV_BRIDGE_OUT_ALLOCDEVICEMEM *psAllocDeviceMemOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc);
#else
static IMG_INT
PVRSRVAllocDeviceMemBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_ALLOCDEVICEMEM *psAllocDeviceMemIN,
- PVRSRV_BRIDGE_OUT_ALLOCDEVICEMEM *psAllocDeviceMemOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_ALLOCDEVICEMEM *psAllocDeviceMemIN,
+ PVRSRV_BRIDGE_OUT_ALLOCDEVICEMEM *psAllocDeviceMemOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- PVRSRV_KERNEL_MEM_INFO *psMemInfo;
- IMG_HANDLE hDevCookieInt;
- IMG_HANDLE hDevMemHeapInt;
- IMG_UINT32 ui32ShareIndex;
- IMG_BOOL bUseShareMemWorkaround;
-
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ALLOC_DEVICEMEM);
-
- NEW_HANDLE_BATCH_OR_ERROR(psAllocDeviceMemOUT->eError, psPerProc, 2)
-
- psAllocDeviceMemOUT->eError =
- PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
- psAllocDeviceMemIN->hDevCookie,
- PVRSRV_HANDLE_TYPE_DEV_NODE);
-
- if(psAllocDeviceMemOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
-
- psAllocDeviceMemOUT->eError =
- PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemHeapInt,
- psAllocDeviceMemIN->hDevMemHeap,
- PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP);
-
- if(psAllocDeviceMemOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
-
-
-
- bUseShareMemWorkaround = ((psAllocDeviceMemIN->ui32Attribs & PVRSRV_MEM_XPROC) != 0) ? IMG_TRUE : IMG_FALSE;
- ui32ShareIndex = 7654321;
-
- if (bUseShareMemWorkaround)
- {
-
-
-
- psAllocDeviceMemOUT->eError =
- BM_XProcWorkaroundFindNewBufferAndSetShareIndex(&ui32ShareIndex);
- if(psAllocDeviceMemOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
- }
-
- psAllocDeviceMemOUT->eError =
- PVRSRVAllocDeviceMemKM(hDevCookieInt,
- psPerProc,
- hDevMemHeapInt,
- psAllocDeviceMemIN->ui32Attribs,
- psAllocDeviceMemIN->ui32Size,
- psAllocDeviceMemIN->ui32Alignment,
- &psMemInfo,
- "" );
-
- if (bUseShareMemWorkaround)
- {
- PVR_ASSERT(ui32ShareIndex != 7654321);
- BM_XProcWorkaroundUnsetShareIndex(ui32ShareIndex);
- }
+ PVRSRV_KERNEL_MEM_INFO *psMemInfo;
+ IMG_HANDLE hDevCookieInt;
+ IMG_HANDLE hDevMemHeapInt;
+ IMG_UINT32 ui32ShareIndex;
+ IMG_BOOL bUseShareMemWorkaround;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ALLOC_DEVICEMEM);
+
+ NEW_HANDLE_BATCH_OR_ERROR(psAllocDeviceMemOUT->eError, psPerProc, 2)
+
+ psAllocDeviceMemOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
+ psAllocDeviceMemIN->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE);
+
+ if(psAllocDeviceMemOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
- if(psAllocDeviceMemOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
+ psAllocDeviceMemOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemHeapInt,
+ psAllocDeviceMemIN->hDevMemHeap,
+ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP);
- psMemInfo->sShareMemWorkaround.bInUse = bUseShareMemWorkaround;
- if (bUseShareMemWorkaround)
- {
+ if(psAllocDeviceMemOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+
+
+ bUseShareMemWorkaround = ((psAllocDeviceMemIN->ui32Attribs & PVRSRV_MEM_XPROC) != 0) ? IMG_TRUE : IMG_FALSE;
+ ui32ShareIndex = 7654321;
+
+ if (bUseShareMemWorkaround)
+ {
+
+
+
+ psAllocDeviceMemOUT->eError =
+ BM_XProcWorkaroundFindNewBufferAndSetShareIndex(&ui32ShareIndex);
+ if(psAllocDeviceMemOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+ }
+
+ psAllocDeviceMemOUT->eError =
+ PVRSRVAllocDeviceMemKM(hDevCookieInt,
+ psPerProc,
+ hDevMemHeapInt,
+ psAllocDeviceMemIN->ui32Attribs,
+ psAllocDeviceMemIN->ui32Size,
+ psAllocDeviceMemIN->ui32Alignment,
+ &psMemInfo,
+ "" );
+
+ if (bUseShareMemWorkaround)
+ {
PVR_ASSERT(ui32ShareIndex != 7654321);
- psMemInfo->sShareMemWorkaround.ui32ShareIndex = ui32ShareIndex;
- psMemInfo->sShareMemWorkaround.hDevCookieInt = hDevCookieInt;
- psMemInfo->sShareMemWorkaround.ui32OrigReqAttribs = psAllocDeviceMemIN->ui32Attribs;
- psMemInfo->sShareMemWorkaround.ui32OrigReqSize = (IMG_UINT32)psAllocDeviceMemIN->ui32Size;
- psMemInfo->sShareMemWorkaround.ui32OrigReqAlignment = (IMG_UINT32)psAllocDeviceMemIN->ui32Alignment;
- }
+ BM_XProcWorkaroundUnsetShareIndex(ui32ShareIndex);
+ }
- OSMemSet(&psAllocDeviceMemOUT->sClientMemInfo,
- 0,
- sizeof(psAllocDeviceMemOUT->sClientMemInfo));
+ if(psAllocDeviceMemOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psMemInfo->sShareMemWorkaround.bInUse = bUseShareMemWorkaround;
+ if (bUseShareMemWorkaround)
+ {
+ PVR_ASSERT(ui32ShareIndex != 7654321);
+ psMemInfo->sShareMemWorkaround.ui32ShareIndex = ui32ShareIndex;
+ psMemInfo->sShareMemWorkaround.hDevCookieInt = hDevCookieInt;
+ psMemInfo->sShareMemWorkaround.ui32OrigReqAttribs = psAllocDeviceMemIN->ui32Attribs;
+ psMemInfo->sShareMemWorkaround.ui32OrigReqSize = (IMG_UINT32)psAllocDeviceMemIN->ui32Size;
+ psMemInfo->sShareMemWorkaround.ui32OrigReqAlignment = (IMG_UINT32)psAllocDeviceMemIN->ui32Alignment;
+ }
- psAllocDeviceMemOUT->sClientMemInfo.pvLinAddrKM =
- psMemInfo->pvLinAddrKM;
+ OSMemSet(&psAllocDeviceMemOUT->sClientMemInfo,
+ 0,
+ sizeof(psAllocDeviceMemOUT->sClientMemInfo));
+
+ psAllocDeviceMemOUT->sClientMemInfo.pvLinAddrKM =
+ psMemInfo->pvLinAddrKM;
#if defined (__linux__)
- psAllocDeviceMemOUT->sClientMemInfo.pvLinAddr = 0;
+ psAllocDeviceMemOUT->sClientMemInfo.pvLinAddr = 0;
#else
- psAllocDeviceMemOUT->sClientMemInfo.pvLinAddr = psMemInfo->pvLinAddrKM;
+ psAllocDeviceMemOUT->sClientMemInfo.pvLinAddr = psMemInfo->pvLinAddrKM;
#endif
- psAllocDeviceMemOUT->sClientMemInfo.sDevVAddr = psMemInfo->sDevVAddr;
- psAllocDeviceMemOUT->sClientMemInfo.ui32Flags = psMemInfo->ui32Flags;
- psAllocDeviceMemOUT->sClientMemInfo.uAllocSize = psMemInfo->uAllocSize;
+ psAllocDeviceMemOUT->sClientMemInfo.sDevVAddr = psMemInfo->sDevVAddr;
+ psAllocDeviceMemOUT->sClientMemInfo.ui32Flags = psMemInfo->ui32Flags;
+ psAllocDeviceMemOUT->sClientMemInfo.uAllocSize = psMemInfo->uAllocSize;
#if defined (SUPPORT_SID_INTERFACE)
+
#else
- psAllocDeviceMemOUT->sClientMemInfo.hMappingInfo = psMemInfo->sMemBlk.hOSMemHandle;
+ psAllocDeviceMemOUT->sClientMemInfo.hMappingInfo = psMemInfo->sMemBlk.hOSMemHandle;
#endif
- PVRSRVAllocHandleNR(psPerProc->psHandleBase,
- &psAllocDeviceMemOUT->sClientMemInfo.hKernelMemInfo,
- psMemInfo,
- PVRSRV_HANDLE_TYPE_MEM_INFO,
- PVRSRV_HANDLE_ALLOC_FLAG_NONE);
-
- #if defined (SUPPORT_SID_INTERFACE)
- PVR_ASSERT(psAllocDeviceMemOUT->sClientMemInfo.hKernelMemInfo != 0);
-
- if (psMemInfo->sMemBlk.hOSMemHandle != IMG_NULL)
- {
- PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
- &psAllocDeviceMemOUT->sClientMemInfo.hMappingInfo,
- psMemInfo->sMemBlk.hOSMemHandle,
- PVRSRV_HANDLE_TYPE_MEM_INFO,
- PVRSRV_HANDLE_ALLOC_FLAG_NONE,
- psAllocDeviceMemOUT->sClientMemInfo.hKernelMemInfo);
- }
- else
- {
- psAllocDeviceMemOUT->sClientMemInfo.hMappingInfo = 0;
- }
+ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
+ &psAllocDeviceMemOUT->sClientMemInfo.hKernelMemInfo,
+ psMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
+
+#if defined (SUPPORT_SID_INTERFACE)
+ PVR_ASSERT(psAllocDeviceMemOUT->sClientMemInfo.hKernelMemInfo != 0);
+
+ if (psMemInfo->sMemBlk.hOSMemHandle != IMG_NULL)
+ {
+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
+ &psAllocDeviceMemOUT->sClientMemInfo.hMappingInfo,
+ psMemInfo->sMemBlk.hOSMemHandle,
+ PVRSRV_HANDLE_TYPE_MEM_INFO,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+ psAllocDeviceMemOUT->sClientMemInfo.hKernelMemInfo);
+ }
+ else
+ {
+ psAllocDeviceMemOUT->sClientMemInfo.hMappingInfo = 0;
+ }
#endif
- if(psAllocDeviceMemIN->ui32Attribs & PVRSRV_MEM_NO_SYNCOBJ)
- {
-
- OSMemSet(&psAllocDeviceMemOUT->sClientSyncInfo,
- 0,
- sizeof (PVRSRV_CLIENT_SYNC_INFO));
- psAllocDeviceMemOUT->sClientMemInfo.psClientSyncInfo = IMG_NULL;
- }
- else
- {
-
+ if(psAllocDeviceMemIN->ui32Attribs & PVRSRV_MEM_NO_SYNCOBJ)
+ {
+
+ OSMemSet(&psAllocDeviceMemOUT->sClientSyncInfo,
+ 0,
+ sizeof (PVRSRV_CLIENT_SYNC_INFO));
+ psAllocDeviceMemOUT->sClientMemInfo.psClientSyncInfo = IMG_NULL;
+ }
+ else
+ {
+
#if !defined(PVRSRV_DISABLE_UM_SYNCOBJ_MAPPINGS)
- psAllocDeviceMemOUT->sClientSyncInfo.psSyncData =
- psMemInfo->psKernelSyncInfo->psSyncData;
- psAllocDeviceMemOUT->sClientSyncInfo.sWriteOpsCompleteDevVAddr =
- psMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr;
- psAllocDeviceMemOUT->sClientSyncInfo.sReadOpsCompleteDevVAddr =
- psMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr;
+ psAllocDeviceMemOUT->sClientSyncInfo.psSyncData =
+ psMemInfo->psKernelSyncInfo->psSyncData;
+ psAllocDeviceMemOUT->sClientSyncInfo.sWriteOpsCompleteDevVAddr =
+ psMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr;
+ psAllocDeviceMemOUT->sClientSyncInfo.sReadOpsCompleteDevVAddr =
+ psMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr;
#if defined (SUPPORT_SID_INTERFACE)
- if (psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle != IMG_NULL)
- {
- PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
- &psAllocDeviceMemOUT->sClientSyncInfo.hMappingInfo,
- psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle,
- PVRSRV_HANDLE_TYPE_SYNC_INFO,
- PVRSRV_HANDLE_ALLOC_FLAG_NONE,
- psAllocDeviceMemOUT->sClientMemInfo.hKernelMemInfo);
- }
- else
- {
- psAllocDeviceMemOUT->sClientSyncInfo.hMappingInfo = 0;
- }
+ if (psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle != IMG_NULL)
+ {
+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
+ &psAllocDeviceMemOUT->sClientSyncInfo.hMappingInfo,
+ psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle,
+ PVRSRV_HANDLE_TYPE_SYNC_INFO,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+ psAllocDeviceMemOUT->sClientMemInfo.hKernelMemInfo);
+ }
+ else
+ {
+ psAllocDeviceMemOUT->sClientSyncInfo.hMappingInfo = 0;
+ }
#else
- psAllocDeviceMemOUT->sClientSyncInfo.hMappingInfo =
- psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle;
+ psAllocDeviceMemOUT->sClientSyncInfo.hMappingInfo =
+ psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle;
#endif
#endif
- PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
- &psAllocDeviceMemOUT->sClientSyncInfo.hKernelSyncInfo,
- psMemInfo->psKernelSyncInfo,
- PVRSRV_HANDLE_TYPE_SYNC_INFO,
- PVRSRV_HANDLE_ALLOC_FLAG_NONE,
- psAllocDeviceMemOUT->sClientMemInfo.hKernelMemInfo);
+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
+ &psAllocDeviceMemOUT->sClientSyncInfo.hKernelSyncInfo,
+ psMemInfo->psKernelSyncInfo,
+ PVRSRV_HANDLE_TYPE_SYNC_INFO,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+ psAllocDeviceMemOUT->sClientMemInfo.hKernelMemInfo);
- psAllocDeviceMemOUT->sClientMemInfo.psClientSyncInfo =
- &psAllocDeviceMemOUT->sClientSyncInfo;
+ psAllocDeviceMemOUT->sClientMemInfo.psClientSyncInfo =
+ &psAllocDeviceMemOUT->sClientSyncInfo;
- }
+ }
- COMMIT_HANDLE_BATCH_OR_ERROR(psAllocDeviceMemOUT->eError, psPerProc)
+ COMMIT_HANDLE_BATCH_OR_ERROR(psAllocDeviceMemOUT->eError, psPerProc)
- return 0;
+ return 0;
}
#endif
static IMG_INT
PVRSRVFreeDeviceMemBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_FREEDEVICEMEM *psFreeDeviceMemIN,
- PVRSRV_BRIDGE_RETURN *psRetOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_FREEDEVICEMEM *psFreeDeviceMemIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- IMG_HANDLE hDevCookieInt;
- IMG_VOID *pvKernelMemInfo;
+ IMG_HANDLE hDevCookieInt;
+ IMG_VOID *pvKernelMemInfo;
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_FREE_DEVICEMEM);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_FREE_DEVICEMEM);
- psRetOUT->eError =
- PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
- psFreeDeviceMemIN->hDevCookie,
- PVRSRV_HANDLE_TYPE_DEV_NODE);
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
+ psFreeDeviceMemIN->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE);
- if(psRetOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
- psRetOUT->eError =
+ psRetOUT->eError =
PVRSRVLookupHandle(psPerProc->psHandleBase,
&pvKernelMemInfo,
#if defined (SUPPORT_SID_INTERFACE)
- psFreeDeviceMemIN->hKernelMemInfo,
+ psFreeDeviceMemIN->hKernelMemInfo,
#else
- psFreeDeviceMemIN->psKernelMemInfo,
+ psFreeDeviceMemIN->psKernelMemInfo,
#endif
- PVRSRV_HANDLE_TYPE_MEM_INFO);
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
- if(psRetOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
- psRetOUT->eError = PVRSRVFreeDeviceMemKM(hDevCookieInt, pvKernelMemInfo);
+ psRetOUT->eError = PVRSRVFreeDeviceMemKM(hDevCookieInt, pvKernelMemInfo);
- if(psRetOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
- psRetOUT->eError =
- PVRSRVReleaseHandle(psPerProc->psHandleBase,
+ psRetOUT->eError =
+ PVRSRVReleaseHandle(psPerProc->psHandleBase,
#if defined (SUPPORT_SID_INTERFACE)
- psFreeDeviceMemIN->hKernelMemInfo,
+ psFreeDeviceMemIN->hKernelMemInfo,
#else
- psFreeDeviceMemIN->psKernelMemInfo,
+ psFreeDeviceMemIN->psKernelMemInfo,
#endif
- PVRSRV_HANDLE_TYPE_MEM_INFO);
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
- return 0;
+ return 0;
}
static IMG_INT
PVRSRVExportDeviceMemBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_EXPORTDEVICEMEM *psExportDeviceMemIN,
- PVRSRV_BRIDGE_OUT_EXPORTDEVICEMEM *psExportDeviceMemOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_EXPORTDEVICEMEM *psExportDeviceMemIN,
+ PVRSRV_BRIDGE_OUT_EXPORTDEVICEMEM *psExportDeviceMemOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- IMG_HANDLE hDevCookieInt;
+ IMG_HANDLE hDevCookieInt;
#if defined (SUPPORT_SID_INTERFACE)
- PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo = IMG_NULL;
+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo = IMG_NULL;
#else
- PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
#endif
- PVR_ASSERT(ui32BridgeID == PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_EXPORT_DEVICEMEM) ||
- ui32BridgeID == PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_EXPORT_DEVICEMEM_2));
- PVR_UNREFERENCED_PARAMETER(ui32BridgeID);
+ PVR_ASSERT(ui32BridgeID == PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_EXPORT_DEVICEMEM) ||
+ ui32BridgeID == PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_EXPORT_DEVICEMEM_2));
+ PVR_UNREFERENCED_PARAMETER(ui32BridgeID);
-
- psExportDeviceMemOUT->eError =
+
+ psExportDeviceMemOUT->eError =
PVRSRVLookupHandle(psPerProc->psHandleBase,
&hDevCookieInt,
- psExportDeviceMemIN->hDevCookie,
- PVRSRV_HANDLE_TYPE_DEV_NODE);
+ psExportDeviceMemIN->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE);
- if(psExportDeviceMemOUT->eError != PVRSRV_OK)
- {
- PVR_DPF((PVR_DBG_ERROR, "PVRSRVExportDeviceMemBW: can't find devcookie"));
- return 0;
- }
+ if(psExportDeviceMemOUT->eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVExportDeviceMemBW: can't find devcookie"));
+ return 0;
+ }
-
- psExportDeviceMemOUT->eError =
- PVRSRVLookupHandle(psPerProc->psHandleBase,
- (IMG_PVOID *)&psKernelMemInfo,
+
+ psExportDeviceMemOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ (IMG_PVOID *)&psKernelMemInfo,
#if defined (SUPPORT_SID_INTERFACE)
- psExportDeviceMemIN->hKernelMemInfo,
+ psExportDeviceMemIN->hKernelMemInfo,
#else
- psExportDeviceMemIN->psKernelMemInfo,
+ psExportDeviceMemIN->psKernelMemInfo,
#endif
- PVRSRV_HANDLE_TYPE_MEM_INFO);
-
- if(psExportDeviceMemOUT->eError != PVRSRV_OK)
- {
- PVR_DPF((PVR_DBG_ERROR, "PVRSRVExportDeviceMemBW: can't find kernel meminfo"));
- return 0;
- }
-
-
- psExportDeviceMemOUT->eError =
- PVRSRVFindHandle(KERNEL_HANDLE_BASE,
- &psExportDeviceMemOUT->hMemInfo,
- psKernelMemInfo,
- PVRSRV_HANDLE_TYPE_MEM_INFO);
- if(psExportDeviceMemOUT->eError == PVRSRV_OK)
- {
-
- PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVExportDeviceMemBW: allocation is already exported"));
- return 0;
- }
-
-
- psExportDeviceMemOUT->eError = PVRSRVAllocHandle(KERNEL_HANDLE_BASE,
- &psExportDeviceMemOUT->hMemInfo,
- psKernelMemInfo,
- PVRSRV_HANDLE_TYPE_MEM_INFO,
- PVRSRV_HANDLE_ALLOC_FLAG_NONE);
- if (psExportDeviceMemOUT->eError != PVRSRV_OK)
- {
- PVR_DPF((PVR_DBG_ERROR, "PVRSRVExportDeviceMemBW: failed to allocate handle from global handle list"));
- return 0;
- }
-
-
- psKernelMemInfo->ui32Flags |= PVRSRV_MEM_EXPORTED;
-
- return 0;
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+
+ if(psExportDeviceMemOUT->eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVExportDeviceMemBW: can't find kernel meminfo"));
+ return 0;
+ }
+
+
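+	/* An allocation that already has a handle in the global (kernel) handle
+	 * base has been exported before, so just return that existing handle. */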
+ psExportDeviceMemOUT->eError =
+ PVRSRVFindHandle(KERNEL_HANDLE_BASE,
+ &psExportDeviceMemOUT->hMemInfo,
+ psKernelMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if(psExportDeviceMemOUT->eError == PVRSRV_OK)
+ {
+
+ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVExportDeviceMemBW: allocation is already exported"));
+ return 0;
+ }
+
+
+ psExportDeviceMemOUT->eError = PVRSRVAllocHandle(KERNEL_HANDLE_BASE,
+ &psExportDeviceMemOUT->hMemInfo,
+ psKernelMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
+ if (psExportDeviceMemOUT->eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVExportDeviceMemBW: failed to allocate handle from global handle list"));
+ return 0;
+ }
+
+
+ psKernelMemInfo->ui32Flags |= PVRSRV_MEM_EXPORTED;
+
+ return 0;
}
static IMG_INT
PVRSRVMapDeviceMemoryBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_MAP_DEV_MEMORY *psMapDevMemIN,
- PVRSRV_BRIDGE_OUT_MAP_DEV_MEMORY *psMapDevMemOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_MAP_DEV_MEMORY *psMapDevMemIN,
+ PVRSRV_BRIDGE_OUT_MAP_DEV_MEMORY *psMapDevMemOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- PVRSRV_KERNEL_MEM_INFO *psSrcKernelMemInfo = IMG_NULL;
- PVRSRV_KERNEL_MEM_INFO *psDstKernelMemInfo = IMG_NULL;
- IMG_HANDLE hDstDevMemHeap = IMG_NULL;
-
- PVR_ASSERT(ui32BridgeID == PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_MAP_DEV_MEMORY) ||
- ui32BridgeID == PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_MAP_DEV_MEMORY_2));
- PVR_UNREFERENCED_PARAMETER(ui32BridgeID);
-
- NEW_HANDLE_BATCH_OR_ERROR(psMapDevMemOUT->eError, psPerProc, 2)
-
-
- psMapDevMemOUT->eError = PVRSRVLookupHandle(KERNEL_HANDLE_BASE,
- (IMG_VOID**)&psSrcKernelMemInfo,
- psMapDevMemIN->hKernelMemInfo,
- PVRSRV_HANDLE_TYPE_MEM_INFO);
- if(psMapDevMemOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
-
-
- psMapDevMemOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
- &hDstDevMemHeap,
- psMapDevMemIN->hDstDevMemHeap,
- PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP);
- if(psMapDevMemOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
-
-
- if (psSrcKernelMemInfo->sShareMemWorkaround.bInUse)
- {
- PVR_DPF((PVR_DBG_MESSAGE, "using the mem wrap workaround."));
-
-
-
-
-
-
-
-
-
- psMapDevMemOUT->eError = BM_XProcWorkaroundSetShareIndex(psSrcKernelMemInfo->sShareMemWorkaround.ui32ShareIndex);
- if(psMapDevMemOUT->eError != PVRSRV_OK)
- {
- PVR_DPF((PVR_DBG_ERROR, "PVRSRVMapDeviceMemoryBW(): failed to recycle shared buffer"));
- return 0;
- }
-
- psMapDevMemOUT->eError =
- PVRSRVAllocDeviceMemKM(psSrcKernelMemInfo->sShareMemWorkaround.hDevCookieInt,
- psPerProc,
- hDstDevMemHeap,
- psSrcKernelMemInfo->sShareMemWorkaround.ui32OrigReqAttribs | PVRSRV_MEM_NO_SYNCOBJ,
- psSrcKernelMemInfo->sShareMemWorkaround.ui32OrigReqSize,
- psSrcKernelMemInfo->sShareMemWorkaround.ui32OrigReqAlignment,
- &psDstKernelMemInfo,
- "" );
-
-
- BM_XProcWorkaroundUnsetShareIndex(psSrcKernelMemInfo->sShareMemWorkaround.ui32ShareIndex);
- if(psMapDevMemOUT->eError != PVRSRV_OK)
- {
- PVR_DPF((PVR_DBG_ERROR, "lakjgfgewjlrgebhe"));
- return 0;
- }
-
- if(psSrcKernelMemInfo->psKernelSyncInfo)
- {
- psSrcKernelMemInfo->psKernelSyncInfo->ui32RefCount++;
- }
-
- psDstKernelMemInfo->psKernelSyncInfo = psSrcKernelMemInfo->psKernelSyncInfo;
- }
- else
- {
-
- psMapDevMemOUT->eError = PVRSRVMapDeviceMemoryKM(psPerProc,
- psSrcKernelMemInfo,
- hDstDevMemHeap,
- &psDstKernelMemInfo);
- if(psMapDevMemOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
- }
-
-
- psDstKernelMemInfo->sShareMemWorkaround = psSrcKernelMemInfo->sShareMemWorkaround;
-
- OSMemSet(&psMapDevMemOUT->sDstClientMemInfo,
- 0,
- sizeof(psMapDevMemOUT->sDstClientMemInfo));
- OSMemSet(&psMapDevMemOUT->sDstClientSyncInfo,
- 0,
- sizeof(psMapDevMemOUT->sDstClientSyncInfo));
-
- psMapDevMemOUT->sDstClientMemInfo.pvLinAddrKM =
- psDstKernelMemInfo->pvLinAddrKM;
-
- psMapDevMemOUT->sDstClientMemInfo.pvLinAddr = 0;
- psMapDevMemOUT->sDstClientMemInfo.sDevVAddr = psDstKernelMemInfo->sDevVAddr;
- psMapDevMemOUT->sDstClientMemInfo.ui32Flags = psDstKernelMemInfo->ui32Flags;
+ PVRSRV_KERNEL_MEM_INFO *psSrcKernelMemInfo = IMG_NULL;
+ PVRSRV_KERNEL_MEM_INFO *psDstKernelMemInfo = IMG_NULL;
+ IMG_HANDLE hDstDevMemHeap = IMG_NULL;
+
+ PVR_ASSERT(ui32BridgeID == PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_MAP_DEV_MEMORY) ||
+ ui32BridgeID == PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_MAP_DEV_MEMORY_2));
+ PVR_UNREFERENCED_PARAMETER(ui32BridgeID);
+
+ NEW_HANDLE_BATCH_OR_ERROR(psMapDevMemOUT->eError, psPerProc, 2)
+
+
+ psMapDevMemOUT->eError = PVRSRVLookupHandle(KERNEL_HANDLE_BASE,
+ (IMG_VOID**)&psSrcKernelMemInfo,
+ psMapDevMemIN->hKernelMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if(psMapDevMemOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+
+ psMapDevMemOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDstDevMemHeap,
+ psMapDevMemIN->hDstDevMemHeap,
+ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP);
+ if(psMapDevMemOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+
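+	/* The source allocation was created via the cross-process share workaround:
+	 * recycle its share index around a fresh allocation (so the new meminfo is
+	 * tied to the same shared buffer) and reuse the source sync object. */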
+ if (psSrcKernelMemInfo->sShareMemWorkaround.bInUse)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "using the mem wrap workaround."));
+
+
+
+
+
+
+
+
+
+ psMapDevMemOUT->eError = BM_XProcWorkaroundSetShareIndex(psSrcKernelMemInfo->sShareMemWorkaround.ui32ShareIndex);
+ if(psMapDevMemOUT->eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVMapDeviceMemoryBW(): failed to recycle shared buffer"));
+ return 0;
+ }
+
+ psMapDevMemOUT->eError =
+ PVRSRVAllocDeviceMemKM(psSrcKernelMemInfo->sShareMemWorkaround.hDevCookieInt,
+ psPerProc,
+ hDstDevMemHeap,
+ psSrcKernelMemInfo->sShareMemWorkaround.ui32OrigReqAttribs | PVRSRV_MEM_NO_SYNCOBJ,
+ psSrcKernelMemInfo->sShareMemWorkaround.ui32OrigReqSize,
+ psSrcKernelMemInfo->sShareMemWorkaround.ui32OrigReqAlignment,
+ &psDstKernelMemInfo,
+ "" );
+
+
+ BM_XProcWorkaroundUnsetShareIndex(psSrcKernelMemInfo->sShareMemWorkaround.ui32ShareIndex);
+ if(psMapDevMemOUT->eError != PVRSRV_OK)
+ {
+			PVR_DPF((PVR_DBG_ERROR, "PVRSRVMapDeviceMemoryBW: failed to allocate device memory for shared mapping"));
+ return 0;
+ }
+
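+		/* The new mapping shares the source allocation's sync object, so take an extra reference on it. */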
+ if(psSrcKernelMemInfo->psKernelSyncInfo)
+ {
+ psSrcKernelMemInfo->psKernelSyncInfo->ui32RefCount++;
+ }
+
+ psDstKernelMemInfo->psKernelSyncInfo = psSrcKernelMemInfo->psKernelSyncInfo;
+ }
+ else
+ {
+
+ psMapDevMemOUT->eError = PVRSRVMapDeviceMemoryKM(psPerProc,
+ psSrcKernelMemInfo,
+ hDstDevMemHeap,
+ &psDstKernelMemInfo);
+ if(psMapDevMemOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+ }
+
+
+ psDstKernelMemInfo->sShareMemWorkaround = psSrcKernelMemInfo->sShareMemWorkaround;
+
+ OSMemSet(&psMapDevMemOUT->sDstClientMemInfo,
+ 0,
+ sizeof(psMapDevMemOUT->sDstClientMemInfo));
+ OSMemSet(&psMapDevMemOUT->sDstClientSyncInfo,
+ 0,
+ sizeof(psMapDevMemOUT->sDstClientSyncInfo));
+
+ psMapDevMemOUT->sDstClientMemInfo.pvLinAddrKM =
+ psDstKernelMemInfo->pvLinAddrKM;
+
+ psMapDevMemOUT->sDstClientMemInfo.pvLinAddr = 0;
+ psMapDevMemOUT->sDstClientMemInfo.sDevVAddr = psDstKernelMemInfo->sDevVAddr;
+ psMapDevMemOUT->sDstClientMemInfo.ui32Flags = psDstKernelMemInfo->ui32Flags;
psMapDevMemOUT->sDstClientMemInfo.uAllocSize = psDstKernelMemInfo->uAllocSize;
#if defined (SUPPORT_SID_INTERFACE)
+
#else
- psMapDevMemOUT->sDstClientMemInfo.hMappingInfo = psDstKernelMemInfo->sMemBlk.hOSMemHandle;
+ psMapDevMemOUT->sDstClientMemInfo.hMappingInfo = psDstKernelMemInfo->sMemBlk.hOSMemHandle;
#endif
-
- PVRSRVAllocHandleNR(psPerProc->psHandleBase,
- &psMapDevMemOUT->sDstClientMemInfo.hKernelMemInfo,
- psDstKernelMemInfo,
- PVRSRV_HANDLE_TYPE_MEM_INFO,
- PVRSRV_HANDLE_ALLOC_FLAG_NONE);
- psMapDevMemOUT->sDstClientSyncInfo.hKernelSyncInfo = IMG_NULL;
+
+ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
+ &psMapDevMemOUT->sDstClientMemInfo.hKernelMemInfo,
+ psDstKernelMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
+ psMapDevMemOUT->sDstClientSyncInfo.hKernelSyncInfo = IMG_NULL;
#if defined (SUPPORT_SID_INTERFACE)
-
- if (psDstKernelMemInfo->sMemBlk.hOSMemHandle != IMG_NULL)
- {
- PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
- &psMapDevMemOUT->sDstClientMemInfo.hMappingInfo,
- psDstKernelMemInfo->sMemBlk.hOSMemHandle,
- PVRSRV_HANDLE_TYPE_MEM_INFO,
- PVRSRV_HANDLE_ALLOC_FLAG_NONE,
- psMapDevMemOUT->sDstClientMemInfo.hKernelMemInfo);
- }
- else
- {
- psMapDevMemOUT->sDstClientMemInfo.hMappingInfo = 0;
- }
+
+ if (psDstKernelMemInfo->sMemBlk.hOSMemHandle != IMG_NULL)
+ {
+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
+ &psMapDevMemOUT->sDstClientMemInfo.hMappingInfo,
+ psDstKernelMemInfo->sMemBlk.hOSMemHandle,
+ PVRSRV_HANDLE_TYPE_MEM_INFO,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+ psMapDevMemOUT->sDstClientMemInfo.hKernelMemInfo);
+ }
+ else
+ {
+ psMapDevMemOUT->sDstClientMemInfo.hMappingInfo = 0;
+ }
#endif
-
- if(psDstKernelMemInfo->psKernelSyncInfo)
- {
+
+ if(psDstKernelMemInfo->psKernelSyncInfo)
+ {
#if !defined(PVRSRV_DISABLE_UM_SYNCOBJ_MAPPINGS)
- psMapDevMemOUT->sDstClientSyncInfo.psSyncData =
- psDstKernelMemInfo->psKernelSyncInfo->psSyncData;
- psMapDevMemOUT->sDstClientSyncInfo.sWriteOpsCompleteDevVAddr =
- psDstKernelMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr;
- psMapDevMemOUT->sDstClientSyncInfo.sReadOpsCompleteDevVAddr =
- psDstKernelMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr;
+ psMapDevMemOUT->sDstClientSyncInfo.psSyncData =
+ psDstKernelMemInfo->psKernelSyncInfo->psSyncData;
+ psMapDevMemOUT->sDstClientSyncInfo.sWriteOpsCompleteDevVAddr =
+ psDstKernelMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr;
+ psMapDevMemOUT->sDstClientSyncInfo.sReadOpsCompleteDevVAddr =
+ psDstKernelMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr;
#if defined (SUPPORT_SID_INTERFACE)
-
- if (psDstKernelMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle != IMG_NULL)
- {
- PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
- &psMapDevMemOUT->sDstClientSyncInfo.hMappingInfo,
- psDstKernelMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle,
- PVRSRV_HANDLE_TYPE_MEM_INFO,
- PVRSRV_HANDLE_ALLOC_FLAG_NONE,
- psMapDevMemOUT->sDstClientMemInfo.hKernelMemInfo);
- }
- else
- {
- psMapDevMemOUT->sDstClientSyncInfo.hMappingInfo = 0;
- }
+
+ if (psDstKernelMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle != IMG_NULL)
+ {
+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
+ &psMapDevMemOUT->sDstClientSyncInfo.hMappingInfo,
+ psDstKernelMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle,
+ PVRSRV_HANDLE_TYPE_MEM_INFO,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+ psMapDevMemOUT->sDstClientMemInfo.hKernelMemInfo);
+ }
+ else
+ {
+ psMapDevMemOUT->sDstClientSyncInfo.hMappingInfo = 0;
+ }
#else
- psMapDevMemOUT->sDstClientSyncInfo.hMappingInfo =
- psDstKernelMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle;
+ psMapDevMemOUT->sDstClientSyncInfo.hMappingInfo =
+ psDstKernelMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle;
#endif
#endif
- psMapDevMemOUT->sDstClientMemInfo.psClientSyncInfo = &psMapDevMemOUT->sDstClientSyncInfo;
-
- PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
- &psMapDevMemOUT->sDstClientSyncInfo.hKernelSyncInfo,
- psDstKernelMemInfo->psKernelSyncInfo,
- PVRSRV_HANDLE_TYPE_SYNC_INFO,
- PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
- psMapDevMemOUT->sDstClientMemInfo.hKernelMemInfo);
- }
+ psMapDevMemOUT->sDstClientMemInfo.psClientSyncInfo = &psMapDevMemOUT->sDstClientSyncInfo;
+
+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
+ &psMapDevMemOUT->sDstClientSyncInfo.hKernelSyncInfo,
+ psDstKernelMemInfo->psKernelSyncInfo,
+ PVRSRV_HANDLE_TYPE_SYNC_INFO,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+ psMapDevMemOUT->sDstClientMemInfo.hKernelMemInfo);
+ }
- COMMIT_HANDLE_BATCH_OR_ERROR(psMapDevMemOUT->eError, psPerProc)
+ COMMIT_HANDLE_BATCH_OR_ERROR(psMapDevMemOUT->eError, psPerProc)
- return 0;
+ return 0;
}
static IMG_INT
PVRSRVUnmapDeviceMemoryBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_UNMAP_DEV_MEMORY *psUnmapDevMemIN,
- PVRSRV_BRIDGE_RETURN *psRetOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_UNMAP_DEV_MEMORY *psUnmapDevMemIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo = IMG_NULL;
+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo = IMG_NULL;
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_UNMAP_DEV_MEMORY);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_UNMAP_DEV_MEMORY);
- psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
- (IMG_VOID**)&psKernelMemInfo,
+ psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ (IMG_VOID**)&psKernelMemInfo,
#if defined (SUPPORT_SID_INTERFACE)
- psUnmapDevMemIN->hKernelMemInfo,
+ psUnmapDevMemIN->hKernelMemInfo,
#else
- psUnmapDevMemIN->psKernelMemInfo,
+ psUnmapDevMemIN->psKernelMemInfo,
#endif
- PVRSRV_HANDLE_TYPE_MEM_INFO);
- if(psRetOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
-
- if (psKernelMemInfo->sShareMemWorkaround.bInUse)
- {
- psRetOUT->eError = PVRSRVFreeDeviceMemKM(psKernelMemInfo->sShareMemWorkaround.hDevCookieInt, psKernelMemInfo);
- if(psRetOUT->eError != PVRSRV_OK)
- {
- PVR_DPF((PVR_DBG_ERROR, "PVRSRVUnmapDeviceMemoryBW: internal error, should expect FreeDeviceMem to fail"));
- return 0;
- }
- }
- else
- {
- psRetOUT->eError = PVRSRVUnmapDeviceMemoryKM(psKernelMemInfo);
- if(psRetOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
- }
-
- psRetOUT->eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
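+	/* Mappings created through the share workaround are real allocations,
+	 * so free them here rather than unmapping. */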
+ if (psKernelMemInfo->sShareMemWorkaround.bInUse)
+ {
+ psRetOUT->eError = PVRSRVFreeDeviceMemKM(psKernelMemInfo->sShareMemWorkaround.hDevCookieInt, psKernelMemInfo);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+			PVR_DPF((PVR_DBG_ERROR, "PVRSRVUnmapDeviceMemoryBW: internal error, did not expect FreeDeviceMem to fail"));
+ return 0;
+ }
+ }
+ else
+ {
+ psRetOUT->eError = PVRSRVUnmapDeviceMemoryKM(psKernelMemInfo);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+ }
+
+ psRetOUT->eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
#if defined (SUPPORT_SID_INTERFACE)
- psUnmapDevMemIN->hKernelMemInfo,
+ psUnmapDevMemIN->hKernelMemInfo,
#else
- psUnmapDevMemIN->psKernelMemInfo,
+ psUnmapDevMemIN->psKernelMemInfo,
#endif
- PVRSRV_HANDLE_TYPE_MEM_INFO);
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
- return 0;
+ return 0;
}
static IMG_INT
PVRSRVMapDeviceClassMemoryBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_MAP_DEVICECLASS_MEMORY *psMapDevClassMemIN,
- PVRSRV_BRIDGE_OUT_MAP_DEVICECLASS_MEMORY *psMapDevClassMemOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_MAP_DEVICECLASS_MEMORY *psMapDevClassMemIN,
+ PVRSRV_BRIDGE_OUT_MAP_DEVICECLASS_MEMORY *psMapDevClassMemOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- PVRSRV_KERNEL_MEM_INFO *psMemInfo;
- IMG_HANDLE hOSMapInfo;
- IMG_HANDLE hDeviceClassBufferInt;
- IMG_HANDLE hDevMemContextInt;
- PVRSRV_HANDLE_TYPE eHandleType;
+ PVRSRV_KERNEL_MEM_INFO *psMemInfo;
+ IMG_HANDLE hOSMapInfo;
+ IMG_HANDLE hDeviceClassBufferInt;
+ IMG_HANDLE hDevMemContextInt;
+ PVRSRV_HANDLE_TYPE eHandleType;
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY);
- NEW_HANDLE_BATCH_OR_ERROR(psMapDevClassMemOUT->eError, psPerProc, 2)
+ NEW_HANDLE_BATCH_OR_ERROR(psMapDevClassMemOUT->eError, psPerProc, 2)
-
- psMapDevClassMemOUT->eError =
+
+ psMapDevClassMemOUT->eError =
PVRSRVLookupHandleAnyType(psPerProc->psHandleBase,
- &hDeviceClassBufferInt,
- &eHandleType,
- psMapDevClassMemIN->hDeviceClassBuffer);
+ &hDeviceClassBufferInt,
+ &eHandleType,
+ psMapDevClassMemIN->hDeviceClassBuffer);
- if(psMapDevClassMemOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
+ if(psMapDevClassMemOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
-
- psMapDevClassMemOUT->eError =
- PVRSRVLookupHandle(psPerProc->psHandleBase,
- &hDevMemContextInt,
- psMapDevClassMemIN->hDevMemContext,
- PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
-
- if(psMapDevClassMemOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
-
-
- switch(eHandleType)
- {
+
+ psMapDevClassMemOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDevMemContextInt,
+ psMapDevClassMemIN->hDevMemContext,
+ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
+
+ if(psMapDevClassMemOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+
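+	/* Only handles referring to display-class or buffer-class buffers are
+	 * valid here; reject any other handle type. */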
+ switch(eHandleType)
+ {
#if defined(PVR_SECURE_HANDLES) || defined (SUPPORT_SID_INTERFACE)
- case PVRSRV_HANDLE_TYPE_DISP_BUFFER:
- case PVRSRV_HANDLE_TYPE_BUF_BUFFER:
+ case PVRSRV_HANDLE_TYPE_DISP_BUFFER:
+ case PVRSRV_HANDLE_TYPE_BUF_BUFFER:
#else
- case PVRSRV_HANDLE_TYPE_NONE:
+ case PVRSRV_HANDLE_TYPE_NONE:
#endif
- break;
- default:
- psMapDevClassMemOUT->eError = PVRSRV_ERROR_INVALID_HANDLE_TYPE;
- return 0;
- }
-
- psMapDevClassMemOUT->eError =
- PVRSRVMapDeviceClassMemoryKM(psPerProc,
- hDevMemContextInt,
- hDeviceClassBufferInt,
- &psMemInfo,
- &hOSMapInfo);
- if(psMapDevClassMemOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
-
- OSMemSet(&psMapDevClassMemOUT->sClientMemInfo,
- 0,
- sizeof(psMapDevClassMemOUT->sClientMemInfo));
- OSMemSet(&psMapDevClassMemOUT->sClientSyncInfo,
- 0,
- sizeof(psMapDevClassMemOUT->sClientSyncInfo));
-
- psMapDevClassMemOUT->sClientMemInfo.pvLinAddrKM =
- psMemInfo->pvLinAddrKM;
-
- psMapDevClassMemOUT->sClientMemInfo.pvLinAddr = 0;
- psMapDevClassMemOUT->sClientMemInfo.sDevVAddr = psMemInfo->sDevVAddr;
- psMapDevClassMemOUT->sClientMemInfo.ui32Flags = psMemInfo->ui32Flags;
+ break;
+ default:
+ psMapDevClassMemOUT->eError = PVRSRV_ERROR_INVALID_HANDLE_TYPE;
+ return 0;
+ }
+
+ psMapDevClassMemOUT->eError =
+ PVRSRVMapDeviceClassMemoryKM(psPerProc,
+ hDevMemContextInt,
+ hDeviceClassBufferInt,
+ &psMemInfo,
+ &hOSMapInfo);
+ if(psMapDevClassMemOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ OSMemSet(&psMapDevClassMemOUT->sClientMemInfo,
+ 0,
+ sizeof(psMapDevClassMemOUT->sClientMemInfo));
+ OSMemSet(&psMapDevClassMemOUT->sClientSyncInfo,
+ 0,
+ sizeof(psMapDevClassMemOUT->sClientSyncInfo));
+
+ psMapDevClassMemOUT->sClientMemInfo.pvLinAddrKM =
+ psMemInfo->pvLinAddrKM;
+
+ psMapDevClassMemOUT->sClientMemInfo.pvLinAddr = 0;
+ psMapDevClassMemOUT->sClientMemInfo.sDevVAddr = psMemInfo->sDevVAddr;
+ psMapDevClassMemOUT->sClientMemInfo.ui32Flags = psMemInfo->ui32Flags;
psMapDevClassMemOUT->sClientMemInfo.uAllocSize = psMemInfo->uAllocSize;
#if defined (SUPPORT_SID_INTERFACE)
- if (psMemInfo->sMemBlk.hOSMemHandle != 0)
- {
- PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
- &psMapDevClassMemOUT->sClientMemInfo.hMappingInfo,
- psMemInfo->sMemBlk.hOSMemHandle,
- PVRSRV_HANDLE_TYPE_MEM_INFO,
- PVRSRV_HANDLE_ALLOC_FLAG_NONE,
- psMapDevClassMemIN->hDeviceClassBuffer);
- }
- else
- {
- psMapDevClassMemOUT->sClientMemInfo.hMappingInfo = 0;
- }
+ if (psMemInfo->sMemBlk.hOSMemHandle != 0)
+ {
+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
+ &psMapDevClassMemOUT->sClientMemInfo.hMappingInfo,
+ psMemInfo->sMemBlk.hOSMemHandle,
+ PVRSRV_HANDLE_TYPE_MEM_INFO,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+ psMapDevClassMemIN->hDeviceClassBuffer);
+ }
+ else
+ {
+ psMapDevClassMemOUT->sClientMemInfo.hMappingInfo = 0;
+ }
#else
- psMapDevClassMemOUT->sClientMemInfo.hMappingInfo = psMemInfo->sMemBlk.hOSMemHandle;
+ psMapDevClassMemOUT->sClientMemInfo.hMappingInfo = psMemInfo->sMemBlk.hOSMemHandle;
#endif
- PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
- &psMapDevClassMemOUT->sClientMemInfo.hKernelMemInfo,
- psMemInfo,
- PVRSRV_HANDLE_TYPE_MEM_INFO,
- PVRSRV_HANDLE_ALLOC_FLAG_NONE,
- psMapDevClassMemIN->hDeviceClassBuffer);
+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
+ &psMapDevClassMemOUT->sClientMemInfo.hKernelMemInfo,
+ psMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+ psMapDevClassMemIN->hDeviceClassBuffer);
- psMapDevClassMemOUT->sClientSyncInfo.hKernelSyncInfo = IMG_NULL;
+ psMapDevClassMemOUT->sClientSyncInfo.hKernelSyncInfo = IMG_NULL;
-
- if(psMemInfo->psKernelSyncInfo)
- {
+
+ if(psMemInfo->psKernelSyncInfo)
+ {
#if !defined(PVRSRV_DISABLE_UM_SYNCOBJ_MAPPINGS)
- psMapDevClassMemOUT->sClientSyncInfo.psSyncData =
- psMemInfo->psKernelSyncInfo->psSyncData;
- psMapDevClassMemOUT->sClientSyncInfo.sWriteOpsCompleteDevVAddr =
- psMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr;
- psMapDevClassMemOUT->sClientSyncInfo.sReadOpsCompleteDevVAddr =
- psMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr;
+ psMapDevClassMemOUT->sClientSyncInfo.psSyncData =
+ psMemInfo->psKernelSyncInfo->psSyncData;
+ psMapDevClassMemOUT->sClientSyncInfo.sWriteOpsCompleteDevVAddr =
+ psMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr;
+ psMapDevClassMemOUT->sClientSyncInfo.sReadOpsCompleteDevVAddr =
+ psMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr;
#if defined (SUPPORT_SID_INTERFACE)
- if (psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle != 0)
- {
- PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
- &psMapDevClassMemOUT->sClientSyncInfo.hMappingInfo,
- psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle,
- PVRSRV_HANDLE_TYPE_SYNC_INFO,
- PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
- psMapDevClassMemOUT->sClientMemInfo.hKernelMemInfo);
- }
- else
- {
- psMapDevClassMemOUT->sClientSyncInfo.hMappingInfo = 0;
- }
+ if (psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle != 0)
+ {
+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
+ &psMapDevClassMemOUT->sClientSyncInfo.hMappingInfo,
+ psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle,
+ PVRSRV_HANDLE_TYPE_SYNC_INFO,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+ psMapDevClassMemOUT->sClientMemInfo.hKernelMemInfo);
+ }
+ else
+ {
+ psMapDevClassMemOUT->sClientSyncInfo.hMappingInfo = 0;
+ }
#else
- psMapDevClassMemOUT->sClientSyncInfo.hMappingInfo =
- psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle;
+ psMapDevClassMemOUT->sClientSyncInfo.hMappingInfo =
+ psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle;
#endif
#endif
- psMapDevClassMemOUT->sClientMemInfo.psClientSyncInfo = &psMapDevClassMemOUT->sClientSyncInfo;
-
- PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
- &psMapDevClassMemOUT->sClientSyncInfo.hKernelSyncInfo,
- psMemInfo->psKernelSyncInfo,
- PVRSRV_HANDLE_TYPE_SYNC_INFO,
- PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
- psMapDevClassMemOUT->sClientMemInfo.hKernelMemInfo);
- }
+ psMapDevClassMemOUT->sClientMemInfo.psClientSyncInfo = &psMapDevClassMemOUT->sClientSyncInfo;
+
+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
+ &psMapDevClassMemOUT->sClientSyncInfo.hKernelSyncInfo,
+ psMemInfo->psKernelSyncInfo,
+ PVRSRV_HANDLE_TYPE_SYNC_INFO,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+ psMapDevClassMemOUT->sClientMemInfo.hKernelMemInfo);
+ }
- COMMIT_HANDLE_BATCH_OR_ERROR(psMapDevClassMemOUT->eError, psPerProc)
+ COMMIT_HANDLE_BATCH_OR_ERROR(psMapDevClassMemOUT->eError, psPerProc)
- return 0;
+ return 0;
}
static IMG_INT
PVRSRVUnmapDeviceClassMemoryBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_UNMAP_DEVICECLASS_MEMORY *psUnmapDevClassMemIN,
- PVRSRV_BRIDGE_RETURN *psRetOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_UNMAP_DEVICECLASS_MEMORY *psUnmapDevClassMemIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- IMG_VOID *pvKernelMemInfo;
+ IMG_VOID *pvKernelMemInfo;
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_UNMAP_DEVICECLASS_MEMORY);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_UNMAP_DEVICECLASS_MEMORY);
- psRetOUT->eError =
- PVRSRVLookupHandle(psPerProc->psHandleBase, &pvKernelMemInfo,
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase, &pvKernelMemInfo,
#if defined (SUPPORT_SID_INTERFACE)
- psUnmapDevClassMemIN->hKernelMemInfo,
+ psUnmapDevClassMemIN->hKernelMemInfo,
#else
- psUnmapDevClassMemIN->psKernelMemInfo,
+ psUnmapDevClassMemIN->psKernelMemInfo,
#endif
- PVRSRV_HANDLE_TYPE_MEM_INFO);
- if(psRetOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
- psRetOUT->eError = PVRSRVUnmapDeviceClassMemoryKM(pvKernelMemInfo);
+ psRetOUT->eError = PVRSRVUnmapDeviceClassMemoryKM(pvKernelMemInfo);
- if(psRetOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
- psRetOUT->eError =
- PVRSRVReleaseHandle(psPerProc->psHandleBase,
+ psRetOUT->eError =
+ PVRSRVReleaseHandle(psPerProc->psHandleBase,
#if defined (SUPPORT_SID_INTERFACE)
- psUnmapDevClassMemIN->hKernelMemInfo,
+ psUnmapDevClassMemIN->hKernelMemInfo,
#else
- psUnmapDevClassMemIN->psKernelMemInfo,
+ psUnmapDevClassMemIN->psKernelMemInfo,
#endif
- PVRSRV_HANDLE_TYPE_MEM_INFO);
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
- return 0;
+ return 0;
}
#if defined(OS_PVRSRV_WRAP_EXT_MEM_BW)
IMG_INT
PVRSRVWrapExtMemoryBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_WRAP_EXT_MEMORY *psWrapExtMemIN,
- PVRSRV_BRIDGE_OUT_WRAP_EXT_MEMORY *psWrapExtMemOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc);
+ PVRSRV_BRIDGE_IN_WRAP_EXT_MEMORY *psWrapExtMemIN,
+ PVRSRV_BRIDGE_OUT_WRAP_EXT_MEMORY *psWrapExtMemOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc);
#else
static IMG_INT
PVRSRVWrapExtMemoryBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_WRAP_EXT_MEMORY *psWrapExtMemIN,
- PVRSRV_BRIDGE_OUT_WRAP_EXT_MEMORY *psWrapExtMemOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_WRAP_EXT_MEMORY *psWrapExtMemIN,
+ PVRSRV_BRIDGE_OUT_WRAP_EXT_MEMORY *psWrapExtMemOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- IMG_HANDLE hDevCookieInt;
- IMG_HANDLE hDevMemContextInt;
- PVRSRV_KERNEL_MEM_INFO *psMemInfo;
- IMG_SYS_PHYADDR *psSysPAddr = IMG_NULL;
+ IMG_HANDLE hDevCookieInt;
+ IMG_HANDLE hDevMemContextInt;
+ PVRSRV_KERNEL_MEM_INFO *psMemInfo;
+ IMG_SYS_PHYADDR *psSysPAddr = IMG_NULL;
IMG_UINT32 ui32PageTableSize = 0;
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_WRAP_EXT_MEMORY);
-
- NEW_HANDLE_BATCH_OR_ERROR(psWrapExtMemOUT->eError, psPerProc, 2)
-
-
- psWrapExtMemOUT->eError =
- PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
- psWrapExtMemIN->hDevCookie,
- PVRSRV_HANDLE_TYPE_DEV_NODE);
- if(psWrapExtMemOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
-
-
- psWrapExtMemOUT->eError =
- PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemContextInt,
- psWrapExtMemIN->hDevMemContext,
- PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
-
- if(psWrapExtMemOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
-
- if(psWrapExtMemIN->ui32NumPageTableEntries)
- {
- ui32PageTableSize = psWrapExtMemIN->ui32NumPageTableEntries
- * sizeof(IMG_SYS_PHYADDR);
-
- ASSIGN_AND_EXIT_ON_ERROR(psWrapExtMemOUT->eError,
- OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
- ui32PageTableSize,
- (IMG_VOID **)&psSysPAddr, 0,
- "Page Table"));
-
- if(CopyFromUserWrapper(psPerProc,
- ui32BridgeID,
- psSysPAddr,
- psWrapExtMemIN->psSysPAddr,
- ui32PageTableSize) != PVRSRV_OK)
- {
- OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32PageTableSize, (IMG_VOID *)psSysPAddr, 0);
-
- return -EFAULT;
- }
- }
-
- psWrapExtMemOUT->eError =
- PVRSRVWrapExtMemoryKM(hDevCookieInt,
- psPerProc,
- hDevMemContextInt,
- psWrapExtMemIN->ui32ByteSize,
- psWrapExtMemIN->ui32PageOffset,
- psWrapExtMemIN->bPhysContig,
- psSysPAddr,
- psWrapExtMemIN->pvLinAddr,
- psWrapExtMemIN->ui32Flags,
- &psMemInfo);
-
- if(psWrapExtMemIN->ui32NumPageTableEntries)
- {
- OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
- ui32PageTableSize,
- (IMG_VOID *)psSysPAddr, 0);
-
- }
-
- if(psWrapExtMemOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
-
- psWrapExtMemOUT->sClientMemInfo.pvLinAddrKM =
- psMemInfo->pvLinAddrKM;
-
-
- psWrapExtMemOUT->sClientMemInfo.pvLinAddr = 0;
- psWrapExtMemOUT->sClientMemInfo.sDevVAddr = psMemInfo->sDevVAddr;
- psWrapExtMemOUT->sClientMemInfo.ui32Flags = psMemInfo->ui32Flags;
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_WRAP_EXT_MEMORY);
+
+ NEW_HANDLE_BATCH_OR_ERROR(psWrapExtMemOUT->eError, psPerProc, 2)
+
+
+ psWrapExtMemOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
+ psWrapExtMemIN->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE);
+ if(psWrapExtMemOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+
+ psWrapExtMemOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemContextInt,
+ psWrapExtMemIN->hDevMemContext,
+ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
+
+ if(psWrapExtMemOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
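+	/* Copy the caller's page table of system physical addresses into a
+	 * temporary kernel buffer before wrapping the external memory. */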
+ if(psWrapExtMemIN->ui32NumPageTableEntries)
+ {
+ ui32PageTableSize = psWrapExtMemIN->ui32NumPageTableEntries
+ * sizeof(IMG_SYS_PHYADDR);
+
+ ASSIGN_AND_EXIT_ON_ERROR(psWrapExtMemOUT->eError,
+ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ ui32PageTableSize,
+ (IMG_VOID **)&psSysPAddr, 0,
+ "Page Table"));
+
+ if(CopyFromUserWrapper(psPerProc,
+ ui32BridgeID,
+ psSysPAddr,
+ psWrapExtMemIN->psSysPAddr,
+ ui32PageTableSize) != PVRSRV_OK)
+ {
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32PageTableSize, (IMG_VOID *)psSysPAddr, 0);
+
+ return -EFAULT;
+ }
+ }
+
+ psWrapExtMemOUT->eError =
+ PVRSRVWrapExtMemoryKM(hDevCookieInt,
+ psPerProc,
+ hDevMemContextInt,
+ psWrapExtMemIN->ui32ByteSize,
+ psWrapExtMemIN->ui32PageOffset,
+ psWrapExtMemIN->bPhysContig,
+ psSysPAddr,
+ psWrapExtMemIN->pvLinAddr,
+ psWrapExtMemIN->ui32Flags,
+ &psMemInfo);
+
+ if(psWrapExtMemIN->ui32NumPageTableEntries)
+ {
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
+ ui32PageTableSize,
+ (IMG_VOID *)psSysPAddr, 0);
+
+ }
+
+ if(psWrapExtMemOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psWrapExtMemOUT->sClientMemInfo.pvLinAddrKM =
+ psMemInfo->pvLinAddrKM;
+
+
+ psWrapExtMemOUT->sClientMemInfo.pvLinAddr = 0;
+ psWrapExtMemOUT->sClientMemInfo.sDevVAddr = psMemInfo->sDevVAddr;
+ psWrapExtMemOUT->sClientMemInfo.ui32Flags = psMemInfo->ui32Flags;
psWrapExtMemOUT->sClientMemInfo.uAllocSize = psMemInfo->uAllocSize;
#if defined (SUPPORT_SID_INTERFACE)
#else
- psWrapExtMemOUT->sClientMemInfo.hMappingInfo = psMemInfo->sMemBlk.hOSMemHandle;
+ psWrapExtMemOUT->sClientMemInfo.hMappingInfo = psMemInfo->sMemBlk.hOSMemHandle;
#endif
- PVRSRVAllocHandleNR(psPerProc->psHandleBase,
- &psWrapExtMemOUT->sClientMemInfo.hKernelMemInfo,
- psMemInfo,
- PVRSRV_HANDLE_TYPE_MEM_INFO,
- PVRSRV_HANDLE_ALLOC_FLAG_NONE);
+ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
+ &psWrapExtMemOUT->sClientMemInfo.hKernelMemInfo,
+ psMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
#if defined (SUPPORT_SID_INTERFACE)
-
- if (psMemInfo->sMemBlk.hOSMemHandle != IMG_NULL)
- {
- PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
- &psWrapExtMemOUT->sClientMemInfo.hMappingInfo,
- psMemInfo->sMemBlk.hOSMemHandle,
- PVRSRV_HANDLE_TYPE_MEM_INFO,
- PVRSRV_HANDLE_ALLOC_FLAG_NONE,
- psWrapExtMemOUT->sClientMemInfo.hKernelMemInfo);
- }
- else
- {
- psWrapExtMemOUT->sClientMemInfo.hMappingInfo = 0;
- }
+
+ if (psMemInfo->sMemBlk.hOSMemHandle != IMG_NULL)
+ {
+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
+ &psWrapExtMemOUT->sClientMemInfo.hMappingInfo,
+ psMemInfo->sMemBlk.hOSMemHandle,
+ PVRSRV_HANDLE_TYPE_MEM_INFO,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+ psWrapExtMemOUT->sClientMemInfo.hKernelMemInfo);
+ }
+ else
+ {
+ psWrapExtMemOUT->sClientMemInfo.hMappingInfo = 0;
+ }
#endif
-
+
#if !defined(PVRSRV_DISABLE_UM_SYNCOBJ_MAPPINGS)
- psWrapExtMemOUT->sClientSyncInfo.psSyncData =
- psMemInfo->psKernelSyncInfo->psSyncData;
- psWrapExtMemOUT->sClientSyncInfo.sWriteOpsCompleteDevVAddr =
- psMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr;
- psWrapExtMemOUT->sClientSyncInfo.sReadOpsCompleteDevVAddr =
- psMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr;
+ psWrapExtMemOUT->sClientSyncInfo.psSyncData =
+ psMemInfo->psKernelSyncInfo->psSyncData;
+ psWrapExtMemOUT->sClientSyncInfo.sWriteOpsCompleteDevVAddr =
+ psMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr;
+ psWrapExtMemOUT->sClientSyncInfo.sReadOpsCompleteDevVAddr =
+ psMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr;
#if defined (SUPPORT_SID_INTERFACE)
-
- if (psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle != IMG_NULL)
- {
- PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
- &psWrapExtMemOUT->sClientSyncInfo.hMappingInfo,
- psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle,
- PVRSRV_HANDLE_TYPE_MEM_INFO,
- PVRSRV_HANDLE_ALLOC_FLAG_NONE,
- psWrapExtMemOUT->sClientMemInfo.hKernelMemInfo);
- }
- else
- {
- psWrapExtMemOUT->sClientSyncInfo.hMappingInfo = 0;
- }
+
+ if (psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle != IMG_NULL)
+ {
+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
+ &psWrapExtMemOUT->sClientSyncInfo.hMappingInfo,
+ psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle,
+ PVRSRV_HANDLE_TYPE_MEM_INFO,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+ psWrapExtMemOUT->sClientMemInfo.hKernelMemInfo);
+ }
+ else
+ {
+ psWrapExtMemOUT->sClientSyncInfo.hMappingInfo = 0;
+ }
#else
- psWrapExtMemOUT->sClientSyncInfo.hMappingInfo =
- psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle;
+ psWrapExtMemOUT->sClientSyncInfo.hMappingInfo =
+ psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle;
#endif
#endif
- psWrapExtMemOUT->sClientMemInfo.psClientSyncInfo = &psWrapExtMemOUT->sClientSyncInfo;
+ psWrapExtMemOUT->sClientMemInfo.psClientSyncInfo = &psWrapExtMemOUT->sClientSyncInfo;
- PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
- &psWrapExtMemOUT->sClientSyncInfo.hKernelSyncInfo,
- (IMG_HANDLE)psMemInfo->psKernelSyncInfo,
- PVRSRV_HANDLE_TYPE_SYNC_INFO,
- PVRSRV_HANDLE_ALLOC_FLAG_NONE,
- psWrapExtMemOUT->sClientMemInfo.hKernelMemInfo);
+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
+ &psWrapExtMemOUT->sClientSyncInfo.hKernelSyncInfo,
+ (IMG_HANDLE)psMemInfo->psKernelSyncInfo,
+ PVRSRV_HANDLE_TYPE_SYNC_INFO,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+ psWrapExtMemOUT->sClientMemInfo.hKernelMemInfo);
- COMMIT_HANDLE_BATCH_OR_ERROR(psWrapExtMemOUT->eError, psPerProc)
+ COMMIT_HANDLE_BATCH_OR_ERROR(psWrapExtMemOUT->eError, psPerProc)
- return 0;
+ return 0;
}
#endif
static IMG_INT
PVRSRVUnwrapExtMemoryBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_UNWRAP_EXT_MEMORY *psUnwrapExtMemIN,
- PVRSRV_BRIDGE_RETURN *psRetOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_UNWRAP_EXT_MEMORY *psUnwrapExtMemIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- IMG_VOID *pvMemInfo;
+ IMG_VOID *pvMemInfo;
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_UNWRAP_EXT_MEMORY);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_UNWRAP_EXT_MEMORY);
- psRetOUT->eError =
- PVRSRVLookupHandle(psPerProc->psHandleBase,
- &pvMemInfo,
- psUnwrapExtMemIN->hKernelMemInfo,
- PVRSRV_HANDLE_TYPE_MEM_INFO);
- if(psRetOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
-
- psRetOUT->eError =
- PVRSRVUnwrapExtMemoryKM((PVRSRV_KERNEL_MEM_INFO *)pvMemInfo);
- if(psRetOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
-
- psRetOUT->eError =
- PVRSRVReleaseHandle(psPerProc->psHandleBase,
- psUnwrapExtMemIN->hKernelMemInfo,
- PVRSRV_HANDLE_TYPE_MEM_INFO);
-
- return 0;
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &pvMemInfo,
+ psUnwrapExtMemIN->hKernelMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PVRSRVUnwrapExtMemoryKM((PVRSRV_KERNEL_MEM_INFO *)pvMemInfo);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PVRSRVReleaseHandle(psPerProc->psHandleBase,
+ psUnwrapExtMemIN->hKernelMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+
+ return 0;
}
static IMG_INT
PVRSRVGetFreeDeviceMemBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_GETFREEDEVICEMEM *psGetFreeDeviceMemIN,
- PVRSRV_BRIDGE_OUT_GETFREEDEVICEMEM *psGetFreeDeviceMemOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_GETFREEDEVICEMEM *psGetFreeDeviceMemIN,
+ PVRSRV_BRIDGE_OUT_GETFREEDEVICEMEM *psGetFreeDeviceMemOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GETFREE_DEVICEMEM);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GETFREE_DEVICEMEM);
- PVR_UNREFERENCED_PARAMETER(psPerProc);
+ PVR_UNREFERENCED_PARAMETER(psPerProc);
- psGetFreeDeviceMemOUT->eError =
- PVRSRVGetFreeDeviceMemKM(psGetFreeDeviceMemIN->ui32Flags,
- &psGetFreeDeviceMemOUT->ui32Total,
- &psGetFreeDeviceMemOUT->ui32Free,
- &psGetFreeDeviceMemOUT->ui32LargestBlock);
+ psGetFreeDeviceMemOUT->eError =
+ PVRSRVGetFreeDeviceMemKM(psGetFreeDeviceMemIN->ui32Flags,
+ &psGetFreeDeviceMemOUT->ui32Total,
+ &psGetFreeDeviceMemOUT->ui32Free,
+ &psGetFreeDeviceMemOUT->ui32LargestBlock);
- return 0;
+ return 0;
}
static IMG_INT
PVRMMapOSMemHandleToMMapDataBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_MHANDLE_TO_MMAP_DATA *psMMapDataIN,
- PVRSRV_BRIDGE_OUT_MHANDLE_TO_MMAP_DATA *psMMapDataOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_MHANDLE_TO_MMAP_DATA *psMMapDataIN,
+ PVRSRV_BRIDGE_OUT_MHANDLE_TO_MMAP_DATA *psMMapDataOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_MHANDLE_TO_MMAP_DATA);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_MHANDLE_TO_MMAP_DATA);
#if defined (__linux__) || defined(__QNXNTO__)
- psMMapDataOUT->eError =
- PVRMMapOSMemHandleToMMapData(psPerProc,
- psMMapDataIN->hMHandle,
- &psMMapDataOUT->ui32MMapOffset,
- &psMMapDataOUT->ui32ByteOffset,
- &psMMapDataOUT->ui32RealByteSize,
- &psMMapDataOUT->ui32UserVAddr);
+ psMMapDataOUT->eError =
+ PVRMMapOSMemHandleToMMapData(psPerProc,
+ psMMapDataIN->hMHandle,
+ &psMMapDataOUT->ui32MMapOffset,
+ &psMMapDataOUT->ui32ByteOffset,
+ &psMMapDataOUT->ui32RealByteSize,
+ &psMMapDataOUT->ui32UserVAddr);
#else
- PVR_UNREFERENCED_PARAMETER(psPerProc);
- PVR_UNREFERENCED_PARAMETER(psMMapDataIN);
+ PVR_UNREFERENCED_PARAMETER(psPerProc);
+ PVR_UNREFERENCED_PARAMETER(psMMapDataIN);
- psMMapDataOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+ psMMapDataOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
#endif
- return 0;
+ return 0;
}
static IMG_INT
PVRMMapReleaseMMapDataBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_RELEASE_MMAP_DATA *psMMapDataIN,
- PVRSRV_BRIDGE_OUT_RELEASE_MMAP_DATA *psMMapDataOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_RELEASE_MMAP_DATA *psMMapDataIN,
+ PVRSRV_BRIDGE_OUT_RELEASE_MMAP_DATA *psMMapDataOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_RELEASE_MMAP_DATA);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_RELEASE_MMAP_DATA);
#if defined (__linux__) || defined(__QNXNTO__)
- psMMapDataOUT->eError =
- PVRMMapReleaseMMapData(psPerProc,
- psMMapDataIN->hMHandle,
- &psMMapDataOUT->bMUnmap,
- &psMMapDataOUT->ui32RealByteSize,
- &psMMapDataOUT->ui32UserVAddr);
+ psMMapDataOUT->eError =
+ PVRMMapReleaseMMapData(psPerProc,
+ psMMapDataIN->hMHandle,
+ &psMMapDataOUT->bMUnmap,
+ &psMMapDataOUT->ui32RealByteSize,
+ &psMMapDataOUT->ui32UserVAddr);
#else
- PVR_UNREFERENCED_PARAMETER(psPerProc);
- PVR_UNREFERENCED_PARAMETER(psMMapDataIN);
+ PVR_UNREFERENCED_PARAMETER(psPerProc);
+ PVR_UNREFERENCED_PARAMETER(psMMapDataIN);
- psMMapDataOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+ psMMapDataOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
#endif
- return 0;
+ return 0;
}
PVRSRV_BRIDGE_RETURN *psRetOUT,
PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- IMG_HANDLE hKernelMemInfo;
+ IMG_HANDLE hKernelMemInfo;
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CHG_DEV_MEM_ATTRIBS);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CHG_DEV_MEM_ATTRIBS);
- psRetOUT->eError =
- PVRSRVLookupHandle(psPerProc->psHandleBase,
- &hKernelMemInfo,
- psChgMemAttribIN->hKernelMemInfo,
- PVRSRV_HANDLE_TYPE_MEM_INFO);
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hKernelMemInfo,
+ psChgMemAttribIN->hKernelMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
- if(psRetOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
- psRetOUT->eError =
- PVRSRVChangeDeviceMemoryAttributesKM(hKernelMemInfo, psChgMemAttribIN->ui32Attribs);
+ psRetOUT->eError =
+ PVRSRVChangeDeviceMemoryAttributesKM(hKernelMemInfo, psChgMemAttribIN->ui32Attribs);
- return 0;
+ return 0;
}
#else
static IMG_INT
PVRSRV_BRIDGE_RETURN *psRetOUT,
PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- PVR_UNREFERENCED_PARAMETER(ui32BridgeID);
- PVR_UNREFERENCED_PARAMETER(psChgMemAttribIN);
- PVR_UNREFERENCED_PARAMETER(psRetOUT);
- PVR_UNREFERENCED_PARAMETER(psPerProc);
+ PVR_UNREFERENCED_PARAMETER(ui32BridgeID);
+ PVR_UNREFERENCED_PARAMETER(psChgMemAttribIN);
+ PVR_UNREFERENCED_PARAMETER(psRetOUT);
+ PVR_UNREFERENCED_PARAMETER(psPerProc);
- return 0;
+ return 0;
}
#endif
#ifdef PDUMP
static IMG_INT
PDumpIsCaptureFrameBW(IMG_UINT32 ui32BridgeID,
- IMG_VOID *psBridgeIn,
- PVRSRV_BRIDGE_OUT_PDUMP_ISCAPTURING *psPDumpIsCapturingOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ IMG_VOID *psBridgeIn,
+ PVRSRV_BRIDGE_OUT_PDUMP_ISCAPTURING *psPDumpIsCapturingOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_ISCAPTURING);
- PVR_UNREFERENCED_PARAMETER(psBridgeIn);
- PVR_UNREFERENCED_PARAMETER(psPerProc);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_ISCAPTURING);
+ PVR_UNREFERENCED_PARAMETER(psBridgeIn);
+ PVR_UNREFERENCED_PARAMETER(psPerProc);
- psPDumpIsCapturingOUT->bIsCapturing = PDumpIsCaptureFrameKM();
- psPDumpIsCapturingOUT->eError = PVRSRV_OK;
+ psPDumpIsCapturingOUT->bIsCapturing = PDumpIsCaptureFrameKM();
+ psPDumpIsCapturingOUT->eError = PVRSRV_OK;
- return 0;
+ return 0;
}
static IMG_INT
PDumpCommentBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_PDUMP_COMMENT *psPDumpCommentIN,
- PVRSRV_BRIDGE_RETURN *psRetOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_PDUMP_COMMENT *psPDumpCommentIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_COMMENT);
- PVR_UNREFERENCED_PARAMETER(psPerProc);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_COMMENT);
+ PVR_UNREFERENCED_PARAMETER(psPerProc);
- psRetOUT->eError = PDumpCommentKM(&psPDumpCommentIN->szComment[0],
- psPDumpCommentIN->ui32Flags);
- return 0;
+ psRetOUT->eError = PDumpCommentKM(&psPDumpCommentIN->szComment[0],
+ psPDumpCommentIN->ui32Flags);
+ return 0;
}
static IMG_INT
PDumpSetFrameBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_PDUMP_SETFRAME *psPDumpSetFrameIN,
- PVRSRV_BRIDGE_RETURN *psRetOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_PDUMP_SETFRAME *psPDumpSetFrameIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_SETFRAME);
- PVR_UNREFERENCED_PARAMETER(psPerProc);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_SETFRAME);
+ PVR_UNREFERENCED_PARAMETER(psPerProc);
- psRetOUT->eError = PDumpSetFrameKM(psPDumpSetFrameIN->ui32Frame);
+ psRetOUT->eError = PDumpSetFrameKM(psPDumpSetFrameIN->ui32Frame);
- return 0;
+ return 0;
}
static IMG_INT
PDumpRegWithFlagsBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_PDUMP_DUMPREG *psPDumpRegDumpIN,
- PVRSRV_BRIDGE_RETURN *psRetOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_PDUMP_DUMPREG *psPDumpRegDumpIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- PVRSRV_DEVICE_NODE *psDeviceNode;
+ PVRSRV_DEVICE_NODE *psDeviceNode;
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_REG);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_REG);
- psRetOUT->eError =
- PVRSRVLookupHandle(psPerProc->psHandleBase,
- (IMG_VOID **)&psDeviceNode,
- psPDumpRegDumpIN->hDevCookie,
- PVRSRV_HANDLE_TYPE_DEV_NODE);
- if(psRetOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
-
- psRetOUT->eError = PDumpRegWithFlagsKM (psPDumpRegDumpIN->szRegRegion,
- psPDumpRegDumpIN->sHWReg.ui32RegAddr,
- psPDumpRegDumpIN->sHWReg.ui32RegVal,
- psPDumpRegDumpIN->ui32Flags);
-
- return 0;
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ (IMG_VOID **)&psDeviceNode,
+ psPDumpRegDumpIN->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError = PDumpRegWithFlagsKM (psPDumpRegDumpIN->szRegRegion,
+ psPDumpRegDumpIN->sHWReg.ui32RegAddr,
+ psPDumpRegDumpIN->sHWReg.ui32RegVal,
+ psPDumpRegDumpIN->ui32Flags);
+
+ return 0;
}
static IMG_INT
PDumpRegPolBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_PDUMP_REGPOL *psPDumpRegPolIN,
- PVRSRV_BRIDGE_RETURN *psRetOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_PDUMP_REGPOL *psPDumpRegPolIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- PVRSRV_DEVICE_NODE *psDeviceNode;
+ PVRSRV_DEVICE_NODE *psDeviceNode;
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_REGPOL);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_REGPOL);
- psRetOUT->eError =
- PVRSRVLookupHandle(psPerProc->psHandleBase,
- (IMG_VOID **)&psDeviceNode,
- psPDumpRegPolIN->hDevCookie,
- PVRSRV_HANDLE_TYPE_DEV_NODE);
- if(psRetOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
-
-
- psRetOUT->eError =
- PDumpRegPolWithFlagsKM(psPDumpRegPolIN->szRegRegion,
- psPDumpRegPolIN->sHWReg.ui32RegAddr,
- psPDumpRegPolIN->sHWReg.ui32RegVal,
- psPDumpRegPolIN->ui32Mask,
- psPDumpRegPolIN->ui32Flags,
- PDUMP_POLL_OPERATOR_EQUAL);
-
- return 0;
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ (IMG_VOID **)&psDeviceNode,
+ psPDumpRegPolIN->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+
+ psRetOUT->eError =
+ PDumpRegPolWithFlagsKM(psPDumpRegPolIN->szRegRegion,
+ psPDumpRegPolIN->sHWReg.ui32RegAddr,
+ psPDumpRegPolIN->sHWReg.ui32RegVal,
+ psPDumpRegPolIN->ui32Mask,
+ psPDumpRegPolIN->ui32Flags,
+ PDUMP_POLL_OPERATOR_EQUAL);
+
+ return 0;
}
static IMG_INT
PDumpMemPolBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_PDUMP_MEMPOL *psPDumpMemPolIN,
- PVRSRV_BRIDGE_RETURN *psRetOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_PDUMP_MEMPOL *psPDumpMemPolIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- IMG_VOID *pvMemInfo;
+ IMG_VOID *pvMemInfo;
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_MEMPOL);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_MEMPOL);
- psRetOUT->eError =
- PVRSRVLookupHandle(psPerProc->psHandleBase,
- &pvMemInfo,
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &pvMemInfo,
#if defined (SUPPORT_SID_INTERFACE)
psPDumpMemPolIN->hKernelMemInfo,
#else
- psPDumpMemPolIN->psKernelMemInfo,
+ psPDumpMemPolIN->psKernelMemInfo,
#endif
- PVRSRV_HANDLE_TYPE_MEM_INFO);
- if(psRetOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
-
- psRetOUT->eError =
- PDumpMemPolKM(((PVRSRV_KERNEL_MEM_INFO *)pvMemInfo),
- psPDumpMemPolIN->ui32Offset,
- psPDumpMemPolIN->ui32Value,
- psPDumpMemPolIN->ui32Mask,
- psPDumpMemPolIN->eOperator,
- psPDumpMemPolIN->ui32Flags,
- MAKEUNIQUETAG(pvMemInfo));
-
- return 0;
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PDumpMemPolKM(((PVRSRV_KERNEL_MEM_INFO *)pvMemInfo),
+ psPDumpMemPolIN->ui32Offset,
+ psPDumpMemPolIN->ui32Value,
+ psPDumpMemPolIN->ui32Mask,
+ psPDumpMemPolIN->eOperator,
+ psPDumpMemPolIN->ui32Flags,
+ MAKEUNIQUETAG(pvMemInfo));
+
+ return 0;
}
static IMG_INT
PDumpMemBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_PDUMP_DUMPMEM *psPDumpMemDumpIN,
- PVRSRV_BRIDGE_RETURN *psRetOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_PDUMP_DUMPMEM *psPDumpMemDumpIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- IMG_VOID *pvMemInfo;
+ IMG_VOID *pvMemInfo;
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DUMPMEM);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DUMPMEM);
- psRetOUT->eError =
- PVRSRVLookupHandle(psPerProc->psHandleBase,
- &pvMemInfo,
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &pvMemInfo,
#if defined (SUPPORT_SID_INTERFACE)
- psPDumpMemDumpIN->hKernelMemInfo,
+ psPDumpMemDumpIN->hKernelMemInfo,
#else
- psPDumpMemDumpIN->psKernelMemInfo,
+ psPDumpMemDumpIN->psKernelMemInfo,
#endif
- PVRSRV_HANDLE_TYPE_MEM_INFO);
- if(psRetOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
-
- psRetOUT->eError =
- PDumpMemUM(psPerProc,
- psPDumpMemDumpIN->pvAltLinAddr,
- psPDumpMemDumpIN->pvLinAddr,
- pvMemInfo,
- psPDumpMemDumpIN->ui32Offset,
- psPDumpMemDumpIN->ui32Bytes,
- psPDumpMemDumpIN->ui32Flags,
- MAKEUNIQUETAG(pvMemInfo));
-
- return 0;
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PDumpMemUM(psPerProc,
+ psPDumpMemDumpIN->pvAltLinAddr,
+ psPDumpMemDumpIN->pvLinAddr,
+ pvMemInfo,
+ psPDumpMemDumpIN->ui32Offset,
+ psPDumpMemDumpIN->ui32Bytes,
+ psPDumpMemDumpIN->ui32Flags,
+ MAKEUNIQUETAG(pvMemInfo));
+
+ return 0;
}
static IMG_INT
PDumpBitmapBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_PDUMP_BITMAP *psPDumpBitmapIN,
- PVRSRV_BRIDGE_RETURN *psRetOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_PDUMP_BITMAP *psPDumpBitmapIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- PVRSRV_DEVICE_NODE *psDeviceNode;
- IMG_HANDLE hDevMemContextInt;
-
- PVR_UNREFERENCED_PARAMETER(ui32BridgeID);
-
- psRetOUT->eError =
- PVRSRVLookupHandle(psPerProc->psHandleBase, (IMG_VOID **)&psDeviceNode,
- psPDumpBitmapIN->hDevCookie,
- PVRSRV_HANDLE_TYPE_DEV_NODE);
-
- psRetOUT->eError =
- PVRSRVLookupHandle( psPerProc->psHandleBase,
- &hDevMemContextInt,
- psPDumpBitmapIN->hDevMemContext,
- PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
-
- if(psRetOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
-
- psRetOUT->eError =
- PDumpBitmapKM(psDeviceNode,
- &psPDumpBitmapIN->szFileName[0],
- psPDumpBitmapIN->ui32FileOffset,
- psPDumpBitmapIN->ui32Width,
- psPDumpBitmapIN->ui32Height,
- psPDumpBitmapIN->ui32StrideInBytes,
- psPDumpBitmapIN->sDevBaseAddr,
- hDevMemContextInt,
- psPDumpBitmapIN->ui32Size,
- psPDumpBitmapIN->ePixelFormat,
- psPDumpBitmapIN->eMemFormat,
- psPDumpBitmapIN->ui32Flags);
-
- return 0;
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ IMG_HANDLE hDevMemContextInt;
+
+ PVR_UNREFERENCED_PARAMETER(ui32BridgeID);
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase, (IMG_VOID **)&psDeviceNode,
+ psPDumpBitmapIN->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE);
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle( psPerProc->psHandleBase,
+ &hDevMemContextInt,
+ psPDumpBitmapIN->hDevMemContext,
+ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
+
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PDumpBitmapKM(psDeviceNode,
+ &psPDumpBitmapIN->szFileName[0],
+ psPDumpBitmapIN->ui32FileOffset,
+ psPDumpBitmapIN->ui32Width,
+ psPDumpBitmapIN->ui32Height,
+ psPDumpBitmapIN->ui32StrideInBytes,
+ psPDumpBitmapIN->sDevBaseAddr,
+ hDevMemContextInt,
+ psPDumpBitmapIN->ui32Size,
+ psPDumpBitmapIN->ePixelFormat,
+ psPDumpBitmapIN->eMemFormat,
+ psPDumpBitmapIN->ui32Flags);
+
+ return 0;
}
static IMG_INT
PDumpReadRegBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_PDUMP_READREG *psPDumpReadRegIN,
- PVRSRV_BRIDGE_RETURN *psRetOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_PDUMP_READREG *psPDumpReadRegIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- PVRSRV_DEVICE_NODE *psDeviceNode;
+ PVRSRV_DEVICE_NODE *psDeviceNode;
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DUMPREADREG);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DUMPREADREG);
- psRetOUT->eError =
- PVRSRVLookupHandle(psPerProc->psHandleBase, (IMG_VOID **)&psDeviceNode,
- psPDumpReadRegIN->hDevCookie,
- PVRSRV_HANDLE_TYPE_DEV_NODE);
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase, (IMG_VOID **)&psDeviceNode,
+ psPDumpReadRegIN->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE);
- psRetOUT->eError =
- PDumpReadRegKM(&psPDumpReadRegIN->szRegRegion[0],
- &psPDumpReadRegIN->szFileName[0],
- psPDumpReadRegIN->ui32FileOffset,
- psPDumpReadRegIN->ui32Address,
- psPDumpReadRegIN->ui32Size,
- psPDumpReadRegIN->ui32Flags);
+ psRetOUT->eError =
+ PDumpReadRegKM(&psPDumpReadRegIN->szRegRegion[0],
+ &psPDumpReadRegIN->szFileName[0],
+ psPDumpReadRegIN->ui32FileOffset,
+ psPDumpReadRegIN->ui32Address,
+ psPDumpReadRegIN->ui32Size,
+ psPDumpReadRegIN->ui32Flags);
- return 0;
+ return 0;
}
static IMG_INT
PDumpMemPagesBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_PDUMP_MEMPAGES *psPDumpMemPagesIN,
- PVRSRV_BRIDGE_RETURN *psRetOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_PDUMP_MEMPAGES *psPDumpMemPagesIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- PVRSRV_DEVICE_NODE *psDeviceNode;
+ PVRSRV_DEVICE_NODE *psDeviceNode;
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_MEMPAGES);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_MEMPAGES);
- psRetOUT->eError =
- PVRSRVLookupHandle(psPerProc->psHandleBase,
- (IMG_VOID **)&psDeviceNode,
- psPDumpMemPagesIN->hDevCookie,
- PVRSRV_HANDLE_TYPE_DEV_NODE);
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ (IMG_VOID **)&psDeviceNode,
+ psPDumpMemPagesIN->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE);
- if(psRetOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
- return 0;
+ return 0;
}
static IMG_INT
PDumpDriverInfoBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_PDUMP_DRIVERINFO *psPDumpDriverInfoIN,
- PVRSRV_BRIDGE_RETURN *psRetOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_PDUMP_DRIVERINFO *psPDumpDriverInfoIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- IMG_UINT32 ui32PDumpFlags;
+ IMG_UINT32 ui32PDumpFlags;
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DRIVERINFO);
- PVR_UNREFERENCED_PARAMETER(psPerProc);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DRIVERINFO);
+ PVR_UNREFERENCED_PARAMETER(psPerProc);
- ui32PDumpFlags = 0;
- if(psPDumpDriverInfoIN->bContinuous)
- {
- ui32PDumpFlags |= PDUMP_FLAGS_CONTINUOUS;
- }
- psRetOUT->eError =
- PDumpDriverInfoKM(&psPDumpDriverInfoIN->szString[0],
- ui32PDumpFlags);
+ ui32PDumpFlags = 0;
+ if(psPDumpDriverInfoIN->bContinuous)
+ {
+ ui32PDumpFlags |= PDUMP_FLAGS_CONTINUOUS;
+ }
+ psRetOUT->eError =
+ PDumpDriverInfoKM(&psPDumpDriverInfoIN->szString[0],
+ ui32PDumpFlags);
- return 0;
+ return 0;
}
static IMG_INT
PDumpSyncDumpBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_PDUMP_DUMPSYNC *psPDumpSyncDumpIN,
- PVRSRV_BRIDGE_RETURN *psRetOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_PDUMP_DUMPSYNC *psPDumpSyncDumpIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- IMG_UINT32 ui32Bytes = psPDumpSyncDumpIN->ui32Bytes;
- IMG_VOID *pvSyncInfo;
+ IMG_UINT32 ui32Bytes = psPDumpSyncDumpIN->ui32Bytes;
+ IMG_VOID *pvSyncInfo;
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DUMPSYNC);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DUMPSYNC);
- psRetOUT->eError =
- PVRSRVLookupHandle(psPerProc->psHandleBase, &pvSyncInfo,
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase, &pvSyncInfo,
#if defined (SUPPORT_SID_INTERFACE)
psPDumpSyncDumpIN->hKernelSyncInfo,
#else
- psPDumpSyncDumpIN->psKernelSyncInfo,
+ psPDumpSyncDumpIN->psKernelSyncInfo,
#endif
- PVRSRV_HANDLE_TYPE_SYNC_INFO);
- if(psRetOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
-
- psRetOUT->eError =
- PDumpMemUM(psPerProc,
- psPDumpSyncDumpIN->pvAltLinAddr,
- IMG_NULL,
- ((PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo)->psSyncDataMemInfoKM,
- psPDumpSyncDumpIN->ui32Offset,
- ui32Bytes,
- 0,
- MAKEUNIQUETAG(((PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo)->psSyncDataMemInfoKM));
-
- return 0;
+ PVRSRV_HANDLE_TYPE_SYNC_INFO);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PDumpMemUM(psPerProc,
+ psPDumpSyncDumpIN->pvAltLinAddr,
+ IMG_NULL,
+ ((PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo)->psSyncDataMemInfoKM,
+ psPDumpSyncDumpIN->ui32Offset,
+ ui32Bytes,
+ 0,
+ MAKEUNIQUETAG(((PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo)->psSyncDataMemInfoKM));
+
+ return 0;
}
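/* Bridge wrapper for PVRSRV_BRIDGE_PDUMP_SYNCPOL: looks up the kernel sync info,
   picks the offset of ui32ReadOpsComplete or ui32WriteOpsComplete according to
   bIsRead, takes the poll value/mask either from the last dumped op value (with a
   full 0xffffffff mask) or from the caller, and emits the poll with PDumpMemPolKM. */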
static IMG_INT
PDumpSyncPolBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_PDUMP_SYNCPOL *psPDumpSyncPolIN,
- PVRSRV_BRIDGE_RETURN *psRetOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_PDUMP_SYNCPOL *psPDumpSyncPolIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- IMG_UINT32 ui32Offset;
- IMG_VOID *pvSyncInfo;
- IMG_UINT32 ui32Value;
- IMG_UINT32 ui32Mask;
+ IMG_UINT32 ui32Offset;
+ IMG_VOID *pvSyncInfo;
+ IMG_UINT32 ui32Value;
+ IMG_UINT32 ui32Mask;
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_SYNCPOL);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_SYNCPOL);
- psRetOUT->eError =
+ psRetOUT->eError =
PVRSRVLookupHandle(psPerProc->psHandleBase,
&pvSyncInfo,
#if defined (SUPPORT_SID_INTERFACE)
- psPDumpSyncPolIN->hKernelSyncInfo,
+ psPDumpSyncPolIN->hKernelSyncInfo,
#else
- psPDumpSyncPolIN->psKernelSyncInfo,
+ psPDumpSyncPolIN->psKernelSyncInfo,
#endif
- PVRSRV_HANDLE_TYPE_SYNC_INFO);
- if(psRetOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
-
- if(psPDumpSyncPolIN->bIsRead)
- {
- ui32Offset = offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete);
- }
- else
- {
- ui32Offset = offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete);
- }
-
-
- if (psPDumpSyncPolIN->bUseLastOpDumpVal)
- {
- if(psPDumpSyncPolIN->bIsRead)
- {
- ui32Value = ((PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo)->psSyncData->ui32LastReadOpDumpVal;
- }
- else
- {
- ui32Value = ((PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo)->psSyncData->ui32LastOpDumpVal;
- }
- ui32Mask = 0xffffffff;
- }
- else
- {
- ui32Value = psPDumpSyncPolIN->ui32Value;
- ui32Mask = psPDumpSyncPolIN->ui32Mask;
- }
-
- psRetOUT->eError =
- PDumpMemPolKM(((PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo)->psSyncDataMemInfoKM,
- ui32Offset,
- ui32Value,
- ui32Mask,
- PDUMP_POLL_OPERATOR_EQUAL,
- 0,
- MAKEUNIQUETAG(((PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo)->psSyncDataMemInfoKM));
-
- return 0;
+ PVRSRV_HANDLE_TYPE_SYNC_INFO);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ if(psPDumpSyncPolIN->bIsRead)
+ {
+ ui32Offset = offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete);
+ }
+ else
+ {
+ ui32Offset = offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete);
+ }
+
+
+ if (psPDumpSyncPolIN->bUseLastOpDumpVal)
+ {
+ if(psPDumpSyncPolIN->bIsRead)
+ {
+ ui32Value = ((PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo)->psSyncData->ui32LastReadOpDumpVal;
+ }
+ else
+ {
+ ui32Value = ((PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo)->psSyncData->ui32LastOpDumpVal;
+ }
+ ui32Mask = 0xffffffff;
+ }
+ else
+ {
+ ui32Value = psPDumpSyncPolIN->ui32Value;
+ ui32Mask = psPDumpSyncPolIN->ui32Mask;
+ }
+
+ psRetOUT->eError =
+ PDumpMemPolKM(((PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo)->psSyncDataMemInfoKM,
+ ui32Offset,
+ ui32Value,
+ ui32Mask,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ 0,
+ MAKEUNIQUETAG(((PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo)->psSyncDataMemInfoKM));
+
+ return 0;
}
static IMG_INT
PDumpCycleCountRegReadBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_PDUMP_CYCLE_COUNT_REG_READ *psPDumpCycleCountRegReadIN,
- PVRSRV_BRIDGE_RETURN *psRetOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_PDUMP_CYCLE_COUNT_REG_READ *psPDumpCycleCountRegReadIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- PVRSRV_DEVICE_NODE *psDeviceNode;
+ PVRSRV_DEVICE_NODE *psDeviceNode;
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_CYCLE_COUNT_REG_READ);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_CYCLE_COUNT_REG_READ);
- psRetOUT->eError =
- PVRSRVLookupHandle(psPerProc->psHandleBase,
- (IMG_VOID **)&psDeviceNode,
- psPDumpCycleCountRegReadIN->hDevCookie,
- PVRSRV_HANDLE_TYPE_DEV_NODE);
- if(psRetOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ (IMG_VOID **)&psDeviceNode,
+ psPDumpCycleCountRegReadIN->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
- PDumpCycleCountRegRead(&psDeviceNode->sDevId,
- psPDumpCycleCountRegReadIN->ui32RegOffset,
- psPDumpCycleCountRegReadIN->bLastFrame);
+ PDumpCycleCountRegRead(&psDeviceNode->sDevId,
+ psPDumpCycleCountRegReadIN->ui32RegOffset,
+ psPDumpCycleCountRegReadIN->bLastFrame);
- psRetOUT->eError = PVRSRV_OK;
+ psRetOUT->eError = PVRSRV_OK;
- return 0;
+ return 0;
}
static IMG_INT
PDumpPDDevPAddrBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_PDUMP_DUMPPDDEVPADDR *psPDumpPDDevPAddrIN,
- PVRSRV_BRIDGE_RETURN *psRetOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_PDUMP_DUMPPDDEVPADDR *psPDumpPDDevPAddrIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- IMG_VOID *pvMemInfo;
-
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DUMPPDDEVPADDR);
-
- psRetOUT->eError =
- PVRSRVLookupHandle(psPerProc->psHandleBase, &pvMemInfo,
- psPDumpPDDevPAddrIN->hKernelMemInfo,
- PVRSRV_HANDLE_TYPE_MEM_INFO);
- if(psRetOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
-
- psRetOUT->eError =
- PDumpPDDevPAddrKM((PVRSRV_KERNEL_MEM_INFO *)pvMemInfo,
- psPDumpPDDevPAddrIN->ui32Offset,
- psPDumpPDDevPAddrIN->sPDDevPAddr,
- MAKEUNIQUETAG(pvMemInfo),
- PDUMP_PD_UNIQUETAG);
- return 0;
+ IMG_VOID *pvMemInfo;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DUMPPDDEVPADDR);
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase, &pvMemInfo,
+ psPDumpPDDevPAddrIN->hKernelMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PDumpPDDevPAddrKM((PVRSRV_KERNEL_MEM_INFO *)pvMemInfo,
+ psPDumpPDDevPAddrIN->ui32Offset,
+ psPDumpPDDevPAddrIN->sPDDevPAddr,
+ MAKEUNIQUETAG(pvMemInfo),
+ PDUMP_PD_UNIQUETAG);
+ return 0;
}
static IMG_INT
PDumpStartInitPhaseBW(IMG_UINT32 ui32BridgeID,
- IMG_VOID *psBridgeIn,
- PVRSRV_BRIDGE_RETURN *psRetOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ IMG_VOID *psBridgeIn,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_STARTINITPHASE);
- PVR_UNREFERENCED_PARAMETER(psBridgeIn);
- PVR_UNREFERENCED_PARAMETER(psPerProc);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_STARTINITPHASE);
+ PVR_UNREFERENCED_PARAMETER(psBridgeIn);
+ PVR_UNREFERENCED_PARAMETER(psPerProc);
- psRetOUT->eError = PDumpStartInitPhaseKM();
+ psRetOUT->eError = PDumpStartInitPhaseKM();
- return 0;
+ return 0;
}
static IMG_INT
PDumpStopInitPhaseBW(IMG_UINT32 ui32BridgeID,
- IMG_VOID *psBridgeIn,
- PVRSRV_BRIDGE_RETURN *psRetOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ IMG_VOID *psBridgeIn,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_STOPINITPHASE);
- PVR_UNREFERENCED_PARAMETER(psBridgeIn);
- PVR_UNREFERENCED_PARAMETER(psPerProc);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_STOPINITPHASE);
+ PVR_UNREFERENCED_PARAMETER(psBridgeIn);
+ PVR_UNREFERENCED_PARAMETER(psPerProc);
- psRetOUT->eError = PDumpStopInitPhaseKM();
+ psRetOUT->eError = PDumpStopInitPhaseKM();
- return 0;
+ return 0;
}
#endif
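/* Bridge wrapper for PVRSRV_BRIDGE_GET_MISC_INFO: copies the caller's request,
   rejects requests that set MEMSTATS, DDKVERSION and FREEMEM together, allocates a
   temporary buffer for the memory/version string and copies it back to user space
   with CopyToUserWrapper, then wraps the global event object and SOC timer register
   handles in shared per-process handles before returning. */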
static IMG_INT
PVRSRVGetMiscInfoBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_GET_MISC_INFO *psGetMiscInfoIN,
- PVRSRV_BRIDGE_OUT_GET_MISC_INFO *psGetMiscInfoOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_GET_MISC_INFO *psGetMiscInfoIN,
+ PVRSRV_BRIDGE_OUT_GET_MISC_INFO *psGetMiscInfoOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
#if defined (SUPPORT_SID_INTERFACE)
PVRSRV_MISC_INFO_KM sMiscInfo = {0};
#endif
- PVRSRV_ERROR eError;
+ PVRSRV_ERROR eError;
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_MISC_INFO);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_MISC_INFO);
#if defined (SUPPORT_SID_INTERFACE)
- sMiscInfo.ui32StateRequest = psGetMiscInfoIN->sMiscInfo.ui32StateRequest;
- sMiscInfo.ui32StatePresent = psGetMiscInfoIN->sMiscInfo.ui32StatePresent;
- sMiscInfo.ui32MemoryStrLen = psGetMiscInfoIN->sMiscInfo.ui32MemoryStrLen;
- sMiscInfo.pszMemoryStr = psGetMiscInfoIN->sMiscInfo.pszMemoryStr;
-
- OSMemCopy(&sMiscInfo.sCacheOpCtl,
- &psGetMiscInfoIN->sMiscInfo.sCacheOpCtl,
- sizeof(sMiscInfo.sCacheOpCtl));
+ sMiscInfo.ui32StateRequest = psGetMiscInfoIN->sMiscInfo.ui32StateRequest;
+ sMiscInfo.ui32StatePresent = psGetMiscInfoIN->sMiscInfo.ui32StatePresent;
+ sMiscInfo.ui32MemoryStrLen = psGetMiscInfoIN->sMiscInfo.ui32MemoryStrLen;
+ sMiscInfo.pszMemoryStr = psGetMiscInfoIN->sMiscInfo.pszMemoryStr;
+
+ OSMemCopy(&sMiscInfo.sCacheOpCtl,
+ &psGetMiscInfoIN->sMiscInfo.sCacheOpCtl,
+ sizeof(sMiscInfo.sCacheOpCtl));
#else
- OSMemCopy(&psGetMiscInfoOUT->sMiscInfo,
- &psGetMiscInfoIN->sMiscInfo,
- sizeof(PVRSRV_MISC_INFO));
+ OSMemCopy(&psGetMiscInfoOUT->sMiscInfo,
+ &psGetMiscInfoIN->sMiscInfo,
+ sizeof(PVRSRV_MISC_INFO));
#endif
- if (((psGetMiscInfoIN->sMiscInfo.ui32StateRequest & PVRSRV_MISC_INFO_MEMSTATS_PRESENT) != 0) &&
- ((psGetMiscInfoIN->sMiscInfo.ui32StateRequest & PVRSRV_MISC_INFO_DDKVERSION_PRESENT) != 0) &&
- ((psGetMiscInfoIN->sMiscInfo.ui32StateRequest & PVRSRV_MISC_INFO_FREEMEM_PRESENT) != 0))
- {
-
- psGetMiscInfoOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
- return 0;
- }
-
- if (((psGetMiscInfoIN->sMiscInfo.ui32StateRequest & PVRSRV_MISC_INFO_MEMSTATS_PRESENT) != 0) ||
- ((psGetMiscInfoIN->sMiscInfo.ui32StateRequest & PVRSRV_MISC_INFO_DDKVERSION_PRESENT) != 0) ||
- ((psGetMiscInfoIN->sMiscInfo.ui32StateRequest & PVRSRV_MISC_INFO_FREEMEM_PRESENT) != 0))
- {
-
+ if (((psGetMiscInfoIN->sMiscInfo.ui32StateRequest & PVRSRV_MISC_INFO_MEMSTATS_PRESENT) != 0) &&
+ ((psGetMiscInfoIN->sMiscInfo.ui32StateRequest & PVRSRV_MISC_INFO_DDKVERSION_PRESENT) != 0) &&
+ ((psGetMiscInfoIN->sMiscInfo.ui32StateRequest & PVRSRV_MISC_INFO_FREEMEM_PRESENT) != 0))
+ {
+
+ psGetMiscInfoOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+ return 0;
+ }
+
+ if (((psGetMiscInfoIN->sMiscInfo.ui32StateRequest & PVRSRV_MISC_INFO_MEMSTATS_PRESENT) != 0) ||
+ ((psGetMiscInfoIN->sMiscInfo.ui32StateRequest & PVRSRV_MISC_INFO_DDKVERSION_PRESENT) != 0) ||
+ ((psGetMiscInfoIN->sMiscInfo.ui32StateRequest & PVRSRV_MISC_INFO_FREEMEM_PRESENT) != 0))
+ {
+
#if defined (SUPPORT_SID_INTERFACE)
- ASSIGN_AND_EXIT_ON_ERROR(psGetMiscInfoOUT->eError,
- OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
- psGetMiscInfoOUT->sMiscInfo.ui32MemoryStrLen,
- (IMG_VOID **)&sMiscInfo.pszMemoryStr, 0,
- "Output string buffer"));
- psGetMiscInfoOUT->eError = PVRSRVGetMiscInfoKM(&sMiscInfo);
-
-
- eError = CopyToUserWrapper(psPerProc, ui32BridgeID,
- psGetMiscInfoIN->sMiscInfo.pszMemoryStr,
- sMiscInfo.pszMemoryStr,
- sMiscInfo.ui32MemoryStrLen);
+ ASSIGN_AND_EXIT_ON_ERROR(psGetMiscInfoOUT->eError,
+ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ psGetMiscInfoOUT->sMiscInfo.ui32MemoryStrLen,
+ (IMG_VOID **)&sMiscInfo.pszMemoryStr, 0,
+ "Output string buffer"));
+ psGetMiscInfoOUT->eError = PVRSRVGetMiscInfoKM(&sMiscInfo);
+
+
+ eError = CopyToUserWrapper(psPerProc, ui32BridgeID,
+ psGetMiscInfoIN->sMiscInfo.pszMemoryStr,
+ sMiscInfo.pszMemoryStr,
+ sMiscInfo.ui32MemoryStrLen);
#else
- ASSIGN_AND_EXIT_ON_ERROR(psGetMiscInfoOUT->eError,
- OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
- psGetMiscInfoOUT->sMiscInfo.ui32MemoryStrLen,
- (IMG_VOID **)&psGetMiscInfoOUT->sMiscInfo.pszMemoryStr, 0,
- "Output string buffer"));
-
- psGetMiscInfoOUT->eError = PVRSRVGetMiscInfoKM(&psGetMiscInfoOUT->sMiscInfo);
-
-
- eError = CopyToUserWrapper(psPerProc, ui32BridgeID,
- psGetMiscInfoIN->sMiscInfo.pszMemoryStr,
- psGetMiscInfoOUT->sMiscInfo.pszMemoryStr,
- psGetMiscInfoOUT->sMiscInfo.ui32MemoryStrLen);
+ ASSIGN_AND_EXIT_ON_ERROR(psGetMiscInfoOUT->eError,
+ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ psGetMiscInfoOUT->sMiscInfo.ui32MemoryStrLen,
+ (IMG_VOID **)&psGetMiscInfoOUT->sMiscInfo.pszMemoryStr, 0,
+ "Output string buffer"));
+
+ psGetMiscInfoOUT->eError = PVRSRVGetMiscInfoKM(&psGetMiscInfoOUT->sMiscInfo);
+
+
+ eError = CopyToUserWrapper(psPerProc, ui32BridgeID,
+ psGetMiscInfoIN->sMiscInfo.pszMemoryStr,
+ psGetMiscInfoOUT->sMiscInfo.pszMemoryStr,
+ psGetMiscInfoOUT->sMiscInfo.ui32MemoryStrLen);
#endif
-
+
#if defined (SUPPORT_SID_INTERFACE)
- OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
- sMiscInfo.ui32MemoryStrLen,
- (IMG_VOID *)sMiscInfo.pszMemoryStr, 0);
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sMiscInfo.ui32MemoryStrLen,
+ (IMG_VOID *)sMiscInfo.pszMemoryStr, 0);
#else
- OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
- psGetMiscInfoOUT->sMiscInfo.ui32MemoryStrLen,
- (IMG_VOID *)psGetMiscInfoOUT->sMiscInfo.pszMemoryStr, 0);
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
+ psGetMiscInfoOUT->sMiscInfo.ui32MemoryStrLen,
+ (IMG_VOID *)psGetMiscInfoOUT->sMiscInfo.pszMemoryStr, 0);
#endif
-
- psGetMiscInfoOUT->sMiscInfo.pszMemoryStr = psGetMiscInfoIN->sMiscInfo.pszMemoryStr;
-
- if(eError != PVRSRV_OK)
- {
-
- PVR_DPF((PVR_DBG_ERROR, "PVRSRVGetMiscInfoBW Error copy to user"));
- return -EFAULT;
- }
- }
- else
- {
+
+ psGetMiscInfoOUT->sMiscInfo.pszMemoryStr = psGetMiscInfoIN->sMiscInfo.pszMemoryStr;
+
+ if(eError != PVRSRV_OK)
+ {
+
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVGetMiscInfoBW Error copy to user"));
+ return -EFAULT;
+ }
+ }
+ else
+ {
#if defined (SUPPORT_SID_INTERFACE)
- psGetMiscInfoOUT->eError = PVRSRVGetMiscInfoKM(&sMiscInfo);
+ psGetMiscInfoOUT->eError = PVRSRVGetMiscInfoKM(&sMiscInfo);
#else
- psGetMiscInfoOUT->eError = PVRSRVGetMiscInfoKM(&psGetMiscInfoOUT->sMiscInfo);
+ psGetMiscInfoOUT->eError = PVRSRVGetMiscInfoKM(&psGetMiscInfoOUT->sMiscInfo);
#endif
- }
+ }
-
- if (psGetMiscInfoOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
+
+ if (psGetMiscInfoOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
-
+
#if defined (SUPPORT_SID_INTERFACE)
- if (sMiscInfo.ui32StateRequest & PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT)
+ if (sMiscInfo.ui32StateRequest & PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT)
#else
- if (psGetMiscInfoIN->sMiscInfo.ui32StateRequest & PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT)
+ if (psGetMiscInfoIN->sMiscInfo.ui32StateRequest & PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT)
#endif
- {
- psGetMiscInfoOUT->eError = PVRSRVAllocHandle(psPerProc->psHandleBase,
- &psGetMiscInfoOUT->sMiscInfo.sGlobalEventObject.hOSEventKM,
+ {
+ psGetMiscInfoOUT->eError = PVRSRVAllocHandle(psPerProc->psHandleBase,
+ &psGetMiscInfoOUT->sMiscInfo.sGlobalEventObject.hOSEventKM,
#if defined (SUPPORT_SID_INTERFACE)
sMiscInfo.sGlobalEventObject.hOSEventKM,
#else
- psGetMiscInfoOUT->sMiscInfo.sGlobalEventObject.hOSEventKM,
+ psGetMiscInfoOUT->sMiscInfo.sGlobalEventObject.hOSEventKM,
#endif
- PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT,
- PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
+ PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT,
+ PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
- if (psGetMiscInfoOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
+ if (psGetMiscInfoOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
#if defined (SUPPORT_SID_INTERFACE)
- OSMemCopy(&psGetMiscInfoOUT->sMiscInfo.sGlobalEventObject.szName,
- sMiscInfo.sGlobalEventObject.szName,
- EVENTOBJNAME_MAXLENGTH);
+ OSMemCopy(&psGetMiscInfoOUT->sMiscInfo.sGlobalEventObject.szName,
+ sMiscInfo.sGlobalEventObject.szName,
+ EVENTOBJNAME_MAXLENGTH);
#endif
- }
+ }
#if defined (SUPPORT_SID_INTERFACE)
- if (sMiscInfo.hSOCTimerRegisterOSMemHandle)
+ if (sMiscInfo.hSOCTimerRegisterOSMemHandle)
#else
- if (psGetMiscInfoOUT->sMiscInfo.hSOCTimerRegisterOSMemHandle)
+ if (psGetMiscInfoOUT->sMiscInfo.hSOCTimerRegisterOSMemHandle)
#endif
- {
-
- psGetMiscInfoOUT->eError = PVRSRVAllocHandle(psPerProc->psHandleBase,
- &psGetMiscInfoOUT->sMiscInfo.hSOCTimerRegisterOSMemHandle,
+ {
+
+ psGetMiscInfoOUT->eError = PVRSRVAllocHandle(psPerProc->psHandleBase,
+ &psGetMiscInfoOUT->sMiscInfo.hSOCTimerRegisterOSMemHandle,
#if defined (SUPPORT_SID_INTERFACE)
sMiscInfo.hSOCTimerRegisterOSMemHandle,
#else
- psGetMiscInfoOUT->sMiscInfo.hSOCTimerRegisterOSMemHandle,
+ psGetMiscInfoOUT->sMiscInfo.hSOCTimerRegisterOSMemHandle,
#endif
- PVRSRV_HANDLE_TYPE_SOC_TIMER,
- PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
-
- if (psGetMiscInfoOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
- }
+ PVRSRV_HANDLE_TYPE_SOC_TIMER,
+ PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
+
+ if (psGetMiscInfoOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+ }
#if defined (SUPPORT_SID_INTERFACE)
- else
- {
- psGetMiscInfoOUT->sMiscInfo.hSOCTimerRegisterOSMemHandle = 0;
- }
-
-
- psGetMiscInfoOUT->sMiscInfo.ui32StateRequest = sMiscInfo.ui32StateRequest;
- psGetMiscInfoOUT->sMiscInfo.ui32StatePresent = sMiscInfo.ui32StatePresent;
-
- psGetMiscInfoOUT->sMiscInfo.pvSOCTimerRegisterKM = sMiscInfo.pvSOCTimerRegisterKM;
- psGetMiscInfoOUT->sMiscInfo.pvSOCTimerRegisterUM = sMiscInfo.pvSOCTimerRegisterUM;
- psGetMiscInfoOUT->sMiscInfo.pvSOCClockGateRegs = sMiscInfo.pvSOCClockGateRegs;
-
- psGetMiscInfoOUT->sMiscInfo.ui32SOCClockGateRegsSize = sMiscInfo.ui32SOCClockGateRegsSize;
-
- OSMemCopy(&psGetMiscInfoOUT->sMiscInfo.aui32DDKVersion,
- &sMiscInfo.aui32DDKVersion,
- sizeof(psGetMiscInfoOUT->sMiscInfo.aui32DDKVersion));
- OSMemCopy(&psGetMiscInfoOUT->sMiscInfo.sCacheOpCtl,
- &sMiscInfo.sCacheOpCtl,
- sizeof(psGetMiscInfoOUT->sMiscInfo.sCacheOpCtl));
+ else
+ {
+ psGetMiscInfoOUT->sMiscInfo.hSOCTimerRegisterOSMemHandle = 0;
+ }
+
+
+ psGetMiscInfoOUT->sMiscInfo.ui32StateRequest = sMiscInfo.ui32StateRequest;
+ psGetMiscInfoOUT->sMiscInfo.ui32StatePresent = sMiscInfo.ui32StatePresent;
+
+ psGetMiscInfoOUT->sMiscInfo.pvSOCTimerRegisterKM = sMiscInfo.pvSOCTimerRegisterKM;
+ psGetMiscInfoOUT->sMiscInfo.pvSOCTimerRegisterUM = sMiscInfo.pvSOCTimerRegisterUM;
+ psGetMiscInfoOUT->sMiscInfo.pvSOCClockGateRegs = sMiscInfo.pvSOCClockGateRegs;
+
+ psGetMiscInfoOUT->sMiscInfo.ui32SOCClockGateRegsSize = sMiscInfo.ui32SOCClockGateRegsSize;
+
+ OSMemCopy(&psGetMiscInfoOUT->sMiscInfo.aui32DDKVersion,
+ &sMiscInfo.aui32DDKVersion,
+ sizeof(psGetMiscInfoOUT->sMiscInfo.aui32DDKVersion));
+ OSMemCopy(&psGetMiscInfoOUT->sMiscInfo.sCacheOpCtl,
+ &sMiscInfo.sCacheOpCtl,
+ sizeof(psGetMiscInfoOUT->sMiscInfo.sCacheOpCtl));
#endif
- return 0;
+ return 0;
}
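/* Bridge wrapper for PVRSRV_BRIDGE_CONNECT_SERVICES: under PDUMP builds, records
   the SRV_FLAGS_PERSIST and (with SUPPORT_PDUMP_MULTI_PROCESS) SRV_FLAGS_PDUMP_ACTIVE
   state for the connecting process, then returns the per-process data handle as the
   kernel services connection handle. */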
static IMG_INT
PVRSRVConnectBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_CONNECT_SERVICES *psConnectServicesIN,
- PVRSRV_BRIDGE_OUT_CONNECT_SERVICES *psConnectServicesOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_CONNECT_SERVICES *psConnectServicesIN,
+ PVRSRV_BRIDGE_OUT_CONNECT_SERVICES *psConnectServicesOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CONNECT_SERVICES);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CONNECT_SERVICES);
#if defined(PDUMP)
-
+
if ((psConnectServicesIN->ui32Flags & SRV_FLAGS_PERSIST) != 0)
{
psPerProc->bPDumpPersistent = IMG_TRUE;
}
#if defined(SUPPORT_PDUMP_MULTI_PROCESS)
-
+
if ((psConnectServicesIN->ui32Flags & SRV_FLAGS_PDUMP_ACTIVE) != 0)
{
psPerProc->bPDumpActive = IMG_TRUE;
}
#endif
#else
- PVR_UNREFERENCED_PARAMETER(psConnectServicesIN);
+ PVR_UNREFERENCED_PARAMETER(psConnectServicesIN);
#endif
- psConnectServicesOUT->hKernelServices = psPerProc->hPerProcData;
- psConnectServicesOUT->eError = PVRSRV_OK;
+ psConnectServicesOUT->hKernelServices = psPerProc->hPerProcData;
+ psConnectServicesOUT->eError = PVRSRV_OK;
- return 0;
+ return 0;
}
static IMG_INT
PVRSRVDisconnectBW(IMG_UINT32 ui32BridgeID,
- IMG_VOID *psBridgeIn,
- PVRSRV_BRIDGE_RETURN *psRetOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ IMG_VOID *psBridgeIn,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- PVR_UNREFERENCED_PARAMETER(psPerProc);
- PVR_UNREFERENCED_PARAMETER(psBridgeIn);
+ PVR_UNREFERENCED_PARAMETER(psPerProc);
+ PVR_UNREFERENCED_PARAMETER(psBridgeIn);
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_DISCONNECT_SERVICES);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_DISCONNECT_SERVICES);
-
- psRetOUT->eError = PVRSRV_OK;
+
+ psRetOUT->eError = PVRSRV_OK;
- return 0;
+ return 0;
}
static IMG_INT
PVRSRVEnumerateDCBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_ENUMCLASS *psEnumDispClassIN,
- PVRSRV_BRIDGE_OUT_ENUMCLASS *psEnumDispClassOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_ENUMCLASS *psEnumDispClassIN,
+ PVRSRV_BRIDGE_OUT_ENUMCLASS *psEnumDispClassOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- PVR_UNREFERENCED_PARAMETER(psPerProc);
+ PVR_UNREFERENCED_PARAMETER(psPerProc);
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ENUM_CLASS);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ENUM_CLASS);
- psEnumDispClassOUT->eError =
- PVRSRVEnumerateDCKM(psEnumDispClassIN->sDeviceClass,
- &psEnumDispClassOUT->ui32NumDevices,
- &psEnumDispClassOUT->ui32DevID[0]);
+ psEnumDispClassOUT->eError =
+ PVRSRVEnumerateDCKM(psEnumDispClassIN->sDeviceClass,
+ &psEnumDispClassOUT->ui32NumDevices,
+ &psEnumDispClassOUT->ui32DevID[0]);
- return 0;
+ return 0;
}
static IMG_INT
PVRSRVOpenDCDeviceBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_OPEN_DISPCLASS_DEVICE *psOpenDispClassDeviceIN,
- PVRSRV_BRIDGE_OUT_OPEN_DISPCLASS_DEVICE *psOpenDispClassDeviceOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_OPEN_DISPCLASS_DEVICE *psOpenDispClassDeviceIN,
+ PVRSRV_BRIDGE_OUT_OPEN_DISPCLASS_DEVICE *psOpenDispClassDeviceOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- IMG_HANDLE hDevCookieInt;
- IMG_HANDLE hDispClassInfoInt;
+ IMG_HANDLE hDevCookieInt;
+ IMG_HANDLE hDispClassInfoInt;
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_OPEN_DISPCLASS_DEVICE);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_OPEN_DISPCLASS_DEVICE);
- NEW_HANDLE_BATCH_OR_ERROR(psOpenDispClassDeviceOUT->eError, psPerProc, 1)
+ NEW_HANDLE_BATCH_OR_ERROR(psOpenDispClassDeviceOUT->eError, psPerProc, 1)
- psOpenDispClassDeviceOUT->eError =
- PVRSRVLookupHandle(psPerProc->psHandleBase,
- &hDevCookieInt,
- psOpenDispClassDeviceIN->hDevCookie,
- PVRSRV_HANDLE_TYPE_DEV_NODE);
- if(psOpenDispClassDeviceOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
-
- psOpenDispClassDeviceOUT->eError =
- PVRSRVOpenDCDeviceKM(psPerProc,
- psOpenDispClassDeviceIN->ui32DeviceID,
- hDevCookieInt,
- &hDispClassInfoInt);
-
- if(psOpenDispClassDeviceOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
-
- PVRSRVAllocHandleNR(psPerProc->psHandleBase,
- &psOpenDispClassDeviceOUT->hDeviceKM,
- hDispClassInfoInt,
- PVRSRV_HANDLE_TYPE_DISP_INFO,
- PVRSRV_HANDLE_ALLOC_FLAG_NONE);
- COMMIT_HANDLE_BATCH_OR_ERROR(psOpenDispClassDeviceOUT->eError, psPerProc)
-
- return 0;
+ psOpenDispClassDeviceOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDevCookieInt,
+ psOpenDispClassDeviceIN->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE);
+ if(psOpenDispClassDeviceOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psOpenDispClassDeviceOUT->eError =
+ PVRSRVOpenDCDeviceKM(psPerProc,
+ psOpenDispClassDeviceIN->ui32DeviceID,
+ hDevCookieInt,
+ &hDispClassInfoInt);
+
+ if(psOpenDispClassDeviceOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
+ &psOpenDispClassDeviceOUT->hDeviceKM,
+ hDispClassInfoInt,
+ PVRSRV_HANDLE_TYPE_DISP_INFO,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
+ COMMIT_HANDLE_BATCH_OR_ERROR(psOpenDispClassDeviceOUT->eError, psPerProc)
+
+ return 0;
}
static IMG_INT
PVRSRVCloseDCDeviceBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_CLOSE_DISPCLASS_DEVICE *psCloseDispClassDeviceIN,
- PVRSRV_BRIDGE_RETURN *psRetOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_CLOSE_DISPCLASS_DEVICE *psCloseDispClassDeviceIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- IMG_VOID *pvDispClassInfoInt;
+ IMG_VOID *pvDispClassInfoInt;
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CLOSE_DISPCLASS_DEVICE);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CLOSE_DISPCLASS_DEVICE);
- psRetOUT->eError =
- PVRSRVLookupHandle(psPerProc->psHandleBase,
- &pvDispClassInfoInt,
- psCloseDispClassDeviceIN->hDeviceKM,
- PVRSRV_HANDLE_TYPE_DISP_INFO);
-
- if(psRetOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
-
- psRetOUT->eError = PVRSRVCloseDCDeviceKM(pvDispClassInfoInt, IMG_FALSE);
- if(psRetOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
-
- psRetOUT->eError =
- PVRSRVReleaseHandle(psPerProc->psHandleBase,
- psCloseDispClassDeviceIN->hDeviceKM,
- PVRSRV_HANDLE_TYPE_DISP_INFO);
- return 0;
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &pvDispClassInfoInt,
+ psCloseDispClassDeviceIN->hDeviceKM,
+ PVRSRV_HANDLE_TYPE_DISP_INFO);
+
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError = PVRSRVCloseDCDeviceKM(pvDispClassInfoInt, IMG_FALSE);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PVRSRVReleaseHandle(psPerProc->psHandleBase,
+ psCloseDispClassDeviceIN->hDeviceKM,
+ PVRSRV_HANDLE_TYPE_DISP_INFO);
+ return 0;
}
static IMG_INT
PVRSRVEnumDCFormatsBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_FORMATS *psEnumDispClassFormatsIN,
- PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_FORMATS *psEnumDispClassFormatsOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_FORMATS *psEnumDispClassFormatsIN,
+ PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_FORMATS *psEnumDispClassFormatsOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- IMG_VOID *pvDispClassInfoInt;
+ IMG_VOID *pvDispClassInfoInt;
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ENUM_DISPCLASS_FORMATS);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ENUM_DISPCLASS_FORMATS);
- psEnumDispClassFormatsOUT->eError =
- PVRSRVLookupHandle(psPerProc->psHandleBase,
- &pvDispClassInfoInt,
- psEnumDispClassFormatsIN->hDeviceKM,
- PVRSRV_HANDLE_TYPE_DISP_INFO);
- if(psEnumDispClassFormatsOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
-
- psEnumDispClassFormatsOUT->eError =
- PVRSRVEnumDCFormatsKM(pvDispClassInfoInt,
- &psEnumDispClassFormatsOUT->ui32Count,
- psEnumDispClassFormatsOUT->asFormat);
-
- return 0;
+ psEnumDispClassFormatsOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &pvDispClassInfoInt,
+ psEnumDispClassFormatsIN->hDeviceKM,
+ PVRSRV_HANDLE_TYPE_DISP_INFO);
+ if(psEnumDispClassFormatsOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psEnumDispClassFormatsOUT->eError =
+ PVRSRVEnumDCFormatsKM(pvDispClassInfoInt,
+ &psEnumDispClassFormatsOUT->ui32Count,
+ psEnumDispClassFormatsOUT->asFormat);
+
+ return 0;
}
static IMG_INT
PVRSRVEnumDCDimsBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_DIMS *psEnumDispClassDimsIN,
- PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_DIMS *psEnumDispClassDimsOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_DIMS *psEnumDispClassDimsIN,
+ PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_DIMS *psEnumDispClassDimsOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- IMG_VOID *pvDispClassInfoInt;
+ IMG_VOID *pvDispClassInfoInt;
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ENUM_DISPCLASS_DIMS);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ENUM_DISPCLASS_DIMS);
- psEnumDispClassDimsOUT->eError =
- PVRSRVLookupHandle(psPerProc->psHandleBase,
- &pvDispClassInfoInt,
- psEnumDispClassDimsIN->hDeviceKM,
- PVRSRV_HANDLE_TYPE_DISP_INFO);
-
- if(psEnumDispClassDimsOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
-
- psEnumDispClassDimsOUT->eError =
- PVRSRVEnumDCDimsKM(pvDispClassInfoInt,
- &psEnumDispClassDimsIN->sFormat,
- &psEnumDispClassDimsOUT->ui32Count,
- psEnumDispClassDimsOUT->asDim);
-
- return 0;
+ psEnumDispClassDimsOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &pvDispClassInfoInt,
+ psEnumDispClassDimsIN->hDeviceKM,
+ PVRSRV_HANDLE_TYPE_DISP_INFO);
+
+ if(psEnumDispClassDimsOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psEnumDispClassDimsOUT->eError =
+ PVRSRVEnumDCDimsKM(pvDispClassInfoInt,
+ &psEnumDispClassDimsIN->sFormat,
+ &psEnumDispClassDimsOUT->ui32Count,
+ psEnumDispClassDimsOUT->asDim);
+
+ return 0;
}
static IMG_INT
PVRSRVGetDCSystemBufferBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_GET_DISPCLASS_SYSBUFFER *psGetDispClassSysBufferIN,
- PVRSRV_BRIDGE_OUT_GET_DISPCLASS_SYSBUFFER *psGetDispClassSysBufferOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_GET_DISPCLASS_SYSBUFFER *psGetDispClassSysBufferIN,
+ PVRSRV_BRIDGE_OUT_GET_DISPCLASS_SYSBUFFER *psGetDispClassSysBufferOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- IMG_HANDLE hBufferInt;
- IMG_VOID *pvDispClassInfoInt;
+ IMG_HANDLE hBufferInt;
+ IMG_VOID *pvDispClassInfoInt;
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_DISPCLASS_SYSBUFFER);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_DISPCLASS_SYSBUFFER);
- NEW_HANDLE_BATCH_OR_ERROR(psGetDispClassSysBufferOUT->eError, psPerProc, 1)
+ NEW_HANDLE_BATCH_OR_ERROR(psGetDispClassSysBufferOUT->eError, psPerProc, 1)
- psGetDispClassSysBufferOUT->eError =
- PVRSRVLookupHandle(psPerProc->psHandleBase,
- &pvDispClassInfoInt,
- psGetDispClassSysBufferIN->hDeviceKM,
- PVRSRV_HANDLE_TYPE_DISP_INFO);
- if(psGetDispClassSysBufferOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
-
- psGetDispClassSysBufferOUT->eError =
- PVRSRVGetDCSystemBufferKM(pvDispClassInfoInt,
- &hBufferInt);
-
- if(psGetDispClassSysBufferOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
-
-
- PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
- &psGetDispClassSysBufferOUT->hBuffer,
- hBufferInt,
- PVRSRV_HANDLE_TYPE_DISP_BUFFER,
- (PVRSRV_HANDLE_ALLOC_FLAG)(PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE | PVRSRV_HANDLE_ALLOC_FLAG_SHARED),
- psGetDispClassSysBufferIN->hDeviceKM);
-
- COMMIT_HANDLE_BATCH_OR_ERROR(psGetDispClassSysBufferOUT->eError, psPerProc)
-
- return 0;
+ psGetDispClassSysBufferOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &pvDispClassInfoInt,
+ psGetDispClassSysBufferIN->hDeviceKM,
+ PVRSRV_HANDLE_TYPE_DISP_INFO);
+ if(psGetDispClassSysBufferOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psGetDispClassSysBufferOUT->eError =
+ PVRSRVGetDCSystemBufferKM(pvDispClassInfoInt,
+ &hBufferInt);
+
+ if(psGetDispClassSysBufferOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+
+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
+ &psGetDispClassSysBufferOUT->hBuffer,
+ hBufferInt,
+ PVRSRV_HANDLE_TYPE_DISP_BUFFER,
+ (PVRSRV_HANDLE_ALLOC_FLAG)(PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE | PVRSRV_HANDLE_ALLOC_FLAG_SHARED),
+ psGetDispClassSysBufferIN->hDeviceKM);
+
+ COMMIT_HANDLE_BATCH_OR_ERROR(psGetDispClassSysBufferOUT->eError, psPerProc)
+
+ return 0;
}
static IMG_INT
PVRSRVGetDCInfoBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_GET_DISPCLASS_INFO *psGetDispClassInfoIN,
- PVRSRV_BRIDGE_OUT_GET_DISPCLASS_INFO *psGetDispClassInfoOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_GET_DISPCLASS_INFO *psGetDispClassInfoIN,
+ PVRSRV_BRIDGE_OUT_GET_DISPCLASS_INFO *psGetDispClassInfoOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- IMG_VOID *pvDispClassInfo;
+ IMG_VOID *pvDispClassInfo;
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_DISPCLASS_INFO);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_DISPCLASS_INFO);
- psGetDispClassInfoOUT->eError =
- PVRSRVLookupHandle(psPerProc->psHandleBase,
- &pvDispClassInfo,
- psGetDispClassInfoIN->hDeviceKM,
- PVRSRV_HANDLE_TYPE_DISP_INFO);
- if(psGetDispClassInfoOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
-
- psGetDispClassInfoOUT->eError =
- PVRSRVGetDCInfoKM(pvDispClassInfo,
- &psGetDispClassInfoOUT->sDisplayInfo);
-
- return 0;
+ psGetDispClassInfoOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &pvDispClassInfo,
+ psGetDispClassInfoIN->hDeviceKM,
+ PVRSRV_HANDLE_TYPE_DISP_INFO);
+ if(psGetDispClassInfoOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psGetDispClassInfoOUT->eError =
+ PVRSRVGetDCInfoKM(pvDispClassInfo,
+ &psGetDispClassInfoOUT->sDisplayInfo);
+
+ return 0;
}
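/* Bridge wrapper for PVRSRV_BRIDGE_CREATE_DISPCLASS_SWAPCHAIN: resolves the display
   class device handle, creates the swap chain via PVRSRVCreateDCSwapChainKM with the
   surface attributes, buffer count and OEM flags, then returns the swap chain ID and
   wraps the new swap chain in a per-process sub-handle of the device handle. */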
static IMG_INT
PVRSRVCreateDCSwapChainBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_CREATE_DISPCLASS_SWAPCHAIN *psCreateDispClassSwapChainIN,
- PVRSRV_BRIDGE_OUT_CREATE_DISPCLASS_SWAPCHAIN *psCreateDispClassSwapChainOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_CREATE_DISPCLASS_SWAPCHAIN *psCreateDispClassSwapChainIN,
+ PVRSRV_BRIDGE_OUT_CREATE_DISPCLASS_SWAPCHAIN *psCreateDispClassSwapChainOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- IMG_VOID *pvDispClassInfo;
- IMG_HANDLE hSwapChainInt;
- IMG_UINT32 ui32SwapChainID;
+ IMG_VOID *pvDispClassInfo;
+ IMG_HANDLE hSwapChainInt;
+ IMG_UINT32 ui32SwapChainID;
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CREATE_DISPCLASS_SWAPCHAIN);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CREATE_DISPCLASS_SWAPCHAIN);
- NEW_HANDLE_BATCH_OR_ERROR(psCreateDispClassSwapChainOUT->eError, psPerProc, 1)
+ NEW_HANDLE_BATCH_OR_ERROR(psCreateDispClassSwapChainOUT->eError, psPerProc, 1)
- psCreateDispClassSwapChainOUT->eError =
- PVRSRVLookupHandle(psPerProc->psHandleBase,
- &pvDispClassInfo,
- psCreateDispClassSwapChainIN->hDeviceKM,
- PVRSRV_HANDLE_TYPE_DISP_INFO);
-
- if(psCreateDispClassSwapChainOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
-
-
- ui32SwapChainID = psCreateDispClassSwapChainIN->ui32SwapChainID;
-
- psCreateDispClassSwapChainOUT->eError =
- PVRSRVCreateDCSwapChainKM(psPerProc, pvDispClassInfo,
- psCreateDispClassSwapChainIN->ui32Flags,
- &psCreateDispClassSwapChainIN->sDstSurfAttrib,
- &psCreateDispClassSwapChainIN->sSrcSurfAttrib,
- psCreateDispClassSwapChainIN->ui32BufferCount,
- psCreateDispClassSwapChainIN->ui32OEMFlags,
- &hSwapChainInt,
- &ui32SwapChainID);
-
- if(psCreateDispClassSwapChainOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
-
-
- psCreateDispClassSwapChainOUT->ui32SwapChainID = ui32SwapChainID;
-
- PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
- &psCreateDispClassSwapChainOUT->hSwapChain,
- hSwapChainInt,
- PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN,
- PVRSRV_HANDLE_ALLOC_FLAG_NONE,
- psCreateDispClassSwapChainIN->hDeviceKM);
-
- COMMIT_HANDLE_BATCH_OR_ERROR(psCreateDispClassSwapChainOUT->eError, psPerProc)
-
- return 0;
+ psCreateDispClassSwapChainOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &pvDispClassInfo,
+ psCreateDispClassSwapChainIN->hDeviceKM,
+ PVRSRV_HANDLE_TYPE_DISP_INFO);
+
+ if(psCreateDispClassSwapChainOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+
+ ui32SwapChainID = psCreateDispClassSwapChainIN->ui32SwapChainID;
+
+ psCreateDispClassSwapChainOUT->eError =
+ PVRSRVCreateDCSwapChainKM(psPerProc, pvDispClassInfo,
+ psCreateDispClassSwapChainIN->ui32Flags,
+ &psCreateDispClassSwapChainIN->sDstSurfAttrib,
+ &psCreateDispClassSwapChainIN->sSrcSurfAttrib,
+ psCreateDispClassSwapChainIN->ui32BufferCount,
+ psCreateDispClassSwapChainIN->ui32OEMFlags,
+ &hSwapChainInt,
+ &ui32SwapChainID);
+
+ if(psCreateDispClassSwapChainOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+
+ psCreateDispClassSwapChainOUT->ui32SwapChainID = ui32SwapChainID;
+
+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
+ &psCreateDispClassSwapChainOUT->hSwapChain,
+ hSwapChainInt,
+ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+ psCreateDispClassSwapChainIN->hDeviceKM);
+
+ COMMIT_HANDLE_BATCH_OR_ERROR(psCreateDispClassSwapChainOUT->eError, psPerProc)
+
+ return 0;
}
static IMG_INT
PVRSRVDestroyDCSwapChainBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_DESTROY_DISPCLASS_SWAPCHAIN *psDestroyDispClassSwapChainIN,
- PVRSRV_BRIDGE_RETURN *psRetOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_DESTROY_DISPCLASS_SWAPCHAIN *psDestroyDispClassSwapChainIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- IMG_VOID *pvSwapChain;
+ IMG_VOID *pvSwapChain;
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_DESTROY_DISPCLASS_SWAPCHAIN);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_DESTROY_DISPCLASS_SWAPCHAIN);
- psRetOUT->eError =
- PVRSRVLookupHandle(psPerProc->psHandleBase, &pvSwapChain,
- psDestroyDispClassSwapChainIN->hSwapChain,
- PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
- if(psRetOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase, &pvSwapChain,
+ psDestroyDispClassSwapChainIN->hSwapChain,
+ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
- psRetOUT->eError =
- PVRSRVDestroyDCSwapChainKM(pvSwapChain);
+ psRetOUT->eError =
+ PVRSRVDestroyDCSwapChainKM(pvSwapChain);
- if(psRetOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
- psRetOUT->eError =
- PVRSRVReleaseHandle(psPerProc->psHandleBase,
- psDestroyDispClassSwapChainIN->hSwapChain,
- PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
+ psRetOUT->eError =
+ PVRSRVReleaseHandle(psPerProc->psHandleBase,
+ psDestroyDispClassSwapChainIN->hSwapChain,
+ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
- return 0;
+ return 0;
}
static IMG_INT
PVRSRVSetDCDstRectBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_SET_DISPCLASS_RECT *psSetDispClassDstRectIN,
- PVRSRV_BRIDGE_RETURN *psRetOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_SET_DISPCLASS_RECT *psSetDispClassDstRectIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- IMG_VOID *pvDispClassInfo;
- IMG_VOID *pvSwapChain;
+ IMG_VOID *pvDispClassInfo;
+ IMG_VOID *pvSwapChain;
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SET_DISPCLASS_DSTRECT);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SET_DISPCLASS_DSTRECT);
- psRetOUT->eError =
- PVRSRVLookupHandle(psPerProc->psHandleBase,
- &pvDispClassInfo,
- psSetDispClassDstRectIN->hDeviceKM,
- PVRSRV_HANDLE_TYPE_DISP_INFO);
- if(psRetOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
-
- psRetOUT->eError =
- PVRSRVLookupHandle(psPerProc->psHandleBase,
- &pvSwapChain,
- psSetDispClassDstRectIN->hSwapChain,
- PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &pvDispClassInfo,
+ psSetDispClassDstRectIN->hDeviceKM,
+ PVRSRV_HANDLE_TYPE_DISP_INFO);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &pvSwapChain,
+ psSetDispClassDstRectIN->hSwapChain,
+ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
- if(psRetOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
- psRetOUT->eError =
- PVRSRVSetDCDstRectKM(pvDispClassInfo,
- pvSwapChain,
- &psSetDispClassDstRectIN->sRect);
+ psRetOUT->eError =
+ PVRSRVSetDCDstRectKM(pvDispClassInfo,
+ pvSwapChain,
+ &psSetDispClassDstRectIN->sRect);
- return 0;
+ return 0;
}
static IMG_INT
PVRSRVSetDCSrcRectBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_SET_DISPCLASS_RECT *psSetDispClassSrcRectIN,
- PVRSRV_BRIDGE_RETURN *psRetOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_SET_DISPCLASS_RECT *psSetDispClassSrcRectIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- IMG_VOID *pvDispClassInfo;
- IMG_VOID *pvSwapChain;
+ IMG_VOID *pvDispClassInfo;
+ IMG_VOID *pvSwapChain;
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SET_DISPCLASS_SRCRECT);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SET_DISPCLASS_SRCRECT);
- psRetOUT->eError =
- PVRSRVLookupHandle(psPerProc->psHandleBase,
- &pvDispClassInfo,
- psSetDispClassSrcRectIN->hDeviceKM,
- PVRSRV_HANDLE_TYPE_DISP_INFO);
- if(psRetOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
-
- psRetOUT->eError =
- PVRSRVLookupHandle(psPerProc->psHandleBase,
- &pvSwapChain,
- psSetDispClassSrcRectIN->hSwapChain,
- PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
- if(psRetOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
-
- psRetOUT->eError =
- PVRSRVSetDCSrcRectKM(pvDispClassInfo,
- pvSwapChain,
- &psSetDispClassSrcRectIN->sRect);
-
- return 0;
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &pvDispClassInfo,
+ psSetDispClassSrcRectIN->hDeviceKM,
+ PVRSRV_HANDLE_TYPE_DISP_INFO);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &pvSwapChain,
+ psSetDispClassSrcRectIN->hSwapChain,
+ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PVRSRVSetDCSrcRectKM(pvDispClassInfo,
+ pvSwapChain,
+ &psSetDispClassSrcRectIN->sRect);
+
+ return 0;
}
static IMG_INT
PVRSRVSetDCDstColourKeyBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_SET_DISPCLASS_COLOURKEY *psSetDispClassColKeyIN,
- PVRSRV_BRIDGE_RETURN *psRetOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_SET_DISPCLASS_COLOURKEY *psSetDispClassColKeyIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- IMG_VOID *pvDispClassInfo;
- IMG_VOID *pvSwapChain;
+ IMG_VOID *pvDispClassInfo;
+ IMG_VOID *pvSwapChain;
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SET_DISPCLASS_DSTCOLOURKEY);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SET_DISPCLASS_DSTCOLOURKEY);
- psRetOUT->eError =
- PVRSRVLookupHandle(psPerProc->psHandleBase,
- &pvDispClassInfo,
- psSetDispClassColKeyIN->hDeviceKM,
- PVRSRV_HANDLE_TYPE_DISP_INFO);
- if(psRetOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
-
- psRetOUT->eError =
- PVRSRVLookupHandle(psPerProc->psHandleBase,
- &pvSwapChain,
- psSetDispClassColKeyIN->hSwapChain,
- PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
- if(psRetOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
-
- psRetOUT->eError =
- PVRSRVSetDCDstColourKeyKM(pvDispClassInfo,
- pvSwapChain,
- psSetDispClassColKeyIN->ui32CKColour);
-
- return 0;
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &pvDispClassInfo,
+ psSetDispClassColKeyIN->hDeviceKM,
+ PVRSRV_HANDLE_TYPE_DISP_INFO);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &pvSwapChain,
+ psSetDispClassColKeyIN->hSwapChain,
+ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PVRSRVSetDCDstColourKeyKM(pvDispClassInfo,
+ pvSwapChain,
+ psSetDispClassColKeyIN->ui32CKColour);
+
+ return 0;
}
static IMG_INT
PVRSRVSetDCSrcColourKeyBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_SET_DISPCLASS_COLOURKEY *psSetDispClassColKeyIN,
- PVRSRV_BRIDGE_RETURN *psRetOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_SET_DISPCLASS_COLOURKEY *psSetDispClassColKeyIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- IMG_VOID *pvDispClassInfo;
- IMG_VOID *pvSwapChain;
+ IMG_VOID *pvDispClassInfo;
+ IMG_VOID *pvSwapChain;
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SET_DISPCLASS_SRCCOLOURKEY);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SET_DISPCLASS_SRCCOLOURKEY);
- psRetOUT->eError =
- PVRSRVLookupHandle(psPerProc->psHandleBase,
- &pvDispClassInfo,
- psSetDispClassColKeyIN->hDeviceKM,
- PVRSRV_HANDLE_TYPE_DISP_INFO);
- if(psRetOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
-
- psRetOUT->eError =
- PVRSRVLookupHandle(psPerProc->psHandleBase,
- &pvSwapChain,
- psSetDispClassColKeyIN->hSwapChain,
- PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
- if(psRetOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
-
- psRetOUT->eError =
- PVRSRVSetDCSrcColourKeyKM(pvDispClassInfo,
- pvSwapChain,
- psSetDispClassColKeyIN->ui32CKColour);
-
- return 0;
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &pvDispClassInfo,
+ psSetDispClassColKeyIN->hDeviceKM,
+ PVRSRV_HANDLE_TYPE_DISP_INFO);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &pvSwapChain,
+ psSetDispClassColKeyIN->hSwapChain,
+ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PVRSRVSetDCSrcColourKeyKM(pvDispClassInfo,
+ pvSwapChain,
+ psSetDispClassColKeyIN->ui32CKColour);
+
+ return 0;
}
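/* Bridge wrapper for PVRSRV_BRIDGE_GET_DISPCLASS_BUFFERS: opens a handle batch of
   PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS entries, fetches the swap chain's buffers via
   PVRSRVGetDCBuffersKM (into a temporary array under SUPPORT_SID_INTERFACE), and
   allocates a shared per-process sub-handle for each buffer before committing. */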
static IMG_INT
PVRSRVGetDCBuffersBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_GET_DISPCLASS_BUFFERS *psGetDispClassBuffersIN,
- PVRSRV_BRIDGE_OUT_GET_DISPCLASS_BUFFERS *psGetDispClassBuffersOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_GET_DISPCLASS_BUFFERS *psGetDispClassBuffersIN,
+ PVRSRV_BRIDGE_OUT_GET_DISPCLASS_BUFFERS *psGetDispClassBuffersOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- IMG_VOID *pvDispClassInfo;
- IMG_VOID *pvSwapChain;
- IMG_UINT32 i;
+ IMG_VOID *pvDispClassInfo;
+ IMG_VOID *pvSwapChain;
+ IMG_UINT32 i;
#if defined (SUPPORT_SID_INTERFACE)
- IMG_HANDLE *pahBuffer;
+ IMG_HANDLE *pahBuffer;
#endif
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_DISPCLASS_BUFFERS);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_DISPCLASS_BUFFERS);
- NEW_HANDLE_BATCH_OR_ERROR(psGetDispClassBuffersOUT->eError, psPerProc, PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS)
+ NEW_HANDLE_BATCH_OR_ERROR(psGetDispClassBuffersOUT->eError, psPerProc, PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS)
- psGetDispClassBuffersOUT->eError =
- PVRSRVLookupHandle(psPerProc->psHandleBase,
- &pvDispClassInfo,
- psGetDispClassBuffersIN->hDeviceKM,
- PVRSRV_HANDLE_TYPE_DISP_INFO);
- if(psGetDispClassBuffersOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
-
- psGetDispClassBuffersOUT->eError =
- PVRSRVLookupHandle(psPerProc->psHandleBase,
- &pvSwapChain,
- psGetDispClassBuffersIN->hSwapChain,
- PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
- if(psGetDispClassBuffersOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
+ psGetDispClassBuffersOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &pvDispClassInfo,
+ psGetDispClassBuffersIN->hDeviceKM,
+ PVRSRV_HANDLE_TYPE_DISP_INFO);
+ if(psGetDispClassBuffersOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psGetDispClassBuffersOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &pvSwapChain,
+ psGetDispClassBuffersIN->hSwapChain,
+ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
+ if(psGetDispClassBuffersOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
#if defined (SUPPORT_SID_INTERFACE)
- psGetDispClassBuffersOUT->eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
- sizeof(IMG_HANDLE) * PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS,
- (IMG_PVOID *)&pahBuffer, 0,
- "Temp Swapchain Buffers");
-
- if (psGetDispClassBuffersOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
+ psGetDispClassBuffersOUT->eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(IMG_HANDLE) * PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS,
+ (IMG_PVOID *)&pahBuffer, 0,
+ "Temp Swapchain Buffers");
+
+ if (psGetDispClassBuffersOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
#endif
- psGetDispClassBuffersOUT->eError =
- PVRSRVGetDCBuffersKM(pvDispClassInfo,
- pvSwapChain,
- &psGetDispClassBuffersOUT->ui32BufferCount,
+ psGetDispClassBuffersOUT->eError =
+ PVRSRVGetDCBuffersKM(pvDispClassInfo,
+ pvSwapChain,
+ &psGetDispClassBuffersOUT->ui32BufferCount,
#if defined (SUPPORT_SID_INTERFACE)
pahBuffer);
#else
psGetDispClassBuffersOUT->ahBuffer);
#endif
- if (psGetDispClassBuffersOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
+ if (psGetDispClassBuffersOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
- PVR_ASSERT(psGetDispClassBuffersOUT->ui32BufferCount <= PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS);
+ PVR_ASSERT(psGetDispClassBuffersOUT->ui32BufferCount <= PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS);
- for(i = 0; i < psGetDispClassBuffersOUT->ui32BufferCount; i++)
- {
+ for(i = 0; i < psGetDispClassBuffersOUT->ui32BufferCount; i++)
+ {
#if defined (SUPPORT_SID_INTERFACE)
- IMG_SID hBufferExt;
+ IMG_SID hBufferExt;
#else
- IMG_HANDLE hBufferExt;
+ IMG_HANDLE hBufferExt;
#endif
-
+
#if defined (SUPPORT_SID_INTERFACE)
- PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
- &hBufferExt,
- pahBuffer[i],
- PVRSRV_HANDLE_TYPE_DISP_BUFFER,
- (PVRSRV_HANDLE_ALLOC_FLAG)(PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE | PVRSRV_HANDLE_ALLOC_FLAG_SHARED),
- psGetDispClassBuffersIN->hSwapChain);
+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
+ &hBufferExt,
+ pahBuffer[i],
+ PVRSRV_HANDLE_TYPE_DISP_BUFFER,
+ (PVRSRV_HANDLE_ALLOC_FLAG)(PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE | PVRSRV_HANDLE_ALLOC_FLAG_SHARED),
+ psGetDispClassBuffersIN->hSwapChain);
#else
- PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
- &hBufferExt,
- psGetDispClassBuffersOUT->ahBuffer[i],
- PVRSRV_HANDLE_TYPE_DISP_BUFFER,
- (PVRSRV_HANDLE_ALLOC_FLAG)(PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE | PVRSRV_HANDLE_ALLOC_FLAG_SHARED),
- psGetDispClassBuffersIN->hSwapChain);
+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
+ &hBufferExt,
+ psGetDispClassBuffersOUT->ahBuffer[i],
+ PVRSRV_HANDLE_TYPE_DISP_BUFFER,
+ (PVRSRV_HANDLE_ALLOC_FLAG)(PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE | PVRSRV_HANDLE_ALLOC_FLAG_SHARED),
+ psGetDispClassBuffersIN->hSwapChain);
#endif
- psGetDispClassBuffersOUT->ahBuffer[i] = hBufferExt;
- }
+ psGetDispClassBuffersOUT->ahBuffer[i] = hBufferExt;
+ }
#if defined (SUPPORT_SID_INTERFACE)
- OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
- sizeof(IMG_HANDLE) * PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS,
- (IMG_PVOID)pahBuffer, 0);
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(IMG_HANDLE) * PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS,
+ (IMG_PVOID)pahBuffer, 0);
#endif
- COMMIT_HANDLE_BATCH_OR_ERROR(psGetDispClassBuffersOUT->eError, psPerProc)
+ COMMIT_HANDLE_BATCH_OR_ERROR(psGetDispClassBuffersOUT->eError, psPerProc)
- return 0;
+ return 0;
}
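/* Bridge wrapper for PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_BUFFER: resolves the display
   class device handle and the buffer sub-handle (and, under SUPPORT_SID_INTERFACE,
   the optional private tag), then queues the flip through PVRSRVSwapToDCBufferKM
   with the requested swap interval and clip rectangles. */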
static IMG_INT
PVRSRVSwapToDCBufferBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_BUFFER *psSwapDispClassBufferIN,
- PVRSRV_BRIDGE_RETURN *psRetOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_BUFFER *psSwapDispClassBufferIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- IMG_VOID *pvDispClassInfo;
- IMG_VOID *pvSwapChainBuf;
+ IMG_VOID *pvDispClassInfo;
+ IMG_VOID *pvSwapChainBuf;
#if defined (SUPPORT_SID_INTERFACE)
- IMG_HANDLE hPrivateTag;
+ IMG_HANDLE hPrivateTag;
#endif
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_BUFFER);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_BUFFER);
- psRetOUT->eError =
- PVRSRVLookupHandle(psPerProc->psHandleBase,
- &pvDispClassInfo,
- psSwapDispClassBufferIN->hDeviceKM,
- PVRSRV_HANDLE_TYPE_DISP_INFO);
- if(psRetOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
-
- psRetOUT->eError =
- PVRSRVLookupSubHandle(psPerProc->psHandleBase,
- &pvSwapChainBuf,
- psSwapDispClassBufferIN->hBuffer,
- PVRSRV_HANDLE_TYPE_DISP_BUFFER,
- psSwapDispClassBufferIN->hDeviceKM);
- if(psRetOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &pvDispClassInfo,
+ psSwapDispClassBufferIN->hDeviceKM,
+ PVRSRV_HANDLE_TYPE_DISP_INFO);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PVRSRVLookupSubHandle(psPerProc->psHandleBase,
+ &pvSwapChainBuf,
+ psSwapDispClassBufferIN->hBuffer,
+ PVRSRV_HANDLE_TYPE_DISP_BUFFER,
+ psSwapDispClassBufferIN->hDeviceKM);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
#if defined (SUPPORT_SID_INTERFACE)
- if (psSwapDispClassBufferIN->hPrivateTag != 0)
- {
- psRetOUT->eError =
- PVRSRVLookupSubHandle(psPerProc->psHandleBase,
- &hPrivateTag,
- psSwapDispClassBufferIN->hPrivateTag,
- PVRSRV_HANDLE_TYPE_DISP_BUFFER,
- psSwapDispClassBufferIN->hDeviceKM);
- if(psRetOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
- }
- else
- {
- hPrivateTag = IMG_NULL;
- }
+ if (psSwapDispClassBufferIN->hPrivateTag != 0)
+ {
+ psRetOUT->eError =
+ PVRSRVLookupSubHandle(psPerProc->psHandleBase,
+ &hPrivateTag,
+ psSwapDispClassBufferIN->hPrivateTag,
+ PVRSRV_HANDLE_TYPE_DISP_BUFFER,
+ psSwapDispClassBufferIN->hDeviceKM);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+ }
+ else
+ {
+ hPrivateTag = IMG_NULL;
+ }
#endif
- psRetOUT->eError =
- PVRSRVSwapToDCBufferKM(pvDispClassInfo,
- pvSwapChainBuf,
- psSwapDispClassBufferIN->ui32SwapInterval,
+ psRetOUT->eError =
+ PVRSRVSwapToDCBufferKM(pvDispClassInfo,
+ pvSwapChainBuf,
+ psSwapDispClassBufferIN->ui32SwapInterval,
#if defined (SUPPORT_SID_INTERFACE)
hPrivateTag,
#else
- psSwapDispClassBufferIN->hPrivateTag,
+ psSwapDispClassBufferIN->hPrivateTag,
#endif
- psSwapDispClassBufferIN->ui32ClipRectCount,
- psSwapDispClassBufferIN->sClipRect);
+ psSwapDispClassBufferIN->ui32ClipRectCount,
+ psSwapDispClassBufferIN->sClipRect);
- return 0;
+ return 0;
}
static IMG_INT
PVRSRVSwapToDCSystemBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_SYSTEM *psSwapDispClassSystemIN,
- PVRSRV_BRIDGE_RETURN *psRetOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_SYSTEM *psSwapDispClassSystemIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- IMG_VOID *pvDispClassInfo;
- IMG_VOID *pvSwapChain;
+ IMG_VOID *pvDispClassInfo;
+ IMG_VOID *pvSwapChain;
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_SYSTEM);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_SYSTEM);
- psRetOUT->eError =
- PVRSRVLookupHandle(psPerProc->psHandleBase,
- &pvDispClassInfo,
- psSwapDispClassSystemIN->hDeviceKM,
- PVRSRV_HANDLE_TYPE_DISP_INFO);
- if(psRetOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
-
- psRetOUT->eError =
- PVRSRVLookupSubHandle(psPerProc->psHandleBase,
- &pvSwapChain,
- psSwapDispClassSystemIN->hSwapChain,
- PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN,
- psSwapDispClassSystemIN->hDeviceKM);
- if(psRetOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
- psRetOUT->eError =
- PVRSRVSwapToDCSystemKM(pvDispClassInfo,
- pvSwapChain);
-
- return 0;
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &pvDispClassInfo,
+ psSwapDispClassSystemIN->hDeviceKM,
+ PVRSRV_HANDLE_TYPE_DISP_INFO);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PVRSRVLookupSubHandle(psPerProc->psHandleBase,
+ &pvSwapChain,
+ psSwapDispClassSystemIN->hSwapChain,
+ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN,
+ psSwapDispClassSystemIN->hDeviceKM);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+ psRetOUT->eError =
+ PVRSRVSwapToDCSystemKM(pvDispClassInfo,
+ pvSwapChain);
+
+ return 0;
}
static IMG_INT
PVRSRVOpenBCDeviceBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_OPEN_BUFFERCLASS_DEVICE *psOpenBufferClassDeviceIN,
- PVRSRV_BRIDGE_OUT_OPEN_BUFFERCLASS_DEVICE *psOpenBufferClassDeviceOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_OPEN_BUFFERCLASS_DEVICE *psOpenBufferClassDeviceIN,
+ PVRSRV_BRIDGE_OUT_OPEN_BUFFERCLASS_DEVICE *psOpenBufferClassDeviceOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- IMG_HANDLE hDevCookieInt;
- IMG_HANDLE hBufClassInfo;
+ IMG_HANDLE hDevCookieInt;
+ IMG_HANDLE hBufClassInfo;
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_OPEN_BUFFERCLASS_DEVICE);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_OPEN_BUFFERCLASS_DEVICE);
- NEW_HANDLE_BATCH_OR_ERROR(psOpenBufferClassDeviceOUT->eError, psPerProc, 1)
+ NEW_HANDLE_BATCH_OR_ERROR(psOpenBufferClassDeviceOUT->eError, psPerProc, 1)
- psOpenBufferClassDeviceOUT->eError =
- PVRSRVLookupHandle(psPerProc->psHandleBase,
- &hDevCookieInt,
- psOpenBufferClassDeviceIN->hDevCookie,
- PVRSRV_HANDLE_TYPE_DEV_NODE);
- if(psOpenBufferClassDeviceOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
-
- psOpenBufferClassDeviceOUT->eError =
- PVRSRVOpenBCDeviceKM(psPerProc,
- psOpenBufferClassDeviceIN->ui32DeviceID,
- hDevCookieInt,
- &hBufClassInfo);
- if(psOpenBufferClassDeviceOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
-
- PVRSRVAllocHandleNR(psPerProc->psHandleBase,
- &psOpenBufferClassDeviceOUT->hDeviceKM,
- hBufClassInfo,
- PVRSRV_HANDLE_TYPE_BUF_INFO,
- PVRSRV_HANDLE_ALLOC_FLAG_NONE);
-
- COMMIT_HANDLE_BATCH_OR_ERROR(psOpenBufferClassDeviceOUT->eError, psPerProc)
-
- return 0;
+ psOpenBufferClassDeviceOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDevCookieInt,
+ psOpenBufferClassDeviceIN->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE);
+ if(psOpenBufferClassDeviceOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psOpenBufferClassDeviceOUT->eError =
+ PVRSRVOpenBCDeviceKM(psPerProc,
+ psOpenBufferClassDeviceIN->ui32DeviceID,
+ hDevCookieInt,
+ &hBufClassInfo);
+ if(psOpenBufferClassDeviceOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
+ &psOpenBufferClassDeviceOUT->hDeviceKM,
+ hBufClassInfo,
+ PVRSRV_HANDLE_TYPE_BUF_INFO,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
+
+ COMMIT_HANDLE_BATCH_OR_ERROR(psOpenBufferClassDeviceOUT->eError, psPerProc)
+
+ return 0;
}
static IMG_INT
PVRSRVCloseBCDeviceBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_CLOSE_BUFFERCLASS_DEVICE *psCloseBufferClassDeviceIN,
- PVRSRV_BRIDGE_RETURN *psRetOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_CLOSE_BUFFERCLASS_DEVICE *psCloseBufferClassDeviceIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- IMG_VOID *pvBufClassInfo;
+ IMG_VOID *pvBufClassInfo;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CLOSE_BUFFERCLASS_DEVICE);
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &pvBufClassInfo,
+ psCloseBufferClassDeviceIN->hDeviceKM,
+ PVRSRV_HANDLE_TYPE_BUF_INFO);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PVRSRVCloseBCDeviceKM(pvBufClassInfo, IMG_FALSE);
+
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CLOSE_BUFFERCLASS_DEVICE);
+ psRetOUT->eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
+ psCloseBufferClassDeviceIN->hDeviceKM,
+ PVRSRV_HANDLE_TYPE_BUF_INFO);
- psRetOUT->eError =
- PVRSRVLookupHandle(psPerProc->psHandleBase,
- &pvBufClassInfo,
- psCloseBufferClassDeviceIN->hDeviceKM,
- PVRSRV_HANDLE_TYPE_BUF_INFO);
- if(psRetOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
-
- psRetOUT->eError =
- PVRSRVCloseBCDeviceKM(pvBufClassInfo, IMG_FALSE);
-
- if(psRetOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
-
- psRetOUT->eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
- psCloseBufferClassDeviceIN->hDeviceKM,
- PVRSRV_HANDLE_TYPE_BUF_INFO);
-
- return 0;
+ return 0;
}
static IMG_INT
PVRSRVGetBCInfoBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_INFO *psGetBufferClassInfoIN,
- PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_INFO *psGetBufferClassInfoOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_INFO *psGetBufferClassInfoIN,
+ PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_INFO *psGetBufferClassInfoOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- IMG_VOID *pvBufClassInfo;
+ IMG_VOID *pvBufClassInfo;
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_BUFFERCLASS_INFO);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_BUFFERCLASS_INFO);
- psGetBufferClassInfoOUT->eError =
- PVRSRVLookupHandle(psPerProc->psHandleBase,
- &pvBufClassInfo,
- psGetBufferClassInfoIN->hDeviceKM,
- PVRSRV_HANDLE_TYPE_BUF_INFO);
- if(psGetBufferClassInfoOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
-
- psGetBufferClassInfoOUT->eError =
- PVRSRVGetBCInfoKM(pvBufClassInfo,
- &psGetBufferClassInfoOUT->sBufferInfo);
- return 0;
+ psGetBufferClassInfoOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &pvBufClassInfo,
+ psGetBufferClassInfoIN->hDeviceKM,
+ PVRSRV_HANDLE_TYPE_BUF_INFO);
+ if(psGetBufferClassInfoOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psGetBufferClassInfoOUT->eError =
+ PVRSRVGetBCInfoKM(pvBufClassInfo,
+ &psGetBufferClassInfoOUT->sBufferInfo);
+ return 0;
}
static IMG_INT
PVRSRVGetBCBufferBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_BUFFER *psGetBufferClassBufferIN,
- PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_BUFFER *psGetBufferClassBufferOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_BUFFER *psGetBufferClassBufferIN,
+ PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_BUFFER *psGetBufferClassBufferOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- IMG_VOID *pvBufClassInfo;
- IMG_HANDLE hBufferInt;
+ IMG_VOID *pvBufClassInfo;
+ IMG_HANDLE hBufferInt;
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_BUFFERCLASS_BUFFER);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_BUFFERCLASS_BUFFER);
- NEW_HANDLE_BATCH_OR_ERROR(psGetBufferClassBufferOUT->eError, psPerProc, 1)
+ NEW_HANDLE_BATCH_OR_ERROR(psGetBufferClassBufferOUT->eError, psPerProc, 1)
- psGetBufferClassBufferOUT->eError =
- PVRSRVLookupHandle(psPerProc->psHandleBase,
- &pvBufClassInfo,
- psGetBufferClassBufferIN->hDeviceKM,
- PVRSRV_HANDLE_TYPE_BUF_INFO);
- if(psGetBufferClassBufferOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
-
- psGetBufferClassBufferOUT->eError =
- PVRSRVGetBCBufferKM(pvBufClassInfo,
- psGetBufferClassBufferIN->ui32BufferIndex,
- &hBufferInt);
-
- if(psGetBufferClassBufferOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
-
-
- PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
- &psGetBufferClassBufferOUT->hBuffer,
- hBufferInt,
- PVRSRV_HANDLE_TYPE_BUF_BUFFER,
- (PVRSRV_HANDLE_ALLOC_FLAG)(PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE | PVRSRV_HANDLE_ALLOC_FLAG_SHARED),
- psGetBufferClassBufferIN->hDeviceKM);
-
- COMMIT_HANDLE_BATCH_OR_ERROR(psGetBufferClassBufferOUT->eError, psPerProc)
-
- return 0;
+ psGetBufferClassBufferOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &pvBufClassInfo,
+ psGetBufferClassBufferIN->hDeviceKM,
+ PVRSRV_HANDLE_TYPE_BUF_INFO);
+ if(psGetBufferClassBufferOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psGetBufferClassBufferOUT->eError =
+ PVRSRVGetBCBufferKM(pvBufClassInfo,
+ psGetBufferClassBufferIN->ui32BufferIndex,
+ &hBufferInt);
+
+ if(psGetBufferClassBufferOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+
+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
+ &psGetBufferClassBufferOUT->hBuffer,
+ hBufferInt,
+ PVRSRV_HANDLE_TYPE_BUF_BUFFER,
+ (PVRSRV_HANDLE_ALLOC_FLAG)(PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE | PVRSRV_HANDLE_ALLOC_FLAG_SHARED),
+ psGetBufferClassBufferIN->hDeviceKM);
+
+ COMMIT_HANDLE_BATCH_OR_ERROR(psGetBufferClassBufferOUT->eError, psPerProc)
+
+ return 0;
}
static IMG_INT
PVRSRVAllocSharedSysMemoryBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_ALLOC_SHARED_SYS_MEM *psAllocSharedSysMemIN,
- PVRSRV_BRIDGE_OUT_ALLOC_SHARED_SYS_MEM *psAllocSharedSysMemOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_ALLOC_SHARED_SYS_MEM *psAllocSharedSysMemIN,
+ PVRSRV_BRIDGE_OUT_ALLOC_SHARED_SYS_MEM *psAllocSharedSysMemOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ALLOC_SHARED_SYS_MEM);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ALLOC_SHARED_SYS_MEM);
- NEW_HANDLE_BATCH_OR_ERROR(psAllocSharedSysMemOUT->eError, psPerProc, 1)
+ NEW_HANDLE_BATCH_OR_ERROR(psAllocSharedSysMemOUT->eError, psPerProc, 1)
- psAllocSharedSysMemOUT->eError =
- PVRSRVAllocSharedSysMemoryKM(psPerProc,
- psAllocSharedSysMemIN->ui32Flags,
- psAllocSharedSysMemIN->ui32Size,
- &psKernelMemInfo);
- if(psAllocSharedSysMemOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
+ psAllocSharedSysMemOUT->eError =
+ PVRSRVAllocSharedSysMemoryKM(psPerProc,
+ psAllocSharedSysMemIN->ui32Flags,
+ psAllocSharedSysMemIN->ui32Size,
+ &psKernelMemInfo);
+ if(psAllocSharedSysMemOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
- OSMemSet(&psAllocSharedSysMemOUT->sClientMemInfo,
- 0,
- sizeof(psAllocSharedSysMemOUT->sClientMemInfo));
+ OSMemSet(&psAllocSharedSysMemOUT->sClientMemInfo,
+ 0,
+ sizeof(psAllocSharedSysMemOUT->sClientMemInfo));
- psAllocSharedSysMemOUT->sClientMemInfo.pvLinAddrKM =
- psKernelMemInfo->pvLinAddrKM;
+ psAllocSharedSysMemOUT->sClientMemInfo.pvLinAddrKM =
+ psKernelMemInfo->pvLinAddrKM;
- psAllocSharedSysMemOUT->sClientMemInfo.pvLinAddr = 0;
- psAllocSharedSysMemOUT->sClientMemInfo.ui32Flags =
- psKernelMemInfo->ui32Flags;
+ psAllocSharedSysMemOUT->sClientMemInfo.pvLinAddr = 0;
+ psAllocSharedSysMemOUT->sClientMemInfo.ui32Flags =
+ psKernelMemInfo->ui32Flags;
psAllocSharedSysMemOUT->sClientMemInfo.uAllocSize =
psKernelMemInfo->uAllocSize;
#if defined (SUPPORT_SID_INTERFACE)
- if (psKernelMemInfo->sMemBlk.hOSMemHandle != IMG_NULL)
- {
- PVRSRVAllocHandleNR(psPerProc->psHandleBase,
- &psAllocSharedSysMemOUT->sClientMemInfo.hMappingInfo,
- psKernelMemInfo->sMemBlk.hOSMemHandle,
- PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO,
- PVRSRV_HANDLE_ALLOC_FLAG_NONE);
- }
- else
- {
- psAllocSharedSysMemOUT->sClientMemInfo.hMappingInfo = 0;
- }
+ if (psKernelMemInfo->sMemBlk.hOSMemHandle != IMG_NULL)
+ {
+ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
+ &psAllocSharedSysMemOUT->sClientMemInfo.hMappingInfo,
+ psKernelMemInfo->sMemBlk.hOSMemHandle,
+ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
+ }
+ else
+ {
+ psAllocSharedSysMemOUT->sClientMemInfo.hMappingInfo = 0;
+ }
#else
- psAllocSharedSysMemOUT->sClientMemInfo.hMappingInfo = psKernelMemInfo->sMemBlk.hOSMemHandle;
+ psAllocSharedSysMemOUT->sClientMemInfo.hMappingInfo = psKernelMemInfo->sMemBlk.hOSMemHandle;
#endif
- PVRSRVAllocHandleNR(psPerProc->psHandleBase,
- &psAllocSharedSysMemOUT->sClientMemInfo.hKernelMemInfo,
- psKernelMemInfo,
- PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO,
- PVRSRV_HANDLE_ALLOC_FLAG_NONE);
+ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
+ &psAllocSharedSysMemOUT->sClientMemInfo.hKernelMemInfo,
+ psKernelMemInfo,
+ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
- COMMIT_HANDLE_BATCH_OR_ERROR(psAllocSharedSysMemOUT->eError, psPerProc)
+ COMMIT_HANDLE_BATCH_OR_ERROR(psAllocSharedSysMemOUT->eError, psPerProc)
- return 0;
+ return 0;
}
static IMG_INT
PVRSRVFreeSharedSysMemoryBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_FREE_SHARED_SYS_MEM *psFreeSharedSysMemIN,
- PVRSRV_BRIDGE_OUT_FREE_SHARED_SYS_MEM *psFreeSharedSysMemOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_FREE_SHARED_SYS_MEM *psFreeSharedSysMemIN,
+ PVRSRV_BRIDGE_OUT_FREE_SHARED_SYS_MEM *psFreeSharedSysMemOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_FREE_SHARED_SYS_MEM);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_FREE_SHARED_SYS_MEM);
- psFreeSharedSysMemOUT->eError =
- PVRSRVLookupHandle(psPerProc->psHandleBase,
- (IMG_VOID **)&psKernelMemInfo,
+ psFreeSharedSysMemOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ (IMG_VOID **)&psKernelMemInfo,
#if defined (SUPPORT_SID_INTERFACE)
- psFreeSharedSysMemIN->hKernelMemInfo,
+ psFreeSharedSysMemIN->hKernelMemInfo,
#else
- psFreeSharedSysMemIN->psKernelMemInfo,
+ psFreeSharedSysMemIN->psKernelMemInfo,
#endif
- PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO);
+ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO);
- if(psFreeSharedSysMemOUT->eError != PVRSRV_OK)
- return 0;
+ if(psFreeSharedSysMemOUT->eError != PVRSRV_OK)
+ return 0;
- psFreeSharedSysMemOUT->eError =
- PVRSRVFreeSharedSysMemoryKM(psKernelMemInfo);
- if(psFreeSharedSysMemOUT->eError != PVRSRV_OK)
- return 0;
+ psFreeSharedSysMemOUT->eError =
+ PVRSRVFreeSharedSysMemoryKM(psKernelMemInfo);
+ if(psFreeSharedSysMemOUT->eError != PVRSRV_OK)
+ return 0;
#if defined (SUPPORT_SID_INTERFACE)
- if (psFreeSharedSysMemIN->hMappingInfo != 0)
- {
- psFreeSharedSysMemOUT->eError =
- PVRSRVReleaseHandle(psPerProc->psHandleBase,
- psFreeSharedSysMemIN->hMappingInfo,
- PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO);
- if(psFreeSharedSysMemOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
- }
+ if (psFreeSharedSysMemIN->hMappingInfo != 0)
+ {
+ psFreeSharedSysMemOUT->eError =
+ PVRSRVReleaseHandle(psPerProc->psHandleBase,
+ psFreeSharedSysMemIN->hMappingInfo,
+ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO);
+ if(psFreeSharedSysMemOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+ }
#endif
- psFreeSharedSysMemOUT->eError =
- PVRSRVReleaseHandle(psPerProc->psHandleBase,
+ psFreeSharedSysMemOUT->eError =
+ PVRSRVReleaseHandle(psPerProc->psHandleBase,
#if defined (SUPPORT_SID_INTERFACE)
- psFreeSharedSysMemIN->hKernelMemInfo,
+ psFreeSharedSysMemIN->hKernelMemInfo,
#else
- psFreeSharedSysMemIN->psKernelMemInfo,
+ psFreeSharedSysMemIN->psKernelMemInfo,
#endif
- PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO);
- return 0;
+ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO);
+ return 0;
}
static IMG_INT
PVRSRVMapMemInfoMemBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_MAP_MEMINFO_MEM *psMapMemInfoMemIN,
- PVRSRV_BRIDGE_OUT_MAP_MEMINFO_MEM *psMapMemInfoMemOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_MAP_MEMINFO_MEM *psMapMemInfoMemIN,
+ PVRSRV_BRIDGE_OUT_MAP_MEMINFO_MEM *psMapMemInfoMemOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
- PVRSRV_HANDLE_TYPE eHandleType;
+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
+ PVRSRV_HANDLE_TYPE eHandleType;
#if defined (SUPPORT_SID_INTERFACE)
- IMG_SID hParent;
+ IMG_SID hParent;
#else
- IMG_HANDLE hParent;
+ IMG_HANDLE hParent;
#endif
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_MAP_MEMINFO_MEM);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_MAP_MEMINFO_MEM);
- NEW_HANDLE_BATCH_OR_ERROR(psMapMemInfoMemOUT->eError, psPerProc, 2)
+ NEW_HANDLE_BATCH_OR_ERROR(psMapMemInfoMemOUT->eError, psPerProc, 2)
- psMapMemInfoMemOUT->eError =
- PVRSRVLookupHandleAnyType(psPerProc->psHandleBase,
- (IMG_VOID **)&psKernelMemInfo,
- &eHandleType,
- psMapMemInfoMemIN->hKernelMemInfo);
- if(psMapMemInfoMemOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
-
- switch (eHandleType)
- {
+ psMapMemInfoMemOUT->eError =
+ PVRSRVLookupHandleAnyType(psPerProc->psHandleBase,
+ (IMG_VOID **)&psKernelMemInfo,
+ &eHandleType,
+ psMapMemInfoMemIN->hKernelMemInfo);
+ if(psMapMemInfoMemOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ switch (eHandleType)
+ {
#if defined(PVR_SECURE_HANDLES) || defined (SUPPORT_SID_INTERFACE)
- case PVRSRV_HANDLE_TYPE_MEM_INFO:
- case PVRSRV_HANDLE_TYPE_MEM_INFO_REF:
- case PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO:
+ case PVRSRV_HANDLE_TYPE_MEM_INFO:
+ case PVRSRV_HANDLE_TYPE_MEM_INFO_REF:
+ case PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO:
#else
- case PVRSRV_HANDLE_TYPE_NONE:
+ case PVRSRV_HANDLE_TYPE_NONE:
#endif
- break;
- default:
- psMapMemInfoMemOUT->eError = PVRSRV_ERROR_INVALID_HANDLE_TYPE;
- return 0;
- }
-
-
- psMapMemInfoMemOUT->eError =
- PVRSRVGetParentHandle(psPerProc->psHandleBase,
- &hParent,
- psMapMemInfoMemIN->hKernelMemInfo,
- eHandleType);
- if (psMapMemInfoMemOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
+ break;
+ default:
+ psMapMemInfoMemOUT->eError = PVRSRV_ERROR_INVALID_HANDLE_TYPE;
+ return 0;
+ }
+
+
+ psMapMemInfoMemOUT->eError =
+ PVRSRVGetParentHandle(psPerProc->psHandleBase,
+ &hParent,
+ psMapMemInfoMemIN->hKernelMemInfo,
+ eHandleType);
+ if (psMapMemInfoMemOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
#if defined (SUPPORT_SID_INTERFACE)
- if (hParent == 0)
+ if (hParent == 0)
#else
- if (hParent == IMG_NULL)
+ if (hParent == IMG_NULL)
#endif
- {
- hParent = psMapMemInfoMemIN->hKernelMemInfo;
- }
-
- OSMemSet(&psMapMemInfoMemOUT->sClientMemInfo,
- 0,
- sizeof(psMapMemInfoMemOUT->sClientMemInfo));
-
- psMapMemInfoMemOUT->sClientMemInfo.pvLinAddrKM =
- psKernelMemInfo->pvLinAddrKM;
-
- psMapMemInfoMemOUT->sClientMemInfo.pvLinAddr = 0;
- psMapMemInfoMemOUT->sClientMemInfo.sDevVAddr =
- psKernelMemInfo->sDevVAddr;
- psMapMemInfoMemOUT->sClientMemInfo.ui32Flags =
- psKernelMemInfo->ui32Flags;
+ {
+ hParent = psMapMemInfoMemIN->hKernelMemInfo;
+ }
+
+ OSMemSet(&psMapMemInfoMemOUT->sClientMemInfo,
+ 0,
+ sizeof(psMapMemInfoMemOUT->sClientMemInfo));
+
+ psMapMemInfoMemOUT->sClientMemInfo.pvLinAddrKM =
+ psKernelMemInfo->pvLinAddrKM;
+
+ psMapMemInfoMemOUT->sClientMemInfo.pvLinAddr = 0;
+ psMapMemInfoMemOUT->sClientMemInfo.sDevVAddr =
+ psKernelMemInfo->sDevVAddr;
+ psMapMemInfoMemOUT->sClientMemInfo.ui32Flags =
+ psKernelMemInfo->ui32Flags;
psMapMemInfoMemOUT->sClientMemInfo.uAllocSize =
psKernelMemInfo->uAllocSize;
#if defined (SUPPORT_SID_INTERFACE)
- if (psKernelMemInfo->sMemBlk.hOSMemHandle != IMG_NULL)
- {
- PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
- &psMapMemInfoMemOUT->sClientMemInfo.hMappingInfo,
- psKernelMemInfo->sMemBlk.hOSMemHandle,
- PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
- PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
- hParent);
- }
- else
- {
- psMapMemInfoMemOUT->sClientMemInfo.hMappingInfo = 0;
- }
+ if (psKernelMemInfo->sMemBlk.hOSMemHandle != IMG_NULL)
+ {
+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
+ &psMapMemInfoMemOUT->sClientMemInfo.hMappingInfo,
+ psKernelMemInfo->sMemBlk.hOSMemHandle,
+ PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+ hParent);
+ }
+ else
+ {
+ psMapMemInfoMemOUT->sClientMemInfo.hMappingInfo = 0;
+ }
#else
- psMapMemInfoMemOUT->sClientMemInfo.hMappingInfo = psKernelMemInfo->sMemBlk.hOSMemHandle;
+ psMapMemInfoMemOUT->sClientMemInfo.hMappingInfo = psKernelMemInfo->sMemBlk.hOSMemHandle;
#endif
- PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
- &psMapMemInfoMemOUT->sClientMemInfo.hKernelMemInfo,
- psKernelMemInfo,
- PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
- PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
- hParent);
-
- if(psKernelMemInfo->ui32Flags & PVRSRV_MEM_NO_SYNCOBJ)
- {
-
- OSMemSet(&psMapMemInfoMemOUT->sClientSyncInfo,
- 0,
- sizeof (PVRSRV_CLIENT_SYNC_INFO));
- }
- else
- {
-
+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
+ &psMapMemInfoMemOUT->sClientMemInfo.hKernelMemInfo,
+ psKernelMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+ hParent);
+
+ if(psKernelMemInfo->ui32Flags & PVRSRV_MEM_NO_SYNCOBJ)
+ {
+
+ OSMemSet(&psMapMemInfoMemOUT->sClientSyncInfo,
+ 0,
+ sizeof (PVRSRV_CLIENT_SYNC_INFO));
+ }
+ else
+ {
+
#if !defined(PVRSRV_DISABLE_UM_SYNCOBJ_MAPPINGS)
- psMapMemInfoMemOUT->sClientSyncInfo.psSyncData =
- psKernelMemInfo->psKernelSyncInfo->psSyncData;
- psMapMemInfoMemOUT->sClientSyncInfo.sWriteOpsCompleteDevVAddr =
- psKernelMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr;
- psMapMemInfoMemOUT->sClientSyncInfo.sReadOpsCompleteDevVAddr =
- psKernelMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr;
+ psMapMemInfoMemOUT->sClientSyncInfo.psSyncData =
+ psKernelMemInfo->psKernelSyncInfo->psSyncData;
+ psMapMemInfoMemOUT->sClientSyncInfo.sWriteOpsCompleteDevVAddr =
+ psKernelMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr;
+ psMapMemInfoMemOUT->sClientSyncInfo.sReadOpsCompleteDevVAddr =
+ psKernelMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr;
#if defined (SUPPORT_SID_INTERFACE)
- if (psKernelMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle != IMG_NULL)
- {
- PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
- &psMapMemInfoMemOUT->sClientSyncInfo.hMappingInfo,
- psKernelMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle,
- PVRSRV_HANDLE_TYPE_SYNC_INFO,
- PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
- psMapMemInfoMemOUT->sClientMemInfo.hKernelMemInfo);
- }
- else
- {
- psMapMemInfoMemOUT->sClientSyncInfo.hMappingInfo = 0;
- }
+ if (psKernelMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle != IMG_NULL)
+ {
+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
+ &psMapMemInfoMemOUT->sClientSyncInfo.hMappingInfo,
+ psKernelMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle,
+ PVRSRV_HANDLE_TYPE_SYNC_INFO,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+ psMapMemInfoMemOUT->sClientMemInfo.hKernelMemInfo);
+ }
+ else
+ {
+ psMapMemInfoMemOUT->sClientSyncInfo.hMappingInfo = 0;
+ }
#else
- psMapMemInfoMemOUT->sClientSyncInfo.hMappingInfo =
- psKernelMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle;
+ psMapMemInfoMemOUT->sClientSyncInfo.hMappingInfo =
+ psKernelMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle;
#endif
#endif
- psMapMemInfoMemOUT->sClientMemInfo.psClientSyncInfo = &psMapMemInfoMemOUT->sClientSyncInfo;
-
- PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
- &psMapMemInfoMemOUT->sClientSyncInfo.hKernelSyncInfo,
- psKernelMemInfo->psKernelSyncInfo,
- PVRSRV_HANDLE_TYPE_SYNC_INFO,
- PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
- psMapMemInfoMemOUT->sClientMemInfo.hKernelMemInfo);
- }
-
- COMMIT_HANDLE_BATCH_OR_ERROR(psMapMemInfoMemOUT->eError, psPerProc)
-
- return 0;
-}
+ psMapMemInfoMemOUT->sClientMemInfo.psClientSyncInfo = &psMapMemInfoMemOUT->sClientSyncInfo;
+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
+ &psMapMemInfoMemOUT->sClientSyncInfo.hKernelSyncInfo,
+ psKernelMemInfo->psKernelSyncInfo,
+ PVRSRV_HANDLE_TYPE_SYNC_INFO,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+ psMapMemInfoMemOUT->sClientMemInfo.hKernelMemInfo);
+ }
+ COMMIT_HANDLE_BATCH_OR_ERROR(psMapMemInfoMemOUT->eError, psPerProc)
-static IMG_INT
-MMU_GetPDDevPAddrBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_GETMMU_PD_DEVPADDR *psGetMmuPDDevPAddrIN,
- PVRSRV_BRIDGE_OUT_GETMMU_PD_DEVPADDR *psGetMmuPDDevPAddrOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
-{
- IMG_HANDLE hDevMemContextInt;
-
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GETMMU_PD_DEVPADDR);
-
- psGetMmuPDDevPAddrOUT->eError =
- PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemContextInt,
- psGetMmuPDDevPAddrIN->hDevMemContext,
- PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
- if(psGetMmuPDDevPAddrOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
-
- psGetMmuPDDevPAddrOUT->sPDDevPAddr =
- BM_GetDeviceNode(hDevMemContextInt)->pfnMMUGetPDDevPAddr(BM_GetMMUContextFromMemContext(hDevMemContextInt));
- if(psGetMmuPDDevPAddrOUT->sPDDevPAddr.uiAddr)
- {
- psGetMmuPDDevPAddrOUT->eError = PVRSRV_OK;
- }
- else
- {
- psGetMmuPDDevPAddrOUT->eError = PVRSRV_ERROR_INVALID_PHYS_ADDR;
- }
- return 0;
+ return 0;
}
IMG_INT
DummyBW(IMG_UINT32 ui32BridgeID,
- IMG_VOID *psBridgeIn,
- IMG_VOID *psBridgeOut,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ IMG_VOID *psBridgeIn,
+ IMG_VOID *psBridgeOut,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
#if !defined(DEBUG)
- PVR_UNREFERENCED_PARAMETER(ui32BridgeID);
+ PVR_UNREFERENCED_PARAMETER(ui32BridgeID);
#endif
- PVR_UNREFERENCED_PARAMETER(psBridgeIn);
- PVR_UNREFERENCED_PARAMETER(psBridgeOut);
- PVR_UNREFERENCED_PARAMETER(psPerProc);
+ PVR_UNREFERENCED_PARAMETER(psBridgeIn);
+ PVR_UNREFERENCED_PARAMETER(psBridgeOut);
+ PVR_UNREFERENCED_PARAMETER(psPerProc);
#if defined(DEBUG_BRIDGE_KM)
- PVR_DPF((PVR_DBG_ERROR, "%s: BRIDGE ERROR: BridgeID %u (%s) mapped to "
- "Dummy Wrapper (probably not what you want!)",
- __FUNCTION__, ui32BridgeID, g_BridgeDispatchTable[ui32BridgeID].pszIOCName));
+ PVR_DPF((PVR_DBG_ERROR, "%s: BRIDGE ERROR: BridgeID %u (%s) mapped to "
+ "Dummy Wrapper (probably not what you want!)",
+ __FUNCTION__, ui32BridgeID, g_BridgeDispatchTable[ui32BridgeID].pszIOCName));
#else
- PVR_DPF((PVR_DBG_ERROR, "%s: BRIDGE ERROR: BridgeID %u mapped to "
- "Dummy Wrapper (probably not what you want!)",
- __FUNCTION__, ui32BridgeID));
+ PVR_DPF((PVR_DBG_ERROR, "%s: BRIDGE ERROR: BridgeID %u mapped to "
+ "Dummy Wrapper (probably not what you want!)",
+ __FUNCTION__, ui32BridgeID));
#endif
- return -ENOTTY;
+ return -ENOTTY;
}
IMG_VOID
_SetDispatchTableEntry(IMG_UINT32 ui32Index,
- const IMG_CHAR *pszIOCName,
- BridgeWrapperFunction pfFunction,
- const IMG_CHAR *pszFunctionName)
+ const IMG_CHAR *pszIOCName,
+ BridgeWrapperFunction pfFunction,
+ const IMG_CHAR *pszFunctionName)
{
- static IMG_UINT32 ui32PrevIndex = ~0UL;
+ static IMG_UINT32 ui32PrevIndex = ~0UL;
#if !defined(DEBUG)
- PVR_UNREFERENCED_PARAMETER(pszIOCName);
+ PVR_UNREFERENCED_PARAMETER(pszIOCName);
#endif
#if !defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE) && !defined(DEBUG_BRIDGE_KM)
- PVR_UNREFERENCED_PARAMETER(pszFunctionName);
+ PVR_UNREFERENCED_PARAMETER(pszFunctionName);
#endif
#if defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE)
-
- PVR_DPF((PVR_DBG_WARNING, "%s: %d %s %s", __FUNCTION__, ui32Index, pszIOCName, pszFunctionName));
+
+ PVR_DPF((PVR_DBG_WARNING, "%s: %d %s %s", __FUNCTION__, ui32Index, pszIOCName, pszFunctionName));
#endif
-
- if(g_BridgeDispatchTable[ui32Index].pfFunction)
- {
+
+ if(g_BridgeDispatchTable[ui32Index].pfFunction)
+ {
#if defined(DEBUG_BRIDGE_KM)
- PVR_DPF((PVR_DBG_ERROR,
- "%s: BUG!: Adding dispatch table entry for %s clobbers an existing entry for %s",
- __FUNCTION__, pszIOCName, g_BridgeDispatchTable[ui32Index].pszIOCName));
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: BUG!: Adding dispatch table entry for %s clobbers an existing entry for %s",
+ __FUNCTION__, pszIOCName, g_BridgeDispatchTable[ui32Index].pszIOCName));
#else
- PVR_DPF((PVR_DBG_ERROR,
- "%s: BUG!: Adding dispatch table entry for %s clobbers an existing entry (index=%u)",
- __FUNCTION__, pszIOCName, ui32Index));
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: BUG!: Adding dispatch table entry for %s clobbers an existing entry (index=%u)",
+ __FUNCTION__, pszIOCName, ui32Index));
#endif
- PVR_DPF((PVR_DBG_ERROR, "NOTE: Enabling DEBUG_BRIDGE_KM_DISPATCH_TABLE may help debug this issue."));
- }
-
-
- if((ui32PrevIndex != ~0UL) &&
- ((ui32Index >= ui32PrevIndex + DISPATCH_TABLE_GAP_THRESHOLD) ||
- (ui32Index <= ui32PrevIndex)))
- {
+ PVR_DPF((PVR_DBG_ERROR, "NOTE: Enabling DEBUG_BRIDGE_KM_DISPATCH_TABLE may help debug this issue."));
+ }
+
+
+ if((ui32PrevIndex != ~0UL) &&
+ ((ui32Index >= ui32PrevIndex + DISPATCH_TABLE_GAP_THRESHOLD) ||
+ (ui32Index <= ui32PrevIndex)))
+ {
#if defined(DEBUG_BRIDGE_KM)
- PVR_DPF((PVR_DBG_WARNING,
- "%s: There is a gap in the dispatch table between indices %u (%s) and %u (%s)",
- __FUNCTION__, ui32PrevIndex, g_BridgeDispatchTable[ui32PrevIndex].pszIOCName,
- ui32Index, pszIOCName));
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s: There is a gap in the dispatch table between indices %u (%s) and %u (%s)",
+ __FUNCTION__, ui32PrevIndex, g_BridgeDispatchTable[ui32PrevIndex].pszIOCName,
+ ui32Index, pszIOCName));
#else
- PVR_DPF((PVR_DBG_WARNING,
- "%s: There is a gap in the dispatch table between indices %u and %u (%s)",
- __FUNCTION__, (IMG_UINT)ui32PrevIndex, (IMG_UINT)ui32Index, pszIOCName));
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s: There is a gap in the dispatch table between indices %u and %u (%s)",
+ __FUNCTION__, (IMG_UINT)ui32PrevIndex, (IMG_UINT)ui32Index, pszIOCName));
#endif
- PVR_DPF((PVR_DBG_ERROR, "NOTE: Enabling DEBUG_BRIDGE_KM_DISPATCH_TABLE may help debug this issue."));
- }
+ PVR_DPF((PVR_DBG_ERROR, "NOTE: Enabling DEBUG_BRIDGE_KM_DISPATCH_TABLE may help debug this issue."));
+ }
- g_BridgeDispatchTable[ui32Index].pfFunction = pfFunction;
+ g_BridgeDispatchTable[ui32Index].pfFunction = pfFunction;
#if defined(DEBUG_BRIDGE_KM)
- g_BridgeDispatchTable[ui32Index].pszIOCName = pszIOCName;
- g_BridgeDispatchTable[ui32Index].pszFunctionName = pszFunctionName;
- g_BridgeDispatchTable[ui32Index].ui32CallCount = 0;
- g_BridgeDispatchTable[ui32Index].ui32CopyFromUserTotalBytes = 0;
+ g_BridgeDispatchTable[ui32Index].pszIOCName = pszIOCName;
+ g_BridgeDispatchTable[ui32Index].pszFunctionName = pszFunctionName;
+ g_BridgeDispatchTable[ui32Index].ui32CallCount = 0;
+ g_BridgeDispatchTable[ui32Index].ui32CopyFromUserTotalBytes = 0;
#endif
- ui32PrevIndex = ui32Index;
+ ui32PrevIndex = ui32Index;
}
static IMG_INT
PVRSRVInitSrvConnectBW(IMG_UINT32 ui32BridgeID,
- IMG_VOID *psBridgeIn,
- PVRSRV_BRIDGE_RETURN *psRetOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ IMG_VOID *psBridgeIn,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- PVR_UNREFERENCED_PARAMETER(psBridgeIn);
+ PVR_UNREFERENCED_PARAMETER(psBridgeIn);
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_INITSRV_CONNECT);
- PVR_UNREFERENCED_PARAMETER(psBridgeIn);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_INITSRV_CONNECT);
+ PVR_UNREFERENCED_PARAMETER(psBridgeIn);
-
- if((OSProcHasPrivSrvInit() == IMG_FALSE) || PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_RUNNING) || PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_RAN))
- {
- psRetOUT->eError = PVRSRV_ERROR_SRV_CONNECT_FAILED;
- return 0;
- }
+
+ if((OSProcHasPrivSrvInit() == IMG_FALSE) || PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_RUNNING) || PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_RAN))
+ {
+ psRetOUT->eError = PVRSRV_ERROR_SRV_CONNECT_FAILED;
+ return 0;
+ }
#if defined (__linux__) || defined(__QNXNTO__)
- PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_RUNNING, IMG_TRUE);
+ PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_RUNNING, IMG_TRUE);
#endif
- psPerProc->bInitProcess = IMG_TRUE;
+ psPerProc->bInitProcess = IMG_TRUE;
- psRetOUT->eError = PVRSRV_OK;
+ psRetOUT->eError = PVRSRV_OK;
- return 0;
+ return 0;
}
static IMG_INT
PVRSRVInitSrvDisconnectBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_INITSRV_DISCONNECT *psInitSrvDisconnectIN,
- PVRSRV_BRIDGE_RETURN *psRetOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_INITSRV_DISCONNECT *psInitSrvDisconnectIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_INITSRV_DISCONNECT);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_INITSRV_DISCONNECT);
- if(!psPerProc->bInitProcess)
- {
- psRetOUT->eError = PVRSRV_ERROR_SRV_DISCONNECT_FAILED;
- return 0;
- }
+ if(!psPerProc->bInitProcess)
+ {
+ psRetOUT->eError = PVRSRV_ERROR_SRV_DISCONNECT_FAILED;
+ return 0;
+ }
- psPerProc->bInitProcess = IMG_FALSE;
+ psPerProc->bInitProcess = IMG_FALSE;
- PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_RUNNING, IMG_FALSE);
- PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_RAN, IMG_TRUE);
+ PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_RUNNING, IMG_FALSE);
+ PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_RAN, IMG_TRUE);
- psRetOUT->eError = PVRSRVFinaliseSystem(psInitSrvDisconnectIN->bInitSuccesful);
+ psRetOUT->eError = PVRSRVFinaliseSystem(psInitSrvDisconnectIN->bInitSuccesful);
- PVRSRVSetInitServerState( PVRSRV_INIT_SERVER_SUCCESSFUL ,
- ((psRetOUT->eError == PVRSRV_OK) && (psInitSrvDisconnectIN->bInitSuccesful))
- ? IMG_TRUE : IMG_FALSE);
+	PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_SUCCESSFUL,
+ ((psRetOUT->eError == PVRSRV_OK) && (psInitSrvDisconnectIN->bInitSuccesful))
+ ? IMG_TRUE : IMG_FALSE);
- return 0;
+ return 0;
}
static IMG_INT
PVRSRVEventObjectWaitBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_EVENT_OBJECT_WAIT *psEventObjectWaitIN,
- PVRSRV_BRIDGE_RETURN *psRetOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_EVENT_OBJECT_WAIT *psEventObjectWaitIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- IMG_HANDLE hOSEventKM;
+ IMG_HANDLE hOSEventKM;
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_EVENT_OBJECT_WAIT);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_EVENT_OBJECT_WAIT);
- psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
- &hOSEventKM,
- psEventObjectWaitIN->hOSEventKM,
- PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT);
+ psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hOSEventKM,
+ psEventObjectWaitIN->hOSEventKM,
+ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT);
- if(psRetOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
- psRetOUT->eError = OSEventObjectWaitKM(hOSEventKM);
+ psRetOUT->eError = OSEventObjectWaitKM(hOSEventKM);
- return 0;
+ return 0;
}
static IMG_INT
PVRSRVEventObjectOpenBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_EVENT_OBJECT_OPEN *psEventObjectOpenIN,
- PVRSRV_BRIDGE_OUT_EVENT_OBJECT_OPEN *psEventObjectOpenOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_EVENT_OBJECT_OPEN *psEventObjectOpenIN,
+ PVRSRV_BRIDGE_OUT_EVENT_OBJECT_OPEN *psEventObjectOpenOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
#if defined (SUPPORT_SID_INTERFACE)
- PVRSRV_EVENTOBJECT_KM sEventObject;
- IMG_HANDLE hOSEvent;
+ PVRSRV_EVENTOBJECT_KM sEventObject;
+ IMG_HANDLE hOSEvent;
#endif
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_EVENT_OBJECT_OPEN);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_EVENT_OBJECT_OPEN);
- NEW_HANDLE_BATCH_OR_ERROR(psEventObjectOpenOUT->eError, psPerProc, 1)
+ NEW_HANDLE_BATCH_OR_ERROR(psEventObjectOpenOUT->eError, psPerProc, 1)
- psEventObjectOpenOUT->eError =
- PVRSRVLookupHandle(psPerProc->psHandleBase,
+ psEventObjectOpenOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
#if defined (SUPPORT_SID_INTERFACE)
- &sEventObject.hOSEventKM,
+ &sEventObject.hOSEventKM,
#else
- &psEventObjectOpenIN->sEventObject.hOSEventKM,
+ &psEventObjectOpenIN->sEventObject.hOSEventKM,
#endif
- psEventObjectOpenIN->sEventObject.hOSEventKM,
- PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT);
+ psEventObjectOpenIN->sEventObject.hOSEventKM,
+ PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT);
- if(psEventObjectOpenOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
+ if(psEventObjectOpenOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
#if defined (SUPPORT_SID_INTERFACE)
- OSMemCopy(&sEventObject.szName,
- &psEventObjectOpenIN->sEventObject.szName,
- EVENTOBJNAME_MAXLENGTH);
+ OSMemCopy(&sEventObject.szName,
+ &psEventObjectOpenIN->sEventObject.szName,
+ EVENTOBJNAME_MAXLENGTH);
- psEventObjectOpenOUT->eError = OSEventObjectOpenKM(&sEventObject, &hOSEvent);
+ psEventObjectOpenOUT->eError = OSEventObjectOpenKM(&sEventObject, &hOSEvent);
#else
- psEventObjectOpenOUT->eError = OSEventObjectOpenKM(&psEventObjectOpenIN->sEventObject, &psEventObjectOpenOUT->hOSEvent);
+ psEventObjectOpenOUT->eError = OSEventObjectOpenKM(&psEventObjectOpenIN->sEventObject, &psEventObjectOpenOUT->hOSEvent);
#endif
- if(psEventObjectOpenOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
+ if(psEventObjectOpenOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
#if defined (SUPPORT_SID_INTERFACE)
#if !defined (WINXP) && !defined(SUPPORT_VISTA)
- PVRSRVAllocHandleNR(psPerProc->psHandleBase,
- &psEventObjectOpenOUT->hOSEvent,
- hOSEvent,
- PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT,
- PVRSRV_HANDLE_ALLOC_FLAG_MULTI);
+ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
+ &psEventObjectOpenOUT->hOSEvent,
+ hOSEvent,
+ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI);
#endif
#else
- PVRSRVAllocHandleNR(psPerProc->psHandleBase,
- &psEventObjectOpenOUT->hOSEvent,
- psEventObjectOpenOUT->hOSEvent,
- PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT,
- PVRSRV_HANDLE_ALLOC_FLAG_MULTI);
+ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
+ &psEventObjectOpenOUT->hOSEvent,
+ psEventObjectOpenOUT->hOSEvent,
+ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI);
#endif
- COMMIT_HANDLE_BATCH_OR_ERROR(psEventObjectOpenOUT->eError, psPerProc)
+ COMMIT_HANDLE_BATCH_OR_ERROR(psEventObjectOpenOUT->eError, psPerProc)
- return 0;
+ return 0;
}
static IMG_INT
PVRSRVEventObjectCloseBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_EVENT_OBJECT_CLOSE *psEventObjectCloseIN,
- PVRSRV_BRIDGE_RETURN *psRetOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_EVENT_OBJECT_CLOSE *psEventObjectCloseIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- IMG_HANDLE hOSEventKM;
+ IMG_HANDLE hOSEventKM;
#if defined (SUPPORT_SID_INTERFACE)
- PVRSRV_EVENTOBJECT_KM sEventObject;
+ PVRSRV_EVENTOBJECT_KM sEventObject;
#endif
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_EVENT_OBJECT_CLOSE);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_EVENT_OBJECT_CLOSE);
- psRetOUT->eError =
- PVRSRVLookupHandle(psPerProc->psHandleBase,
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
#if defined (SUPPORT_SID_INTERFACE)
- &sEventObject.hOSEventKM,
+ &sEventObject.hOSEventKM,
#else
- &psEventObjectCloseIN->sEventObject.hOSEventKM,
+ &psEventObjectCloseIN->sEventObject.hOSEventKM,
#endif
- psEventObjectCloseIN->sEventObject.hOSEventKM,
- PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT);
- if(psRetOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
-
- psRetOUT->eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
- &hOSEventKM,
- psEventObjectCloseIN->hOSEventKM,
- PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT);
-
- if(psRetOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
+ psEventObjectCloseIN->sEventObject.hOSEventKM,
+ PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
+ &hOSEventKM,
+ psEventObjectCloseIN->hOSEventKM,
+ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT);
+
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
#if defined (SUPPORT_SID_INTERFACE)
- if(CopyFromUserWrapper(psPerProc, ui32BridgeID,
- &sEventObject.szName,
- &psEventObjectCloseIN->sEventObject.szName,
- EVENTOBJNAME_MAXLENGTH) != PVRSRV_OK)
- {
-
- return -EFAULT;
- }
-
- psRetOUT->eError = OSEventObjectCloseKM(&sEventObject, hOSEventKM);
+ if(CopyFromUserWrapper(psPerProc, ui32BridgeID,
+ &sEventObject.szName,
+ &psEventObjectCloseIN->sEventObject.szName,
+ EVENTOBJNAME_MAXLENGTH) != PVRSRV_OK)
+ {
+
+ return -EFAULT;
+ }
+
+ psRetOUT->eError = OSEventObjectCloseKM(&sEventObject, hOSEventKM);
#else
- psRetOUT->eError = OSEventObjectCloseKM(&psEventObjectCloseIN->sEventObject, hOSEventKM);
+ psRetOUT->eError = OSEventObjectCloseKM(&psEventObjectCloseIN->sEventObject, hOSEventKM);
#endif
- return 0;
+ return 0;
}
typedef struct _MODIFY_SYNC_OP_INFO
{
- IMG_HANDLE hResItem;
- PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
- IMG_UINT32 ui32ModifyFlags;
- IMG_UINT32 ui32ReadOpsPendingSnapShot;
- IMG_UINT32 ui32WriteOpsPendingSnapShot;
+ IMG_HANDLE hResItem;
+ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
+ IMG_UINT32 ui32ModifyFlags;
+ IMG_UINT32 ui32ReadOpsPendingSnapShot;
+ IMG_UINT32 ui32WriteOpsPendingSnapShot;
} MODIFY_SYNC_OP_INFO;
static PVRSRV_ERROR DoQuerySyncOpsSatisfied(PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo,
- IMG_UINT32 ui32ReadOpsPendingSnapShot,
+ IMG_UINT32 ui32ReadOpsPendingSnapShot,
IMG_UINT32 ui32WriteOpsPendingSnapShot)
{
- IMG_UINT32 ui32WriteOpsPending;
- IMG_UINT32 ui32ReadOpsPending;
+ IMG_UINT32 ui32WriteOpsPending;
+ IMG_UINT32 ui32ReadOpsPending;
-
- if (!psKernelSyncInfo)
- {
- return PVRSRV_ERROR_INVALID_PARAMS;
- }
+
+ if (!psKernelSyncInfo)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
-
+
- ui32WriteOpsPending = psKernelSyncInfo->psSyncData->ui32WriteOpsPending;
- ui32ReadOpsPending = psKernelSyncInfo->psSyncData->ui32ReadOpsPending;
+ ui32WriteOpsPending = psKernelSyncInfo->psSyncData->ui32WriteOpsPending;
+ ui32ReadOpsPending = psKernelSyncInfo->psSyncData->ui32ReadOpsPending;
- if((ui32WriteOpsPending - ui32WriteOpsPendingSnapShot >=
- ui32WriteOpsPending - psKernelSyncInfo->psSyncData->ui32WriteOpsComplete) &&
- (ui32ReadOpsPending - ui32ReadOpsPendingSnapShot >=
+ if((ui32WriteOpsPending - ui32WriteOpsPendingSnapShot >=
+ ui32WriteOpsPending - psKernelSyncInfo->psSyncData->ui32WriteOpsComplete) &&
+ (ui32ReadOpsPending - ui32ReadOpsPendingSnapShot >=
ui32ReadOpsPending - psKernelSyncInfo->psSyncData->ui32ReadOpsComplete))
- {
+ {
#if defined(PDUMP) && !defined(SUPPORT_VGX)
-
- PDumpComment("Poll for read ops complete to reach value (pdump: %u, actual snapshot: %u)",
- psKernelSyncInfo->psSyncData->ui32LastReadOpDumpVal,
- ui32ReadOpsPendingSnapShot);
- PDumpMemPolKM(psKernelSyncInfo->psSyncDataMemInfoKM,
- offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete),
- psKernelSyncInfo->psSyncData->ui32LastReadOpDumpVal,
- 0xFFFFFFFF,
- PDUMP_POLL_OPERATOR_EQUAL,
- 0,
- MAKEUNIQUETAG(psKernelSyncInfo->psSyncDataMemInfoKM));
-
-
- PDumpComment("Poll for write ops complete to reach value (pdump: %u, actual snapshot: %u)",
- psKernelSyncInfo->psSyncData->ui32LastOpDumpVal,
- ui32WriteOpsPendingSnapShot);
- PDumpMemPolKM(psKernelSyncInfo->psSyncDataMemInfoKM,
- offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete),
- psKernelSyncInfo->psSyncData->ui32LastOpDumpVal,
- 0xFFFFFFFF,
- PDUMP_POLL_OPERATOR_EQUAL,
- 0,
- MAKEUNIQUETAG(psKernelSyncInfo->psSyncDataMemInfoKM));
-
+
+ PDumpComment("Poll for read ops complete to reach value (pdump: %u, actual snapshot: %u)",
+ psKernelSyncInfo->psSyncData->ui32LastReadOpDumpVal,
+ ui32ReadOpsPendingSnapShot);
+ PDumpMemPolKM(psKernelSyncInfo->psSyncDataMemInfoKM,
+ offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete),
+ psKernelSyncInfo->psSyncData->ui32LastReadOpDumpVal,
+ 0xFFFFFFFF,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ 0,
+ MAKEUNIQUETAG(psKernelSyncInfo->psSyncDataMemInfoKM));
+
+
+ PDumpComment("Poll for write ops complete to reach value (pdump: %u, actual snapshot: %u)",
+ psKernelSyncInfo->psSyncData->ui32LastOpDumpVal,
+ ui32WriteOpsPendingSnapShot);
+ PDumpMemPolKM(psKernelSyncInfo->psSyncDataMemInfoKM,
+ offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete),
+ psKernelSyncInfo->psSyncData->ui32LastOpDumpVal,
+ 0xFFFFFFFF,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ 0,
+ MAKEUNIQUETAG(psKernelSyncInfo->psSyncDataMemInfoKM));
+
#endif
- return PVRSRV_OK;
- }
- else
- {
- return PVRSRV_ERROR_RETRY;
- }
+ return PVRSRV_OK;
+ }
+ else
+ {
+ return PVRSRV_ERROR_RETRY;
+ }
}
static PVRSRV_ERROR DoModifyCompleteSyncOps(MODIFY_SYNC_OP_INFO *psModSyncOpInfo)
{
- PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
-
- psKernelSyncInfo = psModSyncOpInfo->psKernelSyncInfo;
-
- if (!psKernelSyncInfo)
- {
- return PVRSRV_ERROR_INVALID_PARAMS;
- }
-
-
- if((psModSyncOpInfo->ui32WriteOpsPendingSnapShot != psKernelSyncInfo->psSyncData->ui32WriteOpsComplete)
- || (psModSyncOpInfo->ui32ReadOpsPendingSnapShot != psKernelSyncInfo->psSyncData->ui32ReadOpsComplete))
- {
- return PVRSRV_ERROR_BAD_SYNC_STATE;
- }
-
-
- if(psModSyncOpInfo->ui32ModifyFlags & PVRSRV_MODIFYSYNCOPS_FLAGS_WO_INC)
- {
- psKernelSyncInfo->psSyncData->ui32WriteOpsComplete++;
- }
-
-
- if(psModSyncOpInfo->ui32ModifyFlags & PVRSRV_MODIFYSYNCOPS_FLAGS_RO_INC)
- {
- psKernelSyncInfo->psSyncData->ui32ReadOpsComplete++;
- }
-
- return PVRSRV_OK;
+ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
+
+ psKernelSyncInfo = psModSyncOpInfo->psKernelSyncInfo;
+
+ if (!psKernelSyncInfo)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+
+ if((psModSyncOpInfo->ui32WriteOpsPendingSnapShot != psKernelSyncInfo->psSyncData->ui32WriteOpsComplete)
+ || (psModSyncOpInfo->ui32ReadOpsPendingSnapShot != psKernelSyncInfo->psSyncData->ui32ReadOpsComplete))
+ {
+ return PVRSRV_ERROR_BAD_SYNC_STATE;
+ }
+
+
+ if(psModSyncOpInfo->ui32ModifyFlags & PVRSRV_MODIFYSYNCOPS_FLAGS_WO_INC)
+ {
+ psKernelSyncInfo->psSyncData->ui32WriteOpsComplete++;
+ }
+
+
+ if(psModSyncOpInfo->ui32ModifyFlags & PVRSRV_MODIFYSYNCOPS_FLAGS_RO_INC)
+ {
+ psKernelSyncInfo->psSyncData->ui32ReadOpsComplete++;
+ }
+
+ return PVRSRV_OK;
}
-static PVRSRV_ERROR ModifyCompleteSyncOpsCallBack(IMG_PVOID pvParam,
+static PVRSRV_ERROR ModifyCompleteSyncOpsCallBack(IMG_PVOID pvParam,
IMG_UINT32 ui32Param,
IMG_BOOL bDummy)
{
- MODIFY_SYNC_OP_INFO *psModSyncOpInfo;
+ MODIFY_SYNC_OP_INFO *psModSyncOpInfo;
- PVR_UNREFERENCED_PARAMETER(ui32Param);
- PVR_UNREFERENCED_PARAMETER(bDummy);
+ PVR_UNREFERENCED_PARAMETER(ui32Param);
+ PVR_UNREFERENCED_PARAMETER(bDummy);
+
+ if (!pvParam)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "ModifyCompleteSyncOpsCallBack: invalid parameter"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psModSyncOpInfo = (MODIFY_SYNC_OP_INFO*)pvParam;
- if (!pvParam)
- {
- PVR_DPF((PVR_DBG_ERROR, "ModifyCompleteSyncOpsCallBack: invalid parameter"));
- return PVRSRV_ERROR_INVALID_PARAMS;
- }
-
- psModSyncOpInfo = (MODIFY_SYNC_OP_INFO*)pvParam;
-
- if (psModSyncOpInfo->psKernelSyncInfo)
- {
-
- LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
- {
- if (DoQuerySyncOpsSatisfied(psModSyncOpInfo->psKernelSyncInfo,
- psModSyncOpInfo->ui32ReadOpsPendingSnapShot,
+ if (psModSyncOpInfo->psKernelSyncInfo)
+ {
+
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ if (DoQuerySyncOpsSatisfied(psModSyncOpInfo->psKernelSyncInfo,
+ psModSyncOpInfo->ui32ReadOpsPendingSnapShot,
psModSyncOpInfo->ui32WriteOpsPendingSnapShot) == PVRSRV_OK)
- {
- goto OpFlushedComplete;
- }
- PVR_DPF((PVR_DBG_WARNING, "ModifyCompleteSyncOpsCallBack: waiting for current Ops to flush"));
+ {
+ goto OpFlushedComplete;
+ }
+ PVR_DPF((PVR_DBG_WARNING, "ModifyCompleteSyncOpsCallBack: waiting for current Ops to flush"));
OSSleepms(1);
- } END_LOOP_UNTIL_TIMEOUT();
+ } END_LOOP_UNTIL_TIMEOUT();
- PVR_DPF((PVR_DBG_ERROR, "ModifyCompleteSyncOpsCallBack: timeout whilst waiting for current Ops to flush."));
- PVR_DPF((PVR_DBG_ERROR, " Write ops pending snapshot = %d, write ops complete = %d",
- psModSyncOpInfo->ui32WriteOpsPendingSnapShot,
- psModSyncOpInfo->psKernelSyncInfo->psSyncData->ui32WriteOpsComplete));
+ PVR_DPF((PVR_DBG_ERROR, "ModifyCompleteSyncOpsCallBack: timeout whilst waiting for current Ops to flush."));
+ PVR_DPF((PVR_DBG_ERROR, " Write ops pending snapshot = %d, write ops complete = %d",
+ psModSyncOpInfo->ui32WriteOpsPendingSnapShot,
+ psModSyncOpInfo->psKernelSyncInfo->psSyncData->ui32WriteOpsComplete));
 	PVR_DPF((PVR_DBG_ERROR, "	Read ops pending snapshot = %d, read ops complete = %d",
- psModSyncOpInfo->ui32ReadOpsPendingSnapShot,
- psModSyncOpInfo->psKernelSyncInfo->psSyncData->ui32ReadOpsComplete));
+ psModSyncOpInfo->ui32ReadOpsPendingSnapShot,
+ psModSyncOpInfo->psKernelSyncInfo->psSyncData->ui32ReadOpsComplete));
- return PVRSRV_ERROR_TIMEOUT;
+ return PVRSRV_ERROR_TIMEOUT;
- OpFlushedComplete:
+OpFlushedComplete:
- DoModifyCompleteSyncOps(psModSyncOpInfo);
- }
+ DoModifyCompleteSyncOps(psModSyncOpInfo);
+ }
- OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(MODIFY_SYNC_OP_INFO), (IMG_VOID *)psModSyncOpInfo, 0);
-
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(MODIFY_SYNC_OP_INFO), (IMG_VOID *)psModSyncOpInfo, 0);
+
-
- PVRSRVScheduleDeviceCallbacks();
+
+ PVRSRVScheduleDeviceCallbacks();
- return PVRSRV_OK;
+ return PVRSRV_OK;
}
static IMG_INT
PVRSRVCreateSyncInfoModObjBW(IMG_UINT32 ui32BridgeID,
- IMG_VOID *psBridgeIn,
- PVRSRV_BRIDGE_OUT_CREATE_SYNC_INFO_MOD_OBJ *psCreateSyncInfoModObjOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ IMG_VOID *psBridgeIn,
+ PVRSRV_BRIDGE_OUT_CREATE_SYNC_INFO_MOD_OBJ *psCreateSyncInfoModObjOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- MODIFY_SYNC_OP_INFO *psModSyncOpInfo;
+ MODIFY_SYNC_OP_INFO *psModSyncOpInfo;
- PVR_UNREFERENCED_PARAMETER(psBridgeIn);
+ PVR_UNREFERENCED_PARAMETER(psBridgeIn);
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CREATE_SYNC_INFO_MOD_OBJ);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CREATE_SYNC_INFO_MOD_OBJ);
- NEW_HANDLE_BATCH_OR_ERROR(psCreateSyncInfoModObjOUT->eError, psPerProc, 1)
+ NEW_HANDLE_BATCH_OR_ERROR(psCreateSyncInfoModObjOUT->eError, psPerProc, 1)
- ASSIGN_AND_EXIT_ON_ERROR(psCreateSyncInfoModObjOUT->eError,
- OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
- sizeof(MODIFY_SYNC_OP_INFO),
- (IMG_VOID **)&psModSyncOpInfo, 0,
- "ModSyncOpInfo (MODIFY_SYNC_OP_INFO)"));
+ ASSIGN_AND_EXIT_ON_ERROR(psCreateSyncInfoModObjOUT->eError,
+ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(MODIFY_SYNC_OP_INFO),
+ (IMG_VOID **)&psModSyncOpInfo, 0,
+ "ModSyncOpInfo (MODIFY_SYNC_OP_INFO)"));
- psModSyncOpInfo->psKernelSyncInfo = IMG_NULL;
+ psModSyncOpInfo->psKernelSyncInfo = IMG_NULL;
- psCreateSyncInfoModObjOUT->eError = PVRSRVAllocHandle(psPerProc->psHandleBase,
- &psCreateSyncInfoModObjOUT->hKernelSyncInfoModObj,
- psModSyncOpInfo,
- PVRSRV_HANDLE_TYPE_SYNC_INFO_MOD_OBJ,
- PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE);
+ psCreateSyncInfoModObjOUT->eError = PVRSRVAllocHandle(psPerProc->psHandleBase,
+ &psCreateSyncInfoModObjOUT->hKernelSyncInfoModObj,
+ psModSyncOpInfo,
+ PVRSRV_HANDLE_TYPE_SYNC_INFO_MOD_OBJ,
+ PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE);
- if (psCreateSyncInfoModObjOUT->eError != PVRSRV_OK)
- {
- return 0;
- }
+ if (psCreateSyncInfoModObjOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
- psModSyncOpInfo->hResItem = ResManRegisterRes(psPerProc->hResManContext,
- RESMAN_TYPE_MODIFY_SYNC_OPS,
- psModSyncOpInfo,
- 0,
- &ModifyCompleteSyncOpsCallBack);
+ psModSyncOpInfo->hResItem = ResManRegisterRes(psPerProc->hResManContext,
+ RESMAN_TYPE_MODIFY_SYNC_OPS,
+ psModSyncOpInfo,
+ 0,
+ &ModifyCompleteSyncOpsCallBack);
- COMMIT_HANDLE_BATCH_OR_ERROR(psCreateSyncInfoModObjOUT->eError, psPerProc)
+ COMMIT_HANDLE_BATCH_OR_ERROR(psCreateSyncInfoModObjOUT->eError, psPerProc)
- return 0;
+ return 0;
}
static IMG_INT
PVRSRVDestroySyncInfoModObjBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_DESTROY_SYNC_INFO_MOD_OBJ *psDestroySyncInfoModObjIN,
- PVRSRV_BRIDGE_RETURN *psDestroySyncInfoModObjOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_DESTROY_SYNC_INFO_MOD_OBJ *psDestroySyncInfoModObjIN,
+ PVRSRV_BRIDGE_RETURN *psDestroySyncInfoModObjOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- MODIFY_SYNC_OP_INFO *psModSyncOpInfo;
-
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_DESTROY_SYNC_INFO_MOD_OBJ);
-
- psDestroySyncInfoModObjOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
- (IMG_VOID**)&psModSyncOpInfo,
- psDestroySyncInfoModObjIN->hKernelSyncInfoModObj,
- PVRSRV_HANDLE_TYPE_SYNC_INFO_MOD_OBJ);
- if (psDestroySyncInfoModObjOUT->eError != PVRSRV_OK)
- {
- PVR_DPF((PVR_DBG_ERROR, "PVRSRVDestroySyncInfoModObjBW: PVRSRVLookupHandle failed"));
- return 0;
- }
-
- if(psModSyncOpInfo->psKernelSyncInfo != IMG_NULL)
- {
-
- psDestroySyncInfoModObjOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
- return 0;
- }
-
- psDestroySyncInfoModObjOUT->eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
- psDestroySyncInfoModObjIN->hKernelSyncInfoModObj,
- PVRSRV_HANDLE_TYPE_SYNC_INFO_MOD_OBJ);
-
- if (psDestroySyncInfoModObjOUT->eError != PVRSRV_OK)
- {
- PVR_DPF((PVR_DBG_ERROR, "PVRSRVDestroySyncInfoModObjBW: PVRSRVReleaseHandle failed"));
- return 0;
- }
+ MODIFY_SYNC_OP_INFO *psModSyncOpInfo;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_DESTROY_SYNC_INFO_MOD_OBJ);
+
+ psDestroySyncInfoModObjOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ (IMG_VOID**)&psModSyncOpInfo,
+ psDestroySyncInfoModObjIN->hKernelSyncInfoModObj,
+ PVRSRV_HANDLE_TYPE_SYNC_INFO_MOD_OBJ);
+ if (psDestroySyncInfoModObjOUT->eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVDestroySyncInfoModObjBW: PVRSRVLookupHandle failed"));
+ return 0;
+ }
+
+ if(psModSyncOpInfo->psKernelSyncInfo != IMG_NULL)
+ {
+
+ psDestroySyncInfoModObjOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+ return 0;
+ }
+
+ psDestroySyncInfoModObjOUT->eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
+ psDestroySyncInfoModObjIN->hKernelSyncInfoModObj,
+ PVRSRV_HANDLE_TYPE_SYNC_INFO_MOD_OBJ);
+
+ if (psDestroySyncInfoModObjOUT->eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVDestroySyncInfoModObjBW: PVRSRVReleaseHandle failed"));
+ return 0;
+ }
psDestroySyncInfoModObjOUT->eError = ResManFreeResByPtr(psModSyncOpInfo->hResItem, CLEANUP_WITH_POLL);
- if (psDestroySyncInfoModObjOUT->eError != PVRSRV_OK)
- {
- PVR_DPF((PVR_DBG_ERROR, "PVRSRVDestroySyncInfoModObjBW: ResManFreeResByPtr failed"));
- return 0;
- }
+ if (psDestroySyncInfoModObjOUT->eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVDestroySyncInfoModObjBW: ResManFreeResByPtr failed"));
+ return 0;
+ }
- return 0;
+ return 0;
}
static IMG_INT
-PVRSRVModifyPendingSyncOpsBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_MODIFY_PENDING_SYNC_OPS *psModifySyncOpsIN,
- PVRSRV_BRIDGE_OUT_MODIFY_PENDING_SYNC_OPS *psModifySyncOpsOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+PVRSRVModifyPendingSyncOpsBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_MODIFY_PENDING_SYNC_OPS *psModifySyncOpsIN,
+ PVRSRV_BRIDGE_OUT_MODIFY_PENDING_SYNC_OPS *psModifySyncOpsOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
- MODIFY_SYNC_OP_INFO *psModSyncOpInfo;
-
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_MODIFY_PENDING_SYNC_OPS);
-
- psModifySyncOpsOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
- (IMG_VOID**)&psModSyncOpInfo,
- psModifySyncOpsIN->hKernelSyncInfoModObj,
- PVRSRV_HANDLE_TYPE_SYNC_INFO_MOD_OBJ);
- if (psModifySyncOpsOUT->eError != PVRSRV_OK)
- {
- PVR_DPF((PVR_DBG_ERROR, "PVRSRVModifyPendingSyncOpsBW: PVRSRVLookupHandle failed"));
- return 0;
- }
-
- psModifySyncOpsOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
- (IMG_VOID**)&psKernelSyncInfo,
- psModifySyncOpsIN->hKernelSyncInfo,
- PVRSRV_HANDLE_TYPE_SYNC_INFO);
- if (psModifySyncOpsOUT->eError != PVRSRV_OK)
- {
- PVR_DPF((PVR_DBG_ERROR, "PVRSRVModifyPendingSyncOpsBW: PVRSRVLookupHandle failed"));
- return 0;
- }
-
- if(psModSyncOpInfo->psKernelSyncInfo)
- {
-
- psModifySyncOpsOUT->eError = PVRSRV_ERROR_RETRY;
- PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVModifyPendingSyncOpsBW: SyncInfo Modification object is not empty"));
- return 0;
- }
-
-
- psModSyncOpInfo->psKernelSyncInfo = psKernelSyncInfo;
- psModSyncOpInfo->ui32ModifyFlags = psModifySyncOpsIN->ui32ModifyFlags;
- psModSyncOpInfo->ui32ReadOpsPendingSnapShot = psKernelSyncInfo->psSyncData->ui32ReadOpsPending;
- psModSyncOpInfo->ui32WriteOpsPendingSnapShot = psKernelSyncInfo->psSyncData->ui32WriteOpsPending;
-
-
-
- psModifySyncOpsOUT->ui32ReadOpsPending = psKernelSyncInfo->psSyncData->ui32ReadOpsPending;
- psModifySyncOpsOUT->ui32WriteOpsPending = psKernelSyncInfo->psSyncData->ui32WriteOpsPending;
-
- if(psModifySyncOpsIN->ui32ModifyFlags & PVRSRV_MODIFYSYNCOPS_FLAGS_WO_INC)
- {
- psKernelSyncInfo->psSyncData->ui32WriteOpsPending++;
- }
-
- if(psModifySyncOpsIN->ui32ModifyFlags & PVRSRV_MODIFYSYNCOPS_FLAGS_RO_INC)
- {
- psKernelSyncInfo->psSyncData->ui32ReadOpsPending++;
- }
-
-
- psModifySyncOpsOUT->eError = ResManDissociateRes(psModSyncOpInfo->hResItem,
- psPerProc->hResManContext);
-
- if (psModifySyncOpsOUT->eError != PVRSRV_OK)
- {
- PVR_DPF((PVR_DBG_ERROR, "PVRSRVModifyPendingSyncOpsBW: PVRSRVLookupHandle failed"));
- return 0;
- }
-
- return 0;
+ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
+ MODIFY_SYNC_OP_INFO *psModSyncOpInfo;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_MODIFY_PENDING_SYNC_OPS);
+
+ psModifySyncOpsOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ (IMG_VOID**)&psModSyncOpInfo,
+ psModifySyncOpsIN->hKernelSyncInfoModObj,
+ PVRSRV_HANDLE_TYPE_SYNC_INFO_MOD_OBJ);
+ if (psModifySyncOpsOUT->eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVModifyPendingSyncOpsBW: PVRSRVLookupHandle failed"));
+ return 0;
+ }
+
+ psModifySyncOpsOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ (IMG_VOID**)&psKernelSyncInfo,
+ psModifySyncOpsIN->hKernelSyncInfo,
+ PVRSRV_HANDLE_TYPE_SYNC_INFO);
+ if (psModifySyncOpsOUT->eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVModifyPendingSyncOpsBW: PVRSRVLookupHandle failed"));
+ return 0;
+ }
+
+ if(psModSyncOpInfo->psKernelSyncInfo)
+ {
+
+ psModifySyncOpsOUT->eError = PVRSRV_ERROR_RETRY;
+ PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVModifyPendingSyncOpsBW: SyncInfo Modification object is not empty"));
+ return 0;
+ }
+
+
+ psModSyncOpInfo->psKernelSyncInfo = psKernelSyncInfo;
+ psModSyncOpInfo->ui32ModifyFlags = psModifySyncOpsIN->ui32ModifyFlags;
+ psModSyncOpInfo->ui32ReadOpsPendingSnapShot = psKernelSyncInfo->psSyncData->ui32ReadOpsPending;
+ psModSyncOpInfo->ui32WriteOpsPendingSnapShot = psKernelSyncInfo->psSyncData->ui32WriteOpsPending;
+
+
+
+ psModifySyncOpsOUT->ui32ReadOpsPending = psKernelSyncInfo->psSyncData->ui32ReadOpsPending;
+ psModifySyncOpsOUT->ui32WriteOpsPending = psKernelSyncInfo->psSyncData->ui32WriteOpsPending;
+
+ if(psModifySyncOpsIN->ui32ModifyFlags & PVRSRV_MODIFYSYNCOPS_FLAGS_WO_INC)
+ {
+ psKernelSyncInfo->psSyncData->ui32WriteOpsPending++;
+ }
+
+ if(psModifySyncOpsIN->ui32ModifyFlags & PVRSRV_MODIFYSYNCOPS_FLAGS_RO_INC)
+ {
+ psKernelSyncInfo->psSyncData->ui32ReadOpsPending++;
+ }
+
+
+ psModifySyncOpsOUT->eError = ResManDissociateRes(psModSyncOpInfo->hResItem,
+ psPerProc->hResManContext);
+
+ if (psModifySyncOpsOUT->eError != PVRSRV_OK)
+ {
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVModifyPendingSyncOpsBW: ResManDissociateRes failed"));
+ return 0;
+ }
+
+ return 0;
}
static IMG_INT
-PVRSRVModifyCompleteSyncOpsBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_MODIFY_COMPLETE_SYNC_OPS *psModifySyncOpsIN,
- PVRSRV_BRIDGE_RETURN *psModifySyncOpsOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+PVRSRVModifyCompleteSyncOpsBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_MODIFY_COMPLETE_SYNC_OPS *psModifySyncOpsIN,
+ PVRSRV_BRIDGE_RETURN *psModifySyncOpsOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- MODIFY_SYNC_OP_INFO *psModSyncOpInfo;
+ MODIFY_SYNC_OP_INFO *psModSyncOpInfo;
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_MODIFY_COMPLETE_SYNC_OPS);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_MODIFY_COMPLETE_SYNC_OPS);
- psModifySyncOpsOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
- (IMG_VOID**)&psModSyncOpInfo,
- psModifySyncOpsIN->hKernelSyncInfoModObj,
- PVRSRV_HANDLE_TYPE_SYNC_INFO_MOD_OBJ);
- if (psModifySyncOpsOUT->eError != PVRSRV_OK)
- {
- PVR_DPF((PVR_DBG_ERROR, "PVRSRVModifyCompleteSyncOpsBW: PVRSRVLookupHandle failed"));
- return 0;
- }
+ psModifySyncOpsOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ (IMG_VOID**)&psModSyncOpInfo,
+ psModifySyncOpsIN->hKernelSyncInfoModObj,
+ PVRSRV_HANDLE_TYPE_SYNC_INFO_MOD_OBJ);
+ if (psModifySyncOpsOUT->eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVModifyCompleteSyncOpsBW: PVRSRVLookupHandle failed"));
+ return 0;
+ }
- if(psModSyncOpInfo->psKernelSyncInfo == IMG_NULL)
- {
-
- psModifySyncOpsOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
- return 0;
- }
+ if(psModSyncOpInfo->psKernelSyncInfo == IMG_NULL)
+ {
+
+ psModifySyncOpsOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+ return 0;
+ }
- psModifySyncOpsOUT->eError = DoModifyCompleteSyncOps(psModSyncOpInfo);
+ psModifySyncOpsOUT->eError = DoModifyCompleteSyncOps(psModSyncOpInfo);
- if (psModifySyncOpsOUT->eError != PVRSRV_OK)
- {
- PVR_DPF((PVR_DBG_ERROR, "PVRSRVModifyCompleteSyncOpsBW: DoModifyCompleteSyncOps failed"));
- return 0;
- }
+ if (psModifySyncOpsOUT->eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVModifyCompleteSyncOpsBW: DoModifyCompleteSyncOps failed"));
+ return 0;
+ }
- psModSyncOpInfo->psKernelSyncInfo = IMG_NULL;
+ psModSyncOpInfo->psKernelSyncInfo = IMG_NULL;
-
- PVRSRVScheduleDeviceCallbacks();
+
+ PVRSRVScheduleDeviceCallbacks();
- return 0;
+ return 0;
}
static IMG_INT
-PVRSRVSyncOpsTakeTokenBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_SYNC_OPS_TAKE_TOKEN *psSyncOpsTakeTokenIN,
- PVRSRV_BRIDGE_OUT_SYNC_OPS_TAKE_TOKEN *psSyncOpsTakeTokenOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+PVRSRVSyncOpsTakeTokenBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_SYNC_OPS_TAKE_TOKEN *psSyncOpsTakeTokenIN,
+ PVRSRV_BRIDGE_OUT_SYNC_OPS_TAKE_TOKEN *psSyncOpsTakeTokenOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
+ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SYNC_OPS_TAKE_TOKEN);
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SYNC_OPS_TAKE_TOKEN);
- psSyncOpsTakeTokenOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
- (IMG_VOID**)&psKernelSyncInfo,
- psSyncOpsTakeTokenIN->hKernelSyncInfo,
- PVRSRV_HANDLE_TYPE_SYNC_INFO);
- if (psSyncOpsTakeTokenOUT->eError != PVRSRV_OK)
- {
- PVR_DPF((PVR_DBG_ERROR, "PVRSRVSyncOpsTakeTokenBW: PVRSRVLookupHandle failed"));
- return 0;
- }
+ psSyncOpsTakeTokenOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ (IMG_VOID**)&psKernelSyncInfo,
+ psSyncOpsTakeTokenIN->hKernelSyncInfo,
+ PVRSRV_HANDLE_TYPE_SYNC_INFO);
+ if (psSyncOpsTakeTokenOUT->eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVSyncOpsTakeTokenBW: PVRSRVLookupHandle failed"));
+ return 0;
+ }
-
+
- psSyncOpsTakeTokenOUT->ui32ReadOpsPending = psKernelSyncInfo->psSyncData->ui32ReadOpsPending;
- psSyncOpsTakeTokenOUT->ui32WriteOpsPending = psKernelSyncInfo->psSyncData->ui32WriteOpsPending;
+ psSyncOpsTakeTokenOUT->ui32ReadOpsPending = psKernelSyncInfo->psSyncData->ui32ReadOpsPending;
+ psSyncOpsTakeTokenOUT->ui32WriteOpsPending = psKernelSyncInfo->psSyncData->ui32WriteOpsPending;
- return 0;
+ return 0;
}
static IMG_INT
PVRSRVSyncOpsFlushToTokenBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_SYNC_OPS_FLUSH_TO_TOKEN *psSyncOpsFlushToTokenIN,
- PVRSRV_BRIDGE_RETURN *psSyncOpsFlushToTokenOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_SYNC_OPS_FLUSH_TO_TOKEN *psSyncOpsFlushToTokenIN,
+ PVRSRV_BRIDGE_RETURN *psSyncOpsFlushToTokenOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
- IMG_UINT32 ui32ReadOpsPendingSnapshot;
- IMG_UINT32 ui32WriteOpsPendingSnapshot;
-
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SYNC_OPS_FLUSH_TO_TOKEN);
-
- psSyncOpsFlushToTokenOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
- (IMG_VOID**)&psKernelSyncInfo,
- psSyncOpsFlushToTokenIN->hKernelSyncInfo,
- PVRSRV_HANDLE_TYPE_SYNC_INFO);
- if (psSyncOpsFlushToTokenOUT->eError != PVRSRV_OK)
- {
- PVR_DPF((PVR_DBG_ERROR, "PVRSRVSyncOpsFlushToTokenBW: PVRSRVLookupHandle failed"));
- return 0;
- }
-
- ui32ReadOpsPendingSnapshot = psSyncOpsFlushToTokenIN->ui32ReadOpsPendingSnapshot;
- ui32WriteOpsPendingSnapshot = psSyncOpsFlushToTokenIN->ui32WriteOpsPendingSnapshot;
-
- psSyncOpsFlushToTokenOUT->eError = DoQuerySyncOpsSatisfied(psKernelSyncInfo,
- ui32ReadOpsPendingSnapshot,
+ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
+ IMG_UINT32 ui32ReadOpsPendingSnapshot;
+ IMG_UINT32 ui32WriteOpsPendingSnapshot;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SYNC_OPS_FLUSH_TO_TOKEN);
+
+ psSyncOpsFlushToTokenOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ (IMG_VOID**)&psKernelSyncInfo,
+ psSyncOpsFlushToTokenIN->hKernelSyncInfo,
+ PVRSRV_HANDLE_TYPE_SYNC_INFO);
+ if (psSyncOpsFlushToTokenOUT->eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVSyncOpsFlushToTokenBW: PVRSRVLookupHandle failed"));
+ return 0;
+ }
+
+ ui32ReadOpsPendingSnapshot = psSyncOpsFlushToTokenIN->ui32ReadOpsPendingSnapshot;
+ ui32WriteOpsPendingSnapshot = psSyncOpsFlushToTokenIN->ui32WriteOpsPendingSnapshot;
+
+ psSyncOpsFlushToTokenOUT->eError = DoQuerySyncOpsSatisfied(psKernelSyncInfo,
+ ui32ReadOpsPendingSnapshot,
ui32WriteOpsPendingSnapshot);
- if (psSyncOpsFlushToTokenOUT->eError != PVRSRV_OK && psSyncOpsFlushToTokenOUT->eError != PVRSRV_ERROR_RETRY)
- {
- PVR_DPF((PVR_DBG_ERROR, "PVRSRVSyncOpsFlushToTokenBW: DoQuerySyncOpsSatisfied failed"));
- return 0;
- }
+ if (psSyncOpsFlushToTokenOUT->eError != PVRSRV_OK && psSyncOpsFlushToTokenOUT->eError != PVRSRV_ERROR_RETRY)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVSyncOpsFlushToTokenBW: DoQuerySyncOpsSatisfied failed"));
+ return 0;
+ }
- return 0;
+ return 0;
}
static IMG_INT
PVRSRVSyncOpsFlushToModObjBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_SYNC_OPS_FLUSH_TO_MOD_OBJ *psSyncOpsFlushToModObjIN,
- PVRSRV_BRIDGE_RETURN *psSyncOpsFlushToModObjOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_SYNC_OPS_FLUSH_TO_MOD_OBJ *psSyncOpsFlushToModObjIN,
+ PVRSRV_BRIDGE_RETURN *psSyncOpsFlushToModObjOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- MODIFY_SYNC_OP_INFO *psModSyncOpInfo;
-
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SYNC_OPS_FLUSH_TO_MOD_OBJ);
-
- psSyncOpsFlushToModObjOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
- (IMG_VOID**)&psModSyncOpInfo,
- psSyncOpsFlushToModObjIN->hKernelSyncInfoModObj,
- PVRSRV_HANDLE_TYPE_SYNC_INFO_MOD_OBJ);
- if (psSyncOpsFlushToModObjOUT->eError != PVRSRV_OK)
- {
- PVR_DPF((PVR_DBG_ERROR, "PVRSRVSyncOpsFlushToModObjBW: PVRSRVLookupHandle failed"));
- return 0;
- }
-
- if(psModSyncOpInfo->psKernelSyncInfo == IMG_NULL)
- {
-
- psSyncOpsFlushToModObjOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
- return 0;
- }
-
- psSyncOpsFlushToModObjOUT->eError = DoQuerySyncOpsSatisfied(psModSyncOpInfo->psKernelSyncInfo,
- psModSyncOpInfo->ui32ReadOpsPendingSnapShot,
+ MODIFY_SYNC_OP_INFO *psModSyncOpInfo;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SYNC_OPS_FLUSH_TO_MOD_OBJ);
+
+ psSyncOpsFlushToModObjOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ (IMG_VOID**)&psModSyncOpInfo,
+ psSyncOpsFlushToModObjIN->hKernelSyncInfoModObj,
+ PVRSRV_HANDLE_TYPE_SYNC_INFO_MOD_OBJ);
+ if (psSyncOpsFlushToModObjOUT->eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVSyncOpsFlushToModObjBW: PVRSRVLookupHandle failed"));
+ return 0;
+ }
+
+ if(psModSyncOpInfo->psKernelSyncInfo == IMG_NULL)
+ {
+
+ psSyncOpsFlushToModObjOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+ return 0;
+ }
+
+ psSyncOpsFlushToModObjOUT->eError = DoQuerySyncOpsSatisfied(psModSyncOpInfo->psKernelSyncInfo,
+ psModSyncOpInfo->ui32ReadOpsPendingSnapShot,
psModSyncOpInfo->ui32WriteOpsPendingSnapShot);
- if (psSyncOpsFlushToModObjOUT->eError != PVRSRV_OK && psSyncOpsFlushToModObjOUT->eError != PVRSRV_ERROR_RETRY)
- {
- PVR_DPF((PVR_DBG_ERROR, "PVRSRVSyncOpsFlushToModObjBW: DoQuerySyncOpsSatisfied failed"));
- return 0;
- }
+ if (psSyncOpsFlushToModObjOUT->eError != PVRSRV_OK && psSyncOpsFlushToModObjOUT->eError != PVRSRV_ERROR_RETRY)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVSyncOpsFlushToModObjBW: DoQuerySyncOpsSatisfied failed"));
+ return 0;
+ }
- return 0;
+ return 0;
}
static IMG_INT
PVRSRVSyncOpsFlushToDeltaBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_SYNC_OPS_FLUSH_TO_DELTA *psSyncOpsFlushToDeltaIN,
- PVRSRV_BRIDGE_RETURN *psSyncOpsFlushToDeltaOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_SYNC_OPS_FLUSH_TO_DELTA *psSyncOpsFlushToDeltaIN,
+ PVRSRV_BRIDGE_RETURN *psSyncOpsFlushToDeltaOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- PVRSRV_KERNEL_SYNC_INFO *psSyncInfo;
- IMG_UINT32 ui32DeltaRead;
- IMG_UINT32 ui32DeltaWrite;
-
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SYNC_OPS_FLUSH_TO_DELTA);
-
- psSyncOpsFlushToDeltaOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
- (IMG_VOID**)&psSyncInfo,
- psSyncOpsFlushToDeltaIN->hKernelSyncInfo,
- PVRSRV_HANDLE_TYPE_SYNC_INFO);
- if (psSyncOpsFlushToDeltaOUT->eError != PVRSRV_OK)
- {
- PVR_DPF((PVR_DBG_ERROR, "PVRSRVSyncOpsFlushToDeltaBW: PVRSRVLookupHandle failed"));
- return 0;
- }
-
-
- ui32DeltaRead = psSyncInfo->psSyncData->ui32ReadOpsPending - psSyncInfo->psSyncData->ui32ReadOpsComplete;
- ui32DeltaWrite = psSyncInfo->psSyncData->ui32WriteOpsPending - psSyncInfo->psSyncData->ui32WriteOpsComplete;
-
- if (ui32DeltaRead <= psSyncOpsFlushToDeltaIN->ui32Delta && ui32DeltaWrite <= psSyncOpsFlushToDeltaIN->ui32Delta)
- {
+ PVRSRV_KERNEL_SYNC_INFO *psSyncInfo;
+ IMG_UINT32 ui32DeltaRead;
+ IMG_UINT32 ui32DeltaWrite;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SYNC_OPS_FLUSH_TO_DELTA);
+
+ psSyncOpsFlushToDeltaOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ (IMG_VOID**)&psSyncInfo,
+ psSyncOpsFlushToDeltaIN->hKernelSyncInfo,
+ PVRSRV_HANDLE_TYPE_SYNC_INFO);
+ if (psSyncOpsFlushToDeltaOUT->eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVSyncOpsFlushToDeltaBW: PVRSRVLookupHandle failed"));
+ return 0;
+ }
+
+
+ ui32DeltaRead = psSyncInfo->psSyncData->ui32ReadOpsPending - psSyncInfo->psSyncData->ui32ReadOpsComplete;
+ ui32DeltaWrite = psSyncInfo->psSyncData->ui32WriteOpsPending - psSyncInfo->psSyncData->ui32WriteOpsComplete;
+
+ if (ui32DeltaRead <= psSyncOpsFlushToDeltaIN->ui32Delta && ui32DeltaWrite <= psSyncOpsFlushToDeltaIN->ui32Delta)
+ {
#if defined(PDUMP) && !defined(SUPPORT_VGX)
-
- PDumpComment("Poll for read ops complete to delta (%u)",
- psSyncOpsFlushToDeltaIN->ui32Delta);
- psSyncOpsFlushToDeltaOUT->eError =
- PDumpMemPolKM(psSyncInfo->psSyncDataMemInfoKM,
- offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete),
- psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
- 0xFFFFFFFF,
- PDUMP_POLL_OPERATOR_GREATEREQUAL,
- 0,
- MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
-
-
- PDumpComment("Poll for write ops complete to delta (%u)",
- psSyncOpsFlushToDeltaIN->ui32Delta);
- psSyncOpsFlushToDeltaOUT->eError =
- PDumpMemPolKM(psSyncInfo->psSyncDataMemInfoKM,
- offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete),
- psSyncInfo->psSyncData->ui32LastOpDumpVal,
- 0xFFFFFFFF,
- PDUMP_POLL_OPERATOR_GREATEREQUAL,
- 0,
- MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
+
+ PDumpComment("Poll for read ops complete to delta (%u)",
+ psSyncOpsFlushToDeltaIN->ui32Delta);
+ psSyncOpsFlushToDeltaOUT->eError =
+ PDumpMemPolKM(psSyncInfo->psSyncDataMemInfoKM,
+ offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete),
+ psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
+ 0xFFFFFFFF,
+ PDUMP_POLL_OPERATOR_GREATEREQUAL,
+ 0,
+ MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
+
+
+ PDumpComment("Poll for write ops complete to delta (%u)",
+ psSyncOpsFlushToDeltaIN->ui32Delta);
+ psSyncOpsFlushToDeltaOUT->eError =
+ PDumpMemPolKM(psSyncInfo->psSyncDataMemInfoKM,
+ offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete),
+ psSyncInfo->psSyncData->ui32LastOpDumpVal,
+ 0xFFFFFFFF,
+ PDUMP_POLL_OPERATOR_GREATEREQUAL,
+ 0,
+ MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
#endif
- psSyncOpsFlushToDeltaOUT->eError = PVRSRV_OK;
- }
- else
- {
- psSyncOpsFlushToDeltaOUT->eError = PVRSRV_ERROR_RETRY;
- }
+ psSyncOpsFlushToDeltaOUT->eError = PVRSRV_OK;
+ }
+ else
+ {
+ psSyncOpsFlushToDeltaOUT->eError = PVRSRV_ERROR_RETRY;
+ }
- return 0;
+ return 0;
}
static PVRSRV_ERROR
-FreeSyncInfoCallback(IMG_PVOID pvParam,
+FreeSyncInfoCallback(IMG_PVOID pvParam,
IMG_UINT32 ui32Param,
IMG_BOOL bDummy)
{
- PVRSRV_KERNEL_SYNC_INFO *psSyncInfo;
- PVRSRV_ERROR eError;
+ PVRSRV_KERNEL_SYNC_INFO *psSyncInfo;
+ PVRSRV_ERROR eError;
- PVR_UNREFERENCED_PARAMETER(ui32Param);
+ PVR_UNREFERENCED_PARAMETER(ui32Param);
PVR_UNREFERENCED_PARAMETER(bDummy);
- psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)pvParam;
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)pvParam;
- eError = PVRSRVFreeSyncInfoKM(psSyncInfo);
- if (eError != PVRSRV_OK)
- {
- return eError;
- }
+ eError = PVRSRVFreeSyncInfoKM(psSyncInfo);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
- return PVRSRV_OK;
+ return PVRSRV_OK;
}
static IMG_INT
PVRSRVAllocSyncInfoBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_ALLOC_SYNC_INFO *psAllocSyncInfoIN,
- PVRSRV_BRIDGE_OUT_ALLOC_SYNC_INFO *psAllocSyncInfoOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_ALLOC_SYNC_INFO *psAllocSyncInfoIN,
+ PVRSRV_BRIDGE_OUT_ALLOC_SYNC_INFO *psAllocSyncInfoOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- PVRSRV_KERNEL_SYNC_INFO *psSyncInfo;
- PVRSRV_ERROR eError;
- PVRSRV_DEVICE_NODE *psDeviceNode;
- IMG_HANDLE hDevMemContext;
-
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ALLOC_SYNC_INFO);
-
- NEW_HANDLE_BATCH_OR_ERROR(psAllocSyncInfoOUT->eError, psPerProc, 1)
-
- eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
- (IMG_HANDLE *)&psDeviceNode,
- psAllocSyncInfoIN->hDevCookie,
- PVRSRV_HANDLE_TYPE_DEV_NODE);
- if(eError != PVRSRV_OK)
- {
- goto allocsyncinfo_errorexit;
- }
-
- hDevMemContext = psDeviceNode->sDevMemoryInfo.pBMKernelContext;
-
- eError = PVRSRVAllocSyncInfoKM(psDeviceNode,
- hDevMemContext,
- &psSyncInfo);
-
- if (eError != PVRSRV_OK)
- {
- goto allocsyncinfo_errorexit;
- }
-
- eError = PVRSRVAllocHandle(psPerProc->psHandleBase,
- &psAllocSyncInfoOUT->hKernelSyncInfo,
- psSyncInfo,
- PVRSRV_HANDLE_TYPE_SYNC_INFO,
- PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE);
-
- if(eError != PVRSRV_OK)
- {
- goto allocsyncinfo_errorexit_freesyncinfo;
- }
-
- psSyncInfo->hResItem = ResManRegisterRes(psPerProc->hResManContext,
- RESMAN_TYPE_SYNC_INFO,
- psSyncInfo,
- 0,
- FreeSyncInfoCallback);
-
-
- goto allocsyncinfo_commit;
-
-
+ PVRSRV_KERNEL_SYNC_INFO *psSyncInfo;
+ PVRSRV_ERROR eError;
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ IMG_HANDLE hDevMemContext;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ALLOC_SYNC_INFO);
+
+ NEW_HANDLE_BATCH_OR_ERROR(psAllocSyncInfoOUT->eError, psPerProc, 1)
+
+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ (IMG_HANDLE *)&psDeviceNode,
+ psAllocSyncInfoIN->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE);
+ if(eError != PVRSRV_OK)
+ {
+ goto allocsyncinfo_errorexit;
+ }
+
+ hDevMemContext = psDeviceNode->sDevMemoryInfo.pBMKernelContext;
+
+ eError = PVRSRVAllocSyncInfoKM(psDeviceNode,
+ hDevMemContext,
+ &psSyncInfo);
+
+ if (eError != PVRSRV_OK)
+ {
+ goto allocsyncinfo_errorexit;
+ }
+
+ eError = PVRSRVAllocHandle(psPerProc->psHandleBase,
+ &psAllocSyncInfoOUT->hKernelSyncInfo,
+ psSyncInfo,
+ PVRSRV_HANDLE_TYPE_SYNC_INFO,
+ PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE);
+
+ if(eError != PVRSRV_OK)
+ {
+ goto allocsyncinfo_errorexit_freesyncinfo;
+ }
+
+ psSyncInfo->hResItem = ResManRegisterRes(psPerProc->hResManContext,
+ RESMAN_TYPE_SYNC_INFO,
+ psSyncInfo,
+ 0,
+ FreeSyncInfoCallback);
+
+
+ goto allocsyncinfo_commit;
+
+
allocsyncinfo_errorexit_freesyncinfo:
- PVRSRVFreeSyncInfoKM(psSyncInfo);
+ PVRSRVFreeSyncInfoKM(psSyncInfo);
allocsyncinfo_errorexit:
-
+
allocsyncinfo_commit:
- psAllocSyncInfoOUT->eError = eError;
- COMMIT_HANDLE_BATCH_OR_ERROR(eError, psPerProc);
+ psAllocSyncInfoOUT->eError = eError;
+ COMMIT_HANDLE_BATCH_OR_ERROR(eError, psPerProc);
- return 0;
+ return 0;
}
static IMG_INT
PVRSRVFreeSyncInfoBW(IMG_UINT32 ui32BridgeID,
- PVRSRV_BRIDGE_IN_FREE_SYNC_INFO *psFreeSyncInfoIN,
- PVRSRV_BRIDGE_RETURN *psFreeSyncInfoOUT,
- PVRSRV_PER_PROCESS_DATA *psPerProc)
+ PVRSRV_BRIDGE_IN_FREE_SYNC_INFO *psFreeSyncInfoIN,
+ PVRSRV_BRIDGE_RETURN *psFreeSyncInfoOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
{
- PVRSRV_KERNEL_SYNC_INFO *psSyncInfo;
- PVRSRV_ERROR eError;
-
- PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_FREE_SYNC_INFO);
-
- eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
- (IMG_VOID**)&psSyncInfo,
- psFreeSyncInfoIN->hKernelSyncInfo,
- PVRSRV_HANDLE_TYPE_SYNC_INFO);
- if (eError != PVRSRV_OK)
- {
- PVR_DPF((PVR_DBG_ERROR, "PVRSRVFreeSyncInfoBW: PVRSRVLookupHandle failed"));
- psFreeSyncInfoOUT->eError = eError;
- return 0;
- }
-
- eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
- psFreeSyncInfoIN->hKernelSyncInfo,
- PVRSRV_HANDLE_TYPE_SYNC_INFO);
-
- if (eError != PVRSRV_OK)
- {
- PVR_DPF((PVR_DBG_ERROR, "PVRSRVFreeSyncInfoBW: PVRSRVReleaseHandle failed"));
- psFreeSyncInfoOUT->eError = eError;
- return 0;
- }
+ PVRSRV_KERNEL_SYNC_INFO *psSyncInfo;
+ PVRSRV_ERROR eError;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_FREE_SYNC_INFO);
+
+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ (IMG_VOID**)&psSyncInfo,
+ psFreeSyncInfoIN->hKernelSyncInfo,
+ PVRSRV_HANDLE_TYPE_SYNC_INFO);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVFreeSyncInfoBW: PVRSRVLookupHandle failed"));
+ psFreeSyncInfoOUT->eError = eError;
+ return 0;
+ }
+
+ eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
+ psFreeSyncInfoIN->hKernelSyncInfo,
+ PVRSRV_HANDLE_TYPE_SYNC_INFO);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVFreeSyncInfoBW: PVRSRVReleaseHandle failed"));
+ psFreeSyncInfoOUT->eError = eError;
+ return 0;
+ }
eError = ResManFreeResByPtr(psSyncInfo->hResItem, CLEANUP_WITH_POLL);
- if (eError != PVRSRV_OK)
- {
- PVR_DPF((PVR_DBG_ERROR, "PVRSRVFreeSyncInfoBW: ResManFreeResByPtr failed"));
- psFreeSyncInfoOUT->eError = eError;
- return 0;
- }
-
- return 0;
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVFreeSyncInfoBW: ResManFreeResByPtr failed"));
+ psFreeSyncInfoOUT->eError = eError;
+ return 0;
+ }
+
+ return 0;
}
PVRSRV_ERROR
CommonBridgeInit(IMG_VOID)
{
- IMG_UINT32 i;
-
- SetDispatchTableEntry(PVRSRV_BRIDGE_ENUM_DEVICES, PVRSRVEnumerateDevicesBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_ACQUIRE_DEVICEINFO, PVRSRVAcquireDeviceDataBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_RELEASE_DEVICEINFO, DummyBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_CREATE_DEVMEMCONTEXT, PVRSRVCreateDeviceMemContextBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_DESTROY_DEVMEMCONTEXT, PVRSRVDestroyDeviceMemContextBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DEVMEM_HEAPINFO, PVRSRVGetDeviceMemHeapInfoBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_ALLOC_DEVICEMEM, PVRSRVAllocDeviceMemBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_FREE_DEVICEMEM, PVRSRVFreeDeviceMemBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_GETFREE_DEVICEMEM, PVRSRVGetFreeDeviceMemBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_CREATE_COMMANDQUEUE, DummyBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_DESTROY_COMMANDQUEUE, DummyBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_MHANDLE_TO_MMAP_DATA, PVRMMapOSMemHandleToMMapDataBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_CONNECT_SERVICES, PVRSRVConnectBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_DISCONNECT_SERVICES, PVRSRVDisconnectBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_WRAP_DEVICE_MEM, DummyBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DEVICEMEMINFO, DummyBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_RESERVE_DEV_VIRTMEM , DummyBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_FREE_DEV_VIRTMEM, DummyBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_EXT_MEMORY, DummyBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAP_EXT_MEMORY, DummyBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_DEV_MEMORY, PVRSRVMapDeviceMemoryBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAP_DEV_MEMORY, PVRSRVUnmapDeviceMemoryBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY, PVRSRVMapDeviceClassMemoryBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAP_DEVICECLASS_MEMORY, PVRSRVUnmapDeviceClassMemoryBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_MEM_INFO_TO_USER, DummyBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAP_MEM_INFO_FROM_USER, DummyBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_EXPORT_DEVICEMEM, PVRSRVExportDeviceMemBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_RELEASE_MMAP_DATA, PVRMMapReleaseMMapDataBW);
+ IMG_UINT32 i;
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_ENUM_DEVICES, PVRSRVEnumerateDevicesBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_ACQUIRE_DEVICEINFO, PVRSRVAcquireDeviceDataBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RELEASE_DEVICEINFO, DummyBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_CREATE_DEVMEMCONTEXT, PVRSRVCreateDeviceMemContextBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DESTROY_DEVMEMCONTEXT, PVRSRVDestroyDeviceMemContextBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DEVMEM_HEAPINFO, PVRSRVGetDeviceMemHeapInfoBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_ALLOC_DEVICEMEM, PVRSRVAllocDeviceMemBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_FREE_DEVICEMEM, PVRSRVFreeDeviceMemBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_GETFREE_DEVICEMEM, PVRSRVGetFreeDeviceMemBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_CREATE_COMMANDQUEUE, DummyBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DESTROY_COMMANDQUEUE, DummyBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MHANDLE_TO_MMAP_DATA, PVRMMapOSMemHandleToMMapDataBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_CONNECT_SERVICES, PVRSRVConnectBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DISCONNECT_SERVICES, PVRSRVDisconnectBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_WRAP_DEVICE_MEM, DummyBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DEVICEMEMINFO, DummyBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RESERVE_DEV_VIRTMEM , DummyBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_FREE_DEV_VIRTMEM, DummyBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_EXT_MEMORY, DummyBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAP_EXT_MEMORY, DummyBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_DEV_MEMORY, PVRSRVMapDeviceMemoryBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAP_DEV_MEMORY, PVRSRVUnmapDeviceMemoryBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY, PVRSRVMapDeviceClassMemoryBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAP_DEVICECLASS_MEMORY, PVRSRVUnmapDeviceClassMemoryBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_MEM_INFO_TO_USER, DummyBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAP_MEM_INFO_FROM_USER, DummyBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_EXPORT_DEVICEMEM, PVRSRVExportDeviceMemBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RELEASE_MMAP_DATA, PVRMMapReleaseMMapDataBW);
SetDispatchTableEntry(PVRSRV_BRIDGE_CHG_DEV_MEM_ATTRIBS, PVRSRVChangeDeviceMemoryAttributesBW);
SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_DEV_MEMORY_2, PVRSRVMapDeviceMemoryBW);
SetDispatchTableEntry(PVRSRV_BRIDGE_EXPORT_DEVICEMEM_2, PVRSRVExportDeviceMemBW);
-
- SetDispatchTableEntry(PVRSRV_BRIDGE_PROCESS_SIMISR_EVENT, DummyBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_REGISTER_SIM_PROCESS, DummyBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_UNREGISTER_SIM_PROCESS, DummyBW);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PROCESS_SIMISR_EVENT, DummyBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_REGISTER_SIM_PROCESS, DummyBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_UNREGISTER_SIM_PROCESS, DummyBW);
-
- SetDispatchTableEntry(PVRSRV_BRIDGE_MAPPHYSTOUSERSPACE, DummyBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAPPHYSTOUSERSPACE, DummyBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_GETPHYSTOUSERSPACEMAP, DummyBW);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MAPPHYSTOUSERSPACE, DummyBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAPPHYSTOUSERSPACE, DummyBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_GETPHYSTOUSERSPACEMAP, DummyBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_GET_FB_STATS, DummyBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_FB_STATS, DummyBW);
-
- SetDispatchTableEntry(PVRSRV_BRIDGE_GET_MISC_INFO, PVRSRVGetMiscInfoBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_RELEASE_MISC_INFO, DummyBW);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_MISC_INFO, PVRSRVGetMiscInfoBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RELEASE_MISC_INFO, DummyBW);
-
+
#if defined (SUPPORT_OVERLAY_ROTATE_BLIT)
- SetDispatchTableEntry(PVRSRV_BRIDGE_INIT_3D_OVL_BLT_RES, DummyBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_DEINIT_3D_OVL_BLT_RES, DummyBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_INIT_3D_OVL_BLT_RES, DummyBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DEINIT_3D_OVL_BLT_RES, DummyBW);
#endif
-
+
#if defined(PDUMP)
- SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_INIT, DummyBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_MEMPOL, PDumpMemPolBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPMEM, PDumpMemBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_REG, PDumpRegWithFlagsBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_REGPOL, PDumpRegPolBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_COMMENT, PDumpCommentBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_SETFRAME, PDumpSetFrameBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_ISCAPTURING, PDumpIsCaptureFrameBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPBITMAP, PDumpBitmapBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPREADREG, PDumpReadRegBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_SYNCPOL, PDumpSyncPolBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPSYNC, PDumpSyncDumpBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_MEMPAGES, PDumpMemPagesBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DRIVERINFO, PDumpDriverInfoBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPPDDEVPADDR, PDumpPDDevPAddrBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_CYCLE_COUNT_REG_READ, PDumpCycleCountRegReadBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_STARTINITPHASE, PDumpStartInitPhaseBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_STOPINITPHASE, PDumpStopInitPhaseBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_INIT, DummyBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_MEMPOL, PDumpMemPolBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPMEM, PDumpMemBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_REG, PDumpRegWithFlagsBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_REGPOL, PDumpRegPolBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_COMMENT, PDumpCommentBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_SETFRAME, PDumpSetFrameBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_ISCAPTURING, PDumpIsCaptureFrameBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPBITMAP, PDumpBitmapBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPREADREG, PDumpReadRegBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_SYNCPOL, PDumpSyncPolBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPSYNC, PDumpSyncDumpBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_MEMPAGES, PDumpMemPagesBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DRIVERINFO, PDumpDriverInfoBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPPDDEVPADDR, PDumpPDDevPAddrBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_CYCLE_COUNT_REG_READ, PDumpCycleCountRegReadBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_STARTINITPHASE, PDumpStartInitPhaseBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_STOPINITPHASE, PDumpStopInitPhaseBW);
#endif
-
- SetDispatchTableEntry(PVRSRV_BRIDGE_GET_OEMJTABLE, DummyBW);
-
-
- SetDispatchTableEntry(PVRSRV_BRIDGE_ENUM_CLASS, PVRSRVEnumerateDCBW);
-
-
- SetDispatchTableEntry(PVRSRV_BRIDGE_OPEN_DISPCLASS_DEVICE, PVRSRVOpenDCDeviceBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_CLOSE_DISPCLASS_DEVICE, PVRSRVCloseDCDeviceBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_ENUM_DISPCLASS_FORMATS, PVRSRVEnumDCFormatsBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_ENUM_DISPCLASS_DIMS, PVRSRVEnumDCDimsBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DISPCLASS_SYSBUFFER, PVRSRVGetDCSystemBufferBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DISPCLASS_INFO, PVRSRVGetDCInfoBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_CREATE_DISPCLASS_SWAPCHAIN, PVRSRVCreateDCSwapChainBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_DESTROY_DISPCLASS_SWAPCHAIN, PVRSRVDestroyDCSwapChainBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_SET_DISPCLASS_DSTRECT, PVRSRVSetDCDstRectBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_SET_DISPCLASS_SRCRECT, PVRSRVSetDCSrcRectBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_SET_DISPCLASS_DSTCOLOURKEY, PVRSRVSetDCDstColourKeyBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_SET_DISPCLASS_SRCCOLOURKEY, PVRSRVSetDCSrcColourKeyBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DISPCLASS_BUFFERS, PVRSRVGetDCBuffersBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_BUFFER, PVRSRVSwapToDCBufferBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_SYSTEM, PVRSRVSwapToDCSystemBW);
-
-
- SetDispatchTableEntry(PVRSRV_BRIDGE_OPEN_BUFFERCLASS_DEVICE, PVRSRVOpenBCDeviceBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_CLOSE_BUFFERCLASS_DEVICE, PVRSRVCloseBCDeviceBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_GET_BUFFERCLASS_INFO, PVRSRVGetBCInfoBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_GET_BUFFERCLASS_BUFFER, PVRSRVGetBCBufferBW);
-
-
- SetDispatchTableEntry(PVRSRV_BRIDGE_WRAP_EXT_MEMORY, PVRSRVWrapExtMemoryBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_UNWRAP_EXT_MEMORY, PVRSRVUnwrapExtMemoryBW);
-
-
- SetDispatchTableEntry(PVRSRV_BRIDGE_ALLOC_SHARED_SYS_MEM, PVRSRVAllocSharedSysMemoryBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_FREE_SHARED_SYS_MEM, PVRSRVFreeSharedSysMemoryBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_MEMINFO_MEM, PVRSRVMapMemInfoMemBW);
-
-
- SetDispatchTableEntry(PVRSRV_BRIDGE_GETMMU_PD_DEVPADDR, MMU_GetPDDevPAddrBW);
-
-
- SetDispatchTableEntry(PVRSRV_BRIDGE_INITSRV_CONNECT, &PVRSRVInitSrvConnectBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_INITSRV_DISCONNECT, &PVRSRVInitSrvDisconnectBW);
-
-
- SetDispatchTableEntry(PVRSRV_BRIDGE_EVENT_OBJECT_WAIT, &PVRSRVEventObjectWaitBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_EVENT_OBJECT_OPEN, &PVRSRVEventObjectOpenBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_EVENT_OBJECT_CLOSE, &PVRSRVEventObjectCloseBW);
-
- SetDispatchTableEntry(PVRSRV_BRIDGE_CREATE_SYNC_INFO_MOD_OBJ, PVRSRVCreateSyncInfoModObjBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_DESTROY_SYNC_INFO_MOD_OBJ, PVRSRVDestroySyncInfoModObjBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_MODIFY_PENDING_SYNC_OPS, PVRSRVModifyPendingSyncOpsBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_MODIFY_COMPLETE_SYNC_OPS, PVRSRVModifyCompleteSyncOpsBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC_OPS_TAKE_TOKEN, PVRSRVSyncOpsTakeTokenBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC_OPS_FLUSH_TO_TOKEN, PVRSRVSyncOpsFlushToTokenBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC_OPS_FLUSH_TO_MOD_OBJ, PVRSRVSyncOpsFlushToModObjBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC_OPS_FLUSH_TO_DELTA, PVRSRVSyncOpsFlushToDeltaBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_ALLOC_SYNC_INFO, PVRSRVAllocSyncInfoBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_FREE_SYNC_INFO, PVRSRVFreeSyncInfoBW);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_OEMJTABLE, DummyBW);
+
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_ENUM_CLASS, PVRSRVEnumerateDCBW);
+
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_OPEN_DISPCLASS_DEVICE, PVRSRVOpenDCDeviceBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_CLOSE_DISPCLASS_DEVICE, PVRSRVCloseDCDeviceBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_ENUM_DISPCLASS_FORMATS, PVRSRVEnumDCFormatsBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_ENUM_DISPCLASS_DIMS, PVRSRVEnumDCDimsBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DISPCLASS_SYSBUFFER, PVRSRVGetDCSystemBufferBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DISPCLASS_INFO, PVRSRVGetDCInfoBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_CREATE_DISPCLASS_SWAPCHAIN, PVRSRVCreateDCSwapChainBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DESTROY_DISPCLASS_SWAPCHAIN, PVRSRVDestroyDCSwapChainBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SET_DISPCLASS_DSTRECT, PVRSRVSetDCDstRectBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SET_DISPCLASS_SRCRECT, PVRSRVSetDCSrcRectBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SET_DISPCLASS_DSTCOLOURKEY, PVRSRVSetDCDstColourKeyBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SET_DISPCLASS_SRCCOLOURKEY, PVRSRVSetDCSrcColourKeyBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DISPCLASS_BUFFERS, PVRSRVGetDCBuffersBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_BUFFER, PVRSRVSwapToDCBufferBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_SYSTEM, PVRSRVSwapToDCSystemBW);
+
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_OPEN_BUFFERCLASS_DEVICE, PVRSRVOpenBCDeviceBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_CLOSE_BUFFERCLASS_DEVICE, PVRSRVCloseBCDeviceBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_BUFFERCLASS_INFO, PVRSRVGetBCInfoBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_BUFFERCLASS_BUFFER, PVRSRVGetBCBufferBW);
+
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_WRAP_EXT_MEMORY, PVRSRVWrapExtMemoryBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_UNWRAP_EXT_MEMORY, PVRSRVUnwrapExtMemoryBW);
+
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_ALLOC_SHARED_SYS_MEM, PVRSRVAllocSharedSysMemoryBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_FREE_SHARED_SYS_MEM, PVRSRVFreeSharedSysMemoryBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_MEMINFO_MEM, PVRSRVMapMemInfoMemBW);
+
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_INITSRV_CONNECT, &PVRSRVInitSrvConnectBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_INITSRV_DISCONNECT, &PVRSRVInitSrvDisconnectBW);
+
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_EVENT_OBJECT_WAIT, &PVRSRVEventObjectWaitBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_EVENT_OBJECT_OPEN, &PVRSRVEventObjectOpenBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_EVENT_OBJECT_CLOSE, &PVRSRVEventObjectCloseBW);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_CREATE_SYNC_INFO_MOD_OBJ, PVRSRVCreateSyncInfoModObjBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DESTROY_SYNC_INFO_MOD_OBJ, PVRSRVDestroySyncInfoModObjBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MODIFY_PENDING_SYNC_OPS, PVRSRVModifyPendingSyncOpsBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MODIFY_COMPLETE_SYNC_OPS, PVRSRVModifyCompleteSyncOpsBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC_OPS_TAKE_TOKEN, PVRSRVSyncOpsTakeTokenBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC_OPS_FLUSH_TO_TOKEN, PVRSRVSyncOpsFlushToTokenBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC_OPS_FLUSH_TO_MOD_OBJ, PVRSRVSyncOpsFlushToModObjBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC_OPS_FLUSH_TO_DELTA, PVRSRVSyncOpsFlushToDeltaBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_ALLOC_SYNC_INFO, PVRSRVAllocSyncInfoBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_FREE_SYNC_INFO, PVRSRVFreeSyncInfoBW);
#if defined (SUPPORT_SGX)
- SetSGXDispatchTableEntry();
+ SetSGXDispatchTableEntry();
#endif
#if defined (SUPPORT_VGX)
- SetVGXDispatchTableEntry();
+ SetVGXDispatchTableEntry();
#endif
#if defined (SUPPORT_MSVDX)
- SetMSVDXDispatchTableEntry();
+ SetMSVDXDispatchTableEntry();
#endif
-
-
- for(i=0;i<BRIDGE_DISPATCH_TABLE_ENTRY_COUNT;i++)
- {
- if(!g_BridgeDispatchTable[i].pfFunction)
- {
- g_BridgeDispatchTable[i].pfFunction = &DummyBW;
+
+
+ for(i=0;i<BRIDGE_DISPATCH_TABLE_ENTRY_COUNT;i++)
+ {
+ if(!g_BridgeDispatchTable[i].pfFunction)
+ {
+ g_BridgeDispatchTable[i].pfFunction = &DummyBW;
#if defined(DEBUG_BRIDGE_KM)
- g_BridgeDispatchTable[i].pszIOCName = "_PVRSRV_BRIDGE_DUMMY";
- g_BridgeDispatchTable[i].pszFunctionName = "DummyBW";
- g_BridgeDispatchTable[i].ui32CallCount = 0;
- g_BridgeDispatchTable[i].ui32CopyFromUserTotalBytes = 0;
- g_BridgeDispatchTable[i].ui32CopyToUserTotalBytes = 0;
+ g_BridgeDispatchTable[i].pszIOCName = "_PVRSRV_BRIDGE_DUMMY";
+ g_BridgeDispatchTable[i].pszFunctionName = "DummyBW";
+ g_BridgeDispatchTable[i].ui32CallCount = 0;
+ g_BridgeDispatchTable[i].ui32CopyFromUserTotalBytes = 0;
+ g_BridgeDispatchTable[i].ui32CopyToUserTotalBytes = 0;
#endif
- }
- }
+ }
+ }
- return PVRSRV_OK;
+ return PVRSRV_OK;
}
IMG_INT BridgedDispatchKM(PVRSRV_PER_PROCESS_DATA * psPerProc,
- PVRSRV_BRIDGE_PACKAGE * psBridgePackageKM)
+ PVRSRV_BRIDGE_PACKAGE * psBridgePackageKM)
{
- IMG_VOID * psBridgeIn;
- IMG_VOID * psBridgeOut;
- BridgeWrapperFunction pfBridgeHandler;
- IMG_UINT32 ui32BridgeID = psBridgePackageKM->ui32BridgeID;
- IMG_INT err = -EFAULT;
+ IMG_VOID * psBridgeIn;
+ IMG_VOID * psBridgeOut;
+ BridgeWrapperFunction pfBridgeHandler;
+ IMG_UINT32 ui32BridgeID = psBridgePackageKM->ui32BridgeID;
+ IMG_INT err = -EFAULT;
#if defined(DEBUG_TRACE_BRIDGE_KM)
- PVR_DPF((PVR_DBG_ERROR, "%s: %s",
- __FUNCTION__,
- g_BridgeDispatchTable[ui32BridgeID].pszIOCName));
+ PVR_DPF((PVR_DBG_ERROR, "%s: %s",
+ __FUNCTION__,
+ g_BridgeDispatchTable[ui32BridgeID].pszIOCName));
#endif
#if defined(DEBUG_BRIDGE_KM)
- g_BridgeDispatchTable[ui32BridgeID].ui32CallCount++;
- g_BridgeGlobalStats.ui32IOCTLCount++;
+ g_BridgeDispatchTable[ui32BridgeID].ui32CallCount++;
+ g_BridgeGlobalStats.ui32IOCTLCount++;
#endif
- if(!psPerProc->bInitProcess)
- {
- if(PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_RAN))
- {
- if(!PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_SUCCESSFUL))
- {
- PVR_DPF((PVR_DBG_ERROR, "%s: Initialisation failed. Driver unusable.",
- __FUNCTION__));
- goto return_fault;
- }
- }
- else
- {
- if(PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_RUNNING))
- {
- PVR_DPF((PVR_DBG_ERROR, "%s: Initialisation is in progress",
- __FUNCTION__));
- goto return_fault;
- }
- else
- {
-
- switch(ui32BridgeID)
- {
- case PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_CONNECT_SERVICES):
- case PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_DISCONNECT_SERVICES):
- case PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_INITSRV_CONNECT):
- case PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_INITSRV_DISCONNECT):
- break;
- default:
- PVR_DPF((PVR_DBG_ERROR, "%s: Driver initialisation not completed yet.",
- __FUNCTION__));
- goto return_fault;
- }
- }
- }
- }
+ if(!psPerProc->bInitProcess)
+ {
+ if(PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_RAN))
+ {
+ if(!PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_SUCCESSFUL))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Initialisation failed. Driver unusable.",
+ __FUNCTION__));
+ goto return_fault;
+ }
+ }
+ else
+ {
+ if(PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_RUNNING))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Initialisation is in progress",
+ __FUNCTION__));
+ goto return_fault;
+ }
+ else
+ {
+
+ switch(ui32BridgeID)
+ {
+ case PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_CONNECT_SERVICES):
+ case PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_DISCONNECT_SERVICES):
+ case PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_INITSRV_CONNECT):
+ case PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_INITSRV_DISCONNECT):
+ break;
+ default:
+ PVR_DPF((PVR_DBG_ERROR, "%s: Driver initialisation not completed yet.",
+ __FUNCTION__));
+ goto return_fault;
+ }
+ }
+ }
+ }
#if defined(__linux__)
- {
-
- SYS_DATA *psSysData;
+ {
+
+ SYS_DATA *psSysData;
- SysAcquireData(&psSysData);
+ SysAcquireData(&psSysData);
-
- psBridgeIn = ((ENV_DATA *)psSysData->pvEnvSpecificData)->pvBridgeData;
- psBridgeOut = (IMG_PVOID)((IMG_PBYTE)psBridgeIn + PVRSRV_MAX_BRIDGE_IN_SIZE);
+
+ psBridgeIn = ((ENV_DATA *)psSysData->pvEnvSpecificData)->pvBridgeData;
+ psBridgeOut = (IMG_PVOID)((IMG_PBYTE)psBridgeIn + PVRSRV_MAX_BRIDGE_IN_SIZE);
if((psBridgePackageKM->ui32InBufferSize > PVRSRV_MAX_BRIDGE_IN_SIZE) ||
}
- if(psBridgePackageKM->ui32InBufferSize > 0)
- {
- if(!OSAccessOK(PVR_VERIFY_READ,
- psBridgePackageKM->pvParamIn,
- psBridgePackageKM->ui32InBufferSize))
- {
- PVR_DPF((PVR_DBG_ERROR, "%s: Invalid pvParamIn pointer", __FUNCTION__));
- }
-
- if(CopyFromUserWrapper(psPerProc,
- ui32BridgeID,
- psBridgeIn,
- psBridgePackageKM->pvParamIn,
- psBridgePackageKM->ui32InBufferSize)
- != PVRSRV_OK)
- {
- goto return_fault;
- }
- }
- }
+ if(psBridgePackageKM->ui32InBufferSize > 0)
+ {
+ if(!OSAccessOK(PVR_VERIFY_READ,
+ psBridgePackageKM->pvParamIn,
+ psBridgePackageKM->ui32InBufferSize))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid pvParamIn pointer", __FUNCTION__));
+ }
+
+ if(CopyFromUserWrapper(psPerProc,
+ ui32BridgeID,
+ psBridgeIn,
+ psBridgePackageKM->pvParamIn,
+ psBridgePackageKM->ui32InBufferSize)
+ != PVRSRV_OK)
+ {
+ goto return_fault;
+ }
+ }
+ }
#else
- psBridgeIn = psBridgePackageKM->pvParamIn;
- psBridgeOut = psBridgePackageKM->pvParamOut;
+ psBridgeIn = psBridgePackageKM->pvParamIn;
+ psBridgeOut = psBridgePackageKM->pvParamOut;
#endif
- if(ui32BridgeID >= (BRIDGE_DISPATCH_TABLE_ENTRY_COUNT))
- {
- PVR_DPF((PVR_DBG_ERROR, "%s: ui32BridgeID = %d is out if range!",
- __FUNCTION__, ui32BridgeID));
- goto return_fault;
- }
- pfBridgeHandler =
- (BridgeWrapperFunction)g_BridgeDispatchTable[ui32BridgeID].pfFunction;
- err = pfBridgeHandler(ui32BridgeID,
- psBridgeIn,
- psBridgeOut,
- psPerProc);
- if(err < 0)
- {
- goto return_fault;
- }
+ if(ui32BridgeID >= (BRIDGE_DISPATCH_TABLE_ENTRY_COUNT))
+ {
+		PVR_DPF((PVR_DBG_ERROR, "%s: ui32BridgeID = %d is out of range!",
+ __FUNCTION__, ui32BridgeID));
+ goto return_fault;
+ }
+ pfBridgeHandler =
+ (BridgeWrapperFunction)g_BridgeDispatchTable[ui32BridgeID].pfFunction;
+ err = pfBridgeHandler(ui32BridgeID,
+ psBridgeIn,
+ psBridgeOut,
+ psPerProc);
+ if(err < 0)
+ {
+ goto return_fault;
+ }
#if defined(__linux__)
-
- if(CopyToUserWrapper(psPerProc,
- ui32BridgeID,
- psBridgePackageKM->pvParamOut,
- psBridgeOut,
- psBridgePackageKM->ui32OutBufferSize)
- != PVRSRV_OK)
- {
- goto return_fault;
- }
+
+ if(CopyToUserWrapper(psPerProc,
+ ui32BridgeID,
+ psBridgePackageKM->pvParamOut,
+ psBridgeOut,
+ psBridgePackageKM->ui32OutBufferSize)
+ != PVRSRV_OK)
+ {
+ goto return_fault;
+ }
#endif
- err = 0;
+ err = 0;
return_fault:
- ReleaseHandleBatch(psPerProc);
- return err;
+ ReleaseHandleBatch(psPerProc);
+ return err;
}
return 0;
}
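+/* New bridge wrapper: resolves the device cookie and HW transfer context
+ * handles, then forwards the requested priority and the offset of the
+ * priority field to SGXSetTransferContextPriorityKM. */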
+static IMG_INT
+SGXSetTransferContextPriorityBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_SGX_SET_TRANSFER_CONTEXT_PRIORITY *psSGXSetTransferContextPriorityIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_HANDLE hDevCookieInt;
+ IMG_HANDLE hTransferContextInt;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_SET_TRANSFER_CONTEXT_PRIORITY);
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDevCookieInt,
+ psSGXSetTransferContextPriorityIN->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE);
+
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hTransferContextInt,
+ psSGXSetTransferContextPriorityIN->hHWTransferContext,
+ PVRSRV_HANDLE_TYPE_SGX_HW_TRANSFER_CONTEXT);
+
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError = SGXSetTransferContextPriorityKM(
+ hDevCookieInt,
+ hTransferContextInt,
+ psSGXSetTransferContextPriorityIN->ui32Priority,
+ psSGXSetTransferContextPriorityIN->ui32OffsetOfPriorityField);
+
+ return 0;
+}
+
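+/* Render-context counterpart of the wrapper above: same handle lookups,
+ * forwarding to SGXSetRenderContextPriorityKM. */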
+static IMG_INT
+SGXSetRenderContextPriorityBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_SGX_SET_RENDER_CONTEXT_PRIORITY *psSGXSetRenderContextPriorityIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_HANDLE hDevCookieInt;
+ IMG_HANDLE hRenderContextInt;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_SET_RENDER_CONTEXT_PRIORITY);
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDevCookieInt,
+ psSGXSetRenderContextPriorityIN->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE);
+
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hRenderContextInt,
+ psSGXSetRenderContextPriorityIN->hHWRenderContext,
+ PVRSRV_HANDLE_TYPE_SGX_HW_RENDER_CONTEXT);
+
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError = SGXSetRenderContextPriorityKM(
+ hDevCookieInt,
+ hRenderContextInt,
+ psSGXSetRenderContextPriorityIN->ui32Priority,
+ psSGXSetRenderContextPriorityIN->ui32OffsetOfPriorityField);
+
+ return 0;
+}
+
#if defined(SGX_FEATURE_2D_HARDWARE)
static IMG_INT
#endif
-#if defined(FIX_HW_BRN_31542)
+#if defined(FIX_HW_BRN_31542) || defined(FIX_HW_BRN_36513)
eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
&hDummy,
psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAVDMStreamMemInfo,
#endif
-#if defined(FIX_HW_BRN_31542)
+#if defined(FIX_HW_BRN_31542) || defined(FIX_HW_BRN_36513)
eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
#if defined (SUPPORT_SID_INTERFACE)
&asInitInfoKM.hKernelClearClipWAVDMStreamMemInfo,
#endif
#endif
-#if defined(FIX_HW_BRN_31542)
+#if defined(FIX_HW_BRN_31542) || defined(FIX_HW_BRN_36513)
#if defined (SUPPORT_SID_INTERFACE)
eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, asInitInfoKM.hKernelClearClipWAVDMStreamMemInfo);
if (eError != PVRSRV_OK)
hHWRenderContextInt =
SGXRegisterHWRenderContextKM(hDevCookieInt,
- &psSGXRegHWRenderContextIN->sHWRenderContextDevVAddr,
- psPerProc);
+ psSGXRegHWRenderContextIN->pHWRenderContextCpuVAddr,
+ psSGXRegHWRenderContextIN->ui32HWRenderContextSize,
+ psSGXRegHWRenderContextIN->ui32OffsetToPDDevPAddr,
+ psSGXRegHWRenderContextIN->hDevMemContext,
+ &psSGXRegHWRenderContextOUT->sHWRenderContextDevVAddr,
+ psPerProc);
if (hHWRenderContextInt == IMG_NULL)
{
hHWTransferContextInt =
SGXRegisterHWTransferContextKM(hDevCookieInt,
- &psSGXRegHWTransferContextIN->sHWTransferContextDevVAddr,
+ psSGXRegHWTransferContextIN->pHWTransferContextCpuVAddr,
+ psSGXRegHWTransferContextIN->ui32HWTransferContextSize,
+ psSGXRegHWTransferContextIN->ui32OffsetToPDDevPAddr,
+ psSGXRegHWTransferContextIN->hDevMemContext,
+ &psSGXRegHWTransferContextOUT->sHWTransferContextDevVAddr,
psPerProc);
if (hHWTransferContextInt == IMG_NULL)
hHW2DContextInt =
SGXRegisterHW2DContextKM(hDevCookieInt,
- &psSGXRegHW2DContextIN->sHW2DContextDevVAddr,
+ psSGXRegHW2DContextIN->pHW2DContextCpuVAddr,
+ psSGXRegHW2DContextIN->ui32HW2DContextSize,
+ psSGXRegHW2DContextIN->ui32OffsetToPDDevPAddr,
+ psSGXRegHW2DContextIN->hDevMemContext,
+ &psSGXRegHW2DContextOUT->sHW2DContextDevVAddr,
psPerProc);
if (hHW2DContextInt == IMG_NULL)
SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_2DQUERYBLTSCOMPLETE, SGX2DQueryBlitsCompleteBW);
- SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_GETMMUPDADDR, DummyBW);
-
#if defined(TRANSFER_QUEUE)
SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_SUBMITTRANSFER, SGXSubmitTransferBW);
#endif
SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_SCHEDULE_PROCESS_QUEUES, SGXScheduleProcessQueuesBW);
SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_READ_HWPERF_CB, SGXReadHWPerfCBBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_SET_RENDER_CONTEXT_PRIORITY, SGXSetRenderContextPriorityBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_SET_TRANSFER_CONTEXT_PRIORITY, SGXSetTransferContextPriorityBW);
#if defined(PDUMP)
SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_PDUMP_BUFFER_ARRAY, SGXPDumpBufferArrayBW);
return PVRSRV_ERROR_OUT_OF_MEMORY;
}
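+/* Accessor for the cross-process share workaround table: returns the
+ * reference count of the shareable allocation at the given index. */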
+IMG_UINT32 BM_XProcWorkaroundGetRefCount(IMG_UINT32 ui32Index)
+{
+ return gXProcWorkaroundShareData[ui32Index].ui32RefCount;
+}
+
static PVRSRV_ERROR
XProcWorkaroundAllocShareable(RA_ARENA *psArena,
IMG_UINT32 ui32AllocFlags,
psBCPerContextInfo = (PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *)pvParam;
psBCInfo = psBCPerContextInfo->psBCInfo;
-
+#if 0
for (i = 0; i < psBCInfo->ui32BufferCount; i++)
{
if (psBCInfo->psBuffer[i].sDeviceClassBuffer.ui32MemMapRefCount != 0)
return PVRSRV_ERROR_STILL_MAPPED;
}
}
-
+#endif
psBCInfo->ui32RefCount--;
if(psBCInfo->ui32RefCount == 0)
{
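+		/* The still-mapped check disabled above is now performed only once the
+		 * last reference is dropped, and logs which buffer is still mapped. */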
+ for (i = 0; i < psBCInfo->ui32BufferCount; i++)
+ {
+ if (psBCInfo->psBuffer[i].sDeviceClassBuffer.ui32MemMapRefCount != 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "CloseBCDeviceCallBack: buffer %d (0x%p) still mapped (ui32MemMapRefCount = %d)",
+ i,
+ &psBCInfo->psBuffer[i].sDeviceClassBuffer,
+ psBCInfo->psBuffer[i].sDeviceClassBuffer.ui32MemMapRefCount));
+ return PVRSRV_ERROR_STILL_MAPPED;
+ }
+ }
psBCInfo->psFuncTable->pfnCloseBCDevice(psBCInfo->ui32DeviceID, psBCInfo->hExtDevice);
#if defined (PVRSRV_FLUSH_KERNEL_OPS_LAST_ONLY)
if (psMemInfo->psKernelSyncInfo)
{
- if (psMemInfo->psKernelSyncInfo->ui32RefCount == 1)
+ if (psMemInfo->sShareMemWorkaround.bInUse)
{
+
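+			/* For allocations using the cross-process share workaround, only
+			 * flush kernel ops when this is the last reference to the block. */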
+ if (BM_XProcWorkaroundGetRefCount(psMemInfo->sShareMemWorkaround.ui32ShareIndex)
+ == 1)
+ {
+ FlushKernelOps(psMemInfo->psKernelSyncInfo->psSyncData);
+ }
+ }
+ else
+ {
+
+
+
+
+
+
+
+
FlushKernelOps(psMemInfo->psKernelSyncInfo->psSyncData);
}
}
while (OSLockResource(&psSysData->sQProcessResource, ISR_ID) != PVRSRV_OK)
{
- OSWaitus(1);
+ OSSleepms(1);
};
psQueue = psSysData->psQueueList;
#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG)
PPVRSRV_KERNEL_MEM_INFO psKernelEDMStatusBufferMemInfo;
#endif
-#if defined(SGX_FEATURE_OVERLAPPED_SPM)
- PPVRSRV_KERNEL_MEM_INFO psKernelTmpRgnHeaderMemInfo;
-#endif
IMG_UINT32 ui32ClientRefCount;
#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG)
IMG_HANDLE hKernelEDMStatusBufferMemInfo;
#endif
-#if defined(SGX_FEATURE_OVERLAPPED_SPM)
- IMG_HANDLE hKernelTmpRgnHeaderMemInfo;
-#endif
IMG_UINT32 ui32EDMTaskReg0;
IMG_UINT32 ui32EDMTaskReg1;
******************************************************************************/
#include <stddef.h>
-#include <linux/delay.h>
-#include <linux/io.h>
#include "sgxdefs.h"
#include "sgxmmu.h"
#include "lists.h"
#include "srvkm.h"
#include "ttrace.h"
-
-#define WR_MEM_32(addr, data) *(unsigned int*)(addr) = data
-#define RD_MEM_32(addr) *(unsigned int*)(addr)
-#define UWORD32 unsigned int
-
-#ifdef PLAT_TI81xx
-#define SGX_TI81xx_CLK_DVDR_ADDR 0x481803b0
-#define CLKCTRL 0x4
-#define TENABLE 0x8
-#define TENABLEDIV 0xC
-#define M2NDIV 0x10
-#define MN2DIV 0x14
-#define STATUS 0x24
-#define PLL_BASE_ADDRESS 0x481C5000
-#define SGX_PLL_BASE (PLL_BASE_ADDRESS+0x0B0)
-#define OSC_0 20
-#define SGX_DIVIDER_ADDR 0x481803B0
-// ADPLLJ_CLKCRTL_Register Value Configurations
-// ADPLLJ_CLKCRTL_Register SPEC bug bit 19,bit29 -- CLKLDOEN,CLKDCOEN
-#define ADPLLJ_CLKCRTL_HS2 0x00000801 //HS2 Mode,TINTZ =1 --used by all PLL's except HDMI
-#endif
-
-
-
#define VAR(x) #x
PVRSRV_ERROR SGXGetMiscInfoUkernel(PVRSRV_SGXDEV_INFO *psDevInfo,
PVRSRV_DEVICE_NODE *psDeviceNode,
IMG_HANDLE hDevMemContext);
+static IMG_VOID SGXDumpDebugInfo (PVRSRV_SGXDEV_INFO *psDevInfo,
+ IMG_BOOL bDumpSGXRegs);
+
#if defined(PDUMP)
static
PVRSRV_ERROR SGXResetPDump(PVRSRV_DEVICE_NODE *psDeviceNode);
#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG)
psDevInfo->psKernelEDMStatusBufferMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelEDMStatusBufferMemInfo;
#endif
-#if defined(SGX_FEATURE_OVERLAPPED_SPM)
- psDevInfo->psKernelTmpRgnHeaderMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelTmpRgnHeaderMemInfo;
-#endif
#if defined(SGX_FEATURE_SPM_MODE_0)
psDevInfo->psKernelTmpDPMStateMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelTmpDPMStateMemInfo;
#endif
return PVRSRV_ERROR_UNKNOWN_SCRIPT_OPERATION;
}
-#ifdef PLAT_TI81xx
-//Function to configure PLL clocks. Only required for TI814x. For other devices its taken care in u-boot.
-void PLL_Clocks_Config(UWORD32 Base_Address,UWORD32 OSC_FREQ,UWORD32 N,UWORD32 M,UWORD32 M2,UWORD32 CLKCTRL_VAL)
-{
- UWORD32 m2nval,mn2val,read_clkctrl;
- m2nval = (M2<<16) | N;
- mn2val = M;
- WR_MEM_32((Base_Address+M2NDIV ),m2nval);
- msleep(100);
- WR_MEM_32((Base_Address+MN2DIV ),mn2val);
- msleep(100);
- WR_MEM_32((Base_Address+TENABLEDIV),0x1);
- msleep(100);
- WR_MEM_32((Base_Address+TENABLEDIV),0x0);
- msleep(100);
- WR_MEM_32((Base_Address+TENABLE ),0x1);
- msleep(100);
- WR_MEM_32((Base_Address+TENABLE ),0x0);
- msleep(100);
- read_clkctrl = RD_MEM_32(Base_Address+CLKCTRL);
- //configure the TINITZ(bit0) and CLKDCO BITS IF REQUIRED
- WR_MEM_32(Base_Address+CLKCTRL,(read_clkctrl & 0xff7fe3ff) | CLKCTRL_VAL);
- msleep(100);
- read_clkctrl = RD_MEM_32(Base_Address+CLKCTRL);
-
- // poll for the freq,phase lock to occur
- while (( (RD_MEM_32(Base_Address+STATUS)) & 0x00000600) != 0x00000600);
- //wait fot the clocks to get stabized
- msleep(10);
-}
-#endif
-
-
-
PVRSRV_ERROR SGXInitialise(PVRSRV_SGXDEV_INFO *psDevInfo,
IMG_BOOL bHardwareRecovery)
{
PVRSRV_KERNEL_MEM_INFO *psSGXHostCtlMemInfo = psDevInfo->psKernelSGXHostCtlMemInfo;
SGXMKIF_HOST_CTL *psSGXHostCtl = psSGXHostCtlMemInfo->pvLinAddrKM;
static IMG_BOOL bFirstTime = IMG_TRUE;
-#ifdef CONFIG_THRU_PLL
- void __iomem *pll_base;
- void __iomem *div_base;
-#endif
#if defined(PDUMP)
IMG_BOOL bPDumpIsSuspended = PDumpIsSuspended();
#endif
return eError;
}
PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "End of SGX initialisation script part 2\n");
-#ifdef PLAT_TI81xx
- OSWriteHWReg(psDevInfo->pvRegsBaseKM, 0xFF08, 0x80000000);//OCP Bypass mode
-
-#ifdef CONFIG_THRU_PLL
- if(cpu_is_ti816x()) {
- div_base = ioremap(SGX_TI81xx_CLK_DVDR_ADDR,0x100);
- WR_MEM_32((div_base),0x2);
- msleep(100);
- iounmap (div_base);
- } else {
- div_base = ioremap(SGX_TI81xx_CLK_DVDR_ADDR,0x100);
- WR_MEM_32((div_base),0x0);
- pll_base = ioremap(SGX_PLL_BASE,0x100);
- PLL_Clocks_Config((UWORD32)pll_base,OSC_0,19,800,4,ADPLLJ_CLKCRTL_HS2);
- iounmap (div_base);
- iounmap (pll_base);
- }
-#endif
-#else
- if(!(cpu_is_omap3530() || cpu_is_omap3517()))
+ if(!(cpu_is_omap3530() || cpu_is_omap3517()))
{
OSWriteHWReg(psDevInfo->pvRegsBaseKM, 0xFF08, 0x80000000);//OCP Bypass mode
}
-#endif
-
-
psSGXHostCtl->ui32HostClock = OSClockus();
{
PVR_DPF((PVR_DBG_ERROR, "SGXInitialise: Wait for uKernel initialisation failed"));
#if !defined(FIX_HW_BRN_23281)
+ SGXDumpDebugInfo(psDevInfo, IMG_FALSE);
PVR_DBG_BREAK;
#endif
return PVRSRV_ERROR_RETRY;
}
+#if defined(RESTRICTED_REGISTERS) && defined(SGX_FEATURE_MP)
+
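+/* Reads a single master (hydra) bank register and logs its value with a
+ * "(HYD)" prefix; used by the debug dump below. */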
+static IMG_VOID SGXDumpMasterDebugReg (PVRSRV_SGXDEV_INFO *psDevInfo,
+ IMG_CHAR *pszName,
+ IMG_UINT32 ui32RegAddr)
+{
+ IMG_UINT32 ui32RegVal;
+ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, ui32RegAddr);
+ PVR_LOG(("(HYD) %s%08X", pszName, ui32RegVal));
+}
+
+#endif
+
static IMG_VOID SGXDumpDebugReg (PVRSRV_SGXDEV_INFO *psDevInfo,
IMG_UINT32 ui32CoreNum,
IMG_CHAR *pszName,
SGXDumpDebugReg(psDevInfo, 0, "EUR_CR_CORE_ID: ", EUR_CR_CORE_ID);
SGXDumpDebugReg(psDevInfo, 0, "EUR_CR_CORE_REVISION: ", EUR_CR_CORE_REVISION);
-
+#if defined(RESTRICTED_REGISTERS) && defined(SGX_FEATURE_MP)
+ SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_BIF_INT_STAT: ", EUR_CR_MASTER_BIF_INT_STAT);
+ SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_BIF_FAULT: ",EUR_CR_MASTER_BIF_FAULT);
+ SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_CLKGATESTATUS2: ",EUR_CR_MASTER_CLKGATESTATUS2 );
+ SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_VDM_PIM_STATUS: ",EUR_CR_MASTER_VDM_PIM_STATUS);
+ SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_BIF_BANK_SET: ",EUR_CR_MASTER_BIF_BANK_SET);
+
+ SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_EVENT_STATUS: ",EUR_CR_MASTER_EVENT_STATUS);
+ SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_EVENT_STATUS2: ",EUR_CR_MASTER_EVENT_STATUS2);
+ SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_MP_PRIMITIVE: ",EUR_CR_MASTER_MP_PRIMITIVE);
+ SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_DPM_DPLIST_STATUS: ",EUR_CR_MASTER_DPM_DPLIST_STATUS);
+ SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_DPM_PROACTIVE_PIM_SPEC: ",EUR_CR_MASTER_DPM_PROACTIVE_PIM_SPEC);
+	SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_DPM_PAGE_MANAGEOP: ",EUR_CR_MASTER_DPM_PAGE_MANAGEOP);
+ SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_VDM_CONTEXT_STORE_SNAPSHOT: ",EUR_CR_MASTER_VDM_CONTEXT_STORE_SNAPSHOT);
+ SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_VDM_CONTEXT_LOAD_STATUS: ",EUR_CR_MASTER_VDM_CONTEXT_LOAD_STATUS);
+ SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_VDM_CONTEXT_STORE_STREAM: ",EUR_CR_MASTER_VDM_CONTEXT_STORE_STREAM);
+ SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_VDM_CONTEXT_STORE_STATUS: ",EUR_CR_MASTER_VDM_CONTEXT_STORE_STATUS);
+ SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_VDM_CONTEXT_STORE_STATE0: ",EUR_CR_MASTER_VDM_CONTEXT_STORE_STATE0);
+ SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_VDM_CONTEXT_STORE_STATE1: ",EUR_CR_MASTER_VDM_CONTEXT_STORE_STATE1);
+ SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_VDM_WAIT_FOR_KICK: ",EUR_CR_MASTER_VDM_WAIT_FOR_KICK);
+#endif
for (ui32CoreNum = 0; ui32CoreNum < SGX_FEATURE_MP_CORE_COUNT_3D; ui32CoreNum++)
{
ui32BIFCtrl = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL);
OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32BIFCtrl | EUR_CR_BIF_CTRL_PAUSE_MASK);
- OSWaitus(200 * 1000000 / psDevInfo->ui32CoreClockSpeed);
+ SGXWaitClocks(psDevInfo, 200);
#endif
bBRN31093Inval = IMG_TRUE;
OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL_INVAL, EUR_CR_BIF_CTRL_INVAL_PTE_MASK);
- OSWaitus(200 * 1000000 / psDevInfo->ui32CoreClockSpeed);
+ SGXWaitClocks(psDevInfo, 200);
#if defined(FIX_HW_BRN_29997)
#include "services_headers.h"
#include "sgxinfokm.h"
#include "sgxconfig.h"
+#include "sgxutils.h"
#include "pdump_km.h"
#endif
- OSWaitus(100 * 1000000 / psDevInfo->ui32CoreClockSpeed);
+ SGXWaitClocks(psDevInfo, 100);
if (bPDump)
{
PDUMPIDLWITHFLAGS(30, ui32PDUMPFlags);
ui32RegVal = EUR_CR_MASTER_SOFT_RESET_BIF_RESET_MASK |
EUR_CR_MASTER_SOFT_RESET_IPF_RESET_MASK |
EUR_CR_MASTER_SOFT_RESET_DPM_RESET_MASK |
+ EUR_CR_MASTER_SOFT_RESET_MCI_RESET_MASK |
EUR_CR_MASTER_SOFT_RESET_VDM_RESET_MASK;
#if defined(SGX_FEATURE_PTLA)
#if defined(SGX_FEATURE_SYSTEM_CACHE)
#if defined(SGX_BYPASS_SYSTEM_CACHE)
- #error SGX_BYPASS_SYSTEM_CACHE not supported
+ ui32RegVal = EUR_CR_MASTER_SLC_CTRL_BYPASS_ALL_MASK;
#else
ui32RegVal = EUR_CR_MASTER_SLC_CTRL_USSE_INVAL_REQ0_MASK |
#if defined(FIX_HW_BRN_30954)
EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_USE3_MASK |
EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_TA_MASK;
#endif
+ #endif
OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_SLC_CTRL_BYPASS, ui32RegVal);
PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Initialise the hydra SLC bypass control\r\n");
PDUMPREG(SGX_PDUMPREG_NAME, EUR_CR_MASTER_SLC_CTRL_BYPASS, ui32RegVal);
- #endif
#endif
SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
{
if (ui32CallerID == ISR_ID)
{
+ SYS_DATA *psSysData;
+
psDeviceNode->bReProcessDeviceCommandComplete = IMG_TRUE;
eError = PVRSRV_OK;
+
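+			/* Schedule the MISR so the deferred command-complete processing is carried out. */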
+ SysAcquireData(&psSysData);
+ OSScheduleMISR(psSysData);
}
else
{
typedef struct _SGX_HW_RENDER_CONTEXT_CLEANUP_
{
PVRSRV_DEVICE_NODE *psDeviceNode;
- IMG_DEV_VIRTADDR sHWRenderContextDevVAddr;
- IMG_HANDLE hBlockAlloc;
+ PVRSRV_KERNEL_MEM_INFO *psHWRenderContextMemInfo;
+ IMG_HANDLE hBlockAlloc;
PRESMAN_ITEM psResItem;
} SGX_HW_RENDER_CONTEXT_CLEANUP;
PVR_UNREFERENCED_PARAMETER(ui32Param);
eError = SGXCleanupRequest(psCleanup->psDeviceNode,
- &psCleanup->sHWRenderContextDevVAddr,
+ &psCleanup->psHWRenderContextMemInfo->sDevVAddr,
PVRSRV_CLEANUPCMD_RC,
bForceCleanup);
+
+ PVRSRVFreeDeviceMemKM(psCleanup->psDeviceNode,
+ psCleanup->psHWRenderContextMemInfo);
+
+
OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
sizeof(SGX_HW_RENDER_CONTEXT_CLEANUP),
psCleanup,
typedef struct _SGX_HW_TRANSFER_CONTEXT_CLEANUP_
{
PVRSRV_DEVICE_NODE *psDeviceNode;
- IMG_DEV_VIRTADDR sHWTransferContextDevVAddr;
+ PVRSRV_KERNEL_MEM_INFO *psHWTransferContextMemInfo;
IMG_HANDLE hBlockAlloc;
PRESMAN_ITEM psResItem;
} SGX_HW_TRANSFER_CONTEXT_CLEANUP;
PVR_UNREFERENCED_PARAMETER(ui32Param);
eError = SGXCleanupRequest(psCleanup->psDeviceNode,
- &psCleanup->sHWTransferContextDevVAddr,
+ &psCleanup->psHWTransferContextMemInfo->sDevVAddr,
PVRSRV_CLEANUPCMD_TC,
bForceCleanup);
+
+ PVRSRVFreeDeviceMemKM(psCleanup->psDeviceNode,
+ psCleanup->psHWTransferContextMemInfo);
+
+
OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
sizeof(SGX_HW_TRANSFER_CONTEXT_CLEANUP),
psCleanup,
}
IMG_EXPORT
-IMG_HANDLE SGXRegisterHWRenderContextKM(IMG_HANDLE psDeviceNode,
- IMG_DEV_VIRTADDR *psHWRenderContextDevVAddr,
+IMG_HANDLE SGXRegisterHWRenderContextKM(IMG_HANDLE hDeviceNode,
+ IMG_CPU_VIRTADDR *psHWRenderContextCpuVAddr,
+ IMG_UINT32 ui32HWRenderContextSize,
+ IMG_UINT32 ui32OffsetToPDDevPAddr,
+ IMG_HANDLE hDevMemContext,
+ IMG_DEV_VIRTADDR *psHWRenderContextDevVAddr,
PVRSRV_PER_PROCESS_DATA *psPerProc)
{
PVRSRV_ERROR eError;
IMG_HANDLE hBlockAlloc;
SGX_HW_RENDER_CONTEXT_CLEANUP *psCleanup;
+ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)hDeviceNode;
+ DEVICE_MEMORY_INFO *psDevMemoryInfo;
+ DEVICE_MEMORY_HEAP_INFO *psHeapInfo;
+ IMG_HANDLE hDevMemContextInt;
+ MMU_CONTEXT *psMMUContext;
+ IMG_DEV_PHYADDR sPDDevPAddr;
+ int iPtrByte;
+ IMG_UINT8 *pSrc;
+ IMG_UINT8 *pDst;
PRESMAN_ITEM psResItem;
eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
if (eError != PVRSRV_OK)
{
PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWRenderContextKM: Couldn't allocate memory for SGX_HW_RENDER_CONTEXT_CLEANUP structure"));
- return IMG_NULL;
+ goto exit0;
+ }
+
+ psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
+ psHeapInfo = &psDevMemoryInfo->psDeviceMemoryHeap[SGX_KERNEL_DATA_HEAP_ID];
+
+ eError = PVRSRVAllocDeviceMemKM(hDeviceNode,
+ psPerProc,
+ psHeapInfo->hDevMemHeap,
+ PVRSRV_MEM_READ | PVRSRV_MEM_WRITE
+ | PVRSRV_MEM_NO_SYNCOBJ | PVRSRV_MEM_EDM_PROTECT
+ | PVRSRV_MEM_CACHE_CONSISTENT,
+ ui32HWRenderContextSize,
+ 32,
+ &psCleanup->psHWRenderContextMemInfo,
+ "HW Render Context");
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWRenderContextKM: Couldn't allocate device memory for HW Render Context"));
+ goto exit1;
+ }
+
+ eError = OSCopyFromUser(psPerProc,
+ psCleanup->psHWRenderContextMemInfo->pvLinAddrKM,
+ psHWRenderContextCpuVAddr,
+ ui32HWRenderContextSize);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWRenderContextKM: Couldn't copy user-mode copy of HWContext into device memory"));
+ goto exit2;
}
+
+ psHWRenderContextDevVAddr->uiAddr = psCleanup->psHWRenderContextMemInfo->sDevVAddr.uiAddr;
+
+
+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDevMemContextInt,
+ hDevMemContext,
+ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWRenderContextKM: Can't lookup DevMem Context"));
+ goto exit2;
+ }
+
+ psMMUContext = BM_GetMMUContextFromMemContext(hDevMemContextInt);
+ sPDDevPAddr = psDeviceNode->pfnMMUGetPDDevPAddr(psMMUContext);
+
+
+
+
+
+
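+	/* Patch the page directory device physical address into the HW render context, one byte at a time. */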
+ pSrc = (IMG_UINT8 *)&sPDDevPAddr;
+ pDst = (IMG_UINT8 *)psCleanup->psHWRenderContextMemInfo->pvLinAddrKM;
+ pDst += ui32OffsetToPDDevPAddr;
+
+ for (iPtrByte = 0; iPtrByte < sizeof(IMG_DEV_PHYADDR); iPtrByte++)
+ {
+ pDst[iPtrByte] = pSrc[iPtrByte];
+ }
+
+#if defined(PDUMP)
+
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "HW Render context struct");
+
+ PDUMPMEM(
+ IMG_NULL,
+ psCleanup->psHWRenderContextMemInfo,
+ 0,
+ ui32HWRenderContextSize,
+ PDUMP_FLAGS_CONTINUOUS,
+ MAKEUNIQUETAG(psCleanup->psHWRenderContextMemInfo));
+
+
+ PDUMPCOMMENT("Page directory address in HW render context");
+ PDUMPPDDEVPADDR(
+ psCleanup->psHWRenderContextMemInfo,
+ ui32OffsetToPDDevPAddr,
+ sPDDevPAddr,
+ MAKEUNIQUETAG(psCleanup->psHWRenderContextMemInfo),
+ PDUMP_PD_UNIQUETAG);
+#endif
+
psCleanup->hBlockAlloc = hBlockAlloc;
psCleanup->psDeviceNode = psDeviceNode;
- psCleanup->sHWRenderContextDevVAddr = *psHWRenderContextDevVAddr;
psResItem = ResManRegisterRes(psPerProc->hResManContext,
RESMAN_TYPE_HW_RENDER_CONTEXT,
0,
&SGXCleanupHWRenderContextCallback);
- if (psResItem == IMG_NULL)
- {
- PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWRenderContextKM: ResManRegisterRes failed"));
- OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
- sizeof(SGX_HW_RENDER_CONTEXT_CLEANUP),
- psCleanup,
- psCleanup->hBlockAlloc);
-
-
- return IMG_NULL;
- }
+ if (psResItem == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWRenderContextKM: ResManRegisterRes failed"));
+ goto exit2;
+ }
psCleanup->psResItem = psResItem;
return (IMG_HANDLE)psCleanup;
+
+exit2:
+ PVRSRVFreeDeviceMemKM(hDeviceNode,
+ psCleanup->psHWRenderContextMemInfo);
+exit1:
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(SGX_HW_RENDER_CONTEXT_CLEANUP),
+ psCleanup,
+ psCleanup->hBlockAlloc);
+
+exit0:
+ return IMG_NULL;
}
IMG_EXPORT
IMG_EXPORT
-IMG_HANDLE SGXRegisterHWTransferContextKM(IMG_HANDLE psDeviceNode,
- IMG_DEV_VIRTADDR *psHWTransferContextDevVAddr,
+IMG_HANDLE SGXRegisterHWTransferContextKM(IMG_HANDLE hDeviceNode,
+ IMG_CPU_VIRTADDR *psHWTransferContextCpuVAddr,
+ IMG_UINT32 ui32HWTransferContextSize,
+ IMG_UINT32 ui32OffsetToPDDevPAddr,
+ IMG_HANDLE hDevMemContext,
+ IMG_DEV_VIRTADDR *psHWTransferContextDevVAddr,
PVRSRV_PER_PROCESS_DATA *psPerProc)
{
PVRSRV_ERROR eError;
IMG_HANDLE hBlockAlloc;
SGX_HW_TRANSFER_CONTEXT_CLEANUP *psCleanup;
+ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)hDeviceNode;
+ DEVICE_MEMORY_INFO *psDevMemoryInfo;
+ DEVICE_MEMORY_HEAP_INFO *psHeapInfo;
+ IMG_HANDLE hDevMemContextInt;
+ MMU_CONTEXT *psMMUContext;
+ IMG_DEV_PHYADDR sPDDevPAddr;
+ int iPtrByte;
+ IMG_UINT8 *pSrc;
+ IMG_UINT8 *pDst;
PRESMAN_ITEM psResItem;
eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
if (eError != PVRSRV_OK)
{
PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWTransferContextKM: Couldn't allocate memory for SGX_HW_TRANSFER_CONTEXT_CLEANUP structure"));
- return IMG_NULL;
+ goto exit0;
}
+ psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
+ psHeapInfo = &psDevMemoryInfo->psDeviceMemoryHeap[SGX_KERNEL_DATA_HEAP_ID];
+
+ eError = PVRSRVAllocDeviceMemKM(hDeviceNode,
+ psPerProc,
+ psHeapInfo->hDevMemHeap,
+ PVRSRV_MEM_READ | PVRSRV_MEM_WRITE
+ | PVRSRV_MEM_NO_SYNCOBJ | PVRSRV_MEM_EDM_PROTECT
+ | PVRSRV_MEM_CACHE_CONSISTENT,
+ ui32HWTransferContextSize,
+ 32,
+ &psCleanup->psHWTransferContextMemInfo,
+			"HW Transfer Context");
+
+ if (eError != PVRSRV_OK)
+ {
+		PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWTransferContextKM: Couldn't allocate device memory for HW Transfer Context"));
+ goto exit1;
+ }
+
+ eError = OSCopyFromUser(psPerProc,
+ psCleanup->psHWTransferContextMemInfo->pvLinAddrKM,
+ psHWTransferContextCpuVAddr,
+ ui32HWTransferContextSize);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWTransferContextKM: Couldn't copy user-mode copy of HWContext into device memory"));
+ goto exit2;
+ }
+
+
+ psHWTransferContextDevVAddr->uiAddr = psCleanup->psHWTransferContextMemInfo->sDevVAddr.uiAddr;
+
+
+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDevMemContextInt,
+ hDevMemContext,
+ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWTransferContextKM: Can't lookup DevMem Context"));
+ goto exit2;
+ }
+
+ psMMUContext = BM_GetMMUContextFromMemContext(hDevMemContextInt);
+ sPDDevPAddr = psDeviceNode->pfnMMUGetPDDevPAddr(psMMUContext);
+
+
+
+
+
+
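+	/* Patch the page directory device physical address into the HW transfer context, one byte at a time. */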
+ pSrc = (IMG_UINT8 *)&sPDDevPAddr;
+ pDst = (IMG_UINT8 *)psCleanup->psHWTransferContextMemInfo->pvLinAddrKM;
+ pDst += ui32OffsetToPDDevPAddr;
+
+ for (iPtrByte = 0; iPtrByte < sizeof(IMG_DEV_PHYADDR); iPtrByte++)
+ {
+ pDst[iPtrByte] = pSrc[iPtrByte];
+ }
+
+#if defined(PDUMP)
+
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "HW Transfer context struct");
+
+ PDUMPMEM(
+ IMG_NULL,
+ psCleanup->psHWTransferContextMemInfo,
+ 0,
+ ui32HWTransferContextSize,
+ PDUMP_FLAGS_CONTINUOUS,
+ MAKEUNIQUETAG(psCleanup->psHWTransferContextMemInfo));
+
+
+ PDUMPCOMMENT("Page directory address in HW transfer context");
+
+ PDUMPPDDEVPADDR(
+ psCleanup->psHWTransferContextMemInfo,
+ ui32OffsetToPDDevPAddr,
+ sPDDevPAddr,
+ MAKEUNIQUETAG(psCleanup->psHWTransferContextMemInfo),
+ PDUMP_PD_UNIQUETAG);
+#endif
+
psCleanup->hBlockAlloc = hBlockAlloc;
psCleanup->psDeviceNode = psDeviceNode;
- psCleanup->sHWTransferContextDevVAddr = *psHWTransferContextDevVAddr;
psResItem = ResManRegisterRes(psPerProc->hResManContext,
RESMAN_TYPE_HW_TRANSFER_CONTEXT,
if (psResItem == IMG_NULL)
{
PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWTransferContextKM: ResManRegisterRes failed"));
- OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
- sizeof(SGX_HW_TRANSFER_CONTEXT_CLEANUP),
- psCleanup,
- psCleanup->hBlockAlloc);
-
-
- return IMG_NULL;
- }
+ goto exit2;
+ }
psCleanup->psResItem = psResItem;
return (IMG_HANDLE)psCleanup;
+
+exit2:
+ PVRSRVFreeDeviceMemKM(hDeviceNode,
+ psCleanup->psHWTransferContextMemInfo);
+exit1:
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(SGX_HW_TRANSFER_CONTEXT_CLEANUP),
+ psCleanup,
+ psCleanup->hBlockAlloc);
+
+
+exit0:
+ return IMG_NULL;
}
IMG_EXPORT
return eError;
}
+IMG_EXPORT
+PVRSRV_ERROR SGXSetTransferContextPriorityKM(
+ IMG_HANDLE hDeviceNode,
+ IMG_HANDLE hHWTransferContext,
+ IMG_UINT32 ui32Priority,
+ IMG_UINT32 ui32OffsetOfPriorityField)
+{
+ SGX_HW_TRANSFER_CONTEXT_CLEANUP *psCleanup;
+ IMG_UINT8 *pSrc;
+ IMG_UINT8 *pDst;
+ int iPtrByte;
+ PVR_UNREFERENCED_PARAMETER(hDeviceNode);
+
+ if (hHWTransferContext != IMG_NULL)
+ {
+ psCleanup = (SGX_HW_TRANSFER_CONTEXT_CLEANUP *)hHWTransferContext;
+
+ if ((ui32OffsetOfPriorityField + sizeof(ui32Priority))
+ >= psCleanup->psHWTransferContextMemInfo->uAllocSize)
+ {
+ PVR_DPF((
+ PVR_DBG_ERROR,
+				"SGXSetTransferContextPriorityKM: invalid context priority offset"));
+
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+
+
+
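+		/* Write the new priority value into the HW transfer context, one byte at a time. */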
+ pDst = (IMG_UINT8 *)psCleanup->psHWTransferContextMemInfo->pvLinAddrKM;
+ pDst += ui32OffsetOfPriorityField;
+ pSrc = (IMG_UINT8 *)&ui32Priority;
+
+ for (iPtrByte = 0; iPtrByte < sizeof(ui32Priority); iPtrByte++)
+ {
+ pDst[iPtrByte] = pSrc[iPtrByte];
+ }
+ }
+ return PVRSRV_OK;
+}
+
+IMG_EXPORT
+PVRSRV_ERROR SGXSetRenderContextPriorityKM(
+ IMG_HANDLE hDeviceNode,
+ IMG_HANDLE hHWRenderContext,
+ IMG_UINT32 ui32Priority,
+ IMG_UINT32 ui32OffsetOfPriorityField)
+{
+ SGX_HW_RENDER_CONTEXT_CLEANUP *psCleanup;
+ IMG_UINT8 *pSrc;
+ IMG_UINT8 *pDst;
+ int iPtrByte;
+ PVR_UNREFERENCED_PARAMETER(hDeviceNode);
+
+ if (hHWRenderContext != IMG_NULL)
+ {
+ psCleanup = (SGX_HW_RENDER_CONTEXT_CLEANUP *)hHWRenderContext;
+ if ((ui32OffsetOfPriorityField + sizeof(ui32Priority))
+ >= psCleanup->psHWRenderContextMemInfo->uAllocSize)
+ {
+ PVR_DPF((
+ PVR_DBG_ERROR,
+				"SGXSetRenderContextPriorityKM: invalid HWRenderContext priority offset"));
+
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+
+
+
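+		/* Write the new priority value into the HW render context, one byte at a time. */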
+ pDst = (IMG_UINT8 *)psCleanup->psHWRenderContextMemInfo->pvLinAddrKM;
+ pDst += ui32OffsetOfPriorityField;
+
+ pSrc = (IMG_UINT8 *)&ui32Priority;
+
+ for (iPtrByte = 0; iPtrByte < sizeof(ui32Priority); iPtrByte++)
+ {
+ pDst[iPtrByte] = pSrc[iPtrByte];
+ }
+ }
+ return PVRSRV_OK;
+}
+
#if defined(SGX_FEATURE_2D_HARDWARE)
typedef struct _SGX_HW_2D_CONTEXT_CLEANUP_
{
PVRSRV_DEVICE_NODE *psDeviceNode;
- IMG_DEV_VIRTADDR sHW2DContextDevVAddr;
+ PVRSRV_KERNEL_MEM_INFO *psHW2DContextMemInfo;
IMG_HANDLE hBlockAlloc;
PRESMAN_ITEM psResItem;
} SGX_HW_2D_CONTEXT_CLEANUP;
PVR_UNREFERENCED_PARAMETER(ui32Param);
- eError = SGXCleanupRequest(psCleanup->psDeviceNode,
- &psCleanup->sHW2DContextDevVAddr,
+
+ eError = SGXCleanupRequest(psCleanup->psDeviceNode,
+ &psCleanup->psHW2DContextMemInfo->sDevVAddr,
PVRSRV_CLEANUPCMD_2DC,
bForceCleanup);
- OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
+
+ PVRSRVFreeDeviceMemKM(psCleanup->psDeviceNode,
+ psCleanup->psHW2DContextMemInfo);
+
+
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
sizeof(SGX_HW_2D_CONTEXT_CLEANUP),
psCleanup,
psCleanup->hBlockAlloc);
-
+
return eError;
}
-IMG_EXPORT
-IMG_HANDLE SGXRegisterHW2DContextKM(IMG_HANDLE psDeviceNode,
- IMG_DEV_VIRTADDR *psHW2DContextDevVAddr,
+IMG_HANDLE SGXRegisterHW2DContextKM(IMG_HANDLE hDeviceNode,
+ IMG_CPU_VIRTADDR *psHW2DContextCpuVAddr,
+ IMG_UINT32 ui32HW2DContextSize,
+ IMG_UINT32 ui32OffsetToPDDevPAddr,
+ IMG_HANDLE hDevMemContext,
+ IMG_DEV_VIRTADDR *psHW2DContextDevVAddr,
PVRSRV_PER_PROCESS_DATA *psPerProc)
{
PVRSRV_ERROR eError;
IMG_HANDLE hBlockAlloc;
SGX_HW_2D_CONTEXT_CLEANUP *psCleanup;
+ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)hDeviceNode;
+ DEVICE_MEMORY_INFO *psDevMemoryInfo;
+ DEVICE_MEMORY_HEAP_INFO *psHeapInfo;
+ IMG_HANDLE hDevMemContextInt;
+ MMU_CONTEXT *psMMUContext;
+ IMG_DEV_PHYADDR sPDDevPAddr;
+ int iPtrByte;
+ IMG_UINT8 *pSrc;
+ IMG_UINT8 *pDst;
PRESMAN_ITEM psResItem;
eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
if (eError != PVRSRV_OK)
{
PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHW2DContextKM: Couldn't allocate memory for SGX_HW_2D_CONTEXT_CLEANUP structure"));
- return IMG_NULL;
+ goto exit0;
+ }
+
+ psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
+ psHeapInfo = &psDevMemoryInfo->psDeviceMemoryHeap[SGX_KERNEL_DATA_HEAP_ID];
+
+ eError = PVRSRVAllocDeviceMemKM(hDeviceNode,
+ psPerProc,
+ psHeapInfo->hDevMemHeap,
+ PVRSRV_MEM_READ | PVRSRV_MEM_WRITE
+ | PVRSRV_MEM_NO_SYNCOBJ | PVRSRV_MEM_EDM_PROTECT
+ | PVRSRV_MEM_CACHE_CONSISTENT,
+ ui32HW2DContextSize,
+ 32,
+ &psCleanup->psHW2DContextMemInfo,
+ "HW 2D Context");
+
+ if (eError != PVRSRV_OK)
+ {
+		PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHW2DContextKM: Couldn't allocate device memory for HW 2D Context"));
+ goto exit1;
}
+ eError = OSCopyFromUser(psPerProc,
+ psCleanup->psHW2DContextMemInfo->pvLinAddrKM,
+ psHW2DContextCpuVAddr,
+ ui32HW2DContextSize);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHW2DContextKM: Couldn't copy user-mode copy of HWContext into device memory"));
+ goto exit2;
+ }
+
+
+ psHW2DContextDevVAddr->uiAddr = psCleanup->psHW2DContextMemInfo->sDevVAddr.uiAddr;
+
+
+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDevMemContextInt,
+ hDevMemContext,
+ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHW2DContextKM: Can't lookup DevMem Context"));
+ goto exit2;
+ }
+
+ psMMUContext = BM_GetMMUContextFromMemContext(hDevMemContextInt);
+ sPDDevPAddr = psDeviceNode->pfnMMUGetPDDevPAddr(psMMUContext);
+
+
+
+
+
+
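+	/* Patch the page directory device physical address into the HW 2D context, one byte at a time. */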
+ pSrc = (IMG_UINT8 *)&sPDDevPAddr;
+ pDst = (IMG_UINT8 *)psCleanup->psHW2DContextMemInfo->pvLinAddrKM;
+ pDst += ui32OffsetToPDDevPAddr;
+
+ for (iPtrByte = 0; iPtrByte < sizeof(IMG_DEV_PHYADDR); iPtrByte++)
+ {
+ pDst[iPtrByte] = pSrc[iPtrByte];
+ }
+
+#if defined(PDUMP)
+
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "HW 2D context struct");
+
+ PDUMPMEM(
+ IMG_NULL,
+ psCleanup->psHW2DContextMemInfo,
+ 0,
+ ui32HW2DContextSize,
+ PDUMP_FLAGS_CONTINUOUS,
+ MAKEUNIQUETAG(psCleanup->psHW2DContextMemInfo));
+
+
+	PDUMPCOMMENT("Page directory address in HW 2D context");
+ PDUMPPDDEVPADDR(
+ psCleanup->psHW2DContextMemInfo,
+ ui32OffsetToPDDevPAddr,
+ sPDDevPAddr,
+ MAKEUNIQUETAG(psCleanup->psHW2DContextMemInfo),
+ PDUMP_PD_UNIQUETAG);
+#endif
+
psCleanup->hBlockAlloc = hBlockAlloc;
psCleanup->psDeviceNode = psDeviceNode;
- psCleanup->sHW2DContextDevVAddr = *psHW2DContextDevVAddr;
psResItem = ResManRegisterRes(psPerProc->hResManContext,
RESMAN_TYPE_HW_2D_CONTEXT,
if (psResItem == IMG_NULL)
{
PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHW2DContextKM: ResManRegisterRes failed"));
- OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
- sizeof(SGX_HW_2D_CONTEXT_CLEANUP),
- psCleanup,
- psCleanup->hBlockAlloc);
-
-
- return IMG_NULL;
+ goto exit2;
}
psCleanup->psResItem = psResItem;
return (IMG_HANDLE)psCleanup;
+
+exit2:
+ PVRSRVFreeDeviceMemKM(hDeviceNode,
+ psCleanup->psHW2DContextMemInfo);
+exit1:
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(SGX_HW_2D_CONTEXT_CLEANUP),
+ psCleanup,
+ psCleanup->hBlockAlloc);
+
+exit0:
+ return IMG_NULL;
}
IMG_EXPORT
}
+IMG_VOID SGXWaitClocks(PVRSRV_SGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32SGXClocks)
+{
+
+
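+	/* Convert the requested number of SGX clock cycles to microseconds and wait, rounding up so we wait for at least 1us. */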
+ OSWaitus(1 + (ui32SGXClocks * 1000000 / psDevInfo->ui32CoreClockSpeed));
+}
+
+
IMG_EXPORT
PVRSRV_ERROR PVRSRVGetSGXRevDataKM(PVRSRV_DEVICE_NODE* psDeviceNode, IMG_UINT32 *pui32SGXCoreRev,
IMG_IMPORT
IMG_HANDLE SGXRegisterHWRenderContextKM(IMG_HANDLE psDeviceNode,
- IMG_DEV_VIRTADDR *psHWRenderContextDevVAddr,
+ IMG_CPU_VIRTADDR *psHWRenderContextCpuVAddr,
+ IMG_UINT32 ui32HWRenderContextSize,
+ IMG_UINT32 ui32OffsetToPDDevPAddr,
+ IMG_HANDLE hDevMemContext,
+ IMG_DEV_VIRTADDR *psHWRenderContextDevVAddr,
PVRSRV_PER_PROCESS_DATA *psPerProc);
IMG_IMPORT
-IMG_HANDLE SGXRegisterHWTransferContextKM(IMG_HANDLE psDeviceNode,
- IMG_DEV_VIRTADDR *psHWTransferContextDevVAddr,
- PVRSRV_PER_PROCESS_DATA *psPerProc);
+IMG_HANDLE SGXRegisterHWTransferContextKM(IMG_HANDLE psDeviceNode,
+ IMG_CPU_VIRTADDR *psHWTransferContextCpuVAddr,
+ IMG_UINT32 ui32HWTransferContextSize,
+ IMG_UINT32 ui32OffsetToPDDevPAddr,
+ IMG_HANDLE hDevMemContext,
+ IMG_DEV_VIRTADDR *psHWTransferContextDevVAddr,
+ PVRSRV_PER_PROCESS_DATA *psPerProc);
IMG_IMPORT
PVRSRV_ERROR SGXFlushHWRenderTargetKM(IMG_HANDLE psSGXDevInfo,
IMG_IMPORT
PVRSRV_ERROR SGXUnregisterHWTransferContextKM(IMG_HANDLE hHWTransferContext, IMG_BOOL bForceCleanup);
+IMG_IMPORT
+PVRSRV_ERROR SGXSetRenderContextPriorityKM(IMG_HANDLE hDeviceNode,
+ IMG_HANDLE hHWRenderContext,
+ IMG_UINT32 ui32Priority,
+ IMG_UINT32 ui32OffsetOfPriorityField);
+
+IMG_IMPORT
+PVRSRV_ERROR SGXSetTransferContextPriorityKM(IMG_HANDLE hDeviceNode,
+ IMG_HANDLE hHWTransferContext,
+ IMG_UINT32 ui32Priority,
+ IMG_UINT32 ui32OffsetOfPriorityField);
+
#if defined(SGX_FEATURE_2D_HARDWARE)
IMG_IMPORT
IMG_HANDLE SGXRegisterHW2DContextKM(IMG_HANDLE psDeviceNode,
- IMG_DEV_VIRTADDR *psHW2DContextDevVAddr,
+ IMG_CPU_VIRTADDR *psHW2DContextCpuVAddr,
+ IMG_UINT32 ui32HW2DContextSize,
+ IMG_UINT32 ui32OffsetToPDDevPAddr,
+ IMG_HANDLE hDevMemContext,
+ IMG_DEV_VIRTADDR *psHW2DContextDevVAddr,
PVRSRV_PER_PROCESS_DATA *psPerProc);
IMG_IMPORT
IMG_UINT32 ui32TimeWraps,
IMG_UINT32 ui32Time);
+IMG_VOID SGXWaitClocks(PVRSRV_SGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32SGXClocks);
+
PVRSRV_ERROR SGXCleanupRequest(PVRSRV_DEVICE_NODE *psDeviceNode,
IMG_DEV_VIRTADDR *psHWDataDevVAddr,
IMG_UINT32 ui32CleanupType,
#define ON_EACH_CPU(func, info, wait) on_each_cpu(func, info, 0, wait)
#endif
+//#if defined(PVR_LINUX_USING_WORKQUEUES) && !defined(CONFIG_PREEMPT)
+//#error "A preemptible Linux kernel is required when using workqueues"
+//#endif
#if defined(EMULATOR)
#define EVENT_OBJECT_TIMEOUT_MS (2000)
(PVRSRV_BRIDGE_OUT_EXPORTDEVICEMEM *)psBridgePackageKM->pvParamOut;
PVRSRV_FILE_PRIVATE_DATA *psPrivateData = PRIVATE_DATA(pFile);
- psPrivateData->hKernelMemInfo = psExportDeviceMemOUT->hMemInfo;
+ if (pvr_get_user(psPrivateData->hKernelMemInfo, &psExportDeviceMemOUT->hMemInfo) != 0)
+ {
+ err = -EFAULT;
+ goto unlock_and_return;
+ }
#if defined(SUPPORT_MEMINFO_IDS)
psPrivateData->ui64Stamp = ++ui64Stamp;
- psExportDeviceMemOUT->ui64Stamp = psPrivateData->ui64Stamp;
if (pvr_put_user(psPrivateData->ui64Stamp, &psExportDeviceMemOUT->ui64Stamp) != 0)
{
err = -EFAULT;
static PVRSRV_LINUX_MUTEX gsDebugMutexNonIRQ;
-static DEFINE_SPINLOCK(gsDebugLockIRQ);
-//static spinlock_t gsDebugLockIRQ = SPIN_LOCK_UNLOCKED;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39))
+static spinlock_t gsDebugLockIRQ = SPIN_LOCK_UNLOCKED;
+#else
+static DEFINE_SPINLOCK(gsDebugLockIRQ);
+#endif
#if !defined (USE_SPIN_LOCK)
#define USE_SPIN_LOCK (in_interrupt() || !preemptible())
+++ /dev/null
-/**********************************************************************
- *
- * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful but, except
- * as otherwise stated in writing, without any warranty; without even the
- * implied warranty of merchantability or fitness for a particular purpose.
- * See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Imagination Technologies Ltd. <gpl-support@imgtec.com>
- * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
- *
- ******************************************************************************/
-
-#if defined(SUPPORT_DRI_DRM)
-
-#include <linux/version.h>
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)
-#ifndef AUTOCONF_INCLUDED
- #include <linux/config.h>
-#endif
-#endif
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/proc_fs.h>
-#include <linux/sched.h>
-#include <asm/ioctl.h>
-#include <drm/drmP.h>
-#include <drm/drm.h>
-
-#include "img_defs.h"
-#include "services.h"
-#include "kerneldisplay.h"
-#include "kernelbuffer.h"
-#include "syscommon.h"
-#include "pvrmmap.h"
-#include "mm.h"
-#include "mmap.h"
-#include "mutex.h"
-#include "pvr_debug.h"
-#include "srvkm.h"
-#include "perproc.h"
-#include "handle.h"
-#include "pvr_bridge_km.h"
-#include "pvr_bridge.h"
-#include "proc.h"
-#include "pvrmodule.h"
-#include "pvrversion.h"
-#include "lock.h"
-#include "linkage.h"
-#include "pvr_drm.h"
-
-#if defined(PVR_DRI_DRM_NOT_PCI)
-#include "pvr_drm_mod.h"
-#endif
-
-#define PVR_DRM_NAME PVRSRV_MODNAME
-#define PVR_DRM_DESC "Imagination Technologies PVR DRM"
-
-DECLARE_WAIT_QUEUE_HEAD(sWaitForInit);
-
-IMG_BOOL bInitComplete;
-IMG_BOOL bInitFailed;
-
-#if !defined(PVR_DRI_DRM_NOT_PCI)
-struct pci_dev *gpsPVRLDMDev;
-#endif
-
-struct drm_device *gpsPVRDRMDev;
-
-#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,24))
-#error "Linux kernel version 2.6.25 or later required for PVR DRM support"
-#endif
-
-#define PVR_DRM_FILE struct drm_file *
-
-#if !defined(SUPPORT_DRI_DRM_EXT)
-static struct pci_device_id asPciIdList[] = {
-#if defined(PVR_DRI_DRM_NOT_PCI)
- {1, 1, 1, 1, 0, 0, 0},
-#else
- {SYS_SGX_DEV_VENDOR_ID, SYS_SGX_DEV_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
-#if defined(SYS_SGX_DEV1_DEVICE_ID)
- {SYS_SGX_DEV_VENDOR_ID, SYS_SGX_DEV1_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
-#endif
-#endif
- {0}
-};
-#endif
-
-DRI_DRM_STATIC int
-PVRSRVDrmLoad(struct drm_device *dev, unsigned long flags)
-{
- int iRes=0;
-
- PVR_TRACE(("PVRSRVDrmLoad"));
-
- gpsPVRDRMDev = dev;
-#if !defined(PVR_DRI_DRM_NOT_PCI)
- gpsPVRLDMDev = dev->pdev;
-#endif
-
-#if defined(PDUMP)
- iRes = dbgdrv_init();
- if (iRes != 0)
- {
- goto exit;
- }
-#endif
-
- iRes = PVRCore_Init();
- if (iRes != 0)
- {
- goto exit_dbgdrv_cleanup;
- }
-
-#if defined(DISPLAY_CONTROLLER)
- iRes = PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _Init)(dev);
- if (iRes != 0)
- {
- goto exit_pvrcore_cleanup;
- }
-#endif
-// return 0;
- goto exit;
-#if defined(DISPLAY_CONTROLLER)
-exit_pvrcore_cleanup:
- PVRCore_Cleanup();
-#endif
-exit_dbgdrv_cleanup:
-#if defined(PDUMP)
- dbgdrv_cleanup();
-#endif
-exit:
- if (iRes != 0)
- {
- bInitFailed = IMG_TRUE;
- }
- bInitComplete = IMG_TRUE;
-
- wake_up_interruptible(&sWaitForInit);
-
-
- return iRes;
-}
-
-DRI_DRM_STATIC int
-PVRSRVDrmUnload(struct drm_device *dev)
-{
- PVR_TRACE(("PVRSRVDrmUnload"));
-
-#if defined(DISPLAY_CONTROLLER)
- PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _Cleanup)(dev);
-#endif
-
- PVRCore_Cleanup();
-
-#if defined(PDUMP)
- dbgdrv_cleanup();
-#endif
-
- return 0;
-}
-
-DRI_DRM_STATIC int
-PVRSRVDrmOpen(struct drm_device *dev, struct drm_file *file)
-{
-while (!bInitComplete)
- {
- DEFINE_WAIT(sWait);
-
- prepare_to_wait(&sWaitForInit, &sWait, TASK_INTERRUPTIBLE);
-
- if (!bInitComplete)
- {
- PVR_TRACE(("%s: Waiting for module initialisation to complete", __FUNCTION__));
-
- schedule();
- }
-
- finish_wait(&sWaitForInit, &sWait);
-
- if (signal_pending(current))
- {
- return -ERESTARTSYS;
- }
- }
-
- if (bInitFailed)
- {
- PVR_DPF((PVR_DBG_ERROR, "%s: Module initialisation failed", __FUNCTION__));
- return -EINVAL;
- }
-
-
- return PVRSRVOpen(dev, file);
-}
-
-#if defined(SUPPORT_DRI_DRM_EXT) && !defined(PVR_LINUX_USING_WORKQUEUES)
-DRI_DRM_STATIC void
-PVRSRVDrmPostClose(struct drm_device *dev, struct drm_file *file)
-{
- PVRSRVRelease(file->driver_priv);
-
- file->driver_priv = NULL;
-}
-#else
-DRI_DRM_STATIC int
-PVRSRVDrmRelease(struct inode *inode, struct file *filp)
-{
- struct drm_file *file_priv = filp->private_data;
- void *psDriverPriv = file_priv->driver_priv;
- int ret;
-
- ret = drm_release(inode, filp);
-
- if (ret != 0)
- {
-
- PVR_DPF((PVR_DBG_ERROR, "%s : drm_release failed: %d",
- __FUNCTION__, ret));
- }
-
- PVRSRVRelease(psDriverPriv);
-
- return 0;
-}
-#endif
-
-DRI_DRM_STATIC int
-PVRDRMIsMaster(struct drm_device *dev, void *arg, struct drm_file *pFile)
-{
- return 0;
-}
-
-#if defined(SUPPORT_DRI_DRM_EXT)
-int
-PVRDRM_Dummy_ioctl(struct drm_device *dev, void *arg, struct drm_file *pFile)
-{
- return 0;
-}
-#endif
-
-DRI_DRM_STATIC int
-PVRDRMUnprivCmd(struct drm_device *dev, void *arg, struct drm_file *pFile)
-{
- int ret = 0;
-
- LinuxLockMutex(&gPVRSRVLock);
-
- if (arg == NULL)
- {
- ret = -EFAULT;
- }
- else
- {
- IMG_UINT32 *pui32Args = (IMG_UINT32 *)arg;
- IMG_UINT32 ui32Cmd = pui32Args[0];
- IMG_UINT32 *pui32OutArg = (IMG_UINT32 *)arg;
-
- switch (ui32Cmd)
- {
- case PVR_DRM_UNPRIV_INIT_SUCCESFUL:
- *pui32OutArg = PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_SUCCESSFUL) ? 1 : 0;
- break;
-
- default:
- ret = -EFAULT;
- }
-
- }
-
- LinuxUnLockMutex(&gPVRSRVLock);
-
- return ret;
-}
-
-#if defined(DISPLAY_CONTROLLER) && defined(PVR_DISPLAY_CONTROLLER_DRM_IOCTL)
-static int
-PVRDRM_Display_ioctl(struct drm_device *dev, void *arg, struct drm_file *pFile)
-{
- int res;
-
- LinuxLockMutex(&gPVRSRVLock);
-
- res = PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _Ioctl)(dev, arg, pFile);
-
- LinuxUnLockMutex(&gPVRSRVLock);
-
- return res;
-}
-#endif
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33))
-#define PVR_DRM_FOPS_IOCTL .unlocked_ioctl
-#define PVR_DRM_UNLOCKED DRM_UNLOCKED
-#define DRM_IOCTL_PVR_DRM_SRVKM_IOCTL PVR_DRM_SRVKM_IOCTL
-#define DRM_PVR_DRM_SRVKM_IOCTL PVR_DRM_SRVKM_IOCTL
-#define DRM_PVR_DRM_IS_MASTER_IOCTL PVR_DRM_IS_MASTER_IOCTL
-#define DRM_IOCTL_PVR_DRM_IS_MASTER_IOCTL PVR_DRM_IS_MASTER_IOCTL
-#define DRM_PVR_DRM_UNPRIV_IOCTL PVR_DRM_UNPRIV_IOCTL
-#define DRM_IOCTL_PVR_DRM_UNPRIV_IOCTL PVR_DRM_UNPRIV_IOCTL
-#else
-#define PVR_DRM_FOPS_IOCTL .ioctl
-#define PVR_DRM_UNLOCKED 0
-#endif
-
-#if !defined(SUPPORT_DRI_DRM_EXT)
-struct drm_ioctl_desc sPVRDrmIoctls[] = {
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
- DRM_IOCTL_DEF_DRV(PVR_DRM_SRVKM_IOCTL, PVRSRV_BridgeDispatchKM, PVR_DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(PVR_DRM_IS_MASTER_IOCTL, PVRDRMIsMaster, DRM_MASTER | PVR_DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(PVR_DRM_UNPRIV_IOCTL, PVRDRMUnprivCmd, PVR_DRM_UNLOCKED),
-#else
- DRM_IOCTL_DEF(PVR_DRM_SRVKM_IOCTL, PVRSRV_BridgeDispatchKM, PVR_DRM_UNLOCKED),
- DRM_IOCTL_DEF(PVR_DRM_IS_MASTER_IOCTL, PVRDRMIsMaster, DRM_MASTER | PVR_DRM_UNLOCKED),
- DRM_IOCTL_DEF(PVR_DRM_UNPRIV_IOCTL, PVRDRMUnprivCmd, PVR_DRM_UNLOCKED),
-#endif
-#if defined(PDUMP)
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
- DRM_IOCTL_DEF_DRV(PVR_DRM_DBGDRV_IOCTL, dbgdrv_ioctl, PVR_DRM_UNLOCKED),
-#else
- DRM_IOCTL_DEF(PVR_DRM_DBGDRV_IOCTL, dbgdrv_ioctl, PVR_DRM_UNLOCKED),
-#endif
-#endif
-#if defined(DISPLAY_CONTROLLER) && defined(PVR_DISPLAY_CONTROLLER_DRM_IOCTL)
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
- DRM_IOCTL_DEF_DRV(PVR_DRM_DISP_IOCTL, PVRDRM_Display_ioctl, DRM_MASTER | PVR_DRM_UNLOCKED)
-#else
- DRM_IOCTL_DEF(PVR_DRM_DISP_IOCTL, PVRDRM_Display_ioctl, DRM_MASTER | PVR_DRM_UNLOCKED)
-#endif
-#endif
-};
-
-static int pvr_max_ioctl = DRM_ARRAY_SIZE(sPVRDrmIoctls);
-
-static struct drm_driver sPVRDrmDriver =
-{
- .driver_features = 0,
- .dev_priv_size = 0,
- .load = PVRSRVDrmLoad,
- .unload = PVRSRVDrmUnload,
- .open = PVRSRVDrmOpen,
- .suspend = PVRSRVDriverSuspend,
- .resume = PVRSRVDriverResume,
-#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,32))
- .get_map_ofs = drm_core_get_map_ofs,
- .get_reg_ofs = drm_core_get_reg_ofs,
-#endif
- .ioctls = sPVRDrmIoctls,
- .fops =
- {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = PVRSRVDrmRelease,
- PVR_DRM_FOPS_IOCTL = drm_ioctl,
- .mmap = PVRMMap,
- .poll = drm_poll,
- .fasync = drm_fasync,
- },
-#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,38))
- .pci_driver =
- {
- .name = PVR_DRM_NAME,
- .id_table = asPciIdList,
- },
-#else
- .kdriver =
- {
- PVR_DRM_NAME,
- asPciIdList,
- },
-// {
-// .pci->name = PVR_DRM_NAME,
-// .pci->id_table = asPciIdList,
-// },
-#endif
-
- .name = PVR_DRM_NAME,
- .desc = PVR_DRM_DESC,
- .date = PVR_BUILD_DATE,
- .major = PVRVERSION_MAJ,
- .minor = PVRVERSION_MIN,
- .patchlevel = PVRVERSION_BUILD,
-};
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
-static struct pci_driver pci_pvr_driver = {
- .name = PVR_DRM_NAME,
- .id_table = asPciIdList,
-};
-#endif
-
-static int __init PVRSRVDrmInit(void)
-{
- int iRes;
- sPVRDrmDriver.num_ioctls = pvr_max_ioctl;
-
-
- PVRDPFInit();
-
-#if defined(PVR_DRI_DRM_NOT_PCI)
- iRes = drm_pvr_dev_add();
- if (iRes != 0)
- {
- return iRes;
- }
-#endif
-
-#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,38))
- iRes = drm_init(&sPVRDrmDriver);
-#else
- iRes = drm_pci_init(&sPVRDrmDriver,&pci_pvr_driver);
-#endif
-#if defined(PVR_DRI_DRM_NOT_PCI)
- if (iRes != 0)
- {
- drm_pvr_dev_remove();
- }
-#endif
- return iRes;
-}
-
-static void __exit PVRSRVDrmExit(void)
-{
-#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,38))
- drm_exit(&sPVRDrmDriver);
-#else
- drm_pci_exit(&sPVRDrmDriver,&pci_pvr_driver);
-#endif
-
-#if defined(PVR_DRI_DRM_NOT_PCI)
- drm_pvr_dev_remove();
-#endif
-}
-
-module_init(PVRSRVDrmInit);
-module_exit(PVRSRVDrmExit);
-#endif
-#endif
-
-
+++ /dev/null
-/**********************************************************************
- *
- * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful but, except
- * as otherwise stated in writing, without any warranty; without even the
- * implied warranty of merchantability or fitness for a particular purpose.
- * See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Imagination Technologies Ltd. <gpl-support@imgtec.com>
- * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
- *
- ******************************************************************************/
-
-#if !defined(__PVR_DRM_H__)
-#define __PVR_DRM_H__
-
-#include "pvr_drm_shared.h"
-
-#if defined(SUPPORT_DRI_DRM)
-#define PVR_DRM_MAKENAME_HELPER(x, y) x ## y
-#define PVR_DRM_MAKENAME(x, y) PVR_DRM_MAKENAME_HELPER(x, y)
-
-int PVRCore_Init(void);
-void PVRCore_Cleanup(void);
-int PVRSRVOpen(struct drm_device *dev, struct drm_file *pFile);
-void PVRSRVRelease(void *pvPrivData);
-int PVRSRVDriverSuspend(struct drm_device *pDevice, pm_message_t state);
-int PVRSRVDriverResume(struct drm_device *pDevice);
-
-int PVRSRV_BridgeDispatchKM(struct drm_device *dev, void *arg, struct drm_file *pFile);
-
-#if defined(SUPPORT_DRI_DRM_EXT)
-#define DRI_DRM_STATIC
-int PVRSRVDrmLoad(struct drm_device *dev, unsigned long flags);
-int PVRSRVDrmUnload(struct drm_device *dev);
-int PVRSRVDrmOpen(struct drm_device *dev, struct drm_file *file);
-#if defined(PVR_LINUX_USING_WORKQUEUES)
-DRI_DRM_STATIC int PVRSRVDrmRelease(struct inode *inode, struct file *filp);
-#else
-void PVRSRVDrmPostClose(struct drm_device *dev, struct drm_file *file);
-#endif
-int PVRDRMIsMaster(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile);
-int PVRDRMUnprivCmd(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile);
-int PVRDRM_Dummy_ioctl(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile);
-#else
-#define DRI_DRM_STATIC static
-#endif
-
-#if defined(DISPLAY_CONTROLLER)
-extern int PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _Init)(struct drm_device *);
-extern void PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _Cleanup)(struct drm_device *);
-extern int PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _Suspend)(struct drm_device *);
-extern int PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _Resume)(struct drm_device *);
-#if defined(PVR_DISPLAY_CONTROLLER_DRM_IOCTL)
-extern int PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _Ioctl)(struct drm_device *dev, void *arg, struct drm_file *pFile);
-#endif
-#endif
-
-#if defined(PDUMP)
-int dbgdrv_init(void);
-void dbgdrv_cleanup(void);
-IMG_INT dbgdrv_ioctl(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile);
-#endif
-
-#if !defined(SUPPORT_DRI_DRM_EXT)
-#define PVR_DRM_SRVKM_IOCTL _IO(0, PVR_DRM_SRVKM_CMD)
-#define PVR_DRM_IS_MASTER_IOCTL _IO(0, PVR_DRM_IS_MASTER_CMD)
-#define PVR_DRM_UNPRIV_IOCTL _IO(0, PVR_DRM_UNPRIV_CMD)
-#define PVR_DRM_DBGDRV_IOCTL _IO(0, PVR_DRM_DBGDRV_CMD)
-#define PVR_DRM_DISP_IOCTL _IO(0, PVR_DRM_DISP_CMD)
-#endif
-
-#endif
-
-#endif
-
-
}
#define pvr_put_user put_user
+#define pvr_get_user get_user
#endif
#define FIX_HW_BRN_28889
#else
#if SGX_CORE_REV == 111
- #define FIX_HW_BRN_28889
#else
#if SGX_CORE_REV == SGX_CORE_REV_HEAD
#define FIX_HW_BRN_33657
#endif
#define FIX_HW_BRN_33920
+ #define FIX_HW_BRN_36513
#else
#if SGX_CORE_REV == 122
#define FIX_HW_BRN_29954
#define FIX_HW_BRN_33657
#endif
#define FIX_HW_BRN_33920
+ #define FIX_HW_BRN_36513
#else
#if SGX_CORE_REV == 1221
#define FIX_HW_BRN_33657
#endif
#define FIX_HW_BRN_33920
+ #define FIX_HW_BRN_36513
#else
#if SGX_CORE_REV == 140
#if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && defined(SGX_FEATURE_MP)
#define FIX_HW_BRN_33657
#endif
+ #define FIX_HW_BRN_36513
#else
#if SGX_CORE_REV == 1401
#if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && defined(SGX_FEATURE_MP)
#define FIX_HW_BRN_33657
#endif
+ #define FIX_HW_BRN_36513
#else
#if SGX_CORE_REV == 141
#if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && defined(SGX_FEATURE_MP)
#define FIX_HW_BRN_33657
#endif
+ #define FIX_HW_BRN_36513
#else
#if SGX_CORE_REV == 142
#if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && defined(SGX_FEATURE_MP)
#define FIX_HW_BRN_33657
#endif
+ #define FIX_HW_BRN_36513
#else
#if SGX_CORE_REV == 211
#define FIX_HW_BRN_33657
#endif
#define FIX_HW_BRN_33920
+ #define FIX_HW_BRN_36513
#else
#if SGX_CORE_REV == 2111
#define FIX_HW_BRN_33657
#endif
#define FIX_HW_BRN_33920
+ #define FIX_HW_BRN_36513
#else
#if SGX_CORE_REV == 213
#define FIX_HW_BRN_33657
#endif
#define FIX_HW_BRN_33920
+ #define FIX_HW_BRN_36513
#else
#if SGX_CORE_REV == 216
#if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && defined(SGX_FEATURE_MP)
#define FIX_HW_BRN_33657
#endif
+ #define FIX_HW_BRN_36513
#else
#if SGX_CORE_REV == 302
#if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && defined(SGX_FEATURE_MP)
#define FIX_HW_BRN_33657
#endif
#define FIX_HW_BRN_33920
+ #if defined(SGX_FEATURE_MP)
+ #define FIX_HW_BRN_36513
+ #endif
#else
#if SGX_CORE_REV == 103
#define FIX_HW_BRN_29954
#define FIX_HW_BRN_33657
#endif
#define FIX_HW_BRN_33920
+ #define FIX_HW_BRN_36513
#else
#if SGX_CORE_REV == 104
#define FIX_HW_BRN_29954
#define FIX_HW_BRN_33657
#endif
#define FIX_HW_BRN_33920
+ #define FIX_HW_BRN_36513
#else
#if SGX_CORE_REV == 105
#if defined(SGX_FEATURE_MP)
#define FIX_HW_BRN_33657
#endif
#define FIX_HW_BRN_33920
+ #define FIX_HW_BRN_36513
#else
#if SGX_CORE_REV == 106
#define FIX_HW_BRN_31272
#define FIX_HW_BRN_31780
- #if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && defined(SGX_FEATURE_MP)
- #define FIX_HW_BRN_33657
- #endif
#define FIX_HW_BRN_33920
#else
#if SGX_CORE_REV == 110
#if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && defined(SGX_FEATURE_MP)
#define FIX_HW_BRN_33657
#endif
+ #if defined(SGX_FEATURE_MP)
+ #if SGX_FEATURE_MP_CORE_COUNT > 1
+ #define FIX_HW_BRN_36513
+ #endif
+ #endif
#else
#if SGX_CORE_REV == SGX_CORE_REV_HEAD
#if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && defined(SGX_FEATURE_MP)
#if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && defined(SGX_FEATURE_MP)
#define FIX_HW_BRN_33657
#endif
+ #define FIX_HW_BRN_36513
#else
#if SGX_CORE_REV == SGX_CORE_REV_HEAD
#define SGX_CORE_FRIENDLY_NAME "SGX520"
#define SGX_CORE_ID SGX_CORE_ID_520
#define SGX_FEATURE_ADDRESS_SPACE_SIZE (28)
+ #define SGX_FEATURE_NUM_USE_PIPES (1)
#define SGX_FEATURE_AUTOCLOCKGATING
#else
#if defined(SGX530)
#define SGX_CORE_FRIENDLY_NAME "SGX530"
#define SGX_CORE_ID SGX_CORE_ID_530
#define SGX_FEATURE_ADDRESS_SPACE_SIZE (28)
+ #define SGX_FEATURE_NUM_USE_PIPES (2)
#define SGX_FEATURE_AUTOCLOCKGATING
#else
#if defined(SGX531)
#define SGX_CORE_FRIENDLY_NAME "SGX531"
#define SGX_CORE_ID SGX_CORE_ID_531
#define SGX_FEATURE_ADDRESS_SPACE_SIZE (28)
+ #define SGX_FEATURE_NUM_USE_PIPES (2)
#define SGX_FEATURE_AUTOCLOCKGATING
#define SGX_FEATURE_MULTI_EVENT_KICK
#else
#define SGX_FEATURE_MULTIPLE_MEM_CONTEXTS
#define SGX_FEATURE_BIF_NUM_DIRLISTS (16)
#define SGX_FEATURE_2D_HARDWARE
+ #define SGX_FEATURE_NUM_USE_PIPES (2)
#define SGX_FEATURE_AUTOCLOCKGATING
#define SUPPORT_SGX_GENERAL_MAPPING_HEAP
#define SGX_FEATURE_EDM_VERTEX_PDSADDR_FULL_RANGE
#define SGX_CORE_FRIENDLY_NAME "SGX540"
#define SGX_CORE_ID SGX_CORE_ID_540
#define SGX_FEATURE_ADDRESS_SPACE_SIZE (28)
+ #define SGX_FEATURE_NUM_USE_PIPES (4)
#define SGX_FEATURE_AUTOCLOCKGATING
#define SGX_FEATURE_MULTI_EVENT_KICK
#else
#define SGX_FEATURE_ADDRESS_SPACE_SIZE (32)
#define SGX_FEATURE_MULTIPLE_MEM_CONTEXTS
#define SGX_FEATURE_BIF_NUM_DIRLISTS (8)
+ #define SGX_FEATURE_NUM_USE_PIPES (4)
#define SGX_FEATURE_AUTOCLOCKGATING
#define SGX_FEATURE_MONOLITHIC_UKERNEL
#define SGX_FEATURE_MULTI_EVENT_KICK
#define SGX_FEATURE_ADDRESS_SPACE_SIZE (32)
#define SGX_FEATURE_MULTIPLE_MEM_CONTEXTS
#define SGX_FEATURE_BIF_NUM_DIRLISTS (8)
+ #define SGX_FEATURE_NUM_USE_PIPES (4)
#define SGX_FEATURE_AUTOCLOCKGATING
#define SGX_FEATURE_MONOLITHIC_UKERNEL
#define SGX_FEATURE_MULTI_EVENT_KICK
#define SGX_FEATURE_ADDRESS_SPACE_SIZE (32)
#define SGX_FEATURE_MULTIPLE_MEM_CONTEXTS
#define SGX_FEATURE_BIF_NUM_DIRLISTS (8)
+ #define SGX_FEATURE_NUM_USE_PIPES (8)
#define SGX_FEATURE_AUTOCLOCKGATING
#define SGX_FEATURE_MONOLITHIC_UKERNEL
#define SGX_FEATURE_MULTI_EVENT_KICK
PVRSRV_ERROR BM_XProcWorkaroundSetShareIndex(IMG_UINT32 ui32Index);
PVRSRV_ERROR BM_XProcWorkaroundUnsetShareIndex(IMG_UINT32 ui32Index);
PVRSRV_ERROR BM_XProcWorkaroundFindNewBufferAndSetShareIndex(IMG_UINT32 *pui32Index);
-
+IMG_UINT32 BM_XProcWorkaroundGetRefCount(IMG_UINT32 ui32Index);
#if defined(__cplusplus)
}
#define PDUMPMALLOCPAGETABLE PDumpMallocPageTable
#define PDUMPSETMMUCONTEXT PDumpSetMMUContext
#define PDUMPCLEARMMUCONTEXT PDumpClearMMUContext
+ #define PDUMPPDDEVPADDR PDumpPDDevPAddrKM
#define PDUMPFREEPAGES PDumpFreePages
#define PDUMPFREEPAGETABLE PDumpFreePageTable
#define PDUMPPDREG PDumpPDReg
#define PDUMPMALLOCPAGETABLE(args...)
#define PDUMPSETMMUCONTEXT(args...)
#define PDUMPCLEARMMUCONTEXT(args...)
+ #define PDUMPPDDEVPADDR(args...)
#define PDUMPFREEPAGES(args...)
#define PDUMPFREEPAGETABLE(args...)
#define PDUMPPDREG(args...)
/**********************************************************************
*
- * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
/**********************************************************************
*
- * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
*
******************************************************************************/
+#include "sysconfig.h"
#include "services_headers.h"
#include "kerneldisplay.h"
#include "oemfuncs.h"
#include "sgxinfo.h"
#include "sgxinfokm.h"
#include "syslocal.h"
-#include "sysconfig.h"
#include "ocpdefs.h"
-#if !defined(NO_HARDWARE) && \
- defined(SYS_USING_INTERRUPTS) && \
- defined(SGX530) && (SGX_CORE_REV == 125)
-#define SGX_OCP_REGS_ENABLED
-#endif
-
SYS_DATA* gpsSysData = (SYS_DATA*)IMG_NULL;
SYS_DATA gsSysData;
#define DEVICE_SGX_INTERRUPT (1 << 0)
-#if defined(NO_HARDWARE)
+#if defined(NO_HARDWARE) || defined(SGX_OCP_REGS_ENABLED)
static IMG_CPU_VIRTADDR gsSGXRegsCPUVAddr;
#endif
+#if defined(PVR_LINUX_DYNAMIC_SGX_RESOURCE_INFO)
+extern struct platform_device *gpsPVRLDMDev;
+#endif
+
IMG_UINT32 PVRSRV_BridgeDispatchKM(IMG_UINT32 Ioctl,
IMG_BYTE *pInBuf,
IMG_UINT32 InBufLen,
IMG_UINT32 OutBufLen,
IMG_UINT32 *pdwBytesTransferred);
-#if defined(DEBUG) && defined(DUMP_OMAP34xx_CLOCKS) && defined(__linux__)
-
-#pragma GCC diagnostic ignored "-Wstrict-prototypes"
-#include <mach/clock.h>
-
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29))
-#include <../mach-omap2/clock_34xx.h>
-#define ONCHIP_CLKS onchip_clks
-#else
-#include <../mach-omap2/clock34xx.h>
-#define ONCHIP_CLKS onchip_34xx_clks
-#endif
-
-static void omap3_clk_recalc(struct clk *clk) {}
-static void omap3_followparent_recalc(struct clk *clk) {}
-static void omap3_propagate_rate(struct clk *clk) {}
-static void omap3_table_recalc(struct clk *clk) {}
-static long omap3_round_to_table_rate(struct clk *clk, unsigned long rate) { return 0; }
-static int omap3_select_table_rate(struct clk *clk, unsigned long rate) { return 0; }
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29))
-static void omap3_dpll_recalc(struct clk *clk, unsigned long parent_rate,
- u8 rate_storage) {}
-static void omap3_clkoutx2_recalc(struct clk *clk, unsigned long parent_rate,
- u8 rate_storage) {}
-static void omap3_dpll_allow_idle(struct clk *clk) {}
-static void omap3_dpll_deny_idle(struct clk *clk) {}
-static u32 omap3_dpll_autoidle_read(struct clk *clk) { return 0; }
-static int omap3_noncore_dpll_enable(struct clk *clk) { return 0; }
-static void omap3_noncore_dpll_disable(struct clk *clk) {}
-static int omap3_noncore_dpll_set_rate(struct clk *clk, unsigned long rate) { return 0; }
-static int omap3_core_dpll_m2_set_rate(struct clk *clk, unsigned long rate) { return 0; }
-void followparent_recalc(struct clk *clk, unsigned long new_parent_rate,
- u8 rate_storage) {}
-long omap2_dpll_round_rate(struct clk *clk, unsigned long target_rate) { return 0; }
-void omap2_clksel_recalc(struct clk *clk, unsigned long new_parent_rate,
- u8 rate_storage) {}
-long omap2_clksel_round_rate(struct clk *clk, unsigned long target_rate) { return 0; }
-int omap2_clksel_set_rate(struct clk *clk, unsigned long rate) { return 0; }
-void omap2_fixed_divisor_recalc(struct clk *clk, unsigned long new_parent_rate,
- u8 rate_storage) {}
-void omap2_init_clksel_parent(struct clk *clk) {}
-#endif
-
-static void dump_omap34xx_clocks(void)
-{
- struct clk **c;
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29))
- struct vdd_prcm_config *t1 = vdd1_rate_table;
- struct vdd_prcm_config *t2 = vdd2_rate_table;
-
- t1 = t1;
- t2 = t2;
-#else
-
- omap3_dpll_allow_idle(0);
- omap3_dpll_deny_idle(0);
- omap3_dpll_autoidle_read(0);
- omap3_clk_recalc(0);
- omap3_followparent_recalc(0);
- omap3_propagate_rate(0);
- omap3_table_recalc(0);
- omap3_round_to_table_rate(0, 0);
- omap3_select_table_rate(0, 0);
-#endif
-
- for(c = ONCHIP_CLKS; c < ONCHIP_CLKS + ARRAY_SIZE(ONCHIP_CLKS); c++)
- {
- struct clk *cp = *c, *copy;
- unsigned long rate;
- copy = clk_get(NULL, cp->name);
- if(!copy)
- continue;
- rate = clk_get_rate(copy);
- if (rate < 1000000)
- {
- PVR_DPF((PVR_DBG_ERROR, "%s: clock %s is %lu KHz (%lu Hz)", __func__, cp->name, rate/1000, rate));
- }
- else
- {
- PVR_DPF((PVR_DBG_ERROR, "%s: clock %s is %lu MHz (%lu Hz)", __func__, cp->name, rate/1000000, rate));
- }
- }
-}
-
-#else
-
-static INLINE void dump_omap34xx_clocks(void) {}
-
-#endif
-
#if defined(SGX_OCP_REGS_ENABLED)
-#define SYS_OMAP3430_OCP_REGS_SYS_PHYS_BASE (SYS_OMAP3430_SGX_REGS_SYS_PHYS_BASE + EUR_CR_OCP_REVISION)
-#define SYS_OMAP3430_OCP_REGS_SIZE 0x110
-
static IMG_CPU_VIRTADDR gpvOCPRegsLinAddr;
static PVRSRV_ERROR EnableSGXClocksWrap(SYS_DATA *psSysData)
{
PVRSRV_ERROR eError = EnableSGXClocks(psSysData);
+#if !defined(SGX_OCP_NO_INT_BYPASS)
if(eError == PVRSRV_OK)
{
- OSWriteHWReg(gpvOCPRegsLinAddr,
- EUR_CR_OCP_DEBUG_CONFIG - EUR_CR_OCP_REVISION,
- EUR_CR_OCP_DEBUG_CONFIG_THALIA_INT_BYPASS_MASK);
+ OSWriteHWReg(gpvOCPRegsLinAddr, EUR_CR_OCP_SYSCONFIG, 0x14);
+ OSWriteHWReg(gpvOCPRegsLinAddr, EUR_CR_OCP_DEBUG_CONFIG, EUR_CR_OCP_DEBUG_CONFIG_THALIA_INT_BYPASS_MASK);
}
-
+#endif
return eError;
}
if(eError == PVRSRV_OK)
{
- EnableSGXClocksWrap(psSysData);
+ eError = EnableSGXClocksWrap(psSysData);
+ if (eError != PVRSRV_OK)
+ {
+ DisableSystemClocks(psSysData);
+ }
}
#endif
#if defined(NO_HARDWARE)
PVRSRV_ERROR eError;
IMG_CPU_PHYADDR sCpuPAddr;
+#else
+#if defined(PVR_LINUX_DYNAMIC_SGX_RESOURCE_INFO)
+ struct resource *dev_res;
+ int dev_irq;
+#endif
#endif
PVR_UNREFERENCED_PARAMETER(psSysData);
#if defined(NO_HARDWARE)
- eError = OSBaseAllocContigMemory(SYS_OMAP3430_SGX_REGS_SIZE,
+ gsSGXDeviceMap.ui32RegsSize = SYS_OMAP3430_SGX_REGS_SIZE;
+
+ eError = OSBaseAllocContigMemory(gsSGXDeviceMap.ui32RegsSize,
&gsSGXRegsCPUVAddr,
&sCpuPAddr);
if(eError != PVRSRV_OK)
}
gsSGXDeviceMap.sRegsCpuPBase = sCpuPAddr;
gsSGXDeviceMap.sRegsSysPBase = SysCpuPAddrToSysPAddr(gsSGXDeviceMap.sRegsCpuPBase);
- gsSGXDeviceMap.ui32RegsSize = SYS_OMAP3430_SGX_REGS_SIZE;
#if defined(__linux__)
gsSGXDeviceMap.pvRegsCpuVBase = gsSGXRegsCPUVAddr;
gsSGXDeviceMap.pvRegsCpuVBase = IMG_NULL;
#endif
- OSMemSet(gsSGXRegsCPUVAddr, 0, SYS_OMAP3430_SGX_REGS_SIZE);
+ OSMemSet(gsSGXRegsCPUVAddr, 0, gsSGXDeviceMap.ui32RegsSize);
gsSGXDeviceMap.ui32IRQ = 0;
#else
+#if defined(PVR_LINUX_DYNAMIC_SGX_RESOURCE_INFO)
+
+ dev_res = platform_get_resource(gpsPVRLDMDev, IORESOURCE_MEM, 0);
+ if (dev_res == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: platform_get_resource failed", __FUNCTION__));
+ return PVRSRV_ERROR_INVALID_DEVICE;
+ }
+ dev_irq = platform_get_irq(gpsPVRLDMDev, 0);
+ if (dev_irq < 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: platform_get_irq failed (%d)", __FUNCTION__, -dev_irq));
+ return PVRSRV_ERROR_INVALID_DEVICE;
+ }
+
+ gsSGXDeviceMap.sRegsSysPBase.uiAddr = dev_res->start;
+ gsSGXDeviceMap.sRegsCpuPBase =
+ SysSysPAddrToCpuPAddr(gsSGXDeviceMap.sRegsSysPBase);
+ PVR_TRACE(("SGX register base: 0x%lx", (unsigned long)gsSGXDeviceMap.sRegsCpuPBase.uiAddr));
+
+ gsSGXDeviceMap.ui32RegsSize = (unsigned int)(dev_res->end - dev_res->start);
+ PVR_TRACE(("SGX register size: %d",gsSGXDeviceMap.ui32RegsSize));
+
+ gsSGXDeviceMap.ui32IRQ = dev_irq;
+ PVR_TRACE(("SGX IRQ: %d", gsSGXDeviceMap.ui32IRQ));
+#else
gsSGXDeviceMap.sRegsSysPBase.uiAddr = SYS_OMAP3430_SGX_REGS_SYS_PHYS_BASE;
gsSGXDeviceMap.sRegsCpuPBase = SysSysPAddrToCpuPAddr(gsSGXDeviceMap.sRegsSysPBase);
gsSGXDeviceMap.ui32RegsSize = SYS_OMAP3430_SGX_REGS_SIZE;
gsSGXDeviceMap.ui32IRQ = SYS_OMAP3430_SGX_IRQ;
+#endif
+#if defined(SGX_OCP_REGS_ENABLED)
+ gsSGXRegsCPUVAddr = OSMapPhysToLin(gsSGXDeviceMap.sRegsCpuPBase,
+ gsSGXDeviceMap.ui32RegsSize,
+ PVRSRV_HAP_UNCACHED|PVRSRV_HAP_KERNEL_ONLY,
+ IMG_NULL);
+
+ if (gsSGXRegsCPUVAddr == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysLocateDevices: Failed to map SGX registers"));
+ return PVRSRV_ERROR_BAD_MAPPING;
+ }
+
+
+ gsSGXDeviceMap.pvRegsCpuVBase = gsSGXRegsCPUVAddr;
+ gpvOCPRegsLinAddr = gsSGXRegsCPUVAddr;
+#endif
#endif
#if defined(PDUMP)
}
-IMG_CHAR *SysCreateVersionString(IMG_CPU_PHYADDR sRegRegion)
+static IMG_CHAR *SysCreateVersionString(void)
{
static IMG_CHAR aszVersionString[100];
SYS_DATA *psSysData;
#if !defined(NO_HARDWARE)
IMG_VOID *pvRegsLinAddr;
- pvRegsLinAddr = OSMapPhysToLin(sRegRegion,
+ pvRegsLinAddr = OSMapPhysToLin(gsSGXDeviceMap.sRegsCpuPBase,
SYS_OMAP3430_SGX_REGS_SIZE,
PVRSRV_HAP_UNCACHED|PVRSRV_HAP_KERNEL_ONLY,
IMG_NULL);
IMG_UINT32 i;
PVRSRV_ERROR eError;
PVRSRV_DEVICE_NODE *psDeviceNode;
+#if !defined(PVR_NO_OMAP_TIMER)
IMG_CPU_PHYADDR TimerRegPhysBase;
+#endif
#if !defined(SGX_DYNAMIC_TIMING_INFO)
SGX_TIMING_INFORMATION* psTimingInfo;
#endif
return eError;
}
- TimerRegPhysBase.uiAddr = SYS_OMAP3430_GP11TIMER_REGS_SYS_PHYS_BASE;
- gpsSysData->pvSOCTimerRegisterKM = IMG_NULL;
- gpsSysData->hSOCTimerRegisterOSMemHandle = 0;
- OSReservePhys(TimerRegPhysBase,
- 4,
- PVRSRV_HAP_MULTI_PROCESS|PVRSRV_HAP_UNCACHED,
- (IMG_VOID **)&gpsSysData->pvSOCTimerRegisterKM,
- &gpsSysData->hSOCTimerRegisterOSMemHandle);
-
#if !defined(SGX_DYNAMIC_TIMING_INFO)
psTimingInfo = &gsSGXDeviceMap.sTimingInfo;
return eError;
}
SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LOCATEDEV);
-
-#if defined(SGX_OCP_REGS_ENABLED)
+#if 0
+ eError = SysPMRuntimeRegister();
+ if (eError != PVRSRV_OK)
{
- IMG_SYS_PHYADDR sOCPRegsSysPBase;
- IMG_CPU_PHYADDR sOCPRegsCpuPBase;
-
- sOCPRegsSysPBase.uiAddr = SYS_OMAP3430_OCP_REGS_SYS_PHYS_BASE;
- sOCPRegsCpuPBase = SysSysPAddrToCpuPAddr(sOCPRegsSysPBase);
-
- gpvOCPRegsLinAddr = OSMapPhysToLin(sOCPRegsCpuPBase,
- SYS_OMAP3430_OCP_REGS_SIZE,
- PVRSRV_HAP_UNCACHED|PVRSRV_HAP_KERNEL_ONLY,
- IMG_NULL);
-
- if (gpvOCPRegsLinAddr == IMG_NULL)
- {
- PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to map OCP registers"));
- return PVRSRV_ERROR_BAD_MAPPING;
- }
+ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to register with OSPM!"));
+ (IMG_VOID)SysDeinitialise(gpsSysData);
+ gpsSysData = IMG_NULL;
+ return eError;
}
+ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_PM_RUNTIME);
#endif
-
}
#endif
- dump_omap34xx_clocks();
-
eError = PVRSRVInitialiseDevice(gui32SGXDeviceID);
if (eError != PVRSRV_OK)
{
DisableSGXClocks(gpsSysData);
#endif
+#if !defined(PVR_NO_OMAP_TIMER)
+#if defined(PVR_OMAP_TIMER_BASE_IN_SYS_SPEC_DATA)
+ TimerRegPhysBase = gsSysSpecificData.sTimerRegPhysBase;
+#else
+ TimerRegPhysBase.uiAddr = SYS_OMAP3430_GP11TIMER_REGS_SYS_PHYS_BASE;
+#endif
+ gpsSysData->pvSOCTimerRegisterKM = IMG_NULL;
+ gpsSysData->hSOCTimerRegisterOSMemHandle = 0;
+ if (TimerRegPhysBase.uiAddr != 0)
+ {
+ OSReservePhys(TimerRegPhysBase,
+ 4,
+ PVRSRV_HAP_MULTI_PROCESS|PVRSRV_HAP_UNCACHED,
+ (IMG_VOID **)&gpsSysData->pvSOCTimerRegisterKM,
+ &gpsSysData->hSOCTimerRegisterOSMemHandle);
+ }
+#endif
+
return PVRSRV_OK;
}
eError = EnableSGXClocksWrap(gpsSysData);
if (eError != PVRSRV_OK)
{
- PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to Enable SGX clocks (%d)", eError));
- (IMG_VOID)SysDeinitialise(gpsSysData);
- gpsSysData = IMG_NULL;
+ PVR_DPF((PVR_DBG_ERROR,"SysFinalise: Failed to Enable SGX clocks (%d)", eError));
return eError;
}
#endif
-#if defined(SYS_USING_INTERRUPTS)
-
eError = OSInstallMISR(gpsSysData);
if (eError != PVRSRV_OK)
{
PVR_DPF((PVR_DBG_ERROR,"SysFinalise: Failed to install MISR"));
- (IMG_VOID)SysDeinitialise(gpsSysData);
- gpsSysData = IMG_NULL;
return eError;
}
SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_MISR);
+#if defined(SYS_USING_INTERRUPTS)
eError = OSInstallDeviceLISR(gpsSysData, gsSGXDeviceMap.ui32IRQ, "SGX ISR", gpsSGXDevNode);
if (eError != PVRSRV_OK)
{
PVR_DPF((PVR_DBG_ERROR,"SysFinalise: Failed to install ISR"));
- (IMG_VOID)SysDeinitialise(gpsSysData);
- gpsSysData = IMG_NULL;
return eError;
}
SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LISR);
#endif
+#if defined(__linux__)
- gpsSysData->pszVersionString = SysCreateVersionString(gsSGXDeviceMap.sRegsCpuPBase);
+ gpsSysData->pszVersionString = SysCreateVersionString();
if (!gpsSysData->pszVersionString)
{
PVR_DPF((PVR_DBG_ERROR,"SysFinalise: Failed to create a system version string"));
}
else
{
- PVR_DPF((PVR_DBG_WARNING, "SysFinalise: Version string: %s", gpsSysData->pszVersionString));
+ PVR_TRACE(("SysFinalise: Version string: %s", gpsSysData->pszVersionString));
}
+#endif
#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
{
PVRSRV_ERROR eError;
+ if(gpsSysData->pvSOCTimerRegisterKM)
+ {
+ OSUnReservePhys(gpsSysData->pvSOCTimerRegisterKM,
+ 4,
+ PVRSRV_HAP_MULTI_PROCESS|PVRSRV_HAP_UNCACHED,
+ gpsSysData->hSOCTimerRegisterOSMemHandle);
+ }
+
#if defined(SYS_USING_INTERRUPTS)
if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LISR))
{
return eError;
}
}
+#endif
if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_MISR))
{
return eError;
}
}
-#else
- PVR_UNREFERENCED_PARAMETER(psSysData);
-#endif
if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_INITDEV))
{
return eError;
}
}
-
-#if defined(SGX_OCP_REGS_ENABLED)
- OSUnMapPhysToLin(gpvOCPRegsLinAddr,
- SYS_OMAP3430_OCP_REGS_SIZE,
- PVRSRV_HAP_UNCACHED|PVRSRV_HAP_KERNEL_ONLY,
- IMG_NULL);
+#if 0
+ if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_PM_RUNTIME))
+ {
+ eError = SysPMRuntimeUnregister();
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: Failed to unregister with OSPM!"));
+ gpsSysData = IMG_NULL;
+ return eError;
+ }
+ }
#endif
-
if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS))
}
}
- if(gpsSysData->pvSOCTimerRegisterKM)
- {
- OSUnReservePhys(gpsSysData->pvSOCTimerRegisterKM,
- 4,
- PVRSRV_HAP_MULTI_PROCESS|PVRSRV_HAP_UNCACHED,
- gpsSysData->hSOCTimerRegisterOSMemHandle);
- }
-
SysDeinitialiseCommon(gpsSysData);
-#if defined(NO_HARDWARE)
- if(SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LOCATEDEV))
+#if defined(NO_HARDWARE) || defined(SGX_OCP_REGS_ENABLED)
+ if(gsSGXRegsCPUVAddr != IMG_NULL)
{
+#if defined(NO_HARDWARE)
OSBaseFreeContigMemory(SYS_OMAP3430_SGX_REGS_SIZE, gsSGXRegsCPUVAddr, gsSGXDeviceMap.sRegsCpuPBase);
- }
+#else
+#if defined(SGX_OCP_REGS_ENABLED)
+ OSUnMapPhysToLin(gsSGXRegsCPUVAddr,
+ gsSGXDeviceMap.ui32RegsSize,
+ PVRSRV_HAP_UNCACHED|PVRSRV_HAP_KERNEL_ONLY,
+ IMG_NULL);
+
+ gpvOCPRegsLinAddr = IMG_NULL;
#endif
+#endif
+ gsSGXRegsCPUVAddr = IMG_NULL;
+ gsSGXDeviceMap.pvRegsCpuVBase = gsSGXRegsCPUVAddr;
+ }
+#endif
gpsSysSpecificData->ui32SysSpecificData = 0;
IMG_VOID SysClearInterrupts(SYS_DATA* psSysData, IMG_UINT32 ui32ClearBits)
{
- PVR_UNREFERENCED_PARAMETER(psSysData);
PVR_UNREFERENCED_PARAMETER(ui32ClearBits);
-
+#if defined(NO_HARDWARE)
+ PVR_UNREFERENCED_PARAMETER(psSysData);
+#else
+#if defined(SGX_OCP_NO_INT_BYPASS)
+ OSWriteHWReg(gpvOCPRegsLinAddr, EUR_CR_OCP_IRQSTATUS_2, 0x1);
+#endif
- OSReadHWReg(((PVRSRV_SGXDEV_INFO *)gpsSGXDevNode->pvDevice)->pvRegsBaseKM,
- EUR_CR_EVENT_HOST_CLEAR);
+ OSReadHWReg(((PVRSRV_SGXDEV_INFO *)gpsSGXDevNode->pvDevice)->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR);
+#endif
}
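+/* With SGX_OCP_NO_INT_BYPASS, SGX interrupts stay routed through the OCP
+   wrapper, so they are unmasked and masked here via EUR_CR_OCP_IRQENABLE_SET_2
+   and EUR_CR_OCP_IRQENABLE_CLR_2, with SYS_SPECIFIC_DATA_IRQ_ENABLED tracking
+   the current state. */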
+#if defined(SGX_OCP_NO_INT_BYPASS)
+IMG_VOID SysEnableSGXInterrupts(SYS_DATA *psSysData)
+{
+ SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *)psSysData->pvSysSpecificData;
+ if (SYS_SPECIFIC_DATA_TEST(psSysSpecData, SYS_SPECIFIC_DATA_ENABLE_LISR) && !SYS_SPECIFIC_DATA_TEST(psSysSpecData, SYS_SPECIFIC_DATA_IRQ_ENABLED))
+ {
+ OSWriteHWReg(gpvOCPRegsLinAddr, EUR_CR_OCP_IRQSTATUS_2, 0x1);
+ OSWriteHWReg(gpvOCPRegsLinAddr, EUR_CR_OCP_IRQENABLE_SET_2, 0x1);
+ SYS_SPECIFIC_DATA_SET(psSysSpecData, SYS_SPECIFIC_DATA_IRQ_ENABLED);
+ }
+}
+
+IMG_VOID SysDisableSGXInterrupts(SYS_DATA *psSysData)
+{
+ SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *)psSysData->pvSysSpecificData;
+
+ if (SYS_SPECIFIC_DATA_TEST(psSysSpecData, SYS_SPECIFIC_DATA_IRQ_ENABLED))
+ {
+ OSWriteHWReg(gpvOCPRegsLinAddr, EUR_CR_OCP_IRQENABLE_CLR_2, 0x1);
+ SYS_SPECIFIC_DATA_CLEAR(psSysSpecData, SYS_SPECIFIC_DATA_IRQ_ENABLED);
+ }
+}
+#endif
PVRSRV_ERROR SysSystemPrePowerState(PVRSRV_SYS_POWER_STATE eNewPowerState)
{
#if !defined(__SOCCONFIG_H__)
#define __SOCCONFIG_H__
-#include "syscommon.h"
#define VS_PRODUCT_NAME "OMAP3"
-#if defined(SGX530) && (SGX_CORE_REV == 125)
-#define SYS_SGX_CLOCK_SPEED 200000000
-#else
#define SYS_SGX_CLOCK_SPEED 110666666
-#endif
#define SYS_SGX_HWRECOVERY_TIMEOUT_FREQ (100)
#define SYS_SGX_PDS_TIMER_FREQ (1000)
#if !defined(SYS_SGX_ACTIVE_POWER_LATENCY_MS)
-#define SYS_SGX_ACTIVE_POWER_LATENCY_MS (1)
+#define SYS_SGX_ACTIVE_POWER_LATENCY_MS (2)
#endif
/**********************************************************************
*
- * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
#if defined(PVR_LINUX_USING_WORKQUEUES)
#define MAX_HW_TIME_US (1000000)
+#define WAIT_TRY_COUNT (20000)
#else
#define MAX_HW_TIME_US (500000)
+#define WAIT_TRY_COUNT (10000)
#endif
-#define WAIT_TRY_COUNT (10000)
-#define SYS_DEVICE_COUNT 3
+#define SYS_DEVICE_COUNT 15
#endif
/**********************************************************************
*
- * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
#endif
#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
+#if !defined(LDM_PLATFORM)
+#error "LDM_PLATFORM must be set"
+#endif
+/*#define PVR_LINUX_DYNAMIC_SGX_RESOURCE_INFO*/
+//#include <linux/platform_device.h>
+#endif
+
+#if ((defined(DEBUG) || defined(TIMING)) && \
+ (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))) && \
+ !defined(PVR_NO_OMAP_TIMER)
+#define PVR_OMAP3_TIMING_PRCM
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
+/*#include <plat/gpu.h>*/
+#if !defined(PVR_NO_OMAP_TIMER)
+/*#define PVR_OMAP_USE_DM_TIMER_API*/
+//#include <plat/dmtimer.h>
+#endif
+#endif
+
+#if !defined(PVR_NO_OMAP_TIMER)
+//#define PVR_OMAP_TIMER_BASE_IN_SYS_SPEC_DATA
+#endif
#endif
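+/* OCP wrapper register access is only wanted on real hardware, with
+   interrupts, on SGX540; SGX_OCP_NO_INT_BYPASS (left commented out below)
+   would additionally keep interrupts routed through the wrapper on
+   2.6.35+ kernels. */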
+#if !defined(NO_HARDWARE) && \
+ defined(SYS_USING_INTERRUPTS) && \
+ defined(SGX540)
+#define SGX_OCP_REGS_ENABLED
+#endif
+
+#if defined(__linux__)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) && defined(SGX_OCP_REGS_ENABLED)
+/*#define SGX_OCP_NO_INT_BYPASS*/
+#endif
+#endif
+
#if defined (__cplusplus)
extern "C" {
#endif
-IMG_CHAR *SysCreateVersionString(IMG_CPU_PHYADDR sRegRegion);
-
IMG_VOID DisableSystemClocks(SYS_DATA *psSysData);
PVRSRV_ERROR EnableSystemClocks(SYS_DATA *psSysData);
#define SYS_SPECIFIC_DATA_PM_UNINSTALL_LISR 0x00000200
#define SYS_SPECIFIC_DATA_PM_DISABLE_SYSCLOCKS 0x00000400
+#define SYS_SPECIFIC_DATA_ENABLE_OCPREGS 0x00000800
+#define SYS_SPECIFIC_DATA_ENABLE_PM_RUNTIME 0x00001000
+#if defined(SGX_OCP_REGS_ENABLED) && defined(SGX_OCP_NO_INT_BYPASS)
+#define SYS_SPECIFIC_DATA_IRQ_ENABLED 0x00002000
+#endif
#define SYS_SPECIFIC_DATA_SET(psSysSpecData, flag) ((IMG_VOID)((psSysSpecData)->ui32SysSpecificData |= (flag)))
IMG_UINT32 ui32SysSpecificData;
PVRSRV_DEVICE_NODE *psSGXDevNode;
IMG_BOOL bSGXInitComplete;
+#if defined(PVR_OMAP_TIMER_BASE_IN_SYS_SPEC_DATA)
+ IMG_CPU_PHYADDR sTimerRegPhysBase;
+#endif
#if !defined(__linux__)
IMG_BOOL bSGXClocksEnabled;
#endif
atomic_t sNotifyLockCPU;
IMG_BOOL bCallVDD2PostFunc;
#endif
- struct clk *psCORE_CK;
- struct clk *psSGX_FCK;
- struct clk *psSGX_ICK;
- struct clk *psMPU_CK;
+ struct clk *psCORE_CK;
+ struct clk *psSGX_FCK;
+ struct clk *psSGX_ICK;
+
#if defined(DEBUG) || defined(TIMING)
struct clk *psGPT11_FCK;
struct clk *psGPT11_ICK;
#endif
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22))
- struct constraint_handle *pVdd2Handle;
-#endif
+#if defined(PVR_OMAP_USE_DM_TIMER_API)
+ struct omap_dm_timer *psGPTimer;
+#endif
#endif
} SYS_SPECIFIC_DATA;
extern SYS_SPECIFIC_DATA *gpsSysSpecificData;
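+/* Only real functions when both SGX_OCP_REGS_ENABLED and SGX_OCP_NO_INT_BYPASS
+   are defined (see sysconfig.c); otherwise they compile away to empty macros. */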
+#if defined(SGX_OCP_REGS_ENABLED) && defined(SGX_OCP_NO_INT_BYPASS)
+IMG_VOID SysEnableSGXInterrupts(SYS_DATA* psSysData);
+IMG_VOID SysDisableSGXInterrupts(SYS_DATA* psSysData);
+#else
+#define SysEnableSGXInterrupts(psSysData)
+#define SysDisableSGXInterrupts(psSysData)
+#endif
+
#if defined(SYS_CUSTOM_POWERLOCK_WRAP)
IMG_BOOL WrapSystemPowerChange(SYS_SPECIFIC_DATA *psSysSpecData);
IMG_VOID UnwrapSystemPowerChange(SYS_SPECIFIC_DATA *psSysSpecData);
#endif
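+/* Linux builds take the out-of-line SysPMRuntimeRegister/Unregister in
+   sysutils_linux.c; other platforms fall back to these inline no-ops. */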
+#if defined(__linux__)
+
+PVRSRV_ERROR SysPMRuntimeRegister(void);
+PVRSRV_ERROR SysPMRuntimeUnregister(void);
+
+#else
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(SysPMRuntimeRegister)
+#endif
+static INLINE PVRSRV_ERROR SysPMRuntimeRegister(void)
+{
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(SysPMRuntimeUnregister)
+#endif
+static INLINE PVRSRV_ERROR SysPMRuntimeUnregister(void)
+{
+ return PVRSRV_OK;
+}
+
+#endif
+
#if defined(__cplusplus)
}
#endif
/**********************************************************************
*
- * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
******************************************************************************/
#if defined(__linux__)
-#if defined(PVR_LINUX_USING_WORKQUEUES)
-#include "sysutils_linux_wqueue_compat.c"
-#else
#include "sysutils_linux.c"
#endif
-#endif
/**********************************************************************
*
- * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/hardirq.h>
-#include <linux/spinlock.h>
-#include <asm/bug.h>
-
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26))
-#include <linux/semaphore.h>
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,31))
-#include <plat/resource.h>
-#else
-#include <mach/resource.h>
-#endif
-#else
-#include <asm/semaphore.h>
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22))
-#include <asm/arch/resource.h>
-#endif
-#endif
+#include <linux/mutex.h>
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22)) && \
- (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27))
-#define CONSTRAINT_NOTIFICATIONS
-#endif
#include "sgxdefs.h"
#include "services_headers.h"
#include "sysinfo.h"
#include "sgxinfokm.h"
#include "syslocal.h"
+//#include <linux/platform_device.h>
+//#include <linux/pm_runtime.h>
+
#define ONE_MHZ 1000000
#define HZ_TO_MHZ(m) ((m) / ONE_MHZ)
#define SGX_PARENT_CLOCK "core_ck"
#endif
-#if !defined(PDUMP) && !defined(NO_HARDWARE)
-static IMG_BOOL PowerLockWrappedOnCPU(SYS_SPECIFIC_DATA *psSysSpecData)
-{
- IMG_INT iCPU;
- IMG_BOOL bLocked = IMG_FALSE;
-
- if (!in_interrupt())
- {
- iCPU = get_cpu();
- bLocked = (iCPU == atomic_read(&psSysSpecData->sPowerLockCPU));
-
- put_cpu();
- }
+#if defined(LDM_PLATFORM) && !defined(PVR_DRI_DRM_NOT_PCI)
+extern struct platform_device *gpsPVRLDMDev;
+#endif
- return bLocked;
-}
-static IMG_VOID PowerLockWrap(SYS_SPECIFIC_DATA *psSysSpecData)
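+/* The power lock is now a mutex rather than the old spinlock/CPU-tracking
+   scheme: callers may sleep, and bTryLock allows the wrap to fail with
+   PVRSRV_ERROR_RETRY instead of blocking. Nothing is taken in interrupt
+   context. */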
+static PVRSRV_ERROR PowerLockWrap(SYS_SPECIFIC_DATA *psSysSpecData, IMG_BOOL bTryLock)
{
- IMG_INT iCPU;
-
- if (!in_interrupt())
- {
-
- iCPU = get_cpu();
-
-
- PVR_ASSERT(iCPU != -1);
-
- PVR_ASSERT(!PowerLockWrappedOnCPU(psSysSpecData));
-
- spin_lock(&psSysSpecData->sPowerLock);
-
- atomic_set(&psSysSpecData->sPowerLockCPU, iCPU);
- }
+ if (!in_interrupt())
+ {
+ if (bTryLock)
+ {
+ int locked = mutex_trylock(&psSysSpecData->sPowerLock);
+ if (locked == 0)
+ {
+ return PVRSRV_ERROR_RETRY;
+ }
+ }
+ else
+ {
+ mutex_lock(&psSysSpecData->sPowerLock);
+ }
+ }
+
+ return PVRSRV_OK;
}
static IMG_VOID PowerLockUnwrap(SYS_SPECIFIC_DATA *psSysSpecData)
{
- if (!in_interrupt())
- {
- PVR_ASSERT(PowerLockWrappedOnCPU(psSysSpecData));
-
- atomic_set(&psSysSpecData->sPowerLockCPU, -1);
-
- spin_unlock(&psSysSpecData->sPowerLock);
-
- put_cpu();
- }
+ if (!in_interrupt())
+ {
+ mutex_unlock(&psSysSpecData->sPowerLock);
+ }
}
-PVRSRV_ERROR SysPowerLockWrap(SYS_DATA *psSysData)
+PVRSRV_ERROR SysPowerLockWrap(IMG_BOOL bTryLock)
{
- SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
+ SYS_DATA *psSysData;
- PowerLockWrap(psSysSpecData);
+ SysAcquireData(&psSysData);
- return PVRSRV_OK;
+ return PowerLockWrap(psSysData->pvSysSpecificData, bTryLock);
}
-IMG_VOID SysPowerLockUnwrap(SYS_DATA *psSysData)
+IMG_VOID SysPowerLockUnwrap(IMG_VOID)
{
- SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
+ SYS_DATA *psSysData;
- PowerLockUnwrap(psSysSpecData);
-}
-#else
-static IMG_BOOL PowerLockWrappedOnCPU(SYS_SPECIFIC_DATA unref__ *psSysSpecData)
-{
- return IMG_FALSE;
-}
+ SysAcquireData(&psSysData);
-static IMG_VOID PowerLockWrap(SYS_SPECIFIC_DATA unref__ *psSysSpecData)
-{
+ PowerLockUnwrap(psSysData->pvSysSpecificData);
}
-static IMG_VOID PowerLockUnwrap(SYS_SPECIFIC_DATA unref__ *psSysSpecData)
-{
-}
-PVRSRV_ERROR SysPowerLockWrap(SYS_DATA unref__ *psSysData)
-{
- return PVRSRV_OK;
-}
-IMG_VOID SysPowerLockUnwrap(SYS_DATA unref__ *psSysData)
-{
-}
-#endif
IMG_BOOL WrapSystemPowerChange(SYS_SPECIFIC_DATA *psSysSpecData)
{
- IMG_BOOL bPowerLock = PowerLockWrappedOnCPU(psSysSpecData);
-
- if (bPowerLock)
- {
- PowerLockUnwrap(psSysSpecData);
- }
-
- return bPowerLock;
+ return IMG_TRUE;
}
IMG_VOID UnwrapSystemPowerChange(SYS_SPECIFIC_DATA *psSysSpecData)
{
- PowerLockWrap(psSysSpecData);
}
static inline IMG_UINT32 scale_by_rate(IMG_UINT32 val, IMG_UINT32 rate1, IMG_UINT32 rate2)
{
IMG_UINT32 rate;
-#if defined(NO_HARDWARE)
rate = SYS_SGX_CLOCK_SPEED;
-#else
+#if !defined(NO_HARDWARE)
PVR_ASSERT(atomic_read(&gpsSysSpecificData->sSGXClocksEnabled) != 0);
-
- rate = clk_get_rate(gpsSysSpecificData->psSGX_FCK);
- PVR_ASSERT(rate != 0);
#endif
psTimingInfo->ui32CoreClockSpeed = rate;
psTimingInfo->ui32HWRecoveryFreq = scale_prop_to_SGX_clock(SYS_SGX_HWRECOVERY_TIMEOUT_FREQ, rate);
psTimingInfo->ui32ActivePowManLatencyms = SYS_SGX_ACTIVE_POWER_LATENCY_MS;
}
-#if defined(CONSTRAINT_NOTIFICATIONS)
-#if !defined(SGX_DYNAMIC_TIMING_INFO)
-#error "SGX_DYNAMIC_TIMING_INFO must be defined for this platform"
-#endif
-
-static struct constraint_id cnstr_id_vdd2 = {
- .type = RES_OPP_CO,
- .data = (IMG_VOID *)"vdd2_opp"
-};
-
-#if !defined(PDUMP) && !defined(NO_HARDWARE)
-static inline IMG_BOOL ConstraintNotificationsEnabled(SYS_SPECIFIC_DATA *psSysSpecData)
-{
- return (atomic_read(&psSysSpecData->sSGXClocksEnabled) != 0) && psSysSpecData->bSGXInitComplete && psSysSpecData->bConstraintNotificationsEnabled;
-
-}
-
-static IMG_BOOL NotifyLockedOnCPU(SYS_SPECIFIC_DATA *psSysSpecData)
-{
- IMG_INT iCPU = get_cpu();
- IMG_BOOL bLocked = (iCPU == atomic_read(&psSysSpecData->sNotifyLockCPU));
-
- put_cpu();
-
- return bLocked;
-}
-
-static IMG_VOID NotifyLock(SYS_SPECIFIC_DATA *psSysSpecData)
-{
- IMG_INT iCPU;
-
- BUG_ON(in_interrupt());
-
-
- iCPU = get_cpu();
-
-
- PVR_ASSERT(iCPU != -1);
-
- PVR_ASSERT(!NotifyLockedOnCPU(psSysSpecData));
-
- spin_lock(&psSysSpecData->sNotifyLock);
-
- atomic_set(&psSysSpecData->sNotifyLockCPU, iCPU);
-}
-
-static IMG_VOID NotifyUnlock(SYS_SPECIFIC_DATA *psSysSpecData)
-{
- PVR_ASSERT(NotifyLockedOnCPU(psSysSpecData));
-
- atomic_set(&psSysSpecData->sNotifyLockCPU, -1);
-
- spin_unlock(&psSysSpecData->sNotifyLock);
-
- put_cpu();
-}
-
-static int VDD2PostFunc(struct notifier_block *n, unsigned long event, IMG_VOID *ptr)
-{
- PVR_UNREFERENCED_PARAMETER(n);
- PVR_UNREFERENCED_PARAMETER(event);
- PVR_UNREFERENCED_PARAMETER(ptr);
-
- if (in_interrupt())
- {
- PVR_DPF((PVR_DBG_ERROR, "%s Called in interrupt context. Ignoring.", __FUNCTION__));
- return 0;
- }
-
-
- if (!NotifyLockedOnCPU(gpsSysSpecificData))
- {
- return 0;
- }
-
-#if defined(DEBUG)
- if (ConstraintNotificationsEnabled(gpsSysSpecificData))
- {
- IMG_UINT32 rate;
-
- rate = clk_get_rate(gpsSysSpecificData->psSGX_FCK);
-
- PVR_ASSERT(rate != 0);
-
- PVR_DPF((PVR_DBG_MESSAGE, "%s: SGX clock rate: %dMHz", __FUNCTION__, HZ_TO_MHZ(rate)));
- }
-#endif
- if (gpsSysSpecificData->bCallVDD2PostFunc)
- {
- PVRSRVDevicePostClockSpeedChange(gpsSysSpecificData->psSGXDevNode->sDevId.ui32DeviceIndex, IMG_TRUE, IMG_NULL);
-
- gpsSysSpecificData->bCallVDD2PostFunc = IMG_FALSE;
- }
- else
- {
- if (ConstraintNotificationsEnabled(gpsSysSpecificData))
- {
- PVR_TRACE(("%s: Not calling PVR clock speed notification functions", __FUNCTION__));
- }
- }
-
- NotifyUnlock(gpsSysSpecificData);
-
- return 0;
-}
-
-static int VDD2PreFunc(struct notifier_block *n, unsigned long event, IMG_VOID *ptr)
-{
- PVR_UNREFERENCED_PARAMETER(n);
- PVR_UNREFERENCED_PARAMETER(event);
- PVR_UNREFERENCED_PARAMETER(ptr);
-
- if (in_interrupt())
- {
- PVR_DPF((PVR_DBG_WARNING, "%s Called in interrupt context. Ignoring.", __FUNCTION__));
- return 0;
- }
-
- if (PowerLockWrappedOnCPU(gpsSysSpecificData))
- {
- PVR_DPF((PVR_DBG_WARNING, "%s Called from within a power transition. Ignoring.", __FUNCTION__));
- return 0;
- }
-
- NotifyLock(gpsSysSpecificData);
-
- PVR_ASSERT(!gpsSysSpecificData->bCallVDD2PostFunc);
-
- if (ConstraintNotificationsEnabled(gpsSysSpecificData))
- {
- PVRSRV_ERROR eError;
-
- eError = PVRSRVDevicePreClockSpeedChange(gpsSysSpecificData->psSGXDevNode->sDevId.ui32DeviceIndex, IMG_TRUE, IMG_NULL);
-
- gpsSysSpecificData->bCallVDD2PostFunc = (eError == PVRSRV_OK);
-
- }
-
- return 0;
-}
-
-static struct notifier_block sVDD2Pre = {
- VDD2PreFunc,
- NULL
-};
-
-static struct notifier_block sVDD2Post = {
- VDD2PostFunc,
- NULL
-};
-
-static IMG_VOID RegisterConstraintNotifications(IMG_VOID)
-{
- PVR_TRACE(("Registering constraint notifications"));
-
- PVR_ASSERT(!gpsSysSpecificData->bConstraintNotificationsEnabled);
-
- constraint_register_pre_notification(gpsSysSpecificData->pVdd2Handle, &sVDD2Pre,
- max_vdd2_opp+1);
-
- constraint_register_post_notification(gpsSysSpecificData->pVdd2Handle, &sVDD2Post,
- max_vdd2_opp+1);
-
-
- NotifyLock(gpsSysSpecificData);
- gpsSysSpecificData->bConstraintNotificationsEnabled = IMG_TRUE;
- NotifyUnlock(gpsSysSpecificData);
-
- PVR_TRACE(("VDD2 constraint notifications registered"));
-}
-
-static IMG_VOID UnRegisterConstraintNotifications(IMG_VOID)
-{
- PVR_TRACE(("Unregistering constraint notifications"));
-
-
- NotifyLock(gpsSysSpecificData);
- gpsSysSpecificData->bConstraintNotificationsEnabled = IMG_FALSE;
- NotifyUnlock(gpsSysSpecificData);
-
-
- constraint_unregister_pre_notification(gpsSysSpecificData->pVdd2Handle, &sVDD2Pre,
- max_vdd2_opp+1);
-
- constraint_unregister_post_notification(gpsSysSpecificData->pVdd2Handle, &sVDD2Post,
- max_vdd2_opp+1);
-}
-#else
-static IMG_VOID RegisterConstraintNotifications(IMG_VOID)
-{
-}
-
-static IMG_VOID UnRegisterConstraintNotifications(IMG_VOID)
-{
-}
-#endif
-#endif
-
PVRSRV_ERROR EnableSGXClocks(SYS_DATA *psSysData)
{
#if !defined(NO_HARDWARE)
SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
- long lNewRate;
- long lRate;
IMG_INT res;
-
+ long lRate, lNewRate;
if (atomic_read(&psSysSpecData->sSGXClocksEnabled) != 0)
{
}
PVR_DPF((PVR_DBG_MESSAGE, "EnableSGXClocks: Enabling SGX Clocks"));
-
-#if defined(DEBUG)
- {
-
- IMG_UINT32 rate = clk_get_rate(psSysSpecData->psMPU_CK);
- PVR_DPF((PVR_DBG_MESSAGE, "EnableSGXClocks: CPU Clock is %dMhz", HZ_TO_MHZ(rate)));
- }
-#endif
-
- res = clk_enable(psSysSpecData->psSGX_FCK);
+
+ res = clk_enable(psSysSpecData->psSGX_FCK);
if (res < 0)
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: Couldn't enable SGX functional clock (%d)", res));
- return PVRSRV_ERROR_UNABLE_TO_ENABLE_CLOCK;
- }
+ {
+ PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: Couldn't enable SGX functional clock (%d)", res));
+ return PVRSRV_ERROR_UNABLE_TO_ENABLE_CLOCK;
+ }
- res = clk_enable(psSysSpecData->psSGX_ICK);
- if (res < 0)
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: Couldn't enable SGX interface clock (%d)", res));
+ res = clk_enable(psSysSpecData->psSGX_ICK);
+ if (res < 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: Couldn't enable SGX interface clock (%d)", res));
- clk_disable(psSysSpecData->psSGX_FCK);
- return PVRSRV_ERROR_UNABLE_TO_ENABLE_CLOCK;
- }
+ clk_disable(psSysSpecData->psSGX_FCK);
+ return PVRSRV_ERROR_UNABLE_TO_ENABLE_CLOCK;
+ }
lNewRate = clk_round_rate(psSysSpecData->psSGX_FCK, SYS_SGX_CLOCK_SPEED + ONE_MHZ);
- if (lNewRate <= 0)
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: Couldn't round SGX functional clock rate"));
- return PVRSRV_ERROR_UNABLE_TO_ROUND_CLOCK_RATE;
- }
-
-
- lRate = clk_get_rate(psSysSpecData->psSGX_FCK);
- if (lRate != lNewRate)
- {
- res = clk_set_rate(psSysSpecData->psSGX_FCK, lNewRate);
- if (res < 0)
- {
- PVR_DPF((PVR_DBG_WARNING, "EnableSGXClocks: Couldn't set SGX functional clock rate (%d)", res));
- }
- }
+ if (lNewRate <= 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: Couldn't round SGX functional clock rate"));
+ return PVRSRV_ERROR_UNABLE_TO_ROUND_CLOCK_RATE;
+ }
+
+
+ lRate = clk_get_rate(psSysSpecData->psSGX_FCK);
+ if (lRate != lNewRate)
+ {
+ res = clk_set_rate(psSysSpecData->psSGX_FCK, lNewRate);
+ if (res < 0)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "EnableSGXClocks: Couldn't set SGX functional clock rate (%d)", res));
+ return PVRSRV_ERROR_UNABLE_TO_SET_CLOCK_RATE;
+ }
+ }
#if defined(DEBUG)
+ {
+ IMG_UINT32 rate = clk_get_rate(psSysSpecData->psSGX_FCK);
+ PVR_DPF((PVR_DBG_MESSAGE, "EnableSGXClocks: SGX Functional Clock is %dMHz", HZ_TO_MHZ(rate)));
+ }
+#endif
+
+
+
+#if defined(LDM_PLATFORM) && !defined(PVR_DRI_DRM_NOT_PCI)
{
- IMG_UINT32 rate = clk_get_rate(psSysSpecData->psSGX_FCK);
- PVR_DPF((PVR_DBG_MESSAGE, "EnableSGXClocks: SGX Functional Clock is %dMhz", HZ_TO_MHZ(rate)));
+
+// int res = pm_runtime_get_sync(&gpsPVRLDMDev->dev);
+// if (res < 0)
+// {
+// PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: pm_runtime_get_sync failed (%d)", -res));
+// return PVRSRV_ERROR_UNABLE_TO_ENABLE_CLOCK;
+// }
}
#endif
+// SysEnableSGXInterrupts(psSysData);
atomic_set(&psSysSpecData->sSGXClocksEnabled, 1);
#if !defined(NO_HARDWARE)
SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
-
if (atomic_read(&psSysSpecData->sSGXClocksEnabled) == 0)
{
return;
}
-
+
PVR_DPF((PVR_DBG_MESSAGE, "DisableSGXClocks: Disabling SGX Clocks"));
+
+ clk_disable(psSysSpecData->psSGX_FCK);
- if (psSysSpecData->psSGX_ICK)
- {
- clk_disable(psSysSpecData->psSGX_ICK);
- }
+ clk_disable(psSysSpecData->psSGX_ICK);
- if (psSysSpecData->psSGX_FCK)
+// SysDisableSGXInterrupts(psSysData);
+
+#if defined(LDM_PLATFORM) && !defined(PVR_DRI_DRM_NOT_PCI)
{
- clk_disable(psSysSpecData->psSGX_FCK);
+// int res = pm_runtime_put_sync(&gpsPVRLDMDev->dev);
+// if (res < 0)
+// {
+// PVR_DPF((PVR_DBG_ERROR, "DisableSGXClocks: pm_runtime_put_sync failed (%d)", -res));
+// }
}
+#endif
-
atomic_set(&psSysSpecData->sSGXClocksEnabled, 0);
#else
#endif
}
-PVRSRV_ERROR EnableSystemClocks(SYS_DATA *psSysData)
+#if (defined(DEBUG) || defined(TIMING)) && !defined(PVR_NO_OMAP_TIMER)
+#if defined(PVR_OMAP_USE_DM_TIMER_API)
+#define GPTIMER_TO_USE 11
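+/* Debug/timing builds claim GPTIMER11 through the OMAP dmtimer API when
+   PVR_OMAP_USE_DM_TIMER_API is defined; the PRCM/register path further
+   down is used otherwise. */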
+static PVRSRV_ERROR AcquireGPTimer(SYS_SPECIFIC_DATA *psSysSpecData)
{
- SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
- struct clk *psCLK;
- IMG_INT res;
- PVRSRV_ERROR eError;
- IMG_BOOL bPowerLock;
+ PVR_ASSERT(psSysSpecData->psGPTimer == NULL);
-#if defined(DEBUG) || defined(TIMING)
- IMG_INT rate;
- struct clk *sys_ck;
- IMG_CPU_PHYADDR TimerRegPhysBase;
- IMG_HANDLE hTimerEnable;
- IMG_UINT32 *pui32TimerEnable;
+
+ psSysSpecData->psGPTimer = omap_dm_timer_request_specific(GPTIMER_TO_USE);
+ if (psSysSpecData->psGPTimer == NULL)
+ {
+
+ PVR_DPF((PVR_DBG_WARNING, "%s: omap_dm_timer_request_specific failed", __FUNCTION__));
+ return PVRSRV_ERROR_CLOCK_REQUEST_FAILED;
+ }
-#endif
+
+ omap_dm_timer_set_source(psSysSpecData->psGPTimer, OMAP_TIMER_SRC_SYS_CLK);
+ omap_dm_timer_enable(psSysSpecData->psGPTimer);
- PVR_TRACE(("EnableSystemClocks: Enabling System Clocks"));
+
+ omap_dm_timer_set_load_start(psSysSpecData->psGPTimer, 1, 0);
- if (!psSysSpecData->bSysClocksOneTimeInit)
- {
- bPowerLock = IMG_FALSE;
+ omap_dm_timer_start(psSysSpecData->psGPTimer);
- spin_lock_init(&psSysSpecData->sPowerLock);
- atomic_set(&psSysSpecData->sPowerLockCPU, -1);
- spin_lock_init(&psSysSpecData->sNotifyLock);
- atomic_set(&psSysSpecData->sNotifyLockCPU, -1);
+
+ psSysSpecData->sTimerRegPhysBase.uiAddr = SYS_OMAP3430_GP11TIMER_REGS_SYS_PHYS_BASE;
- atomic_set(&psSysSpecData->sSGXClocksEnabled, 0);
+ return PVRSRV_OK;
+}
- psCLK = clk_get(NULL, SGX_PARENT_CLOCK);
- if (IS_ERR(psCLK))
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSsystemClocks: Couldn't get Core Clock"));
- goto ExitError;
- }
- psSysSpecData->psCORE_CK = psCLK;
+static void ReleaseGPTimer(SYS_SPECIFIC_DATA *psSysSpecData)
+{
+ if (psSysSpecData->psGPTimer != NULL)
+ {
+
+ (void) omap_dm_timer_stop(psSysSpecData->psGPTimer);
- psCLK = clk_get(NULL, "sgx_fck");
- if (IS_ERR(psCLK))
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSsystemClocks: Couldn't get SGX Functional Clock"));
- goto ExitError;
- }
- psSysSpecData->psSGX_FCK = psCLK;
+ omap_dm_timer_disable(psSysSpecData->psGPTimer);
- psCLK = clk_get(NULL, "sgx_ick");
- if (IS_ERR(psCLK))
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get SGX Interface Clock"));
- goto ExitError;
- }
- psSysSpecData->psSGX_ICK = psCLK;
+ omap_dm_timer_free(psSysSpecData->psGPTimer);
-#if defined(DEBUG)
- psCLK = clk_get(NULL, "mpu_ck");
- if (IS_ERR(psCLK))
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get MPU Clock"));
- goto ExitError;
- }
- psSysSpecData->psMPU_CK = psCLK;
-#endif
- res = clk_set_parent(psSysSpecData->psSGX_FCK, psSysSpecData->psCORE_CK);
- if (res < 0)
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't set SGX parent clock (%d)", res));
- goto ExitError;
- }
+ psSysSpecData->sTimerRegPhysBase.uiAddr = 0;
- psSysSpecData->bSysClocksOneTimeInit = IMG_TRUE;
- }
- else
- {
-
- bPowerLock = PowerLockWrappedOnCPU(psSysSpecData);
- if (bPowerLock)
- {
- PowerLockUnwrap(psSysSpecData);
- }
+ psSysSpecData->psGPTimer = NULL;
}
-#if defined(CONSTRAINT_NOTIFICATIONS)
- psSysSpecData->pVdd2Handle = constraint_get(PVRSRV_MODNAME, &cnstr_id_vdd2);
- if (IS_ERR(psSysSpecData->pVdd2Handle))
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get VDD2 constraint handle"));
- goto ExitError;
- }
+}
+#else
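+/* Fallback path: optionally enable the GPTIMER11 clocks through the PRCM
+   (PVR_OMAP3_TIMING_PRCM), put the timer into posted mode via TSICR, and
+   start it by writing the enable register directly. */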
+static PVRSRV_ERROR AcquireGPTimer(SYS_SPECIFIC_DATA *psSysSpecData)
+{
+#if defined(PVR_OMAP3_TIMING_PRCM)
+ struct clk *psCLK;
+ IMG_INT res;
+ struct clk *sys_ck;
+ IMG_INT rate;
+#endif
+ PVRSRV_ERROR eError;
- RegisterConstraintNotifications();
+ IMG_CPU_PHYADDR sTimerRegPhysBase;
+ IMG_HANDLE hTimerEnable;
+ IMG_UINT32 *pui32TimerEnable;
+#if defined(PVR_OMAP_TIMER_BASE_IN_SYS_SPEC_DATA)
+ PVR_ASSERT(psSysSpecData->sTimerRegPhysBase.uiAddr == 0);
#endif
-#if defined(DEBUG) || defined(TIMING)
+
+#if defined(PVR_OMAP3_TIMING_PRCM)
psCLK = clk_get(NULL, "gpt11_fck");
if (IS_ERR(psCLK))
{
PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get GPTIMER11 functional clock"));
- goto ExitUnRegisterConstraintNotifications;
+ goto ExitError;
}
psSysSpecData->psGPT11_FCK = psCLK;
if (IS_ERR(psCLK))
{
PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get GPTIMER11 interface clock"));
- goto ExitUnRegisterConstraintNotifications;
+ goto ExitError;
}
psSysSpecData->psGPT11_ICK = psCLK;
if (IS_ERR(sys_ck))
{
PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get System clock"));
- goto ExitUnRegisterConstraintNotifications;
+ goto ExitError;
}
if(clk_get_parent(psSysSpecData->psGPT11_FCK) != sys_ck)
if (res < 0)
{
PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't set GPTIMER11 parent clock (%d)", res));
- goto ExitUnRegisterConstraintNotifications;
+ goto ExitError;
}
}
-
rate = clk_get_rate(psSysSpecData->psGPT11_FCK);
PVR_TRACE(("GPTIMER11 clock is %dMHz", HZ_TO_MHZ(rate)));
if (res < 0)
{
PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't enable GPTIMER11 functional clock (%d)", res));
- goto ExitUnRegisterConstraintNotifications;
+ goto ExitError;
}
res = clk_enable(psSysSpecData->psGPT11_ICK);
PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't enable GPTIMER11 interface clock (%d)", res));
goto ExitDisableGPT11FCK;
}
+#endif
- TimerRegPhysBase.uiAddr = SYS_OMAP3430_GP11TIMER_TSICR_SYS_PHYS_BASE;
- pui32TimerEnable = OSMapPhysToLin(TimerRegPhysBase,
+ sTimerRegPhysBase.uiAddr = SYS_OMAP3430_GP11TIMER_TSICR_SYS_PHYS_BASE;
+ pui32TimerEnable = OSMapPhysToLin(sTimerRegPhysBase,
4,
PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
&hTimerEnable);
goto ExitDisableGPT11ICK;
}
- rate = *pui32TimerEnable;
- if(!(rate & 4))
+ if(!(*pui32TimerEnable & 4))
{
PVR_TRACE(("Setting GPTIMER11 mode to posted (currently is non-posted)"));
- *pui32TimerEnable = rate | 4;
+ *pui32TimerEnable |= 4;
}
OSUnMapPhysToLin(pui32TimerEnable,
hTimerEnable);
- TimerRegPhysBase.uiAddr = SYS_OMAP3430_GP11TIMER_ENABLE_SYS_PHYS_BASE;
- pui32TimerEnable = OSMapPhysToLin(TimerRegPhysBase,
+ sTimerRegPhysBase.uiAddr = SYS_OMAP3430_GP11TIMER_ENABLE_SYS_PHYS_BASE;
+ pui32TimerEnable = OSMapPhysToLin(sTimerRegPhysBase,
4,
PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
&hTimerEnable);
goto ExitDisableGPT11ICK;
}
-
*pui32TimerEnable = 3;
OSUnMapPhysToLin(pui32TimerEnable,
4,
PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
hTimerEnable);
-
-#endif
-
-#if defined(PDUMP) && !defined(NO_HARDWARE) && defined(CONSTRAINT_NOTIFICATIONS)
- PVR_TRACE(("EnableSystemClocks: Setting SGX OPP constraint"));
-
-
- res = constraint_set(psSysSpecData->pVdd2Handle, max_vdd2_opp);
- if (res != 0)
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: constraint_set failed (%d)", res));
- goto ExitConstraintSetFailed;
- }
+#if defined(PVR_OMAP_TIMER_BASE_IN_SYS_SPEC_DATA)
+ psSysSpecData->sTimerRegPhysBase = sTimerRegPhysBase;
#endif
+
eError = PVRSRV_OK;
+
goto Exit;
-#if defined(PDUMP) && !defined(NO_HARDWARE) && defined(CONSTRAINT_NOTIFICATIONS)
-ExitConstraintSetFailed:
-#endif
-#if defined(DEBUG) || defined(TIMING)
ExitDisableGPT11ICK:
+#if defined(PVR_OMAP3_TIMING_PRCM)
clk_disable(psSysSpecData->psGPT11_ICK);
ExitDisableGPT11FCK:
clk_disable(psSysSpecData->psGPT11_FCK);
-ExitUnRegisterConstraintNotifications:
-#endif
-#if defined(CONSTRAINT_NOTIFICATIONS)
- UnRegisterConstraintNotifications();
- constraint_put(psSysSpecData->pVdd2Handle);
-#endif
ExitError:
- eError = PVRSRV_ERROR_DISABLE_CLOCK_FAILURE;
+#endif
+ eError = PVRSRV_ERROR_CLOCK_REQUEST_FAILED;
Exit:
- if (bPowerLock)
- {
- PowerLockWrap(psSysSpecData);
- }
-
return eError;
}
-IMG_VOID DisableSystemClocks(SYS_DATA *psSysData)
+static void ReleaseGPTimer(SYS_SPECIFIC_DATA *psSysSpecData)
{
- SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
- IMG_BOOL bPowerLock;
-#if defined(DEBUG) || defined(TIMING)
- IMG_CPU_PHYADDR TimerRegPhysBase;
IMG_HANDLE hTimerDisable;
IMG_UINT32 *pui32TimerDisable;
-#endif
-
- PVR_TRACE(("DisableSystemClocks: Disabling System Clocks"));
-
-
- DisableSGXClocks(psSysData);
-
- bPowerLock = PowerLockWrappedOnCPU(psSysSpecData);
- if (bPowerLock)
- {
-
- PowerLockUnwrap(psSysSpecData);
- }
-
-#if defined(PDUMP) && !defined(NO_HARDWARE) && defined(CONSTRAINT_NOTIFICATIONS)
+ IMG_CPU_PHYADDR TimerRegPhysBase;
+#if defined(PVR_OMAP_TIMER_BASE_IN_SYS_SPEC_DATA)
+ if (psSysSpecData->sTimerRegPhysBase.uiAddr == 0)
{
- int res;
-
- PVR_TRACE(("DisableSystemClocks: Removing SGX OPP constraint"));
-
-
- res = constraint_remove(psSysSpecData->pVdd2Handle);
- if (res != 0)
- {
- PVR_DPF((PVR_DBG_WARNING, "DisableSystemClocks: constraint_remove failed (%d)", res));
- }
+ return;
}
#endif
-
-#if defined(CONSTRAINT_NOTIFICATIONS)
- UnRegisterConstraintNotifications();
-#endif
-
-#if defined(DEBUG) || defined(TIMING)
-
- TimerRegPhysBase.uiAddr = SYS_OMAP3430_GP11TIMER_ENABLE_SYS_PHYS_BASE;
+ TimerRegPhysBase.uiAddr = SYS_OMAP3430_GP11TIMER_ENABLE_SYS_PHYS_BASE;
pui32TimerDisable = OSMapPhysToLin(TimerRegPhysBase,
4,
PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
hTimerDisable);
}
+#if defined(PVR_OMAP_TIMER_BASE_IN_SYS_SPEC_DATA)
+ psSysSpecData->sTimerRegPhysBase.uiAddr = 0;
+#endif
+
+#if defined(PVR_OMAP3_TIMING_PRCM)
clk_disable(psSysSpecData->psGPT11_ICK);
clk_disable(psSysSpecData->psGPT11_FCK);
-
+#endif
+}
+#endif
+#else
+static PVRSRV_ERROR AcquireGPTimer(SYS_SPECIFIC_DATA *psSysSpecData)
+{
+ PVR_UNREFERENCED_PARAMETER(psSysSpecData);
+ return PVRSRV_OK;
+}
+static void ReleaseGPTimer(SYS_SPECIFIC_DATA *psSysSpecData)
+{
+ PVR_UNREFERENCED_PARAMETER(psSysSpecData);
+}
#endif
-#if defined(CONSTRAINT_NOTIFICATIONS)
- constraint_put(psSysSpecData->pVdd2Handle);
-#endif
- if (bPowerLock)
+
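+/* Clock setup is now one-time only: initialise the power-lock mutex, look up
+   the SGX clock handles and set their parent, then claim the debug GP timer
+   via AcquireGPTimer() on every call. */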
+PVRSRV_ERROR EnableSystemClocks(SYS_DATA *psSysData)
+{
+ SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
+ struct clk *psCLK;
+ IMG_INT res;
+ PVR_TRACE(("EnableSystemClocks: Enabling System Clocks"));
+
+ if (!psSysSpecData->bSysClocksOneTimeInit)
{
- PowerLockWrap(psSysSpecData);
+ mutex_init(&psSysSpecData->sPowerLock);
+
+ atomic_set(&psSysSpecData->sSGXClocksEnabled, 0);
+
+ psCLK = clk_get(NULL, SGX_PARENT_CLOCK);
+ if (IS_ERR(psCLK))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "EnableSsystemClocks: Couldn't get Core Clock"));
+ return PVRSRV_ERROR_UNABLE_TO_GET_PARENT_CLOCK;
+ }
+ psSysSpecData->psCORE_CK = psCLK;
+
+ psCLK = clk_get(NULL, "sgx_fck");
+ if (IS_ERR(psCLK))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "EnableSsystemClocks: Couldn't get SGX Functional Clock"));
+ return PVRSRV_ERROR_UNABLE_TO_GET_CLOCK;
+ }
+ psSysSpecData->psSGX_FCK = psCLK;
+
+ psCLK = clk_get(NULL, "sgx_ick");
+ if (IS_ERR(psCLK))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get SGX Interface Clock"));
+ return PVRSRV_ERROR_UNABLE_TO_GET_CLOCK;
+ }
+ psSysSpecData->psSGX_ICK = psCLK;
+
+ res = clk_set_parent(psSysSpecData->psSGX_FCK, psSysSpecData->psCORE_CK);
+ if (res < 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't set SGX parent clock (%d)", res));
+ return PVRSRV_ERROR_UNABLE_TO_GET_PARENT_CLOCK;
+ }
+
+
+ psSysSpecData->bSysClocksOneTimeInit = IMG_TRUE;
}
+
+ return AcquireGPTimer(psSysSpecData);
+}
+
+IMG_VOID DisableSystemClocks(SYS_DATA *psSysData)
+{
+ SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
+ PVR_TRACE(("DisableSystemClocks: Disabling System Clocks"));
+
+ DisableSGXClocks(psSysData);
+
+ ReleaseGPTimer(psSysSpecData);
+
+}
+
+PVRSRV_ERROR SysPMRuntimeRegister(void)
+{
+#if defined(LDM_PLATFORM) && !defined(PVR_DRI_DRM_NOT_PCI)
+// pm_runtime_enable(&gpsPVRLDMDev->dev);
+#endif
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR SysPMRuntimeUnregister(void)
+{
+#if defined(LDM_PLATFORM) && !defined(PVR_DRI_DRM_NOT_PCI)
+// pm_runtime_disable(&gpsPVRLDMDev->dev);
+#endif
+ return PVRSRV_OK;
}
+++ /dev/null
-/**********************************************************************
- *
- * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful but, except
- * as otherwise stated in writing, without any warranty; without even the
- * implied warranty of merchantability or fitness for a particular purpose.
- * See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Imagination Technologies Ltd. <gpl-support@imgtec.com>
- * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
- *
- ******************************************************************************/
-
-#include <linux/version.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/hardirq.h>
-#include <linux/mutex.h>
-
-#include "sgxdefs.h"
-#include "services_headers.h"
-#include "sysinfo.h"
-#include "sgxapi_km.h"
-#include "sysconfig.h"
-#include "sgxinfokm.h"
-#include "syslocal.h"
-
-#if !defined(PVR_LINUX_USING_WORKQUEUES)
-#error "PVR_LINUX_USING_WORKQUEUES must be defined"
-#endif
-
-#define ONE_MHZ 1000000
-#define HZ_TO_MHZ(m) ((m) / ONE_MHZ)
-
-#if defined(SUPPORT_OMAP3430_SGXFCLK_96M)
-#define SGX_PARENT_CLOCK "cm_96m_fck"
-#else
-#define SGX_PARENT_CLOCK "core_ck"
-#endif
-
-static PVRSRV_ERROR PowerLockWrap(SYS_SPECIFIC_DATA *psSysSpecData, IMG_BOOL bTryLock)
-{
- if (!in_interrupt())
- {
- if (bTryLock)
- {
- int locked = mutex_trylock(&psSysSpecData->sPowerLock);
- if (locked == 0)
- {
- return PVRSRV_ERROR_RETRY;
- }
- }
- else
- {
- mutex_lock(&psSysSpecData->sPowerLock);
- }
- }
-
- return PVRSRV_OK;
-}
-
-static IMG_VOID PowerLockUnwrap(SYS_SPECIFIC_DATA *psSysSpecData)
-{
- if (!in_interrupt())
- {
- mutex_unlock(&psSysSpecData->sPowerLock);
- }
-}
-
-PVRSRV_ERROR SysPowerLockWrap(IMG_BOOL bTryLock)
-{
- SYS_DATA *psSysData;
-
- SysAcquireData(&psSysData);
-
- return PowerLockWrap(psSysData->pvSysSpecificData, bTryLock);
-}
-
-IMG_VOID SysPowerLockUnwrap(IMG_VOID)
-{
- SYS_DATA *psSysData;
-
- SysAcquireData(&psSysData);
-
- PowerLockUnwrap(psSysData->pvSysSpecificData);
-}
-
-
-IMG_BOOL WrapSystemPowerChange(SYS_SPECIFIC_DATA *psSysSpecData)
-{
- return IMG_TRUE;
-}
-
-IMG_VOID UnwrapSystemPowerChange(SYS_SPECIFIC_DATA *psSysSpecData)
-{
-}
-
-static inline IMG_UINT32 scale_by_rate(IMG_UINT32 val, IMG_UINT32 rate1, IMG_UINT32 rate2)
-{
- if (rate1 >= rate2)
- {
- return val * (rate1 / rate2);
- }
-
- return val / (rate2 / rate1);
-}
-
-static inline IMG_UINT32 scale_prop_to_SGX_clock(IMG_UINT32 val, IMG_UINT32 rate)
-{
- return scale_by_rate(val, rate, SYS_SGX_CLOCK_SPEED);
-}
-
-static inline IMG_UINT32 scale_inv_prop_to_SGX_clock(IMG_UINT32 val, IMG_UINT32 rate)
-{
- return scale_by_rate(val, SYS_SGX_CLOCK_SPEED, rate);
-}
-
-IMG_VOID SysGetSGXTimingInformation(SGX_TIMING_INFORMATION *psTimingInfo)
-{
- IMG_UINT32 rate;
-
-#if defined(NO_HARDWARE)
- rate = SYS_SGX_CLOCK_SPEED;
-#else
- PVR_ASSERT(atomic_read(&gpsSysSpecificData->sSGXClocksEnabled) != 0);
-
- rate = clk_get_rate(gpsSysSpecificData->psSGX_FCK);
- PVR_ASSERT(rate != 0);
-#endif
- psTimingInfo->ui32CoreClockSpeed = rate;
- psTimingInfo->ui32HWRecoveryFreq = scale_prop_to_SGX_clock(SYS_SGX_HWRECOVERY_TIMEOUT_FREQ, rate);
- psTimingInfo->ui32uKernelFreq = scale_prop_to_SGX_clock(SYS_SGX_PDS_TIMER_FREQ, rate);
-#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
- psTimingInfo->bEnableActivePM = IMG_TRUE;
-#else
- psTimingInfo->bEnableActivePM = IMG_FALSE;
-#endif
- psTimingInfo->ui32ActivePowManLatencyms = SYS_SGX_ACTIVE_POWER_LATENCY_MS;
-}
-
-PVRSRV_ERROR EnableSGXClocks(SYS_DATA *psSysData)
-{
-#if !defined(NO_HARDWARE)
- SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
- long lNewRate;
- long lRate;
- IMG_INT res;
-
-
- if (atomic_read(&psSysSpecData->sSGXClocksEnabled) != 0)
- {
- return PVRSRV_OK;
- }
-
- PVR_DPF((PVR_DBG_MESSAGE, "EnableSGXClocks: Enabling SGX Clocks"));
-
-#if defined(DEBUG)
- {
-
- IMG_UINT32 rate = clk_get_rate(psSysSpecData->psMPU_CK);
- PVR_DPF((PVR_DBG_MESSAGE, "EnableSGXClocks: CPU Clock is %dMhz", HZ_TO_MHZ(rate)));
- }
-#endif
-
- res = clk_enable(psSysSpecData->psSGX_FCK);
- if (res < 0)
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: Couldn't enable SGX functional clock (%d)", res));
- return PVRSRV_ERROR_UNABLE_TO_ENABLE_CLOCK;
- }
-
- res = clk_enable(psSysSpecData->psSGX_ICK);
- if (res < 0)
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: Couldn't enable SGX interface clock (%d)", res));
-
- clk_disable(psSysSpecData->psSGX_FCK);
- return PVRSRV_ERROR_UNABLE_TO_ENABLE_CLOCK;
- }
-
- lNewRate = clk_round_rate(psSysSpecData->psSGX_FCK, SYS_SGX_CLOCK_SPEED + ONE_MHZ);
- if (lNewRate <= 0)
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: Couldn't round SGX functional clock rate"));
- return PVRSRV_ERROR_UNABLE_TO_ROUND_CLOCK_RATE;
- }
-
-
- lRate = clk_get_rate(psSysSpecData->psSGX_FCK);
- if (lRate != lNewRate)
- {
- res = clk_set_rate(psSysSpecData->psSGX_FCK, lNewRate);
- if (res < 0)
- {
- PVR_DPF((PVR_DBG_WARNING, "EnableSGXClocks: Couldn't set SGX functional clock rate (%d)", res));
- }
- }
-
-#if defined(DEBUG)
- {
- IMG_UINT32 rate = clk_get_rate(psSysSpecData->psSGX_FCK);
- PVR_DPF((PVR_DBG_MESSAGE, "EnableSGXClocks: SGX Functional Clock is %dMhz", HZ_TO_MHZ(rate)));
- }
-#endif
-
-
- atomic_set(&psSysSpecData->sSGXClocksEnabled, 1);
-
-#else
- PVR_UNREFERENCED_PARAMETER(psSysData);
-#endif
- return PVRSRV_OK;
-}
-
-
-IMG_VOID DisableSGXClocks(SYS_DATA *psSysData)
-{
-#if !defined(NO_HARDWARE)
- SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
-
-
- if (atomic_read(&psSysSpecData->sSGXClocksEnabled) == 0)
- {
- return;
- }
-
- PVR_DPF((PVR_DBG_MESSAGE, "DisableSGXClocks: Disabling SGX Clocks"));
-
- if (psSysSpecData->psSGX_ICK)
- {
- clk_disable(psSysSpecData->psSGX_ICK);
- }
-
- if (psSysSpecData->psSGX_FCK)
- {
- clk_disable(psSysSpecData->psSGX_FCK);
- }
-
-
- atomic_set(&psSysSpecData->sSGXClocksEnabled, 0);
-
-#else
- PVR_UNREFERENCED_PARAMETER(psSysData);
-#endif
-}
-
-PVRSRV_ERROR EnableSystemClocks(SYS_DATA *psSysData)
-{
- SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
- struct clk *psCLK;
- IMG_INT res;
- PVRSRV_ERROR eError;
-
-#if defined(DEBUG) || defined(TIMING)
- IMG_INT rate;
- struct clk *sys_ck;
- IMG_CPU_PHYADDR TimerRegPhysBase;
- IMG_HANDLE hTimerEnable;
- IMG_UINT32 *pui32TimerEnable;
-
-#endif
-
- PVR_TRACE(("EnableSystemClocks: Enabling System Clocks"));
-
- if (!psSysSpecData->bSysClocksOneTimeInit)
- {
- mutex_init(&psSysSpecData->sPowerLock);
-
- atomic_set(&psSysSpecData->sSGXClocksEnabled, 0);
-
- psCLK = clk_get(NULL, SGX_PARENT_CLOCK);
- if (IS_ERR(psCLK))
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSsystemClocks: Couldn't get Core Clock"));
- goto ExitError;
- }
- psSysSpecData->psCORE_CK = psCLK;
-
- psCLK = clk_get(NULL, "sgx_fck");
- if (IS_ERR(psCLK))
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSsystemClocks: Couldn't get SGX Functional Clock"));
- goto ExitError;
- }
- psSysSpecData->psSGX_FCK = psCLK;
-
- psCLK = clk_get(NULL, "sgx_ick");
- if (IS_ERR(psCLK))
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get SGX Interface Clock"));
- goto ExitError;
- }
- psSysSpecData->psSGX_ICK = psCLK;
-
-#if defined(DEBUG)
- psCLK = clk_get(NULL, "mpu_ck");
- if (IS_ERR(psCLK))
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get MPU Clock"));
- goto ExitError;
- }
- psSysSpecData->psMPU_CK = psCLK;
-#endif
- res = clk_set_parent(psSysSpecData->psSGX_FCK, psSysSpecData->psCORE_CK);
- if (res < 0)
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't set SGX parent clock (%d)", res));
- goto ExitError;
- }
-
- psSysSpecData->bSysClocksOneTimeInit = IMG_TRUE;
- }
-
-#if defined(DEBUG) || defined(TIMING)
-
- psCLK = clk_get(NULL, "gpt11_fck");
- if (IS_ERR(psCLK))
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get GPTIMER11 functional clock"));
- goto ExitUnRegisterConstraintNotifications;
- }
- psSysSpecData->psGPT11_FCK = psCLK;
-
- psCLK = clk_get(NULL, "gpt11_ick");
- if (IS_ERR(psCLK))
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get GPTIMER11 interface clock"));
- goto ExitUnRegisterConstraintNotifications;
- }
- psSysSpecData->psGPT11_ICK = psCLK;
-
- sys_ck = clk_get(NULL, "sys_ck");
- if (IS_ERR(sys_ck))
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get System clock"));
- goto ExitUnRegisterConstraintNotifications;
- }
-
- if(clk_get_parent(psSysSpecData->psGPT11_FCK) != sys_ck)
- {
- PVR_TRACE(("Setting GPTIMER11 parent to System Clock"));
- res = clk_set_parent(psSysSpecData->psGPT11_FCK, sys_ck);
- if (res < 0)
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't set GPTIMER11 parent clock (%d)", res));
- goto ExitUnRegisterConstraintNotifications;
- }
- }
-
- rate = clk_get_rate(psSysSpecData->psGPT11_FCK);
- PVR_TRACE(("GPTIMER11 clock is %dMHz", HZ_TO_MHZ(rate)));
-
- res = clk_enable(psSysSpecData->psGPT11_FCK);
- if (res < 0)
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't enable GPTIMER11 functional clock (%d)", res));
- goto ExitUnRegisterConstraintNotifications;
- }
-
- res = clk_enable(psSysSpecData->psGPT11_ICK);
- if (res < 0)
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't enable GPTIMER11 interface clock (%d)", res));
- goto ExitDisableGPT11FCK;
- }
-
-
- TimerRegPhysBase.uiAddr = SYS_OMAP3430_GP11TIMER_TSICR_SYS_PHYS_BASE;
- pui32TimerEnable = OSMapPhysToLin(TimerRegPhysBase,
- 4,
- PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
- &hTimerEnable);
-
- if (pui32TimerEnable == IMG_NULL)
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: OSMapPhysToLin failed"));
- goto ExitDisableGPT11ICK;
- }
-
- rate = *pui32TimerEnable;
- if(!(rate & 4))
- {
- PVR_TRACE(("Setting GPTIMER11 mode to posted (currently is non-posted)"));
-
-
- *pui32TimerEnable = rate | 4;
- }
-
- OSUnMapPhysToLin(pui32TimerEnable,
- 4,
- PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
- hTimerEnable);
-
-
- TimerRegPhysBase.uiAddr = SYS_OMAP3430_GP11TIMER_ENABLE_SYS_PHYS_BASE;
- pui32TimerEnable = OSMapPhysToLin(TimerRegPhysBase,
- 4,
- PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
- &hTimerEnable);
-
- if (pui32TimerEnable == IMG_NULL)
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: OSMapPhysToLin failed"));
- goto ExitDisableGPT11ICK;
- }
-
-
- *pui32TimerEnable = 3;
-
- OSUnMapPhysToLin(pui32TimerEnable,
- 4,
- PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
- hTimerEnable);
-
-#endif
-
- eError = PVRSRV_OK;
- goto Exit;
-
-#if defined(DEBUG) || defined(TIMING)
-ExitDisableGPT11ICK:
- clk_disable(psSysSpecData->psGPT11_ICK);
-ExitDisableGPT11FCK:
- clk_disable(psSysSpecData->psGPT11_FCK);
-ExitUnRegisterConstraintNotifications:
-#endif
-ExitError:
- eError = PVRSRV_ERROR_DISABLE_CLOCK_FAILURE;
-Exit:
- return eError;
-}
-
-IMG_VOID DisableSystemClocks(SYS_DATA *psSysData)
-{
-#if defined(DEBUG) || defined(TIMING)
- SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
- IMG_CPU_PHYADDR TimerRegPhysBase;
- IMG_HANDLE hTimerDisable;
- IMG_UINT32 *pui32TimerDisable;
-#endif
-
- PVR_TRACE(("DisableSystemClocks: Disabling System Clocks"));
-
-
- DisableSGXClocks(psSysData);
-
-#if defined(DEBUG) || defined(TIMING)
-
- TimerRegPhysBase.uiAddr = SYS_OMAP3430_GP11TIMER_ENABLE_SYS_PHYS_BASE;
- pui32TimerDisable = OSMapPhysToLin(TimerRegPhysBase,
- 4,
- PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
- &hTimerDisable);
-
- if (pui32TimerDisable == IMG_NULL)
- {
- PVR_DPF((PVR_DBG_ERROR, "DisableSystemClocks: OSMapPhysToLin failed"));
- }
- else
- {
- *pui32TimerDisable = 0;
-
- OSUnMapPhysToLin(pui32TimerDisable,
- 4,
- PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
- hTimerDisable);
- }
-
- clk_disable(psSysSpecData->psGPT11_ICK);
-
- clk_disable(psSysSpecData->psGPT11_FCK);
-
-#endif
-}
/**********************************************************************
*
- * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
/**********************************************************************
*
- * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
*
******************************************************************************/
+#include "sysconfig.h"
#include "services_headers.h"
#include "kerneldisplay.h"
#include "oemfuncs.h"
#include "sgxinfo.h"
#include "sgxinfokm.h"
#include "syslocal.h"
-#include "sysconfig.h"
#include "ocpdefs.h"
-#if !defined(NO_HARDWARE) && \
- defined(SYS_USING_INTERRUPTS) && \
- defined(SGX530) && (SGX_CORE_REV == 125)
-#define SGX_OCP_REGS_ENABLED
-#endif
-
SYS_DATA* gpsSysData = (SYS_DATA*)IMG_NULL;
SYS_DATA gsSysData;
#define DEVICE_SGX_INTERRUPT (1 << 0)
-#if defined(NO_HARDWARE)
+#if defined(NO_HARDWARE) || defined(SGX_OCP_REGS_ENABLED)
static IMG_CPU_VIRTADDR gsSGXRegsCPUVAddr;
#endif
+#if defined(PVR_LINUX_DYNAMIC_SGX_RESOURCE_INFO)
+extern struct platform_device *gpsPVRLDMDev;
+#endif
+
IMG_UINT32 PVRSRV_BridgeDispatchKM(IMG_UINT32 Ioctl,
IMG_BYTE *pInBuf,
IMG_UINT32 InBufLen,
IMG_UINT32 OutBufLen,
IMG_UINT32 *pdwBytesTransferred);
-#if defined(DEBUG) && defined(DUMP_OMAP34xx_CLOCKS) && defined(__linux__)
-
-#pragma GCC diagnostic ignored "-Wstrict-prototypes"
-#include <mach/clock.h>
-
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29))
-#include <../mach-omap2/clock_34xx.h>
-#define ONCHIP_CLKS onchip_clks
-#else
-#include <../mach-omap2/clock34xx.h>
-#define ONCHIP_CLKS onchip_34xx_clks
-#endif
-
-static void omap3_clk_recalc(struct clk *clk) {}
-static void omap3_followparent_recalc(struct clk *clk) {}
-static void omap3_propagate_rate(struct clk *clk) {}
-static void omap3_table_recalc(struct clk *clk) {}
-static long omap3_round_to_table_rate(struct clk *clk, unsigned long rate) { return 0; }
-static int omap3_select_table_rate(struct clk *clk, unsigned long rate) { return 0; }
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29))
-static void omap3_dpll_recalc(struct clk *clk, unsigned long parent_rate,
- u8 rate_storage) {}
-static void omap3_clkoutx2_recalc(struct clk *clk, unsigned long parent_rate,
- u8 rate_storage) {}
-static void omap3_dpll_allow_idle(struct clk *clk) {}
-static void omap3_dpll_deny_idle(struct clk *clk) {}
-static u32 omap3_dpll_autoidle_read(struct clk *clk) { return 0; }
-static int omap3_noncore_dpll_enable(struct clk *clk) { return 0; }
-static void omap3_noncore_dpll_disable(struct clk *clk) {}
-static int omap3_noncore_dpll_set_rate(struct clk *clk, unsigned long rate) { return 0; }
-static int omap3_core_dpll_m2_set_rate(struct clk *clk, unsigned long rate) { return 0; }
-void followparent_recalc(struct clk *clk, unsigned long new_parent_rate,
- u8 rate_storage) {}
-long omap2_dpll_round_rate(struct clk *clk, unsigned long target_rate) { return 0; }
-void omap2_clksel_recalc(struct clk *clk, unsigned long new_parent_rate,
- u8 rate_storage) {}
-long omap2_clksel_round_rate(struct clk *clk, unsigned long target_rate) { return 0; }
-int omap2_clksel_set_rate(struct clk *clk, unsigned long rate) { return 0; }
-void omap2_fixed_divisor_recalc(struct clk *clk, unsigned long new_parent_rate,
- u8 rate_storage) {}
-void omap2_init_clksel_parent(struct clk *clk) {}
-#endif
-
-static void dump_omap34xx_clocks(void)
-{
- struct clk **c;
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29))
- struct vdd_prcm_config *t1 = vdd1_rate_table;
- struct vdd_prcm_config *t2 = vdd2_rate_table;
-
- t1 = t1;
- t2 = t2;
-#else
-
- omap3_dpll_allow_idle(0);
- omap3_dpll_deny_idle(0);
- omap3_dpll_autoidle_read(0);
- omap3_clk_recalc(0);
- omap3_followparent_recalc(0);
- omap3_propagate_rate(0);
- omap3_table_recalc(0);
- omap3_round_to_table_rate(0, 0);
- omap3_select_table_rate(0, 0);
-#endif
-
- for(c = ONCHIP_CLKS; c < ONCHIP_CLKS + ARRAY_SIZE(ONCHIP_CLKS); c++)
- {
- struct clk *cp = *c, *copy;
- unsigned long rate;
- copy = clk_get(NULL, cp->name);
- if(!copy)
- continue;
- rate = clk_get_rate(copy);
- if (rate < 1000000)
- {
- PVR_DPF((PVR_DBG_ERROR, "%s: clock %s is %lu KHz (%lu Hz)", __func__, cp->name, rate/1000, rate));
- }
- else
- {
- PVR_DPF((PVR_DBG_ERROR, "%s: clock %s is %lu MHz (%lu Hz)", __func__, cp->name, rate/1000000, rate));
- }
- }
-}
-
-#else
-
-static INLINE void dump_omap34xx_clocks(void) {}
-
-#endif
-
#if defined(SGX_OCP_REGS_ENABLED)
-#define SYS_OMAP3430_OCP_REGS_SYS_PHYS_BASE (SYS_OMAP3430_SGX_REGS_SYS_PHYS_BASE + EUR_CR_OCP_REVISION)
-#define SYS_OMAP3430_OCP_REGS_SIZE 0x110
-
static IMG_CPU_VIRTADDR gpvOCPRegsLinAddr;
static PVRSRV_ERROR EnableSGXClocksWrap(SYS_DATA *psSysData)
{
PVRSRV_ERROR eError = EnableSGXClocks(psSysData);
+#if !defined(SGX_OCP_NO_INT_BYPASS)
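+ /* Whenever the SGX clocks come back up, reprogram EUR_CR_OCP_SYSCONFIG and
+    set the THALIA_INT_BYPASS bit in EUR_CR_OCP_DEBUG_CONFIG. */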
if(eError == PVRSRV_OK)
{
- OSWriteHWReg(gpvOCPRegsLinAddr,
- EUR_CR_OCP_DEBUG_CONFIG - EUR_CR_OCP_REVISION,
- EUR_CR_OCP_DEBUG_CONFIG_THALIA_INT_BYPASS_MASK);
+ OSWriteHWReg(gpvOCPRegsLinAddr, EUR_CR_OCP_SYSCONFIG, 0x14);
+ OSWriteHWReg(gpvOCPRegsLinAddr, EUR_CR_OCP_DEBUG_CONFIG, EUR_CR_OCP_DEBUG_CONFIG_THALIA_INT_BYPASS_MASK);
}
-
+#endif
return eError;
}
if(eError == PVRSRV_OK)
{
- EnableSGXClocksWrap(psSysData);
+ eError = EnableSGXClocksWrap(psSysData);
+ if (eError != PVRSRV_OK)
+ {
+ DisableSystemClocks(psSysData);
+ }
}
#endif
#if defined(NO_HARDWARE)
PVRSRV_ERROR eError;
IMG_CPU_PHYADDR sCpuPAddr;
+#else
+#if defined(PVR_LINUX_DYNAMIC_SGX_RESOURCE_INFO)
+ struct resource *dev_res;
+ int dev_irq;
+#endif
#endif
PVR_UNREFERENCED_PARAMETER(psSysData);
#if defined(NO_HARDWARE)
- eError = OSBaseAllocContigMemory(SYS_OMAP3430_SGX_REGS_SIZE,
+ gsSGXDeviceMap.ui32RegsSize = SYS_OMAP3430_SGX_REGS_SIZE;
+
+ eError = OSBaseAllocContigMemory(gsSGXDeviceMap.ui32RegsSize,
&gsSGXRegsCPUVAddr,
&sCpuPAddr);
if(eError != PVRSRV_OK)
}
gsSGXDeviceMap.sRegsCpuPBase = sCpuPAddr;
gsSGXDeviceMap.sRegsSysPBase = SysCpuPAddrToSysPAddr(gsSGXDeviceMap.sRegsCpuPBase);
- gsSGXDeviceMap.ui32RegsSize = SYS_OMAP3430_SGX_REGS_SIZE;
#if defined(__linux__)
gsSGXDeviceMap.pvRegsCpuVBase = gsSGXRegsCPUVAddr;
gsSGXDeviceMap.pvRegsCpuVBase = IMG_NULL;
#endif
- OSMemSet(gsSGXRegsCPUVAddr, 0, SYS_OMAP3430_SGX_REGS_SIZE);
+ OSMemSet(gsSGXRegsCPUVAddr, 0, gsSGXDeviceMap.ui32RegsSize);
gsSGXDeviceMap.ui32IRQ = 0;
#else
+#if defined(PVR_LINUX_DYNAMIC_SGX_RESOURCE_INFO)
+
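+ /* Take the SGX register window and IRQ from the platform device resources
+    rather than the fixed SYS_OMAP3430_* constants. */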
+ dev_res = platform_get_resource(gpsPVRLDMDev, IORESOURCE_MEM, 0);
+ if (dev_res == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: platform_get_resource failed", __FUNCTION__));
+ return PVRSRV_ERROR_INVALID_DEVICE;
+ }
+ dev_irq = platform_get_irq(gpsPVRLDMDev, 0);
+ if (dev_irq < 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: platform_get_irq failed (%d)", __FUNCTION__, -dev_irq));
+ return PVRSRV_ERROR_INVALID_DEVICE;
+ }
+
+ gsSGXDeviceMap.sRegsSysPBase.uiAddr = dev_res->start;
+ gsSGXDeviceMap.sRegsCpuPBase =
+ SysSysPAddrToCpuPAddr(gsSGXDeviceMap.sRegsSysPBase);
+ PVR_TRACE(("SGX register base: 0x%lx", (unsigned long)gsSGXDeviceMap.sRegsCpuPBase.uiAddr));
+
+ gsSGXDeviceMap.ui32RegsSize = (unsigned int)(dev_res->end - dev_res->start);
+ PVR_TRACE(("SGX register size: %d",gsSGXDeviceMap.ui32RegsSize));
+
+ gsSGXDeviceMap.ui32IRQ = dev_irq;
+ PVR_TRACE(("SGX IRQ: %d", gsSGXDeviceMap.ui32IRQ));
+#else
gsSGXDeviceMap.sRegsSysPBase.uiAddr = SYS_OMAP3430_SGX_REGS_SYS_PHYS_BASE;
gsSGXDeviceMap.sRegsCpuPBase = SysSysPAddrToCpuPAddr(gsSGXDeviceMap.sRegsSysPBase);
gsSGXDeviceMap.ui32RegsSize = SYS_OMAP3430_SGX_REGS_SIZE;
gsSGXDeviceMap.ui32IRQ = SYS_OMAP3430_SGX_IRQ;
+#endif
+#if defined(SGX_OCP_REGS_ENABLED)
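+ /* Map the whole SGX register window once and reuse the same mapping for the
+    OCP registers (gpvOCPRegsLinAddr), replacing the separate OCP mapping. */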
+ gsSGXRegsCPUVAddr = OSMapPhysToLin(gsSGXDeviceMap.sRegsCpuPBase,
+ gsSGXDeviceMap.ui32RegsSize,
+ PVRSRV_HAP_UNCACHED|PVRSRV_HAP_KERNEL_ONLY,
+ IMG_NULL);
+
+ if (gsSGXRegsCPUVAddr == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysLocateDevices: Failed to map SGX registers"));
+ return PVRSRV_ERROR_BAD_MAPPING;
+ }
+
+
+ gsSGXDeviceMap.pvRegsCpuVBase = gsSGXRegsCPUVAddr;
+ gpvOCPRegsLinAddr = gsSGXRegsCPUVAddr;
+#endif
#endif
#if defined(PDUMP)
}
-IMG_CHAR *SysCreateVersionString(IMG_CPU_PHYADDR sRegRegion)
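+/* Now static and parameterless: the register base comes from
+   gsSGXDeviceMap.sRegsCpuPBase rather than a caller-supplied address. */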
+static IMG_CHAR *SysCreateVersionString(void)
{
static IMG_CHAR aszVersionString[100];
SYS_DATA *psSysData;
#if !defined(NO_HARDWARE)
IMG_VOID *pvRegsLinAddr;
- pvRegsLinAddr = OSMapPhysToLin(sRegRegion,
+ pvRegsLinAddr = OSMapPhysToLin(gsSGXDeviceMap.sRegsCpuPBase,
SYS_OMAP3430_SGX_REGS_SIZE,
PVRSRV_HAP_UNCACHED|PVRSRV_HAP_KERNEL_ONLY,
IMG_NULL);
IMG_UINT32 i;
PVRSRV_ERROR eError;
PVRSRV_DEVICE_NODE *psDeviceNode;
+#if !defined(PVR_NO_OMAP_TIMER)
IMG_CPU_PHYADDR TimerRegPhysBase;
+#endif
#if !defined(SGX_DYNAMIC_TIMING_INFO)
SGX_TIMING_INFORMATION* psTimingInfo;
#endif
return eError;
}
- TimerRegPhysBase.uiAddr = SYS_OMAP3430_GP11TIMER_REGS_SYS_PHYS_BASE;
- gpsSysData->pvSOCTimerRegisterKM = IMG_NULL;
- gpsSysData->hSOCTimerRegisterOSMemHandle = 0;
- OSReservePhys(TimerRegPhysBase,
- 4,
- PVRSRV_HAP_MULTI_PROCESS|PVRSRV_HAP_UNCACHED,
- (IMG_VOID **)&gpsSysData->pvSOCTimerRegisterKM,
- &gpsSysData->hSOCTimerRegisterOSMemHandle);
-
#if !defined(SGX_DYNAMIC_TIMING_INFO)
psTimingInfo = &gsSGXDeviceMap.sTimingInfo;
return eError;
}
SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LOCATEDEV);
-
-#if defined(SGX_OCP_REGS_ENABLED)
+#if 0
+ eError = SysPMRuntimeRegister();
+ if (eError != PVRSRV_OK)
{
- IMG_SYS_PHYADDR sOCPRegsSysPBase;
- IMG_CPU_PHYADDR sOCPRegsCpuPBase;
-
- sOCPRegsSysPBase.uiAddr = SYS_OMAP3430_OCP_REGS_SYS_PHYS_BASE;
- sOCPRegsCpuPBase = SysSysPAddrToCpuPAddr(sOCPRegsSysPBase);
-
- gpvOCPRegsLinAddr = OSMapPhysToLin(sOCPRegsCpuPBase,
- SYS_OMAP3430_OCP_REGS_SIZE,
- PVRSRV_HAP_UNCACHED|PVRSRV_HAP_KERNEL_ONLY,
- IMG_NULL);
-
- if (gpvOCPRegsLinAddr == IMG_NULL)
- {
- PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to map OCP registers"));
- return PVRSRV_ERROR_BAD_MAPPING;
- }
+ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to register with OSPM!"));
+ (IMG_VOID)SysDeinitialise(gpsSysData);
+ gpsSysData = IMG_NULL;
+ return eError;
}
+ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_PM_RUNTIME);
#endif
-
}
#endif
- dump_omap34xx_clocks();
-
eError = PVRSRVInitialiseDevice(gui32SGXDeviceID);
if (eError != PVRSRV_OK)
{
DisableSGXClocks(gpsSysData);
#endif
+#if !defined(PVR_NO_OMAP_TIMER)
+#if defined(PVR_OMAP_TIMER_BASE_IN_SYS_SPEC_DATA)
+ TimerRegPhysBase = gsSysSpecificData.sTimerRegPhysBase;
+#else
+ TimerRegPhysBase.uiAddr = SYS_OMAP3430_GP11TIMER_REGS_SYS_PHYS_BASE;
+#endif
+ gpsSysData->pvSOCTimerRegisterKM = IMG_NULL;
+ gpsSysData->hSOCTimerRegisterOSMemHandle = 0;
+ if (TimerRegPhysBase.uiAddr != 0)
+ {
+ OSReservePhys(TimerRegPhysBase,
+ 4,
+ PVRSRV_HAP_MULTI_PROCESS|PVRSRV_HAP_UNCACHED,
+ (IMG_VOID **)&gpsSysData->pvSOCTimerRegisterKM,
+ &gpsSysData->hSOCTimerRegisterOSMemHandle);
+ }
+#endif
+
return PVRSRV_OK;
}
eError = EnableSGXClocksWrap(gpsSysData);
if (eError != PVRSRV_OK)
{
- PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to Enable SGX clocks (%d)", eError));
- (IMG_VOID)SysDeinitialise(gpsSysData);
- gpsSysData = IMG_NULL;
+ PVR_DPF((PVR_DBG_ERROR,"SysFinalise: Failed to Enable SGX clocks (%d)", eError));
return eError;
}
#endif
-#if defined(SYS_USING_INTERRUPTS)
-
eError = OSInstallMISR(gpsSysData);
if (eError != PVRSRV_OK)
{
PVR_DPF((PVR_DBG_ERROR,"SysFinalise: Failed to install MISR"));
- (IMG_VOID)SysDeinitialise(gpsSysData);
- gpsSysData = IMG_NULL;
return eError;
}
SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_MISR);
+#if defined(SYS_USING_INTERRUPTS)
eError = OSInstallDeviceLISR(gpsSysData, gsSGXDeviceMap.ui32IRQ, "SGX ISR", gpsSGXDevNode);
if (eError != PVRSRV_OK)
{
PVR_DPF((PVR_DBG_ERROR,"SysFinalise: Failed to install ISR"));
- (IMG_VOID)SysDeinitialise(gpsSysData);
- gpsSysData = IMG_NULL;
return eError;
}
SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LISR);
#endif
+#if defined(__linux__)
- gpsSysData->pszVersionString = SysCreateVersionString(gsSGXDeviceMap.sRegsCpuPBase);
+ gpsSysData->pszVersionString = SysCreateVersionString();
if (!gpsSysData->pszVersionString)
{
PVR_DPF((PVR_DBG_ERROR,"SysFinalise: Failed to create a system version string"));
}
else
{
- PVR_DPF((PVR_DBG_WARNING, "SysFinalise: Version string: %s", gpsSysData->pszVersionString));
+ PVR_TRACE(("SysFinalise: Version string: %s", gpsSysData->pszVersionString));
}
+#endif
#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
{
PVRSRV_ERROR eError;
+ if(gpsSysData->pvSOCTimerRegisterKM)
+ {
+ OSUnReservePhys(gpsSysData->pvSOCTimerRegisterKM,
+ 4,
+ PVRSRV_HAP_MULTI_PROCESS|PVRSRV_HAP_UNCACHED,
+ gpsSysData->hSOCTimerRegisterOSMemHandle);
+ }
+
#if defined(SYS_USING_INTERRUPTS)
if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LISR))
{
return eError;
}
}
+#endif
if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_MISR))
{
return eError;
}
}
-#else
- PVR_UNREFERENCED_PARAMETER(psSysData);
-#endif
if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_INITDEV))
{
return eError;
}
}
-
-#if defined(SGX_OCP_REGS_ENABLED)
- OSUnMapPhysToLin(gpvOCPRegsLinAddr,
- SYS_OMAP3430_OCP_REGS_SIZE,
- PVRSRV_HAP_UNCACHED|PVRSRV_HAP_KERNEL_ONLY,
- IMG_NULL);
+#if 0
+ if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_PM_RUNTIME))
+ {
+ eError = SysPMRuntimeUnregister();
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: Failed to unregister with OSPM!"));
+ gpsSysData = IMG_NULL;
+ return eError;
+ }
+ }
#endif
-
if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS))
}
}
- if(gpsSysData->pvSOCTimerRegisterKM)
- {
- OSUnReservePhys(gpsSysData->pvSOCTimerRegisterKM,
- 4,
- PVRSRV_HAP_MULTI_PROCESS|PVRSRV_HAP_UNCACHED,
- gpsSysData->hSOCTimerRegisterOSMemHandle);
- }
-
SysDeinitialiseCommon(gpsSysData);
-#if defined(NO_HARDWARE)
- if(SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LOCATEDEV))
+#if defined(NO_HARDWARE) || defined(SGX_OCP_REGS_ENABLED)
+ if(gsSGXRegsCPUVAddr != IMG_NULL)
{
+#if defined(NO_HARDWARE)
OSBaseFreeContigMemory(SYS_OMAP3430_SGX_REGS_SIZE, gsSGXRegsCPUVAddr, gsSGXDeviceMap.sRegsCpuPBase);
- }
+#else
+#if defined(SGX_OCP_REGS_ENABLED)
+ OSUnMapPhysToLin(gsSGXRegsCPUVAddr,
+ gsSGXDeviceMap.ui32RegsSize,
+ PVRSRV_HAP_UNCACHED|PVRSRV_HAP_KERNEL_ONLY,
+ IMG_NULL);
+
+ gpvOCPRegsLinAddr = IMG_NULL;
#endif
+#endif
+ gsSGXRegsCPUVAddr = IMG_NULL;
+ gsSGXDeviceMap.pvRegsCpuVBase = gsSGXRegsCPUVAddr;
+ }
+#endif
gpsSysSpecificData->ui32SysSpecificData = 0;
IMG_VOID SysClearInterrupts(SYS_DATA* psSysData, IMG_UINT32 ui32ClearBits)
{
- PVR_UNREFERENCED_PARAMETER(psSysData);
PVR_UNREFERENCED_PARAMETER(ui32ClearBits);
-
+#if defined(NO_HARDWARE)
+ PVR_UNREFERENCED_PARAMETER(psSysData);
+#else
+#if defined(SGX_OCP_NO_INT_BYPASS)
+ OSWriteHWReg(gpvOCPRegsLinAddr, EUR_CR_OCP_IRQSTATUS_2, 0x1);
+#endif
- OSReadHWReg(((PVRSRV_SGXDEV_INFO *)gpsSGXDevNode->pvDevice)->pvRegsBaseKM,
- EUR_CR_EVENT_HOST_CLEAR);
+ OSReadHWReg(((PVRSRV_SGXDEV_INFO *)gpsSGXDevNode->pvDevice)->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR);
+#endif
}
+#if defined(SGX_OCP_NO_INT_BYPASS)
+IMG_VOID SysEnableSGXInterrupts(SYS_DATA *psSysData)
+{
+ SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *)psSysData->pvSysSpecificData;
+ if (SYS_SPECIFIC_DATA_TEST(psSysSpecData, SYS_SPECIFIC_DATA_ENABLE_LISR) && !SYS_SPECIFIC_DATA_TEST(psSysSpecData, SYS_SPECIFIC_DATA_IRQ_ENABLED))
+ {
+ OSWriteHWReg(gpvOCPRegsLinAddr, EUR_CR_OCP_IRQSTATUS_2, 0x1);
+ OSWriteHWReg(gpvOCPRegsLinAddr, EUR_CR_OCP_IRQENABLE_SET_2, 0x1);
+ SYS_SPECIFIC_DATA_SET(psSysSpecData, SYS_SPECIFIC_DATA_IRQ_ENABLED);
+ }
+}
+
+IMG_VOID SysDisableSGXInterrupts(SYS_DATA *psSysData)
+{
+ SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *)psSysData->pvSysSpecificData;
+
+ if (SYS_SPECIFIC_DATA_TEST(psSysSpecData, SYS_SPECIFIC_DATA_IRQ_ENABLED))
+ {
+ OSWriteHWReg(gpvOCPRegsLinAddr, EUR_CR_OCP_IRQENABLE_CLR_2, 0x1);
+ SYS_SPECIFIC_DATA_CLEAR(psSysSpecData, SYS_SPECIFIC_DATA_IRQ_ENABLED);
+ }
+}
+#endif
PVRSRV_ERROR SysSystemPrePowerState(PVRSRV_SYS_POWER_STATE eNewPowerState)
{
#if !defined(__SOCCONFIG_H__)
#define __SOCCONFIG_H__
-#include "syscommon.h"
-#define VS_PRODUCT_NAME "OMAP3"
+#define VS_PRODUCT_NAME "OMAP3630"
-#if defined(SGX530) && (SGX_CORE_REV == 125)
-#define SYS_SGX_CLOCK_SPEED 200000000
-#else
-#define SYS_SGX_CLOCK_SPEED 110666666
-#endif
+#define SYS_SGX_CLOCK_SPEED 200000000
#define SYS_SGX_HWRECOVERY_TIMEOUT_FREQ (100)
#define SYS_SGX_PDS_TIMER_FREQ (1000)
#if !defined(SYS_SGX_ACTIVE_POWER_LATENCY_MS)
-#define SYS_SGX_ACTIVE_POWER_LATENCY_MS (1)
+#define SYS_SGX_ACTIVE_POWER_LATENCY_MS (2)
#endif
/**********************************************************************
*
- * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
#if defined(PVR_LINUX_USING_WORKQUEUES)
#define MAX_HW_TIME_US (1000000)
+#define WAIT_TRY_COUNT (20000)
#else
#define MAX_HW_TIME_US (500000)
+#define WAIT_TRY_COUNT (10000)
#endif
-#define WAIT_TRY_COUNT (10000)
-#define SYS_DEVICE_COUNT 3
+#define SYS_DEVICE_COUNT 15
#endif
/**********************************************************************
*
- * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
#endif
#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
+#if !defined(LDM_PLATFORM)
+#error "LDM_PLATFORM must be set"
+#endif
+/*#define PVR_LINUX_DYNAMIC_SGX_RESOURCE_INFO*/
+//#include <linux/platform_device.h>
+#endif
+
+#if ((defined(DEBUG) || defined(TIMING)) && \
+ (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))) && \
+ !defined(PVR_NO_OMAP_TIMER)
+#define PVR_OMAP3_TIMING_PRCM
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
+/*#include <plat/gpu.h>*/
+#if !defined(PVR_NO_OMAP_TIMER)
+/*#define PVR_OMAP_USE_DM_TIMER_API*/
+//#include <plat/dmtimer.h>
+#endif
+#endif
+
+#if !defined(PVR_NO_OMAP_TIMER)
+//#define PVR_OMAP_TIMER_BASE_IN_SYS_SPEC_DATA
+#endif
#endif
+#if !defined(NO_HARDWARE) && \
+ defined(SYS_USING_INTERRUPTS) && \
+ defined(SGX540)
+#define SGX_OCP_REGS_ENABLED
+#endif
+
+#if defined(__linux__)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) && defined(SGX_OCP_REGS_ENABLED)
+/*#define SGX_OCP_NO_INT_BYPASS*/
+#endif
+#endif
+
#if defined (__cplusplus)
extern "C" {
#endif
-IMG_CHAR *SysCreateVersionString(IMG_CPU_PHYADDR sRegRegion);
-
IMG_VOID DisableSystemClocks(SYS_DATA *psSysData);
PVRSRV_ERROR EnableSystemClocks(SYS_DATA *psSysData);
#define SYS_SPECIFIC_DATA_PM_UNINSTALL_LISR 0x00000200
#define SYS_SPECIFIC_DATA_PM_DISABLE_SYSCLOCKS 0x00000400
+#define SYS_SPECIFIC_DATA_ENABLE_OCPREGS 0x00000800
+#define SYS_SPECIFIC_DATA_ENABLE_PM_RUNTIME 0x00001000
+#if defined(SGX_OCP_REGS_ENABLED) && defined(SGX_OCP_NO_INT_BYPASS)
+#define SYS_SPECIFIC_DATA_IRQ_ENABLED 0x00002000
+#endif
#define SYS_SPECIFIC_DATA_SET(psSysSpecData, flag) ((IMG_VOID)((psSysSpecData)->ui32SysSpecificData |= (flag)))
IMG_UINT32 ui32SysSpecificData;
PVRSRV_DEVICE_NODE *psSGXDevNode;
IMG_BOOL bSGXInitComplete;
+#if defined(PVR_OMAP_TIMER_BASE_IN_SYS_SPEC_DATA)
+ IMG_CPU_PHYADDR sTimerRegPhysBase;
+#endif
#if !defined(__linux__)
IMG_BOOL bSGXClocksEnabled;
#endif
atomic_t sNotifyLockCPU;
IMG_BOOL bCallVDD2PostFunc;
#endif
- struct clk *psCORE_CK;
- struct clk *psSGX_FCK;
- struct clk *psSGX_ICK;
- struct clk *psMPU_CK;
+ struct clk *psCORE_CK;
+ struct clk *psSGX_FCK;
+ struct clk *psSGX_ICK;
+
#if defined(DEBUG) || defined(TIMING)
struct clk *psGPT11_FCK;
struct clk *psGPT11_ICK;
#endif
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22))
- struct constraint_handle *pVdd2Handle;
-#endif
+#if defined(PVR_OMAP_USE_DM_TIMER_API)
+ struct omap_dm_timer *psGPTimer;
+#endif
#endif
} SYS_SPECIFIC_DATA;
extern SYS_SPECIFIC_DATA *gpsSysSpecificData;
+#if defined(SGX_OCP_REGS_ENABLED) && defined(SGX_OCP_NO_INT_BYPASS)
+IMG_VOID SysEnableSGXInterrupts(SYS_DATA* psSysData);
+IMG_VOID SysDisableSGXInterrupts(SYS_DATA* psSysData);
+#else
+#define SysEnableSGXInterrupts(psSysData)
+#define SysDisableSGXInterrupts(psSysData)
+#endif
+
#if defined(SYS_CUSTOM_POWERLOCK_WRAP)
IMG_BOOL WrapSystemPowerChange(SYS_SPECIFIC_DATA *psSysSpecData);
IMG_VOID UnwrapSystemPowerChange(SYS_SPECIFIC_DATA *psSysSpecData);
#endif
+#if defined(__linux__)
+
+PVRSRV_ERROR SysPMRuntimeRegister(void);
+PVRSRV_ERROR SysPMRuntimeUnregister(void);
+
+#else
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(SysPMRuntimeRegister)
+#endif
+static INLINE PVRSRV_ERROR SysPMRuntimeRegister(void)
+{
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(SysPMRuntimeUnregister)
+#endif
+static INLINE PVRSRV_ERROR SysPMRuntimeUnregister(void)
+{
+ return PVRSRV_OK;
+}
+
+#endif
+
#if defined(__cplusplus)
}
#endif
/**********************************************************************
*
- * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
******************************************************************************/
#if defined(__linux__)
-#if defined(PVR_LINUX_USING_WORKQUEUES)
-#include "sysutils_linux_wqueue_compat.c"
-#else
#include "sysutils_linux.c"
#endif
-#endif
/**********************************************************************
*
- * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/hardirq.h>
-#include <linux/spinlock.h>
-#include <asm/bug.h>
-
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26))
-#include <linux/semaphore.h>
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,31))
-#include <plat/resource.h>
-#else
-#include <mach/resource.h>
-#endif
-#else
-#include <asm/semaphore.h>
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22))
-#include <asm/arch/resource.h>
-#endif
-#endif
+#include <linux/mutex.h>
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22)) && \
- (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27))
-#define CONSTRAINT_NOTIFICATIONS
-#endif
#include "sgxdefs.h"
#include "services_headers.h"
#include "sysinfo.h"
#include "sgxinfokm.h"
#include "syslocal.h"
+//#include <linux/platform_device.h>
+//#include <linux/pm_runtime.h>
+
#define ONE_MHZ 1000000
#define HZ_TO_MHZ(m) ((m) / ONE_MHZ)
#define SGX_PARENT_CLOCK "core_ck"
#endif
-#if !defined(PDUMP) && !defined(NO_HARDWARE)
-static IMG_BOOL PowerLockWrappedOnCPU(SYS_SPECIFIC_DATA *psSysSpecData)
-{
- IMG_INT iCPU;
- IMG_BOOL bLocked = IMG_FALSE;
-
- if (!in_interrupt())
- {
- iCPU = get_cpu();
- bLocked = (iCPU == atomic_read(&psSysSpecData->sPowerLockCPU));
-
- put_cpu();
- }
+#if defined(LDM_PLATFORM) && !defined(PVR_DRI_DRM_NOT_PCI)
+extern struct platform_device *gpsPVRLDMDev;
+#endif
- return bLocked;
-}
-static IMG_VOID PowerLockWrap(SYS_SPECIFIC_DATA *psSysSpecData)
+static PVRSRV_ERROR PowerLockWrap(SYS_SPECIFIC_DATA *psSysSpecData, IMG_BOOL bTryLock)
{
- IMG_INT iCPU;
-
- if (!in_interrupt())
- {
-
- iCPU = get_cpu();
-
-
- PVR_ASSERT(iCPU != -1);
-
- PVR_ASSERT(!PowerLockWrappedOnCPU(psSysSpecData));
-
- spin_lock(&psSysSpecData->sPowerLock);
-
- atomic_set(&psSysSpecData->sPowerLockCPU, iCPU);
- }
+ if (!in_interrupt())
+ {
+ if (bTryLock)
+ {
+ int locked = mutex_trylock(&psSysSpecData->sPowerLock);
+ if (locked == 0)
+ {
+ return PVRSRV_ERROR_RETRY;
+ }
+ }
+ else
+ {
+ mutex_lock(&psSysSpecData->sPowerLock);
+ }
+ }
+
+ return PVRSRV_OK;
}
static IMG_VOID PowerLockUnwrap(SYS_SPECIFIC_DATA *psSysSpecData)
{
- if (!in_interrupt())
- {
- PVR_ASSERT(PowerLockWrappedOnCPU(psSysSpecData));
-
- atomic_set(&psSysSpecData->sPowerLockCPU, -1);
-
- spin_unlock(&psSysSpecData->sPowerLock);
-
- put_cpu();
- }
+ if (!in_interrupt())
+ {
+ mutex_unlock(&psSysSpecData->sPowerLock);
+ }
}
-PVRSRV_ERROR SysPowerLockWrap(SYS_DATA *psSysData)
+PVRSRV_ERROR SysPowerLockWrap(IMG_BOOL bTryLock)
{
- SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
+ SYS_DATA *psSysData;
- PowerLockWrap(psSysSpecData);
+ SysAcquireData(&psSysData);
- return PVRSRV_OK;
+ return PowerLockWrap(psSysData->pvSysSpecificData, bTryLock);
}
-IMG_VOID SysPowerLockUnwrap(SYS_DATA *psSysData)
+IMG_VOID SysPowerLockUnwrap(IMG_VOID)
{
- SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
+ SYS_DATA *psSysData;
- PowerLockUnwrap(psSysSpecData);
-}
-#else
-static IMG_BOOL PowerLockWrappedOnCPU(SYS_SPECIFIC_DATA unref__ *psSysSpecData)
-{
- return IMG_FALSE;
-}
+ SysAcquireData(&psSysData);
-static IMG_VOID PowerLockWrap(SYS_SPECIFIC_DATA unref__ *psSysSpecData)
-{
+ PowerLockUnwrap(psSysData->pvSysSpecificData);
}
-static IMG_VOID PowerLockUnwrap(SYS_SPECIFIC_DATA unref__ *psSysSpecData)
-{
-}
-PVRSRV_ERROR SysPowerLockWrap(SYS_DATA unref__ *psSysData)
-{
- return PVRSRV_OK;
-}
-IMG_VOID SysPowerLockUnwrap(SYS_DATA unref__ *psSysData)
-{
-}
-#endif
IMG_BOOL WrapSystemPowerChange(SYS_SPECIFIC_DATA *psSysSpecData)
{
- IMG_BOOL bPowerLock = PowerLockWrappedOnCPU(psSysSpecData);
-
- if (bPowerLock)
- {
- PowerLockUnwrap(psSysSpecData);
- }
-
- return bPowerLock;
+ return IMG_TRUE;
}
IMG_VOID UnwrapSystemPowerChange(SYS_SPECIFIC_DATA *psSysSpecData)
{
- PowerLockWrap(psSysSpecData);
}
static inline IMG_UINT32 scale_by_rate(IMG_UINT32 val, IMG_UINT32 rate1, IMG_UINT32 rate2)
{
IMG_UINT32 rate;
-#if defined(NO_HARDWARE)
rate = SYS_SGX_CLOCK_SPEED;
-#else
+#if !defined(NO_HARDWARE)
PVR_ASSERT(atomic_read(&gpsSysSpecificData->sSGXClocksEnabled) != 0);
-
- rate = clk_get_rate(gpsSysSpecificData->psSGX_FCK);
- PVR_ASSERT(rate != 0);
#endif
psTimingInfo->ui32CoreClockSpeed = rate;
psTimingInfo->ui32HWRecoveryFreq = scale_prop_to_SGX_clock(SYS_SGX_HWRECOVERY_TIMEOUT_FREQ, rate);
psTimingInfo->ui32ActivePowManLatencyms = SYS_SGX_ACTIVE_POWER_LATENCY_MS;
}
-#if defined(CONSTRAINT_NOTIFICATIONS)
-#if !defined(SGX_DYNAMIC_TIMING_INFO)
-#error "SGX_DYNAMIC_TIMING_INFO must be defined for this platform"
-#endif
-
-static struct constraint_id cnstr_id_vdd2 = {
- .type = RES_OPP_CO,
- .data = (IMG_VOID *)"vdd2_opp"
-};
-
-#if !defined(PDUMP) && !defined(NO_HARDWARE)
-static inline IMG_BOOL ConstraintNotificationsEnabled(SYS_SPECIFIC_DATA *psSysSpecData)
-{
- return (atomic_read(&psSysSpecData->sSGXClocksEnabled) != 0) && psSysSpecData->bSGXInitComplete && psSysSpecData->bConstraintNotificationsEnabled;
-
-}
-
-static IMG_BOOL NotifyLockedOnCPU(SYS_SPECIFIC_DATA *psSysSpecData)
-{
- IMG_INT iCPU = get_cpu();
- IMG_BOOL bLocked = (iCPU == atomic_read(&psSysSpecData->sNotifyLockCPU));
-
- put_cpu();
-
- return bLocked;
-}
-
-static IMG_VOID NotifyLock(SYS_SPECIFIC_DATA *psSysSpecData)
-{
- IMG_INT iCPU;
-
- BUG_ON(in_interrupt());
-
-
- iCPU = get_cpu();
-
-
- PVR_ASSERT(iCPU != -1);
-
- PVR_ASSERT(!NotifyLockedOnCPU(psSysSpecData));
-
- spin_lock(&psSysSpecData->sNotifyLock);
-
- atomic_set(&psSysSpecData->sNotifyLockCPU, iCPU);
-}
-
-static IMG_VOID NotifyUnlock(SYS_SPECIFIC_DATA *psSysSpecData)
-{
- PVR_ASSERT(NotifyLockedOnCPU(psSysSpecData));
-
- atomic_set(&psSysSpecData->sNotifyLockCPU, -1);
-
- spin_unlock(&psSysSpecData->sNotifyLock);
-
- put_cpu();
-}
-
-static int VDD2PostFunc(struct notifier_block *n, unsigned long event, IMG_VOID *ptr)
-{
- PVR_UNREFERENCED_PARAMETER(n);
- PVR_UNREFERENCED_PARAMETER(event);
- PVR_UNREFERENCED_PARAMETER(ptr);
-
- if (in_interrupt())
- {
- PVR_DPF((PVR_DBG_ERROR, "%s Called in interrupt context. Ignoring.", __FUNCTION__));
- return 0;
- }
-
-
- if (!NotifyLockedOnCPU(gpsSysSpecificData))
- {
- return 0;
- }
-
-#if defined(DEBUG)
- if (ConstraintNotificationsEnabled(gpsSysSpecificData))
- {
- IMG_UINT32 rate;
-
- rate = clk_get_rate(gpsSysSpecificData->psSGX_FCK);
-
- PVR_ASSERT(rate != 0);
-
- PVR_DPF((PVR_DBG_MESSAGE, "%s: SGX clock rate: %dMHz", __FUNCTION__, HZ_TO_MHZ(rate)));
- }
-#endif
- if (gpsSysSpecificData->bCallVDD2PostFunc)
- {
- PVRSRVDevicePostClockSpeedChange(gpsSysSpecificData->psSGXDevNode->sDevId.ui32DeviceIndex, IMG_TRUE, IMG_NULL);
-
- gpsSysSpecificData->bCallVDD2PostFunc = IMG_FALSE;
- }
- else
- {
- if (ConstraintNotificationsEnabled(gpsSysSpecificData))
- {
- PVR_TRACE(("%s: Not calling PVR clock speed notification functions", __FUNCTION__));
- }
- }
-
- NotifyUnlock(gpsSysSpecificData);
-
- return 0;
-}
-
-static int VDD2PreFunc(struct notifier_block *n, unsigned long event, IMG_VOID *ptr)
-{
- PVR_UNREFERENCED_PARAMETER(n);
- PVR_UNREFERENCED_PARAMETER(event);
- PVR_UNREFERENCED_PARAMETER(ptr);
-
- if (in_interrupt())
- {
- PVR_DPF((PVR_DBG_WARNING, "%s Called in interrupt context. Ignoring.", __FUNCTION__));
- return 0;
- }
-
- if (PowerLockWrappedOnCPU(gpsSysSpecificData))
- {
- PVR_DPF((PVR_DBG_WARNING, "%s Called from within a power transition. Ignoring.", __FUNCTION__));
- return 0;
- }
-
- NotifyLock(gpsSysSpecificData);
-
- PVR_ASSERT(!gpsSysSpecificData->bCallVDD2PostFunc);
-
- if (ConstraintNotificationsEnabled(gpsSysSpecificData))
- {
- PVRSRV_ERROR eError;
-
- eError = PVRSRVDevicePreClockSpeedChange(gpsSysSpecificData->psSGXDevNode->sDevId.ui32DeviceIndex, IMG_TRUE, IMG_NULL);
-
- gpsSysSpecificData->bCallVDD2PostFunc = (eError == PVRSRV_OK);
-
- }
-
- return 0;
-}
-
-static struct notifier_block sVDD2Pre = {
- VDD2PreFunc,
- NULL
-};
-
-static struct notifier_block sVDD2Post = {
- VDD2PostFunc,
- NULL
-};
-
-static IMG_VOID RegisterConstraintNotifications(IMG_VOID)
-{
- PVR_TRACE(("Registering constraint notifications"));
-
- PVR_ASSERT(!gpsSysSpecificData->bConstraintNotificationsEnabled);
-
- constraint_register_pre_notification(gpsSysSpecificData->pVdd2Handle, &sVDD2Pre,
- max_vdd2_opp+1);
-
- constraint_register_post_notification(gpsSysSpecificData->pVdd2Handle, &sVDD2Post,
- max_vdd2_opp+1);
-
-
- NotifyLock(gpsSysSpecificData);
- gpsSysSpecificData->bConstraintNotificationsEnabled = IMG_TRUE;
- NotifyUnlock(gpsSysSpecificData);
-
- PVR_TRACE(("VDD2 constraint notifications registered"));
-}
-
-static IMG_VOID UnRegisterConstraintNotifications(IMG_VOID)
-{
- PVR_TRACE(("Unregistering constraint notifications"));
-
-
- NotifyLock(gpsSysSpecificData);
- gpsSysSpecificData->bConstraintNotificationsEnabled = IMG_FALSE;
- NotifyUnlock(gpsSysSpecificData);
-
-
- constraint_unregister_pre_notification(gpsSysSpecificData->pVdd2Handle, &sVDD2Pre,
- max_vdd2_opp+1);
-
- constraint_unregister_post_notification(gpsSysSpecificData->pVdd2Handle, &sVDD2Post,
- max_vdd2_opp+1);
-}
-#else
-static IMG_VOID RegisterConstraintNotifications(IMG_VOID)
-{
-}
-
-static IMG_VOID UnRegisterConstraintNotifications(IMG_VOID)
-{
-}
-#endif
-#endif
-
PVRSRV_ERROR EnableSGXClocks(SYS_DATA *psSysData)
{
#if !defined(NO_HARDWARE)
SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
- long lNewRate;
- long lRate;
IMG_INT res;
-
+ long lRate, lNewRate;
if (atomic_read(&psSysSpecData->sSGXClocksEnabled) != 0)
{
}
PVR_DPF((PVR_DBG_MESSAGE, "EnableSGXClocks: Enabling SGX Clocks"));
-
-#if defined(DEBUG)
- {
-
- IMG_UINT32 rate = clk_get_rate(psSysSpecData->psMPU_CK);
- PVR_DPF((PVR_DBG_MESSAGE, "EnableSGXClocks: CPU Clock is %dMhz", HZ_TO_MHZ(rate)));
- }
-#endif
-
- res = clk_enable(psSysSpecData->psSGX_FCK);
+
+ res = clk_enable(psSysSpecData->psSGX_FCK);
if (res < 0)
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: Couldn't enable SGX functional clock (%d)", res));
- return PVRSRV_ERROR_UNABLE_TO_ENABLE_CLOCK;
- }
+ {
+ PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: Couldn't enable SGX functional clock (%d)", res));
+ return PVRSRV_ERROR_UNABLE_TO_ENABLE_CLOCK;
+ }
- res = clk_enable(psSysSpecData->psSGX_ICK);
- if (res < 0)
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: Couldn't enable SGX interface clock (%d)", res));
+ res = clk_enable(psSysSpecData->psSGX_ICK);
+ if (res < 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: Couldn't enable SGX interface clock (%d)", res));
- clk_disable(psSysSpecData->psSGX_FCK);
- return PVRSRV_ERROR_UNABLE_TO_ENABLE_CLOCK;
- }
+ clk_disable(psSysSpecData->psSGX_FCK);
+ return PVRSRV_ERROR_UNABLE_TO_ENABLE_CLOCK;
+ }
lNewRate = clk_round_rate(psSysSpecData->psSGX_FCK, SYS_SGX_CLOCK_SPEED + ONE_MHZ);
- if (lNewRate <= 0)
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: Couldn't round SGX functional clock rate"));
- return PVRSRV_ERROR_UNABLE_TO_ROUND_CLOCK_RATE;
- }
-
-
- lRate = clk_get_rate(psSysSpecData->psSGX_FCK);
- if (lRate != lNewRate)
- {
- res = clk_set_rate(psSysSpecData->psSGX_FCK, lNewRate);
- if (res < 0)
- {
- PVR_DPF((PVR_DBG_WARNING, "EnableSGXClocks: Couldn't set SGX functional clock rate (%d)", res));
- }
- }
+ if (lNewRate <= 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: Couldn't round SGX functional clock rate"));
+ return PVRSRV_ERROR_UNABLE_TO_ROUND_CLOCK_RATE;
+ }
+
+
+ lRate = clk_get_rate(psSysSpecData->psSGX_FCK);
+ if (lRate != lNewRate)
+ {
+ res = clk_set_rate(psSysSpecData->psSGX_FCK, lNewRate);
+ if (res < 0)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "EnableSGXClocks: Couldn't set SGX functional clock rate (%d)", res));
+ return PVRSRV_ERROR_UNABLE_TO_SET_CLOCK_RATE;
+ }
+ }
#if defined(DEBUG)
+ {
+ IMG_UINT32 rate = clk_get_rate(psSysSpecData->psSGX_FCK);
+ PVR_DPF((PVR_DBG_MESSAGE, "EnableSGXClocks: SGX Functional Clock is %dMhz", HZ_TO_MHZ(rate)));
+ }
+#endif
+
+
+
+#if defined(LDM_PLATFORM) && !defined(PVR_DRI_DRM_NOT_PCI)
{
- IMG_UINT32 rate = clk_get_rate(psSysSpecData->psSGX_FCK);
- PVR_DPF((PVR_DBG_MESSAGE, "EnableSGXClocks: SGX Functional Clock is %dMhz", HZ_TO_MHZ(rate)));
+
+// int res = pm_runtime_get_sync(&gpsPVRLDMDev->dev);
+// if (res < 0)
+// {
+// PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: pm_runtime_get_sync failed (%d)", -res));
+// return PVRSRV_ERROR_UNABLE_TO_ENABLE_CLOCK;
+// }
}
#endif
+// SysEnableSGXInterrupts(psSysData);
atomic_set(&psSysSpecData->sSGXClocksEnabled, 1);
#if !defined(NO_HARDWARE)
SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
-
if (atomic_read(&psSysSpecData->sSGXClocksEnabled) == 0)
{
return;
}
-
+
PVR_DPF((PVR_DBG_MESSAGE, "DisableSGXClocks: Disabling SGX Clocks"));
+
+ clk_disable(psSysSpecData->psSGX_FCK);
- if (psSysSpecData->psSGX_ICK)
- {
- clk_disable(psSysSpecData->psSGX_ICK);
- }
+ clk_disable(psSysSpecData->psSGX_ICK);
- if (psSysSpecData->psSGX_FCK)
+// SysDisableSGXInterrupts(psSysData);
+
+#if defined(LDM_PLATFORM) && !defined(PVR_DRI_DRM_NOT_PCI)
{
- clk_disable(psSysSpecData->psSGX_FCK);
+// int res = pm_runtime_put_sync(&gpsPVRLDMDev->dev);
+// if (res < 0)
+// {
+// PVR_DPF((PVR_DBG_ERROR, "DisableSGXClocks: pm_runtime_put_sync failed (%d)", -res));
+// }
}
+#endif
-
atomic_set(&psSysSpecData->sSGXClocksEnabled, 0);
#else
#endif
}
-PVRSRV_ERROR EnableSystemClocks(SYS_DATA *psSysData)
+#if (defined(DEBUG) || defined(TIMING)) && !defined(PVR_NO_OMAP_TIMER)
+#if defined(PVR_OMAP_USE_DM_TIMER_API)
+#define GPTIMER_TO_USE 11
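+/* Claims GPTIMER11 through the OMAP dmtimer API, clocks it from the system
+   clock and leaves it free-running for use as a timing reference. */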
+static PVRSRV_ERROR AcquireGPTimer(SYS_SPECIFIC_DATA *psSysSpecData)
{
- SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
- struct clk *psCLK;
- IMG_INT res;
- PVRSRV_ERROR eError;
- IMG_BOOL bPowerLock;
+ PVR_ASSERT(psSysSpecData->psGPTimer == NULL);
-#if defined(DEBUG) || defined(TIMING)
- IMG_INT rate;
- struct clk *sys_ck;
- IMG_CPU_PHYADDR TimerRegPhysBase;
- IMG_HANDLE hTimerEnable;
- IMG_UINT32 *pui32TimerEnable;
+
+ psSysSpecData->psGPTimer = omap_dm_timer_request_specific(GPTIMER_TO_USE);
+ if (psSysSpecData->psGPTimer == NULL)
+ {
+
+ PVR_DPF((PVR_DBG_WARNING, "%s: omap_dm_timer_request_specific failed", __FUNCTION__));
+ return PVRSRV_ERROR_CLOCK_REQUEST_FAILED;
+ }
-#endif
+
+ omap_dm_timer_set_source(psSysSpecData->psGPTimer, OMAP_TIMER_SRC_SYS_CLK);
+ omap_dm_timer_enable(psSysSpecData->psGPTimer);
- PVR_TRACE(("EnableSystemClocks: Enabling System Clocks"));
+
+ omap_dm_timer_set_load_start(psSysSpecData->psGPTimer, 1, 0);
- if (!psSysSpecData->bSysClocksOneTimeInit)
- {
- bPowerLock = IMG_FALSE;
+ omap_dm_timer_start(psSysSpecData->psGPTimer);
- spin_lock_init(&psSysSpecData->sPowerLock);
- atomic_set(&psSysSpecData->sPowerLockCPU, -1);
- spin_lock_init(&psSysSpecData->sNotifyLock);
- atomic_set(&psSysSpecData->sNotifyLockCPU, -1);
+
+ psSysSpecData->sTimerRegPhysBase.uiAddr = SYS_OMAP3430_GP11TIMER_REGS_SYS_PHYS_BASE;
- atomic_set(&psSysSpecData->sSGXClocksEnabled, 0);
+ return PVRSRV_OK;
+}
- psCLK = clk_get(NULL, SGX_PARENT_CLOCK);
- if (IS_ERR(psCLK))
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSsystemClocks: Couldn't get Core Clock"));
- goto ExitError;
- }
- psSysSpecData->psCORE_CK = psCLK;
+static void ReleaseGPTimer(SYS_SPECIFIC_DATA *psSysSpecData)
+{
+ if (psSysSpecData->psGPTimer != NULL)
+ {
+
+ (void) omap_dm_timer_stop(psSysSpecData->psGPTimer);
- psCLK = clk_get(NULL, "sgx_fck");
- if (IS_ERR(psCLK))
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSsystemClocks: Couldn't get SGX Functional Clock"));
- goto ExitError;
- }
- psSysSpecData->psSGX_FCK = psCLK;
+ omap_dm_timer_disable(psSysSpecData->psGPTimer);
- psCLK = clk_get(NULL, "sgx_ick");
- if (IS_ERR(psCLK))
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get SGX Interface Clock"));
- goto ExitError;
- }
- psSysSpecData->psSGX_ICK = psCLK;
+ omap_dm_timer_free(psSysSpecData->psGPTimer);
-#if defined(DEBUG)
- psCLK = clk_get(NULL, "mpu_ck");
- if (IS_ERR(psCLK))
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get MPU Clock"));
- goto ExitError;
- }
- psSysSpecData->psMPU_CK = psCLK;
-#endif
- res = clk_set_parent(psSysSpecData->psSGX_FCK, psSysSpecData->psCORE_CK);
- if (res < 0)
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't set SGX parent clock (%d)", res));
- goto ExitError;
- }
+ psSysSpecData->sTimerRegPhysBase.uiAddr = 0;
- psSysSpecData->bSysClocksOneTimeInit = IMG_TRUE;
- }
- else
- {
-
- bPowerLock = PowerLockWrappedOnCPU(psSysSpecData);
- if (bPowerLock)
- {
- PowerLockUnwrap(psSysSpecData);
- }
+ psSysSpecData->psGPTimer = NULL;
}
-#if defined(CONSTRAINT_NOTIFICATIONS)
- psSysSpecData->pVdd2Handle = constraint_get(PVRSRV_MODNAME, &cnstr_id_vdd2);
- if (IS_ERR(psSysSpecData->pVdd2Handle))
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get VDD2 constraint handle"));
- goto ExitError;
- }
+}
+#else
+static PVRSRV_ERROR AcquireGPTimer(SYS_SPECIFIC_DATA *psSysSpecData)
+{
+#if defined(PVR_OMAP3_TIMING_PRCM)
+ struct clk *psCLK;
+ IMG_INT res;
+ struct clk *sys_ck;
+ IMG_INT rate;
+#endif
+ PVRSRV_ERROR eError;
- RegisterConstraintNotifications();
+ IMG_CPU_PHYADDR sTimerRegPhysBase;
+ IMG_HANDLE hTimerEnable;
+ IMG_UINT32 *pui32TimerEnable;
+#if defined(PVR_OMAP_TIMER_BASE_IN_SYS_SPEC_DATA)
+ PVR_ASSERT(psSysSpecData->sTimerRegPhysBase.uiAddr == 0);
#endif
-#if defined(DEBUG) || defined(TIMING)
+
+#if defined(PVR_OMAP3_TIMING_PRCM)
psCLK = clk_get(NULL, "gpt11_fck");
if (IS_ERR(psCLK))
{
PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get GPTIMER11 functional clock"));
- goto ExitUnRegisterConstraintNotifications;
+ goto ExitError;
}
psSysSpecData->psGPT11_FCK = psCLK;
if (IS_ERR(psCLK))
{
PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get GPTIMER11 interface clock"));
- goto ExitUnRegisterConstraintNotifications;
+ goto ExitError;
}
psSysSpecData->psGPT11_ICK = psCLK;
if (IS_ERR(sys_ck))
{
PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get System clock"));
- goto ExitUnRegisterConstraintNotifications;
+ goto ExitError;
}
if(clk_get_parent(psSysSpecData->psGPT11_FCK) != sys_ck)
if (res < 0)
{
PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't set GPTIMER11 parent clock (%d)", res));
- goto ExitUnRegisterConstraintNotifications;
+ goto ExitError;
}
}
-
rate = clk_get_rate(psSysSpecData->psGPT11_FCK);
PVR_TRACE(("GPTIMER11 clock is %dMHz", HZ_TO_MHZ(rate)));
if (res < 0)
{
PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't enable GPTIMER11 functional clock (%d)", res));
- goto ExitUnRegisterConstraintNotifications;
+ goto ExitError;
}
res = clk_enable(psSysSpecData->psGPT11_ICK);
PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't enable GPTIMER11 interface clock (%d)", res));
goto ExitDisableGPT11FCK;
}
+#endif
- TimerRegPhysBase.uiAddr = SYS_OMAP3430_GP11TIMER_TSICR_SYS_PHYS_BASE;
- pui32TimerEnable = OSMapPhysToLin(TimerRegPhysBase,
+ sTimerRegPhysBase.uiAddr = SYS_OMAP3430_GP11TIMER_TSICR_SYS_PHYS_BASE;
+ pui32TimerEnable = OSMapPhysToLin(sTimerRegPhysBase,
4,
PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
&hTimerEnable);
goto ExitDisableGPT11ICK;
}
- rate = *pui32TimerEnable;
- if(!(rate & 4))
+ if(!(*pui32TimerEnable & 4))
{
PVR_TRACE(("Setting GPTIMER11 mode to posted (currently is non-posted)"));
- *pui32TimerEnable = rate | 4;
+ *pui32TimerEnable |= 4;
}
OSUnMapPhysToLin(pui32TimerEnable,
hTimerEnable);
- TimerRegPhysBase.uiAddr = SYS_OMAP3430_GP11TIMER_ENABLE_SYS_PHYS_BASE;
- pui32TimerEnable = OSMapPhysToLin(TimerRegPhysBase,
+ sTimerRegPhysBase.uiAddr = SYS_OMAP3430_GP11TIMER_ENABLE_SYS_PHYS_BASE;
+ pui32TimerEnable = OSMapPhysToLin(sTimerRegPhysBase,
4,
PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
&hTimerEnable);
goto ExitDisableGPT11ICK;
}
-
*pui32TimerEnable = 3;
OSUnMapPhysToLin(pui32TimerEnable,
4,
PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
hTimerEnable);
-
-#endif
-
-#if defined(PDUMP) && !defined(NO_HARDWARE) && defined(CONSTRAINT_NOTIFICATIONS)
- PVR_TRACE(("EnableSystemClocks: Setting SGX OPP constraint"));
-
-
- res = constraint_set(psSysSpecData->pVdd2Handle, max_vdd2_opp);
- if (res != 0)
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: constraint_set failed (%d)", res));
- goto ExitConstraintSetFailed;
- }
+#if defined(PVR_OMAP_TIMER_BASE_IN_SYS_SPEC_DATA)
+ psSysSpecData->sTimerRegPhysBase = sTimerRegPhysBase;
#endif
+
eError = PVRSRV_OK;
+
goto Exit;
-#if defined(PDUMP) && !defined(NO_HARDWARE) && defined(CONSTRAINT_NOTIFICATIONS)
-ExitConstraintSetFailed:
-#endif
-#if defined(DEBUG) || defined(TIMING)
ExitDisableGPT11ICK:
+#if defined(PVR_OMAP3_TIMING_PRCM)
clk_disable(psSysSpecData->psGPT11_ICK);
ExitDisableGPT11FCK:
clk_disable(psSysSpecData->psGPT11_FCK);
-ExitUnRegisterConstraintNotifications:
-#endif
-#if defined(CONSTRAINT_NOTIFICATIONS)
- UnRegisterConstraintNotifications();
- constraint_put(psSysSpecData->pVdd2Handle);
-#endif
ExitError:
- eError = PVRSRV_ERROR_DISABLE_CLOCK_FAILURE;
+#endif
+ eError = PVRSRV_ERROR_CLOCK_REQUEST_FAILED;
Exit:
- if (bPowerLock)
- {
- PowerLockWrap(psSysSpecData);
- }
-
return eError;
}
-IMG_VOID DisableSystemClocks(SYS_DATA *psSysData)
+static void ReleaseGPTimer(SYS_SPECIFIC_DATA *psSysSpecData)
{
- SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
- IMG_BOOL bPowerLock;
-#if defined(DEBUG) || defined(TIMING)
- IMG_CPU_PHYADDR TimerRegPhysBase;
IMG_HANDLE hTimerDisable;
IMG_UINT32 *pui32TimerDisable;
-#endif
-
- PVR_TRACE(("DisableSystemClocks: Disabling System Clocks"));
-
-
- DisableSGXClocks(psSysData);
-
- bPowerLock = PowerLockWrappedOnCPU(psSysSpecData);
- if (bPowerLock)
- {
-
- PowerLockUnwrap(psSysSpecData);
- }
-
-#if defined(PDUMP) && !defined(NO_HARDWARE) && defined(CONSTRAINT_NOTIFICATIONS)
+ IMG_CPU_PHYADDR TimerRegPhysBase;
+#if defined(PVR_OMAP_TIMER_BASE_IN_SYS_SPEC_DATA)
+ if (psSysSpecData->sTimerRegPhysBase.uiAddr == 0)
{
- int res;
-
- PVR_TRACE(("DisableSystemClocks: Removing SGX OPP constraint"));
-
-
- res = constraint_remove(psSysSpecData->pVdd2Handle);
- if (res != 0)
- {
- PVR_DPF((PVR_DBG_WARNING, "DisableSystemClocks: constraint_remove failed (%d)", res));
- }
+ return;
}
#endif
-
-#if defined(CONSTRAINT_NOTIFICATIONS)
- UnRegisterConstraintNotifications();
-#endif
-
-#if defined(DEBUG) || defined(TIMING)
-
- TimerRegPhysBase.uiAddr = SYS_OMAP3430_GP11TIMER_ENABLE_SYS_PHYS_BASE;
+ TimerRegPhysBase.uiAddr = SYS_OMAP3430_GP11TIMER_ENABLE_SYS_PHYS_BASE;
pui32TimerDisable = OSMapPhysToLin(TimerRegPhysBase,
4,
PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
hTimerDisable);
}
+#if defined(PVR_OMAP_TIMER_BASE_IN_SYS_SPEC_DATA)
+ psSysSpecData->sTimerRegPhysBase.uiAddr = 0;
+#endif
+
+#if defined(PVR_OMAP3_TIMING_PRCM)
clk_disable(psSysSpecData->psGPT11_ICK);
clk_disable(psSysSpecData->psGPT11_FCK);
-
+#endif
+}
+#endif
+#else
+static PVRSRV_ERROR AcquireGPTimer(SYS_SPECIFIC_DATA *psSysSpecData)
+{
+ PVR_UNREFERENCED_PARAMETER(psSysSpecData);
+ return PVRSRV_OK;
+}
+static void ReleaseGPTimer(SYS_SPECIFIC_DATA *psSysSpecData)
+{
+ PVR_UNREFERENCED_PARAMETER(psSysSpecData);
+}
#endif
-#if defined(CONSTRAINT_NOTIFICATIONS)
- constraint_put(psSysSpecData->pVdd2Handle);
-#endif
- if (bPowerLock)
+
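+/* One-time setup of the power-lock mutex and the SGX clock handles
+   (functional clock parented to core_ck), followed by GP timer acquisition. */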
+PVRSRV_ERROR EnableSystemClocks(SYS_DATA *psSysData)
+{
+ SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
+ struct clk *psCLK;
+ IMG_INT res;
+ PVR_TRACE(("EnableSystemClocks: Enabling System Clocks"));
+
+ if (!psSysSpecData->bSysClocksOneTimeInit)
{
- PowerLockWrap(psSysSpecData);
+ mutex_init(&psSysSpecData->sPowerLock);
+
+ atomic_set(&psSysSpecData->sSGXClocksEnabled, 0);
+
+ psCLK = clk_get(NULL, SGX_PARENT_CLOCK);
+ if (IS_ERR(psCLK))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "EnableSsystemClocks: Couldn't get Core Clock"));
+ return PVRSRV_ERROR_UNABLE_TO_GET_PARENT_CLOCK;
+ }
+ psSysSpecData->psCORE_CK = psCLK;
+
+ psCLK = clk_get(NULL, "sgx_fck");
+ if (IS_ERR(psCLK))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "EnableSsystemClocks: Couldn't get SGX Functional Clock"));
+ return PVRSRV_ERROR_UNABLE_TO_GET_CLOCK;
+ }
+ psSysSpecData->psSGX_FCK = psCLK;
+
+ psCLK = clk_get(NULL, "sgx_ick");
+ if (IS_ERR(psCLK))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get SGX Interface Clock"));
+ return PVRSRV_ERROR_UNABLE_TO_GET_CLOCK;
+ }
+ psSysSpecData->psSGX_ICK = psCLK;
+
+ res = clk_set_parent(psSysSpecData->psSGX_FCK, psSysSpecData->psCORE_CK);
+ if (res < 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't set SGX parent clock (%d)", res));
+ return PVRSRV_ERROR_UNABLE_TO_GET_PARENT_CLOCK;
+ }
+
+
+ psSysSpecData->bSysClocksOneTimeInit = IMG_TRUE;
}
+
+ return AcquireGPTimer(psSysSpecData);
+}
+
+IMG_VOID DisableSystemClocks(SYS_DATA *psSysData)
+{
+ SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
+ PVR_TRACE(("DisableSystemClocks: Disabling System Clocks"));
+
+ DisableSGXClocks(psSysData);
+
+ ReleaseGPTimer(psSysSpecData);
+
+}
+
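+/* Runtime PM registration hooks; the pm_runtime calls are commented out
+   here, so on this platform they are effectively no-ops. */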
+PVRSRV_ERROR SysPMRuntimeRegister(void)
+{
+#if defined(LDM_PLATFORM) && !defined(PVR_DRI_DRM_NOT_PCI)
+// pm_runtime_enable(&gpsPVRLDMDev->dev);
+#endif
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR SysPMRuntimeUnregister(void)
+{
+#if defined(LDM_PLATFORM) && !defined(PVR_DRI_DRM_NOT_PCI)
+// pm_runtime_disable(&gpsPVRLDMDev->dev);
+#endif
+ return PVRSRV_OK;
}
--- /dev/null
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#if !defined(__OEMFUNCS_H__)
+#define __OEMFUNCS_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+typedef IMG_UINT32 (*PFN_SRV_BRIDGEDISPATCH)( IMG_UINT32 Ioctl,
+ IMG_BYTE *pInBuf,
+ IMG_UINT32 InBufLen,
+ IMG_BYTE *pOutBuf,
+ IMG_UINT32 OutBufLen,
+ IMG_UINT32 *pdwBytesTransferred);
+typedef struct PVRSRV_DC_OEM_JTABLE_TAG
+{
+ PFN_SRV_BRIDGEDISPATCH pfnOEMBridgeDispatch;
+ IMG_PVOID pvDummy1;
+ IMG_PVOID pvDummy2;
+ IMG_PVOID pvDummy3;
+
+} PVRSRV_DC_OEM_JTABLE;
+
+#define OEM_GET_EXT_FUNCS (1<<1)
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
+
--- /dev/null
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#include "sysconfig.h"
+#include "services_headers.h"
+#include "kerneldisplay.h"
+#include "oemfuncs.h"
+#include "sgxinfo.h"
+#include "sgxinfokm.h"
+#include "syslocal.h"
+
+#include "ocpdefs.h"
+
+SYS_DATA* gpsSysData = (SYS_DATA*)IMG_NULL;
+SYS_DATA gsSysData;
+
+static SYS_SPECIFIC_DATA gsSysSpecificData;
+SYS_SPECIFIC_DATA *gpsSysSpecificData;
+
+static IMG_UINT32 gui32SGXDeviceID;
+static SGX_DEVICE_MAP gsSGXDeviceMap;
+static PVRSRV_DEVICE_NODE *gpsSGXDevNode;
+
+#define DEVICE_SGX_INTERRUPT (1 << 0)
+
+#if defined(NO_HARDWARE) || defined(SGX_OCP_REGS_ENABLED)
+static IMG_CPU_VIRTADDR gsSGXRegsCPUVAddr;
+#endif
+
+#if defined(PVR_LINUX_DYNAMIC_SGX_RESOURCE_INFO)
+extern struct platform_device *gpsPVRLDMDev;
+#endif
+
+IMG_UINT32 PVRSRV_BridgeDispatchKM(IMG_UINT32 Ioctl,
+ IMG_BYTE *pInBuf,
+ IMG_UINT32 InBufLen,
+ IMG_BYTE *pOutBuf,
+ IMG_UINT32 OutBufLen,
+ IMG_UINT32 *pdwBytesTransferred);
+
+#if defined(SGX_OCP_REGS_ENABLED)
+
+static IMG_CPU_VIRTADDR gpvOCPRegsLinAddr;
+
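+/* Enables the SGX clocks and, unless SGX_OCP_NO_INT_BYPASS is defined,
+   programs EUR_CR_OCP_SYSCONFIG and EUR_CR_OCP_DEBUG_CONFIG so that SGX
+   interrupts bypass the OCP block. */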
+static PVRSRV_ERROR EnableSGXClocksWrap(SYS_DATA *psSysData)
+{
+ PVRSRV_ERROR eError = EnableSGXClocks(psSysData);
+
+#if !defined(SGX_OCP_NO_INT_BYPASS)
+ if(eError == PVRSRV_OK)
+ {
+ OSWriteHWReg(gpvOCPRegsLinAddr, EUR_CR_OCP_SYSCONFIG, 0x14);
+ OSWriteHWReg(gpvOCPRegsLinAddr, EUR_CR_OCP_DEBUG_CONFIG, EUR_CR_OCP_DEBUG_CONFIG_THALIA_INT_BYPASS_MASK);
+ }
+#endif
+ return eError;
+}
+
+#else
+
+static INLINE PVRSRV_ERROR EnableSGXClocksWrap(SYS_DATA *psSysData)
+{
+ return EnableSGXClocks(psSysData);
+}
+
+#endif
+
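+/* Enables the system clocks; without active power management the SGX
+   clocks are enabled here as well, and rolled back if that fails. */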
+static INLINE PVRSRV_ERROR EnableSystemClocksWrap(SYS_DATA *psSysData)
+{
+ PVRSRV_ERROR eError = EnableSystemClocks(psSysData);
+
+#if !defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
+ if(eError == PVRSRV_OK)
+ {
+
+ eError = EnableSGXClocksWrap(psSysData);
+ if (eError != PVRSRV_OK)
+ {
+ DisableSystemClocks(psSysData);
+ }
+ }
+#endif
+
+ return eError;
+}
+
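+/* Fills in gsSGXDeviceMap: a dummy register block for NO_HARDWARE builds,
+   resources queried from the platform device when dynamic resource info is
+   enabled, or the fixed OMAP4430 register base, size and IRQ otherwise. */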
+static PVRSRV_ERROR SysLocateDevices(SYS_DATA *psSysData)
+{
+#if defined(NO_HARDWARE)
+ PVRSRV_ERROR eError;
+ IMG_CPU_PHYADDR sCpuPAddr;
+#else
+#if defined(PVR_LINUX_DYNAMIC_SGX_RESOURCE_INFO)
+ struct resource *dev_res;
+ int dev_irq;
+#endif
+#endif
+
+ PVR_UNREFERENCED_PARAMETER(psSysData);
+
+
+ gsSGXDeviceMap.ui32Flags = 0x0;
+
+#if defined(NO_HARDWARE)
+
+
+ gsSGXDeviceMap.ui32RegsSize = SYS_OMAP4430_SGX_REGS_SIZE;
+
+ eError = OSBaseAllocContigMemory(gsSGXDeviceMap.ui32RegsSize,
+ &gsSGXRegsCPUVAddr,
+ &sCpuPAddr);
+ if(eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ gsSGXDeviceMap.sRegsCpuPBase = sCpuPAddr;
+ gsSGXDeviceMap.sRegsSysPBase = SysCpuPAddrToSysPAddr(gsSGXDeviceMap.sRegsCpuPBase);
+#if defined(__linux__)
+
+ gsSGXDeviceMap.pvRegsCpuVBase = gsSGXRegsCPUVAddr;
+#else
+
+ gsSGXDeviceMap.pvRegsCpuVBase = IMG_NULL;
+#endif
+
+ OSMemSet(gsSGXRegsCPUVAddr, 0, gsSGXDeviceMap.ui32RegsSize);
+
+
+
+
+ gsSGXDeviceMap.ui32IRQ = 0;
+
+#else
+#if defined(PVR_LINUX_DYNAMIC_SGX_RESOURCE_INFO)
+
+ dev_res = platform_get_resource(gpsPVRLDMDev, IORESOURCE_MEM, 0);
+ if (dev_res == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: platform_get_resource failed", __FUNCTION__));
+ return PVRSRV_ERROR_INVALID_DEVICE;
+ }
+
+ dev_irq = platform_get_irq(gpsPVRLDMDev, 0);
+ if (dev_irq < 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: platform_get_irq failed (%d)", __FUNCTION__, -dev_irq));
+ return PVRSRV_ERROR_INVALID_DEVICE;
+ }
+
+ gsSGXDeviceMap.sRegsSysPBase.uiAddr = dev_res->start;
+ gsSGXDeviceMap.sRegsCpuPBase =
+ SysSysPAddrToCpuPAddr(gsSGXDeviceMap.sRegsSysPBase);
+ PVR_TRACE(("SGX register base: 0x%lx", (unsigned long)gsSGXDeviceMap.sRegsCpuPBase.uiAddr));
+
+ gsSGXDeviceMap.ui32RegsSize = (unsigned int)(dev_res->end - dev_res->start);
+ PVR_TRACE(("SGX register size: %d",gsSGXDeviceMap.ui32RegsSize));
+
+ gsSGXDeviceMap.ui32IRQ = dev_irq;
+ PVR_TRACE(("SGX IRQ: %d", gsSGXDeviceMap.ui32IRQ));
+#else
+ gsSGXDeviceMap.sRegsSysPBase.uiAddr = SYS_OMAP4430_SGX_REGS_SYS_PHYS_BASE;
+ gsSGXDeviceMap.sRegsCpuPBase = SysSysPAddrToCpuPAddr(gsSGXDeviceMap.sRegsSysPBase);
+ gsSGXDeviceMap.ui32RegsSize = SYS_OMAP4430_SGX_REGS_SIZE;
+
+ gsSGXDeviceMap.ui32IRQ = SYS_OMAP4430_SGX_IRQ;
+
+#endif
+#if defined(SGX_OCP_REGS_ENABLED)
+ gsSGXRegsCPUVAddr = OSMapPhysToLin(gsSGXDeviceMap.sRegsCpuPBase,
+ gsSGXDeviceMap.ui32RegsSize,
+ PVRSRV_HAP_UNCACHED|PVRSRV_HAP_KERNEL_ONLY,
+ IMG_NULL);
+
+ if (gsSGXRegsCPUVAddr == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysLocateDevices: Failed to map SGX registers"));
+ return PVRSRV_ERROR_BAD_MAPPING;
+ }
+
+
+ gsSGXDeviceMap.pvRegsCpuVBase = gsSGXRegsCPUVAddr;
+ gpvOCPRegsLinAddr = gsSGXRegsCPUVAddr;
+#endif
+#endif
+
+#if defined(PDUMP)
+ {
+
+ static IMG_CHAR pszPDumpDevName[] = "SGXMEM";
+ gsSGXDeviceMap.pszPDumpDevName = pszPDumpDevName;
+ }
+#endif
+
+
+
+
+ return PVRSRV_OK;
+}
+
+
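+/* Builds an "SGX revision = major.minor.maintenance" string by mapping the
+   SGX registers and reading EUR_CR_CORE_REVISION (zero for NO_HARDWARE). */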
+static IMG_CHAR *SysCreateVersionString(void)
+{
+ static IMG_CHAR aszVersionString[100];
+ SYS_DATA *psSysData;
+ IMG_UINT32 ui32SGXRevision;
+ IMG_INT32 i32Count;
+#if !defined(NO_HARDWARE)
+ IMG_VOID *pvRegsLinAddr;
+
+ pvRegsLinAddr = OSMapPhysToLin(gsSGXDeviceMap.sRegsCpuPBase,
+ gsSGXDeviceMap.ui32RegsSize,
+ PVRSRV_HAP_UNCACHED|PVRSRV_HAP_KERNEL_ONLY,
+ IMG_NULL);
+ if(!pvRegsLinAddr)
+ {
+ return IMG_NULL;
+ }
+
+ ui32SGXRevision = OSReadHWReg((IMG_PVOID)((IMG_PBYTE)pvRegsLinAddr),
+ EUR_CR_CORE_REVISION);
+#else
+ ui32SGXRevision = 0;
+#endif
+
+ SysAcquireData(&psSysData);
+
+ i32Count = OSSNPrintf(aszVersionString, 100,
+ "SGX revision = %u.%u.%u",
+ (IMG_UINT)((ui32SGXRevision & EUR_CR_CORE_REVISION_MAJOR_MASK)
+ >> EUR_CR_CORE_REVISION_MAJOR_SHIFT),
+ (IMG_UINT)((ui32SGXRevision & EUR_CR_CORE_REVISION_MINOR_MASK)
+ >> EUR_CR_CORE_REVISION_MINOR_SHIFT),
+ (IMG_UINT)((ui32SGXRevision & EUR_CR_CORE_REVISION_MAINTENANCE_MASK)
+ >> EUR_CR_CORE_REVISION_MAINTENANCE_SHIFT)
+ );
+
+#if !defined(NO_HARDWARE)
+ OSUnMapPhysToLin(pvRegsLinAddr,
+ SYS_OMAP4430_SGX_REGS_SIZE,
+ PVRSRV_HAP_UNCACHED|PVRSRV_HAP_KERNEL_ONLY,
+ IMG_NULL);
+#endif
+
+ if(i32Count == -1)
+ {
+ return IMG_NULL;
+ }
+
+ return aszVersionString;
+}
+
+
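+/* System initialisation: sets up environment and timing data, locates the
+   SGX device, registers runtime PM and the SGX device node, enables the
+   clocks and initialises the device, then reserves the GP timer register. */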
+PVRSRV_ERROR SysInitialise(IMG_VOID)
+{
+ IMG_UINT32 i;
+ PVRSRV_ERROR eError;
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+#if !defined(PVR_NO_OMAP_TIMER)
+ IMG_CPU_PHYADDR TimerRegPhysBase;
+#endif
+#if !defined(SGX_DYNAMIC_TIMING_INFO)
+ SGX_TIMING_INFORMATION* psTimingInfo;
+#endif
+ gpsSysData = &gsSysData;
+ OSMemSet(gpsSysData, 0, sizeof(SYS_DATA));
+
+ gpsSysSpecificData = &gsSysSpecificData;
+ OSMemSet(gpsSysSpecificData, 0, sizeof(SYS_SPECIFIC_DATA));
+
+ gpsSysData->pvSysSpecificData = gpsSysSpecificData;
+
+ eError = OSInitEnvData(&gpsSysData->pvEnvSpecificData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to setup env structure"));
+ (IMG_VOID)SysDeinitialise(gpsSysData);
+ gpsSysData = IMG_NULL;
+ return eError;
+ }
+ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_ENVDATA);
+
+ gpsSysData->ui32NumDevices = SYS_DEVICE_COUNT;
+
+
+ for(i=0; i<SYS_DEVICE_COUNT; i++)
+ {
+ gpsSysData->sDeviceID[i].uiID = i;
+ gpsSysData->sDeviceID[i].bInUse = IMG_FALSE;
+ }
+
+ gpsSysData->psDeviceNodeList = IMG_NULL;
+ gpsSysData->psQueueList = IMG_NULL;
+
+ eError = SysInitialiseCommon(gpsSysData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed in SysInitialiseCommon"));
+ (IMG_VOID)SysDeinitialise(gpsSysData);
+ gpsSysData = IMG_NULL;
+ return eError;
+ }
+
+#if !defined(SGX_DYNAMIC_TIMING_INFO)
+
+ psTimingInfo = &gsSGXDeviceMap.sTimingInfo;
+ psTimingInfo->ui32CoreClockSpeed = SYS_SGX_CLOCK_SPEED;
+ psTimingInfo->ui32HWRecoveryFreq = SYS_SGX_HWRECOVERY_TIMEOUT_FREQ;
+#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
+ psTimingInfo->bEnableActivePM = IMG_TRUE;
+#else
+ psTimingInfo->bEnableActivePM = IMG_FALSE;
+#endif
+ psTimingInfo->ui32ActivePowManLatencyms = SYS_SGX_ACTIVE_POWER_LATENCY_MS;
+ psTimingInfo->ui32uKernelFreq = SYS_SGX_PDS_TIMER_FREQ;
+#endif
+
+
+
+ gpsSysSpecificData->ui32SrcClockDiv = 3;
+
+
+
+
+
+ eError = SysLocateDevices(gpsSysData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to locate devices"));
+ (IMG_VOID)SysDeinitialise(gpsSysData);
+ gpsSysData = IMG_NULL;
+ return eError;
+ }
+ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LOCATEDEV);
+
+ eError = SysPMRuntimeRegister();
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to register with OSPM!"));
+ (IMG_VOID)SysDeinitialise(gpsSysData);
+ gpsSysData = IMG_NULL;
+ return eError;
+ }
+ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_PM_RUNTIME);
+
+
+
+
+ eError = PVRSRVRegisterDevice(gpsSysData, SGXRegisterDevice,
+ DEVICE_SGX_INTERRUPT, &gui32SGXDeviceID);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to register device!"));
+ (IMG_VOID)SysDeinitialise(gpsSysData);
+ gpsSysData = IMG_NULL;
+ return eError;
+ }
+ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_REGDEV);
+
+
+
+
+
+ psDeviceNode = gpsSysData->psDeviceNodeList;
+ while(psDeviceNode)
+ {
+
+ switch(psDeviceNode->sDevId.eDeviceType)
+ {
+ case PVRSRV_DEVICE_TYPE_SGX:
+ {
+ DEVICE_MEMORY_INFO *psDevMemoryInfo;
+ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
+
+
+
+
+ psDeviceNode->psLocalDevMemArena = IMG_NULL;
+
+
+ psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
+ psDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap;
+
+
+ for(i=0; i<psDevMemoryInfo->ui32HeapCount; i++)
+ {
+ psDeviceMemoryHeap[i].ui32Attribs |= PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG;
+ }
+
+ gpsSGXDevNode = psDeviceNode;
+ gsSysSpecificData.psSGXDevNode = psDeviceNode;
+
+ break;
+ }
+ default:
+ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to find SGX device node!"));
+ return PVRSRV_ERROR_INIT_FAILURE;
+ }
+
+
+ psDeviceNode = psDeviceNode->psNext;
+ }
+
+ eError = EnableSystemClocksWrap(gpsSysData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to Enable system clocks (%d)", eError));
+ (IMG_VOID)SysDeinitialise(gpsSysData);
+ gpsSysData = IMG_NULL;
+ return eError;
+ }
+ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS);
+#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
+ eError = EnableSGXClocksWrap(gpsSysData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to Enable SGX clocks (%d)", eError));
+ (IMG_VOID)SysDeinitialise(gpsSysData);
+ gpsSysData = IMG_NULL;
+ return eError;
+ }
+#endif
+
+ eError = PVRSRVInitialiseDevice(gui32SGXDeviceID);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to initialise device!"));
+ (IMG_VOID)SysDeinitialise(gpsSysData);
+ gpsSysData = IMG_NULL;
+ return eError;
+ }
+ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_INITDEV);
+
+#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
+
+ DisableSGXClocks(gpsSysData);
+#endif
+
+#if !defined(PVR_NO_OMAP_TIMER)
+#if defined(PVR_OMAP_TIMER_BASE_IN_SYS_SPEC_DATA)
+ TimerRegPhysBase = gsSysSpecificData.sTimerRegPhysBase;
+#else
+ TimerRegPhysBase.uiAddr = SYS_OMAP4430_GP11TIMER_REGS_SYS_PHYS_BASE;
+#endif
+ gpsSysData->pvSOCTimerRegisterKM = IMG_NULL;
+ gpsSysData->hSOCTimerRegisterOSMemHandle = 0;
+ if (TimerRegPhysBase.uiAddr != 0)
+ {
+ OSReservePhys(TimerRegPhysBase,
+ 4,
+ PVRSRV_HAP_MULTI_PROCESS|PVRSRV_HAP_UNCACHED,
+ (IMG_VOID **)&gpsSysData->pvSOCTimerRegisterKM,
+ &gpsSysData->hSOCTimerRegisterOSMemHandle);
+ }
+#endif
+
+ return PVRSRV_OK;
+}
+
+
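+/* Post-initialisation: installs the MISR (and the LISR when interrupts are
+   in use) and records the system version string. */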
+PVRSRV_ERROR SysFinalise(IMG_VOID)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
+ eError = EnableSGXClocksWrap(gpsSysData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysFinalise: Failed to Enable SGX clocks (%d)", eError));
+ return eError;
+ }
+#endif
+
+ eError = OSInstallMISR(gpsSysData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysFinalise: Failed to install MISR"));
+ return eError;
+ }
+ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_MISR);
+
+#if defined(SYS_USING_INTERRUPTS)
+
+ eError = OSInstallDeviceLISR(gpsSysData, gsSGXDeviceMap.ui32IRQ, "SGX ISR", gpsSGXDevNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysFinalise: Failed to install ISR"));
+ return eError;
+ }
+ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LISR);
+#endif
+
+#if defined(__linux__)
+
+ gpsSysData->pszVersionString = SysCreateVersionString();
+ if (!gpsSysData->pszVersionString)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysFinalise: Failed to create a system version string"));
+ }
+ else
+ {
+ PVR_TRACE(("SysFinalise: Version string: %s", gpsSysData->pszVersionString));
+ }
+#endif
+
+#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
+
+ DisableSGXClocks(gpsSysData);
+#endif
+
+ gpsSysSpecificData->bSGXInitComplete = IMG_TRUE;
+
+ return eError;
+}
+
+
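+/* Tears down what SysInitialise/SysFinalise set up, in reverse order, using
+   the SYS_SPECIFIC_DATA flags to skip stages that were never reached. */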
+PVRSRV_ERROR SysDeinitialise (SYS_DATA *psSysData)
+{
+ PVRSRV_ERROR eError;
+
+ if(gpsSysData->pvSOCTimerRegisterKM)
+ {
+ OSUnReservePhys(gpsSysData->pvSOCTimerRegisterKM,
+ 4,
+ PVRSRV_HAP_MULTI_PROCESS|PVRSRV_HAP_UNCACHED,
+ gpsSysData->hSOCTimerRegisterOSMemHandle);
+ }
+
+#if defined(SYS_USING_INTERRUPTS)
+ if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LISR))
+ {
+ eError = OSUninstallDeviceLISR(psSysData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: OSUninstallDeviceLISR failed"));
+ return eError;
+ }
+ }
+#endif
+
+ if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_MISR))
+ {
+ eError = OSUninstallMISR(psSysData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: OSUninstallMISR failed"));
+ return eError;
+ }
+ }
+
+ if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_INITDEV))
+ {
+#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
+ PVR_ASSERT(SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS));
+
+ eError = EnableSGXClocksWrap(gpsSysData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: EnableSGXClocks failed"));
+ return eError;
+ }
+#endif
+
+
+ eError = PVRSRVDeinitialiseDevice (gui32SGXDeviceID);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: failed to de-init the device"));
+ return eError;
+ }
+ }
+
+ if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_PM_RUNTIME))
+ {
+ eError = SysPMRuntimeUnregister();
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: Failed to unregister with OSPM!"));
+ gpsSysData = IMG_NULL;
+ return eError;
+ }
+ }
+
+
+
+ if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS))
+ {
+ DisableSystemClocks(gpsSysData);
+ }
+
+ if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_ENVDATA))
+ {
+ eError = OSDeInitEnvData(gpsSysData->pvEnvSpecificData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: failed to de-init env structure"));
+ return eError;
+ }
+ }
+
+ SysDeinitialiseCommon(gpsSysData);
+
+#if defined(NO_HARDWARE) || defined(SGX_OCP_REGS_ENABLED)
+ if(gsSGXRegsCPUVAddr != IMG_NULL)
+ {
+#if defined(NO_HARDWARE)
+
+ OSBaseFreeContigMemory(SYS_OMAP4430_SGX_REGS_SIZE, gsSGXRegsCPUVAddr, gsSGXDeviceMap.sRegsCpuPBase);
+#else
+#if defined(SGX_OCP_REGS_ENABLED)
+ OSUnMapPhysToLin(gsSGXRegsCPUVAddr,
+ gsSGXDeviceMap.ui32RegsSize,
+ PVRSRV_HAP_UNCACHED|PVRSRV_HAP_KERNEL_ONLY,
+ IMG_NULL);
+
+ gpvOCPRegsLinAddr = IMG_NULL;
+#endif
+#endif
+ gsSGXRegsCPUVAddr = IMG_NULL;
+ gsSGXDeviceMap.pvRegsCpuVBase = gsSGXRegsCPUVAddr;
+ }
+#endif
+
+
+ gpsSysSpecificData->ui32SysSpecificData = 0;
+ gpsSysSpecificData->bSGXInitComplete = IMG_FALSE;
+
+ gpsSysData = IMG_NULL;
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE eDeviceType,
+ IMG_VOID **ppvDeviceMap)
+{
+
+ switch(eDeviceType)
+ {
+ case PVRSRV_DEVICE_TYPE_SGX:
+ {
+
+ *ppvDeviceMap = (IMG_VOID*)&gsSGXDeviceMap;
+
+ break;
+ }
+ default:
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysGetDeviceMemoryMap: unsupported device type"));
+ }
+ }
+ return PVRSRV_OK;
+}
+
+
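+/* UMA system: CPU, system and device physical addresses are identical, so
+   the address translation helpers below are straight copies. */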
+IMG_DEV_PHYADDR SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE eDeviceType,
+ IMG_CPU_PHYADDR CpuPAddr)
+{
+ IMG_DEV_PHYADDR DevPAddr;
+
+ PVR_UNREFERENCED_PARAMETER(eDeviceType);
+
+
+ DevPAddr.uiAddr = CpuPAddr.uiAddr;
+
+ return DevPAddr;
+}
+
+IMG_CPU_PHYADDR SysSysPAddrToCpuPAddr (IMG_SYS_PHYADDR sys_paddr)
+{
+ IMG_CPU_PHYADDR cpu_paddr;
+
+
+ cpu_paddr.uiAddr = sys_paddr.uiAddr;
+ return cpu_paddr;
+}
+
+IMG_SYS_PHYADDR SysCpuPAddrToSysPAddr (IMG_CPU_PHYADDR cpu_paddr)
+{
+ IMG_SYS_PHYADDR sys_paddr;
+
+
+ sys_paddr.uiAddr = cpu_paddr.uiAddr;
+ return sys_paddr;
+}
+
+
+IMG_DEV_PHYADDR SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE eDeviceType, IMG_SYS_PHYADDR SysPAddr)
+{
+ IMG_DEV_PHYADDR DevPAddr;
+
+ PVR_UNREFERENCED_PARAMETER(eDeviceType);
+
+
+ DevPAddr.uiAddr = SysPAddr.uiAddr;
+
+ return DevPAddr;
+}
+
+
+IMG_SYS_PHYADDR SysDevPAddrToSysPAddr(PVRSRV_DEVICE_TYPE eDeviceType, IMG_DEV_PHYADDR DevPAddr)
+{
+ IMG_SYS_PHYADDR SysPAddr;
+
+ PVR_UNREFERENCED_PARAMETER(eDeviceType);
+
+
+ SysPAddr.uiAddr = DevPAddr.uiAddr;
+
+ return SysPAddr;
+}
+
+
+IMG_VOID SysRegisterExternalDevice(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+}
+
+
+IMG_VOID SysRemoveExternalDevice(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+}
+
+
+IMG_UINT32 SysGetInterruptSource(SYS_DATA *psSysData,
+ PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVR_UNREFERENCED_PARAMETER(psSysData);
+#if defined(NO_HARDWARE)
+
+ return 0xFFFFFFFF;
+#else
+
+ return psDeviceNode->ui32SOCInterruptBit;
+#endif
+}
+
+
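+/* Acknowledges the SGX interrupt: clears the OCP IRQ status when interrupts
+   are routed through the OCP block (SGX_OCP_NO_INT_BYPASS), then reads back
+   EUR_CR_EVENT_HOST_CLEAR so the earlier clear is flushed out. */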
+IMG_VOID SysClearInterrupts(SYS_DATA* psSysData, IMG_UINT32 ui32ClearBits)
+{
+ PVR_UNREFERENCED_PARAMETER(ui32ClearBits);
+#if defined(NO_HARDWARE)
+ PVR_UNREFERENCED_PARAMETER(psSysData);
+#else
+#if defined(SGX_OCP_NO_INT_BYPASS)
+ OSWriteHWReg(gpvOCPRegsLinAddr, EUR_CR_OCP_IRQSTATUS_2, 0x1);
+#endif
+
+ OSReadHWReg(((PVRSRV_SGXDEV_INFO *)gpsSGXDevNode->pvDevice)->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR);
+#endif
+}
+
+#if defined(SGX_OCP_NO_INT_BYPASS)
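+/* Without the OCP interrupt bypass, SGX interrupts are routed through the OCP
+ * wrapper, so they are acknowledged and unmasked explicitly via the
+ * EUR_CR_OCP_IRQ* registers when the SGX clocks are enabled or disabled. */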
+IMG_VOID SysEnableSGXInterrupts(SYS_DATA *psSysData)
+{
+ SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *)psSysData->pvSysSpecificData;
+ if (SYS_SPECIFIC_DATA_TEST(psSysSpecData, SYS_SPECIFIC_DATA_ENABLE_LISR) && !SYS_SPECIFIC_DATA_TEST(psSysSpecData, SYS_SPECIFIC_DATA_IRQ_ENABLED))
+ {
+ OSWriteHWReg(gpvOCPRegsLinAddr, EUR_CR_OCP_IRQSTATUS_2, 0x1);
+ OSWriteHWReg(gpvOCPRegsLinAddr, EUR_CR_OCP_IRQENABLE_SET_2, 0x1);
+ SYS_SPECIFIC_DATA_SET(psSysSpecData, SYS_SPECIFIC_DATA_IRQ_ENABLED);
+ }
+}
+
+IMG_VOID SysDisableSGXInterrupts(SYS_DATA *psSysData)
+{
+ SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *)psSysData->pvSysSpecificData;
+
+ if (SYS_SPECIFIC_DATA_TEST(psSysSpecData, SYS_SPECIFIC_DATA_IRQ_ENABLED))
+ {
+ OSWriteHWReg(gpvOCPRegsLinAddr, EUR_CR_OCP_IRQENABLE_CLR_2, 0x1);
+ SYS_SPECIFIC_DATA_CLEAR(psSysSpecData, SYS_SPECIFIC_DATA_IRQ_ENABLED);
+ }
+}
+#endif
+
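+/* System-wide power handlers: on entry to D3 the device LISR is uninstalled
+ * and the system clocks are switched off; the SYS_SPECIFIC_DATA_PM_* flags
+ * record what has to be restored on the transition back to D0. */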
+PVRSRV_ERROR SysSystemPrePowerState(PVRSRV_SYS_POWER_STATE eNewPowerState)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ if (eNewPowerState == PVRSRV_SYS_POWER_STATE_D3)
+ {
+ PVR_TRACE(("SysSystemPrePowerState: Entering state D3"));
+
+#if defined(SYS_USING_INTERRUPTS)
+ if (SYS_SPECIFIC_DATA_TEST(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LISR))
+ {
+#if defined(SYS_CUSTOM_POWERLOCK_WRAP)
+ IMG_BOOL bWrapped = WrapSystemPowerChange(&gsSysSpecificData);
+#endif
+ eError = OSUninstallDeviceLISR(gpsSysData);
+#if defined(SYS_CUSTOM_POWERLOCK_WRAP)
+ if (bWrapped)
+ {
+ UnwrapSystemPowerChange(&gsSysSpecificData);
+ }
+#endif
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysSystemPrePowerState: OSUninstallDeviceLISR failed (%d)", eError));
+ return eError;
+ }
+ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_UNINSTALL_LISR);
+ SYS_SPECIFIC_DATA_CLEAR(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LISR);
+ }
+#endif
+
+ if (SYS_SPECIFIC_DATA_TEST(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS))
+ {
+ DisableSystemClocks(gpsSysData);
+
+ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_DISABLE_SYSCLOCKS);
+ SYS_SPECIFIC_DATA_CLEAR(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS);
+ }
+ }
+
+ return eError;
+}
+
+
+PVRSRV_ERROR SysSystemPostPowerState(PVRSRV_SYS_POWER_STATE eNewPowerState)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ if (eNewPowerState == PVRSRV_SYS_POWER_STATE_D0)
+ {
+ PVR_TRACE(("SysSystemPostPowerState: Entering state D0"));
+
+ if (SYS_SPECIFIC_DATA_TEST(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_DISABLE_SYSCLOCKS))
+ {
+ eError = EnableSystemClocksWrap(gpsSysData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysSystemPostPowerState: EnableSystemClocksWrap failed (%d)", eError));
+ return eError;
+ }
+ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS);
+ SYS_SPECIFIC_DATA_CLEAR(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_DISABLE_SYSCLOCKS);
+ }
+
+#if defined(SYS_USING_INTERRUPTS)
+ if (SYS_SPECIFIC_DATA_TEST(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_UNINSTALL_LISR))
+ {
+#if defined(SYS_CUSTOM_POWERLOCK_WRAP)
+ IMG_BOOL bWrapped = WrapSystemPowerChange(&gsSysSpecificData);
+#endif
+
+ eError = OSInstallDeviceLISR(gpsSysData, gsSGXDeviceMap.ui32IRQ, "SGX ISR", gpsSGXDevNode);
+#if defined(SYS_CUSTOM_POWERLOCK_WRAP)
+ if (bWrapped)
+ {
+ UnwrapSystemPowerChange(&gsSysSpecificData);
+ }
+#endif
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysSystemPostPowerState: OSInstallDeviceLISR failed to install ISR (%d)", eError));
+ return eError;
+ }
+ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LISR);
+ SYS_SPECIFIC_DATA_CLEAR(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_UNINSTALL_LISR);
+ }
+#endif
+ }
+ return eError;
+}
+
+
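+/* Per-device power handlers: with active power management enabled, the SGX
+ * clocks are gated when the device enters the OFF state and re-enabled when
+ * it leaves it. */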
+PVRSRV_ERROR SysDevicePrePowerState(IMG_UINT32 ui32DeviceIndex,
+ PVRSRV_DEV_POWER_STATE eNewPowerState,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
+{
+ PVR_UNREFERENCED_PARAMETER(eCurrentPowerState);
+
+ if (ui32DeviceIndex != gui32SGXDeviceID)
+ {
+ return PVRSRV_OK;
+ }
+
+#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
+ if (eNewPowerState == PVRSRV_DEV_POWER_STATE_OFF)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "SysDevicePrePowerState: SGX Entering state D3"));
+ DisableSGXClocks(gpsSysData);
+ }
+#else
+	PVR_UNREFERENCED_PARAMETER(eNewPowerState);
+#endif
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR SysDevicePostPowerState(IMG_UINT32 ui32DeviceIndex,
+ PVRSRV_DEV_POWER_STATE eNewPowerState,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PVR_UNREFERENCED_PARAMETER(eNewPowerState);
+
+ if (ui32DeviceIndex != gui32SGXDeviceID)
+ {
+ return eError;
+ }
+
+#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
+ if (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_OFF)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "SysDevicePostPowerState: SGX Leaving state D3"));
+ eError = EnableSGXClocksWrap(gpsSysData);
+ }
+#else
+ PVR_UNREFERENCED_PARAMETER(eCurrentPowerState);
+#endif
+
+ return eError;
+}
+
+
+PVRSRV_ERROR SysOEMFunction ( IMG_UINT32 ui32ID,
+ IMG_VOID *pvIn,
+ IMG_UINT32 ulInSize,
+ IMG_VOID *pvOut,
+ IMG_UINT32 ulOutSize)
+{
+ PVR_UNREFERENCED_PARAMETER(ui32ID);
+ PVR_UNREFERENCED_PARAMETER(pvIn);
+ PVR_UNREFERENCED_PARAMETER(ulInSize);
+ PVR_UNREFERENCED_PARAMETER(pvOut);
+ PVR_UNREFERENCED_PARAMETER(ulOutSize);
+
+ if ((ui32ID == OEM_GET_EXT_FUNCS) &&
+ (ulOutSize == sizeof(PVRSRV_DC_OEM_JTABLE)))
+ {
+
+ PVRSRV_DC_OEM_JTABLE *psOEMJTable = (PVRSRV_DC_OEM_JTABLE*) pvOut;
+ psOEMJTable->pfnOEMBridgeDispatch = &PVRSRV_BridgeDispatchKM;
+ return PVRSRV_OK;
+ }
+
+ return PVRSRV_ERROR_INVALID_PARAMS;
+}
--- /dev/null
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#if !defined(__SOCCONFIG_H__)
+#define __SOCCONFIG_H__
+
+#define VS_PRODUCT_NAME "OMAP4"
+
+#if defined(SGX540) && (SGX_CORE_REV == 120)
+#define SYS_SGX_CLOCK_SPEED 307200000
+#else
+#define SYS_SGX_CLOCK_SPEED 304742400
+#endif
+
+#define SYS_SGX_HWRECOVERY_TIMEOUT_FREQ (100)
+#define SYS_SGX_PDS_TIMER_FREQ (1000)
+
+#if !defined(SYS_SGX_ACTIVE_POWER_LATENCY_MS)
+#define SYS_SGX_ACTIVE_POWER_LATENCY_MS (2)
+#endif
+
+
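+/* Physical base address, size and interrupt line of the SGX register bank on
+ * OMAP4430, plus the GP timer 11 registers used by DEBUG/TIMING builds. */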
+#define SYS_OMAP4430_SGX_REGS_SYS_PHYS_BASE 0x56000000
+
+#define SYS_OMAP4430_SGX_REGS_SIZE 0xFFFF
+
+#define SYS_OMAP4430_SGX_IRQ 53
+
+#define SYS_OMAP4430_GP11TIMER_ENABLE_SYS_PHYS_BASE 0x48088038
+#define SYS_OMAP4430_GP11TIMER_REGS_SYS_PHYS_BASE 0x4808803C
+#define SYS_OMAP4430_GP11TIMER_TSICR_SYS_PHYS_BASE 0x48088054
+
+#if defined(__linux__)
+#define SYS_SGX_DEV_NAME "omap_gpu"
+#endif
+
+
+#endif
--- /dev/null
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#if !defined(__SYSINFO_H__)
+#define __SYSINFO_H__
+
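+/* Upper bound on the time spent waiting for hardware operations and the
+ * number of polling attempts; both limits are doubled when services run on
+ * workqueues. */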
+#if defined(PVR_LINUX_USING_WORKQUEUES)
+#define MAX_HW_TIME_US (1000000)
+#define WAIT_TRY_COUNT (20000)
+#else
+#define MAX_HW_TIME_US (500000)
+#define WAIT_TRY_COUNT (10000)
+#endif
+
+
+#define SYS_DEVICE_COUNT 15
+
+#endif
--- /dev/null
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#if !defined(__SYSLOCAL_H__)
+#define __SYSLOCAL_H__
+
+#if defined(__linux__)
+
+#include <linux/version.h>
+#include <linux/clk.h>
+#if defined(PVR_LINUX_USING_WORKQUEUES)
+#include <linux/mutex.h>
+#else
+#include <linux/spinlock.h>
+#endif
+#include <asm/atomic.h>
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26))
+#include <linux/semaphore.h>
+#include <linux/resource.h>
+#else
+#include <asm/semaphore.h>
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22))
+#include <asm/arch/resource.h>
+#endif
+#endif
+
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
+#if !defined(LDM_PLATFORM)
+#error "LDM_PLATFORM must be set"
+#endif
+#define PVR_LINUX_DYNAMIC_SGX_RESOURCE_INFO
+#include <linux/platform_device.h>
+#endif
+
+#if ((defined(DEBUG) || defined(TIMING)) && \
+ (LINUX_VERSION_CODE == KERNEL_VERSION(2,6,34))) && \
+ !defined(PVR_NO_OMAP_TIMER)
+#define PVR_OMAP4_TIMING_PRCM
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
+#include <plat/gpu.h>
+#if !defined(PVR_NO_OMAP_TIMER)
+#define PVR_OMAP_USE_DM_TIMER_API
+#include <plat/dmtimer.h>
+#endif
+#endif
+
+#if !defined(PVR_NO_OMAP_TIMER)
+#define PVR_OMAP_TIMER_BASE_IN_SYS_SPEC_DATA
+#endif
+#endif
+
+#if !defined(NO_HARDWARE) && \
+ defined(SYS_USING_INTERRUPTS) && \
+ defined(SGX540)
+#define SGX_OCP_REGS_ENABLED
+#endif
+
+#if defined(__linux__)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) && defined(SGX_OCP_REGS_ENABLED)
+#define SGX_OCP_NO_INT_BYPASS
+#endif
+#endif
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+
+IMG_VOID DisableSystemClocks(SYS_DATA *psSysData);
+PVRSRV_ERROR EnableSystemClocks(SYS_DATA *psSysData);
+
+IMG_VOID DisableSGXClocks(SYS_DATA *psSysData);
+PVRSRV_ERROR EnableSGXClocks(SYS_DATA *psSysData);
+
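+/* Bit flags kept in SYS_SPECIFIC_DATA.ui32SysSpecificData, recording which
+ * initialisation steps have completed so that deinitialisation and the power
+ * handlers only undo work that was actually done. */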
+#define SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS 0x00000001
+#define SYS_SPECIFIC_DATA_ENABLE_LISR 0x00000002
+#define SYS_SPECIFIC_DATA_ENABLE_MISR 0x00000004
+#define SYS_SPECIFIC_DATA_ENABLE_ENVDATA 0x00000008
+#define SYS_SPECIFIC_DATA_ENABLE_LOCDEV 0x00000010
+#define SYS_SPECIFIC_DATA_ENABLE_REGDEV 0x00000020
+#define SYS_SPECIFIC_DATA_ENABLE_PDUMPINIT 0x00000040
+#define SYS_SPECIFIC_DATA_ENABLE_INITDEV 0x00000080
+#define SYS_SPECIFIC_DATA_ENABLE_LOCATEDEV 0x00000100
+
+#define SYS_SPECIFIC_DATA_PM_UNINSTALL_LISR 0x00000200
+#define SYS_SPECIFIC_DATA_PM_DISABLE_SYSCLOCKS 0x00000400
+#define SYS_SPECIFIC_DATA_ENABLE_OCPREGS 0x00000800
+#define SYS_SPECIFIC_DATA_ENABLE_PM_RUNTIME 0x00001000
+#if defined(SGX_OCP_REGS_ENABLED) && defined(SGX_OCP_NO_INT_BYPASS)
+#define SYS_SPECIFIC_DATA_IRQ_ENABLED 0x00002000
+#endif
+
+#define SYS_SPECIFIC_DATA_SET(psSysSpecData, flag) ((IMG_VOID)((psSysSpecData)->ui32SysSpecificData |= (flag)))
+
+#define SYS_SPECIFIC_DATA_CLEAR(psSysSpecData, flag) ((IMG_VOID)((psSysSpecData)->ui32SysSpecificData &= ~(flag)))
+
+#define SYS_SPECIFIC_DATA_TEST(psSysSpecData, flag) (((psSysSpecData)->ui32SysSpecificData & (flag)) != 0)
+
+typedef struct _SYS_SPECIFIC_DATA_TAG_
+{
+ IMG_UINT32 ui32SysSpecificData;
+ PVRSRV_DEVICE_NODE *psSGXDevNode;
+ IMG_BOOL bSGXInitComplete;
+#if defined(PVR_OMAP_TIMER_BASE_IN_SYS_SPEC_DATA)
+ IMG_CPU_PHYADDR sTimerRegPhysBase;
+#endif
+#if !defined(__linux__)
+ IMG_BOOL bSGXClocksEnabled;
+#endif
+ IMG_UINT32 ui32SrcClockDiv;
+#if defined(__linux__)
+ IMG_BOOL bSysClocksOneTimeInit;
+ atomic_t sSGXClocksEnabled;
+#if defined(PVR_LINUX_USING_WORKQUEUES)
+ struct mutex sPowerLock;
+#else
+ IMG_BOOL bConstraintNotificationsEnabled;
+ spinlock_t sPowerLock;
+ atomic_t sPowerLockCPU;
+ spinlock_t sNotifyLock;
+ atomic_t sNotifyLockCPU;
+ IMG_BOOL bCallVDD2PostFunc;
+#endif
+#if defined(DEBUG) || defined(TIMING)
+ struct clk *psGPT11_FCK;
+ struct clk *psGPT11_ICK;
+#endif
+#if defined(PVR_OMAP_USE_DM_TIMER_API)
+ struct omap_dm_timer *psGPTimer;
+#endif
+#endif
+} SYS_SPECIFIC_DATA;
+
+extern SYS_SPECIFIC_DATA *gpsSysSpecificData;
+
+#if defined(SGX_OCP_REGS_ENABLED) && defined(SGX_OCP_NO_INT_BYPASS)
+IMG_VOID SysEnableSGXInterrupts(SYS_DATA* psSysData);
+IMG_VOID SysDisableSGXInterrupts(SYS_DATA* psSysData);
+#else
+#define SysEnableSGXInterrupts(psSysData)
+#define SysDisableSGXInterrupts(psSysData)
+#endif
+
+#if defined(SYS_CUSTOM_POWERLOCK_WRAP)
+IMG_BOOL WrapSystemPowerChange(SYS_SPECIFIC_DATA *psSysSpecData);
+IMG_VOID UnwrapSystemPowerChange(SYS_SPECIFIC_DATA *psSysSpecData);
+#endif
+
+#if defined(__linux__)
+
+PVRSRV_ERROR SysPMRuntimeRegister(void);
+PVRSRV_ERROR SysPMRuntimeUnregister(void);
+
+#else
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(SysPMRuntimeRegister)
+#endif
+static INLINE PVRSRV_ERROR SysPMRuntimeRegister(void)
+{
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(SysPMRuntimeUnregister)
+#endif
+static INLINE PVRSRV_ERROR SysPMRuntimeUnregister(void)
+{
+ return PVRSRV_OK;
+}
+
+#endif
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
+
+
--- /dev/null
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#if defined(__linux__)
+#include "sysutils_linux.c"
+#endif
+
/**********************************************************************
*
- * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
#include "sgxinfokm.h"
#include "syslocal.h"
-#if !defined(PVR_LINUX_USING_WORKQUEUES)
-#error "PVR_LINUX_USING_WORKQUEUES must be defined"
-#endif
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#define ONE_MHZ 1000000
#define HZ_TO_MHZ(m) ((m) / ONE_MHZ)
#define SGX_PARENT_CLOCK "core_ck"
#endif
+#if defined(LDM_PLATFORM) && !defined(PVR_DRI_DRM_NOT_PCI)
+extern struct platform_device *gpsPVRLDMDev;
+#endif
static PVRSRV_ERROR PowerLockWrap(SYS_SPECIFIC_DATA *psSysSpecData, IMG_BOOL bTryLock)
{
- if (!in_interrupt())
- {
- if (bTryLock)
- {
- int locked = mutex_trylock(&psSysSpecData->sPowerLock);
- if (locked == 0)
- {
- return PVRSRV_ERROR_RETRY;
- }
- }
- else
- {
- mutex_lock(&psSysSpecData->sPowerLock);
- }
- }
-
- return PVRSRV_OK;
+ if (!in_interrupt())
+ {
+ if (bTryLock)
+ {
+ int locked = mutex_trylock(&psSysSpecData->sPowerLock);
+ if (locked == 0)
+ {
+ return PVRSRV_ERROR_RETRY;
+ }
+ }
+ else
+ {
+ mutex_lock(&psSysSpecData->sPowerLock);
+ }
+ }
+
+ return PVRSRV_OK;
}
static IMG_VOID PowerLockUnwrap(SYS_SPECIFIC_DATA *psSysSpecData)
{
- if (!in_interrupt())
- {
- mutex_unlock(&psSysSpecData->sPowerLock);
- }
+ if (!in_interrupt())
+ {
+ mutex_unlock(&psSysSpecData->sPowerLock);
+ }
}
PVRSRV_ERROR SysPowerLockWrap(IMG_BOOL bTryLock)
{
- SYS_DATA *psSysData;
+ SYS_DATA *psSysData;
- SysAcquireData(&psSysData);
+ SysAcquireData(&psSysData);
- return PowerLockWrap(psSysData->pvSysSpecificData, bTryLock);
+ return PowerLockWrap(psSysData->pvSysSpecificData, bTryLock);
}
IMG_VOID SysPowerLockUnwrap(IMG_VOID)
{
- SYS_DATA *psSysData;
+ SYS_DATA *psSysData;
- SysAcquireData(&psSysData);
+ SysAcquireData(&psSysData);
- PowerLockUnwrap(psSysData->pvSysSpecificData);
+ PowerLockUnwrap(psSysData->pvSysSpecificData);
}
-
-
IMG_BOOL WrapSystemPowerChange(SYS_SPECIFIC_DATA *psSysSpecData)
{
return IMG_TRUE;
{
IMG_UINT32 rate;
-#if defined(NO_HARDWARE)
rate = SYS_SGX_CLOCK_SPEED;
-#else
+#if !defined(NO_HARDWARE)
PVR_ASSERT(atomic_read(&gpsSysSpecificData->sSGXClocksEnabled) != 0);
-
- rate = clk_get_rate(gpsSysSpecificData->psSGX_FCK);
- PVR_ASSERT(rate != 0);
#endif
psTimingInfo->ui32CoreClockSpeed = rate;
psTimingInfo->ui32HWRecoveryFreq = scale_prop_to_SGX_clock(SYS_SGX_HWRECOVERY_TIMEOUT_FREQ, rate);
{
#if !defined(NO_HARDWARE)
SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
- long lNewRate;
- long lRate;
- IMG_INT res;
if (atomic_read(&psSysSpecData->sSGXClocksEnabled) != 0)
PVR_DPF((PVR_DBG_MESSAGE, "EnableSGXClocks: Enabling SGX Clocks"));
-#if defined(DEBUG)
- {
-
- IMG_UINT32 rate = clk_get_rate(psSysSpecData->psMPU_CK);
- PVR_DPF((PVR_DBG_MESSAGE, "EnableSGXClocks: CPU Clock is %dMhz", HZ_TO_MHZ(rate)));
- }
-#endif
-
- res = clk_enable(psSysSpecData->psSGX_FCK);
- if (res < 0)
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: Couldn't enable SGX functional clock (%d)", res));
- return PVRSRV_ERROR_UNABLE_TO_ENABLE_CLOCK;
- }
-
- res = clk_enable(psSysSpecData->psSGX_ICK);
- if (res < 0)
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: Couldn't enable SGX interface clock (%d)", res));
-
- clk_disable(psSysSpecData->psSGX_FCK);
- return PVRSRV_ERROR_UNABLE_TO_ENABLE_CLOCK;
- }
-
- lNewRate = clk_round_rate(psSysSpecData->psSGX_FCK, SYS_SGX_CLOCK_SPEED + ONE_MHZ);
- if (lNewRate <= 0)
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: Couldn't round SGX functional clock rate"));
- return PVRSRV_ERROR_UNABLE_TO_ROUND_CLOCK_RATE;
- }
-
-
- lRate = clk_get_rate(psSysSpecData->psSGX_FCK);
- if (lRate != lNewRate)
+#if defined(LDM_PLATFORM) && !defined(PVR_DRI_DRM_NOT_PCI)
{
- res = clk_set_rate(psSysSpecData->psSGX_FCK, lNewRate);
+
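+		/* Take a runtime PM reference on the PVR platform device so the
+		   platform code powers up the SGX domain and enables its clocks. */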
+ int res = pm_runtime_get_sync(&gpsPVRLDMDev->dev);
if (res < 0)
{
- PVR_DPF((PVR_DBG_WARNING, "EnableSGXClocks: Couldn't set SGX functional clock rate (%d)", res));
+ PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: pm_runtime_get_sync failed (%d)", -res));
+ return PVRSRV_ERROR_UNABLE_TO_ENABLE_CLOCK;
}
}
-
-#if defined(DEBUG)
- {
- IMG_UINT32 rate = clk_get_rate(psSysSpecData->psSGX_FCK);
- PVR_DPF((PVR_DBG_MESSAGE, "EnableSGXClocks: SGX Functional Clock is %dMhz", HZ_TO_MHZ(rate)));
- }
#endif
+ SysEnableSGXInterrupts(psSysData);
atomic_set(&psSysSpecData->sSGXClocksEnabled, 1);
PVR_DPF((PVR_DBG_MESSAGE, "DisableSGXClocks: Disabling SGX Clocks"));
- if (psSysSpecData->psSGX_ICK)
- {
- clk_disable(psSysSpecData->psSGX_ICK);
- }
+ SysDisableSGXInterrupts(psSysData);
- if (psSysSpecData->psSGX_FCK)
+#if defined(LDM_PLATFORM) && !defined(PVR_DRI_DRM_NOT_PCI)
{
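+		/* Release the runtime PM reference taken in EnableSGXClocks. */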
- clk_disable(psSysSpecData->psSGX_FCK);
+ int res = pm_runtime_put_sync(&gpsPVRLDMDev->dev);
+ if (res < 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "DisableSGXClocks: pm_runtime_put_sync failed (%d)", -res));
+ }
}
+#endif
atomic_set(&psSysSpecData->sSGXClocksEnabled, 0);
#endif
}
-PVRSRV_ERROR EnableSystemClocks(SYS_DATA *psSysData)
+#if (defined(DEBUG) || defined(TIMING)) && !defined(PVR_NO_OMAP_TIMER)
+#if defined(PVR_OMAP_USE_DM_TIMER_API)
+#define GPTIMER_TO_USE 11
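+
+/* Acquire OMAP GP timer 11 via the dmtimer API, clock it from the system
+ * clock and leave it free-running; DEBUG/TIMING builds use it as a timestamp
+ * source through the register base saved in sTimerRegPhysBase. */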
+static PVRSRV_ERROR AcquireGPTimer(SYS_SPECIFIC_DATA *psSysSpecData)
{
- SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
- struct clk *psCLK;
- IMG_INT res;
- PVRSRV_ERROR eError;
+ PVR_ASSERT(psSysSpecData->psGPTimer == NULL);
-#if defined(DEBUG) || defined(TIMING)
- IMG_INT rate;
- struct clk *sys_ck;
- IMG_CPU_PHYADDR TimerRegPhysBase;
- IMG_HANDLE hTimerEnable;
- IMG_UINT32 *pui32TimerEnable;
+
+ psSysSpecData->psGPTimer = omap_dm_timer_request_specific(GPTIMER_TO_USE);
+ if (psSysSpecData->psGPTimer == NULL)
+ {
+
+ PVR_DPF((PVR_DBG_WARNING, "%s: omap_dm_timer_request_specific failed", __FUNCTION__));
+ return PVRSRV_ERROR_CLOCK_REQUEST_FAILED;
+ }
-#endif
+
+ omap_dm_timer_set_source(psSysSpecData->psGPTimer, OMAP_TIMER_SRC_SYS_CLK);
+ omap_dm_timer_enable(psSysSpecData->psGPTimer);
- PVR_TRACE(("EnableSystemClocks: Enabling System Clocks"));
+
+ omap_dm_timer_set_load_start(psSysSpecData->psGPTimer, 1, 0);
- if (!psSysSpecData->bSysClocksOneTimeInit)
+ omap_dm_timer_start(psSysSpecData->psGPTimer);
+
+
+ psSysSpecData->sTimerRegPhysBase.uiAddr = SYS_OMAP4430_GP11TIMER_REGS_SYS_PHYS_BASE;
+
+ return PVRSRV_OK;
+}
+
+static void ReleaseGPTimer(SYS_SPECIFIC_DATA *psSysSpecData)
+{
+ if (psSysSpecData->psGPTimer != NULL)
{
- mutex_init(&psSysSpecData->sPowerLock);
+
+ (void) omap_dm_timer_stop(psSysSpecData->psGPTimer);
- atomic_set(&psSysSpecData->sSGXClocksEnabled, 0);
+ omap_dm_timer_disable(psSysSpecData->psGPTimer);
- psCLK = clk_get(NULL, SGX_PARENT_CLOCK);
- if (IS_ERR(psCLK))
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSsystemClocks: Couldn't get Core Clock"));
- goto ExitError;
- }
- psSysSpecData->psCORE_CK = psCLK;
+ omap_dm_timer_free(psSysSpecData->psGPTimer);
- psCLK = clk_get(NULL, "sgx_fck");
- if (IS_ERR(psCLK))
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSsystemClocks: Couldn't get SGX Functional Clock"));
- goto ExitError;
- }
- psSysSpecData->psSGX_FCK = psCLK;
+ psSysSpecData->sTimerRegPhysBase.uiAddr = 0;
- psCLK = clk_get(NULL, "sgx_ick");
- if (IS_ERR(psCLK))
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get SGX Interface Clock"));
- goto ExitError;
- }
- psSysSpecData->psSGX_ICK = psCLK;
+ psSysSpecData->psGPTimer = NULL;
+ }
-#if defined(DEBUG)
- psCLK = clk_get(NULL, "mpu_ck");
- if (IS_ERR(psCLK))
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get MPU Clock"));
- goto ExitError;
- }
- psSysSpecData->psMPU_CK = psCLK;
+}
+#else
+static PVRSRV_ERROR AcquireGPTimer(SYS_SPECIFIC_DATA *psSysSpecData)
+{
+#if defined(PVR_OMAP4_TIMING_PRCM)
+ struct clk *psCLK;
+ IMG_INT res;
+ struct clk *sys_ck;
+ IMG_INT rate;
#endif
- res = clk_set_parent(psSysSpecData->psSGX_FCK, psSysSpecData->psCORE_CK);
- if (res < 0)
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't set SGX parent clock (%d)", res));
- goto ExitError;
- }
+ PVRSRV_ERROR eError;
- psSysSpecData->bSysClocksOneTimeInit = IMG_TRUE;
- }
+ IMG_CPU_PHYADDR sTimerRegPhysBase;
+ IMG_HANDLE hTimerEnable;
+ IMG_UINT32 *pui32TimerEnable;
+
+ PVR_ASSERT(psSysSpecData->sTimerRegPhysBase.uiAddr == 0);
-#if defined(DEBUG) || defined(TIMING)
+#if defined(PVR_OMAP4_TIMING_PRCM)
psCLK = clk_get(NULL, "gpt11_fck");
if (IS_ERR(psCLK))
{
PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get GPTIMER11 functional clock"));
- goto ExitUnRegisterConstraintNotifications;
+ goto ExitError;
}
psSysSpecData->psGPT11_FCK = psCLK;
if (IS_ERR(psCLK))
{
PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get GPTIMER11 interface clock"));
- goto ExitUnRegisterConstraintNotifications;
+ goto ExitError;
}
psSysSpecData->psGPT11_ICK = psCLK;
- sys_ck = clk_get(NULL, "sys_ck");
+ sys_ck = clk_get(NULL, "sys_clkin_ck");
if (IS_ERR(sys_ck))
{
PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get System clock"));
- goto ExitUnRegisterConstraintNotifications;
+ goto ExitError;
}
if(clk_get_parent(psSysSpecData->psGPT11_FCK) != sys_ck)
if (res < 0)
{
PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't set GPTIMER11 parent clock (%d)", res));
- goto ExitUnRegisterConstraintNotifications;
+ goto ExitError;
}
}
if (res < 0)
{
PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't enable GPTIMER11 functional clock (%d)", res));
- goto ExitUnRegisterConstraintNotifications;
+ goto ExitError;
}
res = clk_enable(psSysSpecData->psGPT11_ICK);
PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't enable GPTIMER11 interface clock (%d)", res));
goto ExitDisableGPT11FCK;
}
+#endif
- TimerRegPhysBase.uiAddr = SYS_OMAP3430_GP11TIMER_TSICR_SYS_PHYS_BASE;
- pui32TimerEnable = OSMapPhysToLin(TimerRegPhysBase,
+ sTimerRegPhysBase.uiAddr = SYS_OMAP4430_GP11TIMER_TSICR_SYS_PHYS_BASE;
+ pui32TimerEnable = OSMapPhysToLin(sTimerRegPhysBase,
4,
PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
&hTimerEnable);
goto ExitDisableGPT11ICK;
}
- rate = *pui32TimerEnable;
- if(!(rate & 4))
+ if(!(*pui32TimerEnable & 4))
{
PVR_TRACE(("Setting GPTIMER11 mode to posted (currently is non-posted)"));
- *pui32TimerEnable = rate | 4;
+ *pui32TimerEnable |= 4;
}
OSUnMapPhysToLin(pui32TimerEnable,
hTimerEnable);
- TimerRegPhysBase.uiAddr = SYS_OMAP3430_GP11TIMER_ENABLE_SYS_PHYS_BASE;
- pui32TimerEnable = OSMapPhysToLin(TimerRegPhysBase,
+ sTimerRegPhysBase.uiAddr = SYS_OMAP4430_GP11TIMER_ENABLE_SYS_PHYS_BASE;
+ pui32TimerEnable = OSMapPhysToLin(sTimerRegPhysBase,
4,
PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
&hTimerEnable);
PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
hTimerEnable);
-#endif
+ psSysSpecData->sTimerRegPhysBase = sTimerRegPhysBase;
eError = PVRSRV_OK;
+
goto Exit;
-#if defined(DEBUG) || defined(TIMING)
ExitDisableGPT11ICK:
+#if defined(PVR_OMAP4_TIMING_PRCM)
clk_disable(psSysSpecData->psGPT11_ICK);
ExitDisableGPT11FCK:
clk_disable(psSysSpecData->psGPT11_FCK);
-ExitUnRegisterConstraintNotifications:
-#endif
ExitError:
- eError = PVRSRV_ERROR_DISABLE_CLOCK_FAILURE;
+#endif
+ eError = PVRSRV_ERROR_CLOCK_REQUEST_FAILED;
Exit:
return eError;
}
-IMG_VOID DisableSystemClocks(SYS_DATA *psSysData)
+static void ReleaseGPTimer(SYS_SPECIFIC_DATA *psSysSpecData)
{
-#if defined(DEBUG) || defined(TIMING)
- SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
- IMG_CPU_PHYADDR TimerRegPhysBase;
IMG_HANDLE hTimerDisable;
IMG_UINT32 *pui32TimerDisable;
-#endif
-
- PVR_TRACE(("DisableSystemClocks: Disabling System Clocks"));
-
- DisableSGXClocks(psSysData);
+ if (psSysSpecData->sTimerRegPhysBase.uiAddr == 0)
+ {
+ return;
+ }
-#if defined(DEBUG) || defined(TIMING)
- TimerRegPhysBase.uiAddr = SYS_OMAP3430_GP11TIMER_ENABLE_SYS_PHYS_BASE;
- pui32TimerDisable = OSMapPhysToLin(TimerRegPhysBase,
+ pui32TimerDisable = OSMapPhysToLin(psSysSpecData->sTimerRegPhysBase,
4,
PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
&hTimerDisable);
hTimerDisable);
}
+ psSysSpecData->sTimerRegPhysBase.uiAddr = 0;
+
+#if defined(PVR_OMAP4_TIMING_PRCM)
clk_disable(psSysSpecData->psGPT11_ICK);
clk_disable(psSysSpecData->psGPT11_FCK);
+#endif
+}
+#endif
+#else
+static PVRSRV_ERROR AcquireGPTimer(SYS_SPECIFIC_DATA *psSysSpecData)
+{
+ PVR_UNREFERENCED_PARAMETER(psSysSpecData);
+ return PVRSRV_OK;
+}
+static void ReleaseGPTimer(SYS_SPECIFIC_DATA *psSysSpecData)
+{
+ PVR_UNREFERENCED_PARAMETER(psSysSpecData);
+}
#endif
+
+PVRSRV_ERROR EnableSystemClocks(SYS_DATA *psSysData)
+{
+ SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
+
+ PVR_TRACE(("EnableSystemClocks: Enabling System Clocks"));
+
+ if (!psSysSpecData->bSysClocksOneTimeInit)
+ {
+ mutex_init(&psSysSpecData->sPowerLock);
+
+ atomic_set(&psSysSpecData->sSGXClocksEnabled, 0);
+
+ psSysSpecData->bSysClocksOneTimeInit = IMG_TRUE;
+ }
+
+ return AcquireGPTimer(psSysSpecData);
+}
+
+IMG_VOID DisableSystemClocks(SYS_DATA *psSysData)
+{
+ SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
+
+ PVR_TRACE(("DisableSystemClocks: Disabling System Clocks"));
+
+
+ DisableSGXClocks(psSysData);
+
+ ReleaseGPTimer(psSysSpecData);
+}
+
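+/* Enable or disable Linux runtime PM on the PVR platform device; once
+ * enabled, EnableSGXClocks and DisableSGXClocks manage the SGX power domain
+ * with pm_runtime_get_sync and pm_runtime_put_sync. */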
+PVRSRV_ERROR SysPMRuntimeRegister(void)
+{
+#if defined(LDM_PLATFORM) && !defined(PVR_DRI_DRM_NOT_PCI)
+ pm_runtime_enable(&gpsPVRLDMDev->dev);
+#endif
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR SysPMRuntimeUnregister(void)
+{
+#if defined(LDM_PLATFORM) && !defined(PVR_DRI_DRM_NOT_PCI)
+ pm_runtime_disable(&gpsPVRLDMDev->dev);
+#endif
+ return PVRSRV_OK;
}
/**********************************************************************
*
- * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
/**********************************************************************
*
- * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
*
******************************************************************************/
+#include "sysconfig.h"
#include "services_headers.h"
#include "kerneldisplay.h"
#include "oemfuncs.h"
#include "sgxinfo.h"
#include "sgxinfokm.h"
#include "syslocal.h"
-#include "sysconfig.h"
#include "ocpdefs.h"
-#if !defined(NO_HARDWARE) && \
- defined(SYS_USING_INTERRUPTS) && \
- defined(SGX530) && (SGX_CORE_REV == 125)
-#define SGX_OCP_REGS_ENABLED
-#endif
-
SYS_DATA* gpsSysData = (SYS_DATA*)IMG_NULL;
SYS_DATA gsSysData;
#define DEVICE_SGX_INTERRUPT (1 << 0)
-#if defined(NO_HARDWARE)
+#if defined(NO_HARDWARE) || defined(SGX_OCP_REGS_ENABLED)
static IMG_CPU_VIRTADDR gsSGXRegsCPUVAddr;
#endif
+#if defined(PVR_LINUX_DYNAMIC_SGX_RESOURCE_INFO)
+extern struct platform_device *gpsPVRLDMDev;
+#endif
+
IMG_UINT32 PVRSRV_BridgeDispatchKM(IMG_UINT32 Ioctl,
IMG_BYTE *pInBuf,
IMG_UINT32 InBufLen,
IMG_UINT32 OutBufLen,
IMG_UINT32 *pdwBytesTransferred);
-#if defined(DEBUG) && defined(DUMP_OMAP34xx_CLOCKS) && defined(__linux__)
-
-#pragma GCC diagnostic ignored "-Wstrict-prototypes"
-#include <mach/clock.h>
-
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29))
-#include <../mach-omap2/clock_34xx.h>
-#define ONCHIP_CLKS onchip_clks
-#else
-#include <../mach-omap2/clock34xx.h>
-#define ONCHIP_CLKS onchip_34xx_clks
-#endif
-
-static void omap3_clk_recalc(struct clk *clk) {}
-static void omap3_followparent_recalc(struct clk *clk) {}
-static void omap3_propagate_rate(struct clk *clk) {}
-static void omap3_table_recalc(struct clk *clk) {}
-static long omap3_round_to_table_rate(struct clk *clk, unsigned long rate) { return 0; }
-static int omap3_select_table_rate(struct clk *clk, unsigned long rate) { return 0; }
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29))
-static void omap3_dpll_recalc(struct clk *clk, unsigned long parent_rate,
- u8 rate_storage) {}
-static void omap3_clkoutx2_recalc(struct clk *clk, unsigned long parent_rate,
- u8 rate_storage) {}
-static void omap3_dpll_allow_idle(struct clk *clk) {}
-static void omap3_dpll_deny_idle(struct clk *clk) {}
-static u32 omap3_dpll_autoidle_read(struct clk *clk) { return 0; }
-static int omap3_noncore_dpll_enable(struct clk *clk) { return 0; }
-static void omap3_noncore_dpll_disable(struct clk *clk) {}
-static int omap3_noncore_dpll_set_rate(struct clk *clk, unsigned long rate) { return 0; }
-static int omap3_core_dpll_m2_set_rate(struct clk *clk, unsigned long rate) { return 0; }
-void followparent_recalc(struct clk *clk, unsigned long new_parent_rate,
- u8 rate_storage) {}
-long omap2_dpll_round_rate(struct clk *clk, unsigned long target_rate) { return 0; }
-void omap2_clksel_recalc(struct clk *clk, unsigned long new_parent_rate,
- u8 rate_storage) {}
-long omap2_clksel_round_rate(struct clk *clk, unsigned long target_rate) { return 0; }
-int omap2_clksel_set_rate(struct clk *clk, unsigned long rate) { return 0; }
-void omap2_fixed_divisor_recalc(struct clk *clk, unsigned long new_parent_rate,
- u8 rate_storage) {}
-void omap2_init_clksel_parent(struct clk *clk) {}
-#endif
-
-static void dump_omap34xx_clocks(void)
-{
- struct clk **c;
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29))
- struct vdd_prcm_config *t1 = vdd1_rate_table;
- struct vdd_prcm_config *t2 = vdd2_rate_table;
-
- t1 = t1;
- t2 = t2;
-#else
-
- omap3_dpll_allow_idle(0);
- omap3_dpll_deny_idle(0);
- omap3_dpll_autoidle_read(0);
- omap3_clk_recalc(0);
- omap3_followparent_recalc(0);
- omap3_propagate_rate(0);
- omap3_table_recalc(0);
- omap3_round_to_table_rate(0, 0);
- omap3_select_table_rate(0, 0);
-#endif
-
- for(c = ONCHIP_CLKS; c < ONCHIP_CLKS + ARRAY_SIZE(ONCHIP_CLKS); c++)
- {
- struct clk *cp = *c, *copy;
- unsigned long rate;
- copy = clk_get(NULL, cp->name);
- if(!copy)
- continue;
- rate = clk_get_rate(copy);
- if (rate < 1000000)
- {
- PVR_DPF((PVR_DBG_ERROR, "%s: clock %s is %lu KHz (%lu Hz)", __func__, cp->name, rate/1000, rate));
- }
- else
- {
- PVR_DPF((PVR_DBG_ERROR, "%s: clock %s is %lu MHz (%lu Hz)", __func__, cp->name, rate/1000000, rate));
- }
- }
-}
-
-#else
-
-static INLINE void dump_omap34xx_clocks(void) {}
-
-#endif
-
#if defined(SGX_OCP_REGS_ENABLED)
-#define SYS_TI335x_OCP_REGS_SYS_PHYS_BASE (SYS_TI335x_SGX_REGS_SYS_PHYS_BASE + EUR_CR_OCP_REVISION)
-#define SYS_TI335x_OCP_REGS_SIZE 0x110
-
static IMG_CPU_VIRTADDR gpvOCPRegsLinAddr;
static PVRSRV_ERROR EnableSGXClocksWrap(SYS_DATA *psSysData)
{
PVRSRV_ERROR eError = EnableSGXClocks(psSysData);
+#if !defined(SGX_OCP_NO_INT_BYPASS)
if(eError == PVRSRV_OK)
{
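+		/* With the clocks running, program the OCP wrapper so that SGX
+		   interrupts bypass the OCP IRQ path. */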
- OSWriteHWReg(gpvOCPRegsLinAddr,
- EUR_CR_OCP_DEBUG_CONFIG - EUR_CR_OCP_REVISION,
- EUR_CR_OCP_DEBUG_CONFIG_THALIA_INT_BYPASS_MASK);
+ OSWriteHWReg(gpvOCPRegsLinAddr, EUR_CR_OCP_SYSCONFIG, 0x14);
+ OSWriteHWReg(gpvOCPRegsLinAddr, EUR_CR_OCP_DEBUG_CONFIG, EUR_CR_OCP_DEBUG_CONFIG_THALIA_INT_BYPASS_MASK);
}
-
+#endif
return eError;
}
if(eError == PVRSRV_OK)
{
- EnableSGXClocksWrap(psSysData);
+ eError = EnableSGXClocksWrap(psSysData);
+ if (eError != PVRSRV_OK)
+ {
+ DisableSystemClocks(psSysData);
+ }
}
#endif
#if defined(NO_HARDWARE)
PVRSRV_ERROR eError;
IMG_CPU_PHYADDR sCpuPAddr;
+#else
+#if defined(PVR_LINUX_DYNAMIC_SGX_RESOURCE_INFO)
+ struct resource *dev_res;
+ int dev_irq;
+#endif
#endif
PVR_UNREFERENCED_PARAMETER(psSysData);
#if defined(NO_HARDWARE)
- eError = OSBaseAllocContigMemory(SYS_TI335x_SGX_REGS_SIZE,
+ gsSGXDeviceMap.ui32RegsSize = SYS_TI335x_SGX_REGS_SIZE;
+
+ eError = OSBaseAllocContigMemory(gsSGXDeviceMap.ui32RegsSize,
&gsSGXRegsCPUVAddr,
&sCpuPAddr);
if(eError != PVRSRV_OK)
}
gsSGXDeviceMap.sRegsCpuPBase = sCpuPAddr;
gsSGXDeviceMap.sRegsSysPBase = SysCpuPAddrToSysPAddr(gsSGXDeviceMap.sRegsCpuPBase);
- gsSGXDeviceMap.ui32RegsSize = SYS_TI335x_SGX_REGS_SIZE;
#if defined(__linux__)
gsSGXDeviceMap.pvRegsCpuVBase = gsSGXRegsCPUVAddr;
gsSGXDeviceMap.pvRegsCpuVBase = IMG_NULL;
#endif
- OSMemSet(gsSGXRegsCPUVAddr, 0, SYS_TI335x_SGX_REGS_SIZE);
+ OSMemSet(gsSGXRegsCPUVAddr, 0, gsSGXDeviceMap.ui32RegsSize);
gsSGXDeviceMap.ui32IRQ = 0;
#else
+#if defined(PVR_LINUX_DYNAMIC_SGX_RESOURCE_INFO)
+
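+	/* Take the SGX register base, size and IRQ from the platform device
+	   resources rather than from the static defines in the #else branch. */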
+ dev_res = platform_get_resource(gpsPVRLDMDev, IORESOURCE_MEM, 0);
+ if (dev_res == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: platform_get_resource failed", __FUNCTION__));
+ return PVRSRV_ERROR_INVALID_DEVICE;
+ }
+ dev_irq = platform_get_irq(gpsPVRLDMDev, 0);
+ if (dev_irq < 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: platform_get_irq failed (%d)", __FUNCTION__, -dev_irq));
+ return PVRSRV_ERROR_INVALID_DEVICE;
+ }
+
+ gsSGXDeviceMap.sRegsSysPBase.uiAddr = dev_res->start;
+ gsSGXDeviceMap.sRegsCpuPBase =
+ SysSysPAddrToCpuPAddr(gsSGXDeviceMap.sRegsSysPBase);
+ PVR_TRACE(("SGX register base: 0x%lx", (unsigned long)gsSGXDeviceMap.sRegsCpuPBase.uiAddr));
+
+ gsSGXDeviceMap.ui32RegsSize = (unsigned int)(dev_res->end - dev_res->start);
+ PVR_TRACE(("SGX register size: %d",gsSGXDeviceMap.ui32RegsSize));
+
+ gsSGXDeviceMap.ui32IRQ = dev_irq;
+ PVR_TRACE(("SGX IRQ: %d", gsSGXDeviceMap.ui32IRQ));
+#else
gsSGXDeviceMap.sRegsSysPBase.uiAddr = SYS_TI335x_SGX_REGS_SYS_PHYS_BASE;
gsSGXDeviceMap.sRegsCpuPBase = SysSysPAddrToCpuPAddr(gsSGXDeviceMap.sRegsSysPBase);
gsSGXDeviceMap.ui32RegsSize = SYS_TI335x_SGX_REGS_SIZE;
gsSGXDeviceMap.ui32IRQ = SYS_TI335x_SGX_IRQ;
+#endif
+#if defined(SGX_OCP_REGS_ENABLED)
+ gsSGXRegsCPUVAddr = OSMapPhysToLin(gsSGXDeviceMap.sRegsCpuPBase,
+ gsSGXDeviceMap.ui32RegsSize,
+ PVRSRV_HAP_UNCACHED|PVRSRV_HAP_KERNEL_ONLY,
+ IMG_NULL);
+
+ if (gsSGXRegsCPUVAddr == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysLocateDevices: Failed to map SGX registers"));
+ return PVRSRV_ERROR_BAD_MAPPING;
+ }
+
+
+ gsSGXDeviceMap.pvRegsCpuVBase = gsSGXRegsCPUVAddr;
+ gpvOCPRegsLinAddr = gsSGXRegsCPUVAddr;
+#endif
#endif
#if defined(PDUMP)
}
-IMG_CHAR *SysCreateVersionString(IMG_CPU_PHYADDR sRegRegion)
+static IMG_CHAR *SysCreateVersionString(void)
{
static IMG_CHAR aszVersionString[100];
SYS_DATA *psSysData;
#if !defined(NO_HARDWARE)
IMG_VOID *pvRegsLinAddr;
- pvRegsLinAddr = OSMapPhysToLin(sRegRegion,
+ pvRegsLinAddr = OSMapPhysToLin(gsSGXDeviceMap.sRegsCpuPBase,
SYS_TI335x_SGX_REGS_SIZE,
PVRSRV_HAP_UNCACHED|PVRSRV_HAP_KERNEL_ONLY,
IMG_NULL);
IMG_UINT32 i;
PVRSRV_ERROR eError;
PVRSRV_DEVICE_NODE *psDeviceNode;
+#if !defined(PVR_NO_OMAP_TIMER)
IMG_CPU_PHYADDR TimerRegPhysBase;
+#endif
#if !defined(SGX_DYNAMIC_TIMING_INFO)
SGX_TIMING_INFORMATION* psTimingInfo;
#endif
return eError;
}
- TimerRegPhysBase.uiAddr = SYS_TI335x_GP7TIMER_REGS_SYS_PHYS_BASE;
- gpsSysData->pvSOCTimerRegisterKM = IMG_NULL;
- gpsSysData->hSOCTimerRegisterOSMemHandle = 0;
- OSReservePhys(TimerRegPhysBase,
- 4,
- PVRSRV_HAP_MULTI_PROCESS|PVRSRV_HAP_UNCACHED,
- (IMG_VOID **)&gpsSysData->pvSOCTimerRegisterKM,
- &gpsSysData->hSOCTimerRegisterOSMemHandle);
-
#if !defined(SGX_DYNAMIC_TIMING_INFO)
psTimingInfo = &gsSGXDeviceMap.sTimingInfo;
psTimingInfo->ui32CoreClockSpeed = SYS_SGX_CLOCK_SPEED;
-
psTimingInfo->ui32HWRecoveryFreq = SYS_SGX_HWRECOVERY_TIMEOUT_FREQ;
#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
psTimingInfo->bEnableActivePM = IMG_TRUE;
return eError;
}
SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LOCATEDEV);
-
-#if defined(SGX_OCP_REGS_ENABLED)
+#if 0
+ eError = SysPMRuntimeRegister();
+ if (eError != PVRSRV_OK)
{
- IMG_SYS_PHYADDR sOCPRegsSysPBase;
- IMG_CPU_PHYADDR sOCPRegsCpuPBase;
-
- sOCPRegsSysPBase.uiAddr = SYS_TI335x_OCP_REGS_SYS_PHYS_BASE;
- sOCPRegsCpuPBase = SysSysPAddrToCpuPAddr(sOCPRegsSysPBase);
-
- gpvOCPRegsLinAddr = OSMapPhysToLin(sOCPRegsCpuPBase,
- SYS_TI335x_OCP_REGS_SIZE,
- PVRSRV_HAP_UNCACHED|PVRSRV_HAP_KERNEL_ONLY,
- IMG_NULL);
-
- if (gpvOCPRegsLinAddr == IMG_NULL)
- {
- PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to map OCP registers"));
- return PVRSRV_ERROR_BAD_MAPPING;
- }
+ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to register with OSPM!"));
+ (IMG_VOID)SysDeinitialise(gpsSysData);
+ gpsSysData = IMG_NULL;
+ return eError;
}
+ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_PM_RUNTIME);
#endif
-
}
#endif
- dump_omap34xx_clocks();
-
eError = PVRSRVInitialiseDevice(gui32SGXDeviceID);
if (eError != PVRSRV_OK)
{
DisableSGXClocks(gpsSysData);
#endif
+#if !defined(PVR_NO_OMAP_TIMER)
+#if defined(PVR_OMAP_TIMER_BASE_IN_SYS_SPEC_DATA)
+ TimerRegPhysBase = gsSysSpecificData.sTimerRegPhysBase;
+#else
+ TimerRegPhysBase.uiAddr = SYS_TI335x_GP7TIMER_REGS_SYS_PHYS_BASE;
+#endif
+ gpsSysData->pvSOCTimerRegisterKM = IMG_NULL;
+ gpsSysData->hSOCTimerRegisterOSMemHandle = 0;
+ if (TimerRegPhysBase.uiAddr != 0)
+ {
+ OSReservePhys(TimerRegPhysBase,
+ 4,
+ PVRSRV_HAP_MULTI_PROCESS|PVRSRV_HAP_UNCACHED,
+ (IMG_VOID **)&gpsSysData->pvSOCTimerRegisterKM,
+ &gpsSysData->hSOCTimerRegisterOSMemHandle);
+ }
+#endif
+
return PVRSRV_OK;
}
eError = EnableSGXClocksWrap(gpsSysData);
if (eError != PVRSRV_OK)
{
- PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to Enable SGX clocks (%d)", eError));
- (IMG_VOID)SysDeinitialise(gpsSysData);
- gpsSysData = IMG_NULL;
+ PVR_DPF((PVR_DBG_ERROR,"SysFinalise: Failed to Enable SGX clocks (%d)", eError));
return eError;
}
#endif
-#if defined(SYS_USING_INTERRUPTS)
-
eError = OSInstallMISR(gpsSysData);
if (eError != PVRSRV_OK)
{
PVR_DPF((PVR_DBG_ERROR,"SysFinalise: Failed to install MISR"));
- (IMG_VOID)SysDeinitialise(gpsSysData);
- gpsSysData = IMG_NULL;
return eError;
}
SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_MISR);
+#if defined(SYS_USING_INTERRUPTS)
eError = OSInstallDeviceLISR(gpsSysData, gsSGXDeviceMap.ui32IRQ, "SGX ISR", gpsSGXDevNode);
if (eError != PVRSRV_OK)
{
PVR_DPF((PVR_DBG_ERROR,"SysFinalise: Failed to install ISR"));
- (IMG_VOID)SysDeinitialise(gpsSysData);
- gpsSysData = IMG_NULL;
return eError;
}
SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LISR);
#endif
+#if defined(__linux__)
- gpsSysData->pszVersionString = SysCreateVersionString(gsSGXDeviceMap.sRegsCpuPBase);
+ gpsSysData->pszVersionString = SysCreateVersionString();
if (!gpsSysData->pszVersionString)
{
PVR_DPF((PVR_DBG_ERROR,"SysFinalise: Failed to create a system version string"));
}
else
{
- PVR_DPF((PVR_DBG_WARNING, "SysFinalise: Version string: %s", gpsSysData->pszVersionString));
+ PVR_TRACE(("SysFinalise: Version string: %s", gpsSysData->pszVersionString));
}
+#endif
#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
{
PVRSRV_ERROR eError;
+ if(gpsSysData->pvSOCTimerRegisterKM)
+ {
+ OSUnReservePhys(gpsSysData->pvSOCTimerRegisterKM,
+ 4,
+ PVRSRV_HAP_MULTI_PROCESS|PVRSRV_HAP_UNCACHED,
+ gpsSysData->hSOCTimerRegisterOSMemHandle);
+ }
+
#if defined(SYS_USING_INTERRUPTS)
if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LISR))
{
return eError;
}
}
+#endif
if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_MISR))
{
return eError;
}
}
-#else
- PVR_UNREFERENCED_PARAMETER(psSysData);
-#endif
if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_INITDEV))
{
return eError;
}
}
-
-#if defined(SGX_OCP_REGS_ENABLED)
- OSUnMapPhysToLin(gpvOCPRegsLinAddr,
- SYS_TI335x_OCP_REGS_SIZE,
- PVRSRV_HAP_UNCACHED|PVRSRV_HAP_KERNEL_ONLY,
- IMG_NULL);
+#if 0
+ if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_PM_RUNTIME))
+ {
+ eError = SysPMRuntimeUnregister();
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: Failed to unregister with OSPM!"));
+ gpsSysData = IMG_NULL;
+ return eError;
+ }
+ }
#endif
-
if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS))
}
}
- if(gpsSysData->pvSOCTimerRegisterKM)
- {
- OSUnReservePhys(gpsSysData->pvSOCTimerRegisterKM,
- 4,
- PVRSRV_HAP_MULTI_PROCESS|PVRSRV_HAP_UNCACHED,
- gpsSysData->hSOCTimerRegisterOSMemHandle);
- }
-
SysDeinitialiseCommon(gpsSysData);
-#if defined(NO_HARDWARE)
- if(SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LOCATEDEV))
+#if defined(NO_HARDWARE) || defined(SGX_OCP_REGS_ENABLED)
+ if(gsSGXRegsCPUVAddr != IMG_NULL)
{
+#if defined(NO_HARDWARE)
OSBaseFreeContigMemory(SYS_TI335x_SGX_REGS_SIZE, gsSGXRegsCPUVAddr, gsSGXDeviceMap.sRegsCpuPBase);
- }
+#else
+#if defined(SGX_OCP_REGS_ENABLED)
+ OSUnMapPhysToLin(gsSGXRegsCPUVAddr,
+ gsSGXDeviceMap.ui32RegsSize,
+ PVRSRV_HAP_UNCACHED|PVRSRV_HAP_KERNEL_ONLY,
+ IMG_NULL);
+
+ gpvOCPRegsLinAddr = IMG_NULL;
#endif
+#endif
+ gsSGXRegsCPUVAddr = IMG_NULL;
+ gsSGXDeviceMap.pvRegsCpuVBase = gsSGXRegsCPUVAddr;
+ }
+#endif
gpsSysSpecificData->ui32SysSpecificData = 0;
IMG_VOID SysClearInterrupts(SYS_DATA* psSysData, IMG_UINT32 ui32ClearBits)
{
- PVR_UNREFERENCED_PARAMETER(psSysData);
PVR_UNREFERENCED_PARAMETER(ui32ClearBits);
-
+#if defined(NO_HARDWARE)
+ PVR_UNREFERENCED_PARAMETER(psSysData);
+#else
+#if defined(SGX_OCP_NO_INT_BYPASS)
+ OSWriteHWReg(gpvOCPRegsLinAddr, EUR_CR_OCP_IRQSTATUS_2, 0x1);
+#endif
- OSReadHWReg(((PVRSRV_SGXDEV_INFO *)gpsSGXDevNode->pvDevice)->pvRegsBaseKM,
- EUR_CR_EVENT_HOST_CLEAR);
+ OSReadHWReg(((PVRSRV_SGXDEV_INFO *)gpsSGXDevNode->pvDevice)->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR);
+#endif
}
+#if defined(SGX_OCP_NO_INT_BYPASS)
+IMG_VOID SysEnableSGXInterrupts(SYS_DATA *psSysData)
+{
+ SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *)psSysData->pvSysSpecificData;
+ if (SYS_SPECIFIC_DATA_TEST(psSysSpecData, SYS_SPECIFIC_DATA_ENABLE_LISR) && !SYS_SPECIFIC_DATA_TEST(psSysSpecData, SYS_SPECIFIC_DATA_IRQ_ENABLED))
+ {
+ OSWriteHWReg(gpvOCPRegsLinAddr, EUR_CR_OCP_IRQSTATUS_2, 0x1);
+ OSWriteHWReg(gpvOCPRegsLinAddr, EUR_CR_OCP_IRQENABLE_SET_2, 0x1);
+ SYS_SPECIFIC_DATA_SET(psSysSpecData, SYS_SPECIFIC_DATA_IRQ_ENABLED);
+ }
+}
+
+IMG_VOID SysDisableSGXInterrupts(SYS_DATA *psSysData)
+{
+ SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *)psSysData->pvSysSpecificData;
+
+ if (SYS_SPECIFIC_DATA_TEST(psSysSpecData, SYS_SPECIFIC_DATA_IRQ_ENABLED))
+ {
+ OSWriteHWReg(gpvOCPRegsLinAddr, EUR_CR_OCP_IRQENABLE_CLR_2, 0x1);
+ SYS_SPECIFIC_DATA_CLEAR(psSysSpecData, SYS_SPECIFIC_DATA_IRQ_ENABLED);
+ }
+}
+#endif
PVRSRV_ERROR SysSystemPrePowerState(PVRSRV_SYS_POWER_STATE eNewPowerState)
{
#if !defined(__SOCCONFIG_H__)
#define __SOCCONFIG_H__
-#include "syscommon.h"
#define VS_PRODUCT_NAME "TI335x"
#define SYS_SGX_PDS_TIMER_FREQ (1000)
#if !defined(SYS_SGX_ACTIVE_POWER_LATENCY_MS)
-#define SYS_SGX_ACTIVE_POWER_LATENCY_MS (1)
+#define SYS_SGX_ACTIVE_POWER_LATENCY_MS (2)
#endif
#define SYS_TI335x_SGX_IRQ 37
#define SYS_TI335x_GP7TIMER_ENABLE_SYS_PHYS_BASE 0x4804A038
-#define SYS_TI335x_GP7TIMER_REGS_SYS_PHYS_BASE 0x4804A03C
-#define SYS_TI335x_GP7TIMER_TSICR_SYS_PHYS_BASE 0x4804A054
+#define SYS_TI335x_GP7TIMER_REGS_SYS_PHYS_BASE 0x4804A03C
+#define SYS_TI335x_GP7TIMER_TSICR_SYS_PHYS_BASE 0x4804A054
+
#endif
/**********************************************************************
*
- * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
#if defined(PVR_LINUX_USING_WORKQUEUES)
#define MAX_HW_TIME_US (1000000)
+#define WAIT_TRY_COUNT (20000)
#else
#define MAX_HW_TIME_US (500000)
+#define WAIT_TRY_COUNT (10000)
#endif
-#define WAIT_TRY_COUNT (10000)
-#define SYS_DEVICE_COUNT 3
+#define SYS_DEVICE_COUNT 15
#endif
/**********************************************************************
*
- * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
#endif
#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
+#if !defined(LDM_PLATFORM)
+#error "LDM_PLATFORM must be set"
+#endif
+/*#define PVR_LINUX_DYNAMIC_SGX_RESOURCE_INFO*/
+//#include <linux/platform_device.h>
+#endif
+
+#if ((defined(DEBUG) || defined(TIMING)) && \
+ (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))) && \
+ !defined(PVR_NO_OMAP_TIMER)
+#define PVR_OMAP3_TIMING_PRCM
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
+/*#include <plat/gpu.h>*/
+#if !defined(PVR_NO_OMAP_TIMER)
+/*#define PVR_OMAP_USE_DM_TIMER_API*/
+//#include <plat/dmtimer.h>
+#endif
+#endif
+
+#if !defined(PVR_NO_OMAP_TIMER)
+//#define PVR_OMAP_TIMER_BASE_IN_SYS_SPEC_DATA
+#endif
#endif
+#if !defined(NO_HARDWARE) && \
+ defined(SYS_USING_INTERRUPTS) && \
+ defined(SGX540)
+#define SGX_OCP_REGS_ENABLED
+#endif
+
+#if defined(__linux__)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) && defined(SGX_OCP_REGS_ENABLED)
+/*#define SGX_OCP_NO_INT_BYPASS*/
+#endif
+#endif
+
#if defined (__cplusplus)
extern "C" {
#endif
-IMG_CHAR *SysCreateVersionString(IMG_CPU_PHYADDR sRegRegion);
-
IMG_VOID DisableSystemClocks(SYS_DATA *psSysData);
PVRSRV_ERROR EnableSystemClocks(SYS_DATA *psSysData);
#define SYS_SPECIFIC_DATA_PM_UNINSTALL_LISR 0x00000200
#define SYS_SPECIFIC_DATA_PM_DISABLE_SYSCLOCKS 0x00000400
+#define SYS_SPECIFIC_DATA_ENABLE_OCPREGS 0x00000800
+#define SYS_SPECIFIC_DATA_ENABLE_PM_RUNTIME 0x00001000
+#if defined(SGX_OCP_REGS_ENABLED) && defined(SGX_OCP_NO_INT_BYPASS)
+#define SYS_SPECIFIC_DATA_IRQ_ENABLED 0x00002000
+#endif
#define SYS_SPECIFIC_DATA_SET(psSysSpecData, flag) ((IMG_VOID)((psSysSpecData)->ui32SysSpecificData |= (flag)))
IMG_UINT32 ui32SysSpecificData;
PVRSRV_DEVICE_NODE *psSGXDevNode;
IMG_BOOL bSGXInitComplete;
+#if defined(PVR_OMAP_TIMER_BASE_IN_SYS_SPEC_DATA)
+ IMG_CPU_PHYADDR sTimerRegPhysBase;
+#endif
#if !defined(__linux__)
IMG_BOOL bSGXClocksEnabled;
#endif
atomic_t sNotifyLockCPU;
IMG_BOOL bCallVDD2PostFunc;
#endif
- struct clk *psCORE_CK;
- struct clk *psSGX_FCK;
- struct clk *psSGX_ICK;
- struct clk *psMPU_CK;
+ struct clk *psCORE_CK;
+ struct clk *psSGX_FCK;
+ struct clk *psSGX_ICK;
+
#if defined(DEBUG) || defined(TIMING)
struct clk *psGPT11_FCK;
struct clk *psGPT11_ICK;
#endif
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22))
- struct constraint_handle *pVdd2Handle;
-#endif
+#if defined(PVR_OMAP_USE_DM_TIMER_API)
+ struct omap_dm_timer *psGPTimer;
+#endif
#endif
} SYS_SPECIFIC_DATA;
extern SYS_SPECIFIC_DATA *gpsSysSpecificData;
+#if defined(SGX_OCP_REGS_ENABLED) && defined(SGX_OCP_NO_INT_BYPASS)
+IMG_VOID SysEnableSGXInterrupts(SYS_DATA* psSysData);
+IMG_VOID SysDisableSGXInterrupts(SYS_DATA* psSysData);
+#else
+#define SysEnableSGXInterrupts(psSysData)
+#define SysDisableSGXInterrupts(psSysData)
+#endif
+
#if defined(SYS_CUSTOM_POWERLOCK_WRAP)
IMG_BOOL WrapSystemPowerChange(SYS_SPECIFIC_DATA *psSysSpecData);
IMG_VOID UnwrapSystemPowerChange(SYS_SPECIFIC_DATA *psSysSpecData);
#endif
+#if defined(__linux__)
+
+PVRSRV_ERROR SysPMRuntimeRegister(void);
+PVRSRV_ERROR SysPMRuntimeUnregister(void);
+
+#else
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(SysPMRuntimeRegister)
+#endif
+static INLINE PVRSRV_ERROR SysPMRuntimeRegister(void)
+{
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(SysPMRuntimeUnregister)
+#endif
+static INLINE PVRSRV_ERROR SysPMRuntimeUnregister(void)
+{
+ return PVRSRV_OK;
+}
+
+#endif
+
#if defined(__cplusplus)
}
#endif
/**********************************************************************
*
- * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
******************************************************************************/
#if defined(__linux__)
-#if defined(PVR_LINUX_USING_WORKQUEUES)
-#include "sysutils_linux_wqueue_compat.c"
-#else
#include "sysutils_linux.c"
#endif
-#endif
/**********************************************************************
*
- * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/hardirq.h>
-#include <linux/spinlock.h>
-#include <asm/bug.h>
-
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26))
-#include <linux/semaphore.h>
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,31))
-//#include <plat/resource.h>
-#include <plat/omap-pm.h>
-#else
-#include <mach/resource.h>
-#endif
-#else
-#include <asm/semaphore.h>
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22))
-#include <asm/arch/resource.h>
-#endif
-#endif
+#include <linux/mutex.h>
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22)) && \
- (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27))
-#define CONSTRAINT_NOTIFICATIONS
-#endif
#include "sgxdefs.h"
#include "services_headers.h"
#include "sysinfo.h"
#include "sgxinfokm.h"
#include "syslocal.h"
+//#include <linux/platform_device.h>
+//#include <linux/pm_runtime.h>
+
#define ONE_MHZ 1000000
#define HZ_TO_MHZ(m) ((m) / ONE_MHZ)
-#if defined(SUPPORT_OMAP3430_SGXFCLK_96M)
+#if defined(SUPPORT_TI335x_SGXFCLK_96M)
#define SGX_PARENT_CLOCK "cm_96m_fck"
#else
#define SGX_PARENT_CLOCK "core_ck"
#endif
-//#define DEBUG
-#if !defined(PDUMP) && !defined(NO_HARDWARE)
-static IMG_BOOL PowerLockWrappedOnCPU(SYS_SPECIFIC_DATA *psSysSpecData)
-{
- IMG_INT iCPU;
- IMG_BOOL bLocked = IMG_FALSE;
-
- if (!in_interrupt())
- {
- iCPU = get_cpu();
- bLocked = (iCPU == atomic_read(&psSysSpecData->sPowerLockCPU));
-
- put_cpu();
- }
+#if defined(LDM_PLATFORM) && !defined(PVR_DRI_DRM_NOT_PCI)
+extern struct platform_device *gpsPVRLDMDev;
+#endif
- return bLocked;
-}
-static IMG_VOID PowerLockWrap(SYS_SPECIFIC_DATA *psSysSpecData)
+static PVRSRV_ERROR PowerLockWrap(SYS_SPECIFIC_DATA *psSysSpecData, IMG_BOOL bTryLock)
{
- IMG_INT iCPU;
-
- if (!in_interrupt())
- {
-
- iCPU = get_cpu();
-
-
- PVR_ASSERT(iCPU != -1);
-
- PVR_ASSERT(!PowerLockWrappedOnCPU(psSysSpecData));
-
- spin_lock(&psSysSpecData->sPowerLock);
-
- atomic_set(&psSysSpecData->sPowerLockCPU, iCPU);
- }
+ if (!in_interrupt())
+ {
+ if (bTryLock)
+ {
+ int locked = mutex_trylock(&psSysSpecData->sPowerLock);
+ if (locked == 0)
+ {
+ return PVRSRV_ERROR_RETRY;
+ }
+ }
+ else
+ {
+ mutex_lock(&psSysSpecData->sPowerLock);
+ }
+ }
+
+ return PVRSRV_OK;
}
static IMG_VOID PowerLockUnwrap(SYS_SPECIFIC_DATA *psSysSpecData)
{
- if (!in_interrupt())
- {
- PVR_ASSERT(PowerLockWrappedOnCPU(psSysSpecData));
-
- atomic_set(&psSysSpecData->sPowerLockCPU, -1);
-
- spin_unlock(&psSysSpecData->sPowerLock);
-
- put_cpu();
- }
+ if (!in_interrupt())
+ {
+ mutex_unlock(&psSysSpecData->sPowerLock);
+ }
}
-PVRSRV_ERROR SysPowerLockWrap(SYS_DATA *psSysData)
+PVRSRV_ERROR SysPowerLockWrap(IMG_BOOL bTryLock)
{
- SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
+ SYS_DATA *psSysData;
- PowerLockWrap(psSysSpecData);
+ SysAcquireData(&psSysData);
- return PVRSRV_OK;
+ return PowerLockWrap(psSysData->pvSysSpecificData, bTryLock);
}
-IMG_VOID SysPowerLockUnwrap(SYS_DATA *psSysData)
+IMG_VOID SysPowerLockUnwrap(IMG_VOID)
{
- SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
+ SYS_DATA *psSysData;
- PowerLockUnwrap(psSysSpecData);
-}
-#else
-static IMG_BOOL PowerLockWrappedOnCPU(SYS_SPECIFIC_DATA unref__ *psSysSpecData)
-{
- return IMG_FALSE;
-}
+ SysAcquireData(&psSysData);
-static IMG_VOID PowerLockWrap(SYS_SPECIFIC_DATA unref__ *psSysSpecData)
-{
+ PowerLockUnwrap(psSysData->pvSysSpecificData);
}
-static IMG_VOID PowerLockUnwrap(SYS_SPECIFIC_DATA unref__ *psSysSpecData)
-{
-}
-PVRSRV_ERROR SysPowerLockWrap(SYS_DATA unref__ *psSysData)
-{
- return PVRSRV_OK;
-}
-IMG_VOID SysPowerLockUnwrap(SYS_DATA unref__ *psSysData)
-{
-}
-#endif
IMG_BOOL WrapSystemPowerChange(SYS_SPECIFIC_DATA *psSysSpecData)
{
- IMG_BOOL bPowerLock = PowerLockWrappedOnCPU(psSysSpecData);
-
- if (bPowerLock)
- {
- PowerLockUnwrap(psSysSpecData);
- }
-
- return bPowerLock;
+ return IMG_TRUE;
}
IMG_VOID UnwrapSystemPowerChange(SYS_SPECIFIC_DATA *psSysSpecData)
{
- PowerLockWrap(psSysSpecData);
}
static inline IMG_UINT32 scale_by_rate(IMG_UINT32 val, IMG_UINT32 rate1, IMG_UINT32 rate2)
static inline IMG_UINT32 scale_prop_to_SGX_clock(IMG_UINT32 val, IMG_UINT32 rate)
{
-
return scale_by_rate(val, rate, SYS_SGX_CLOCK_SPEED);
}
{
IMG_UINT32 rate;
-#if defined(NO_HARDWARE)
rate = SYS_SGX_CLOCK_SPEED;
-
-#else
+#if !defined(NO_HARDWARE)
PVR_ASSERT(atomic_read(&gpsSysSpecificData->sSGXClocksEnabled) != 0);
-
- rate = clk_get_rate(gpsSysSpecificData->psSGX_FCK);
- PVR_ASSERT(rate != 0);
#endif
psTimingInfo->ui32CoreClockSpeed = rate;
psTimingInfo->ui32HWRecoveryFreq = scale_prop_to_SGX_clock(SYS_SGX_HWRECOVERY_TIMEOUT_FREQ, rate);
psTimingInfo->ui32ActivePowManLatencyms = SYS_SGX_ACTIVE_POWER_LATENCY_MS;
}
-#if defined(CONSTRAINT_NOTIFICATIONS)
-#if !defined(SGX_DYNAMIC_TIMING_INFO)
-#error "SGX_DYNAMIC_TIMING_INFO must be defined for this platform"
-#endif
-
-static struct constraint_id cnstr_id_vdd2 = {
- .type = RES_OPP_CO,
- .data = (IMG_VOID *)"vdd2_opp"
-};
-
-#if !defined(PDUMP) && !defined(NO_HARDWARE)
-static inline IMG_BOOL ConstraintNotificationsEnabled(SYS_SPECIFIC_DATA *psSysSpecData)
-{
- return (atomic_read(&psSysSpecData->sSGXClocksEnabled) != 0) && psSysSpecData->bSGXInitComplete && psSysSpecData->bConstraintNotificationsEnabled;
-
-}
-
-static IMG_BOOL NotifyLockedOnCPU(SYS_SPECIFIC_DATA *psSysSpecData)
-{
- IMG_INT iCPU = get_cpu();
- IMG_BOOL bLocked = (iCPU == atomic_read(&psSysSpecData->sNotifyLockCPU));
-
- put_cpu();
-
- return bLocked;
-}
-
-static IMG_VOID NotifyLock(SYS_SPECIFIC_DATA *psSysSpecData)
-{
- IMG_INT iCPU;
-
- BUG_ON(in_interrupt());
-
-
- iCPU = get_cpu();
-
-
- PVR_ASSERT(iCPU != -1);
-
- PVR_ASSERT(!NotifyLockedOnCPU(psSysSpecData));
-
- spin_lock(&psSysSpecData->sNotifyLock);
-
- atomic_set(&psSysSpecData->sNotifyLockCPU, iCPU);
-}
-
-static IMG_VOID NotifyUnlock(SYS_SPECIFIC_DATA *psSysSpecData)
-{
- PVR_ASSERT(NotifyLockedOnCPU(psSysSpecData));
-
- atomic_set(&psSysSpecData->sNotifyLockCPU, -1);
-
- spin_unlock(&psSysSpecData->sNotifyLock);
-
- put_cpu();
-}
-
-static int VDD2PostFunc(struct notifier_block *n, unsigned long event, IMG_VOID *ptr)
-{
- PVR_UNREFERENCED_PARAMETER(n);
- PVR_UNREFERENCED_PARAMETER(event);
- PVR_UNREFERENCED_PARAMETER(ptr);
-
- if (in_interrupt())
- {
- PVR_DPF((PVR_DBG_ERROR, "%s Called in interrupt context. Ignoring.", __FUNCTION__));
- return 0;
- }
-
-
- if (!NotifyLockedOnCPU(gpsSysSpecificData))
- {
- return 0;
- }
-
-#if defined(DEBUG)
- if (ConstraintNotificationsEnabled(gpsSysSpecificData))
- {
- IMG_UINT32 rate;
-
- rate = clk_get_rate(gpsSysSpecificData->psSGX_FCK);
-
- PVR_ASSERT(rate != 0);
-
- PVR_DPF((PVR_DBG_MESSAGE, "%s: SGX clock rate: %dMHz", __FUNCTION__, HZ_TO_MHZ(rate)));
- }
-#endif
- if (gpsSysSpecificData->bCallVDD2PostFunc)
- {
- PVRSRVDevicePostClockSpeedChange(gpsSysSpecificData->psSGXDevNode->sDevId.ui32DeviceIndex, IMG_TRUE, IMG_NULL);
-
- gpsSysSpecificData->bCallVDD2PostFunc = IMG_FALSE;
- }
- else
- {
- if (ConstraintNotificationsEnabled(gpsSysSpecificData))
- {
- PVR_TRACE(("%s: Not calling PVR clock speed notification functions", __FUNCTION__));
- }
- }
-
- NotifyUnlock(gpsSysSpecificData);
-
- return 0;
-}
-
-static int VDD2PreFunc(struct notifier_block *n, unsigned long event, IMG_VOID *ptr)
-{
- PVR_UNREFERENCED_PARAMETER(n);
- PVR_UNREFERENCED_PARAMETER(event);
- PVR_UNREFERENCED_PARAMETER(ptr);
-
- if (in_interrupt())
- {
- PVR_DPF((PVR_DBG_WARNING, "%s Called in interrupt context. Ignoring.", __FUNCTION__));
- return 0;
- }
-
- if (PowerLockWrappedOnCPU(gpsSysSpecificData))
- {
- PVR_DPF((PVR_DBG_WARNING, "%s Called from within a power transition. Ignoring.", __FUNCTION__));
- return 0;
- }
-
- NotifyLock(gpsSysSpecificData);
-
- PVR_ASSERT(!gpsSysSpecificData->bCallVDD2PostFunc);
-
- if (ConstraintNotificationsEnabled(gpsSysSpecificData))
- {
- PVRSRV_ERROR eError;
-
- eError = PVRSRVDevicePreClockSpeedChange(gpsSysSpecificData->psSGXDevNode->sDevId.ui32DeviceIndex, IMG_TRUE, IMG_NULL);
-
- gpsSysSpecificData->bCallVDD2PostFunc = (eError == PVRSRV_OK);
-
- }
-
- return 0;
-}
-
-static struct notifier_block sVDD2Pre = {
- VDD2PreFunc,
- NULL
-};
-
-static struct notifier_block sVDD2Post = {
- VDD2PostFunc,
- NULL
-};
-
-static IMG_VOID RegisterConstraintNotifications(IMG_VOID)
-{
- PVR_TRACE(("Registering constraint notifications"));
-
- PVR_ASSERT(!gpsSysSpecificData->bConstraintNotificationsEnabled);
-
- constraint_register_pre_notification(gpsSysSpecificData->pVdd2Handle, &sVDD2Pre,
- max_vdd2_opp+1);
-
- constraint_register_post_notification(gpsSysSpecificData->pVdd2Handle, &sVDD2Post,
- max_vdd2_opp+1);
-
-
- NotifyLock(gpsSysSpecificData);
- gpsSysSpecificData->bConstraintNotificationsEnabled = IMG_TRUE;
- NotifyUnlock(gpsSysSpecificData);
-
- PVR_TRACE(("VDD2 constraint notifications registered"));
-}
-
-static IMG_VOID UnRegisterConstraintNotifications(IMG_VOID)
-{
- PVR_TRACE(("Unregistering constraint notifications"));
-
-
- NotifyLock(gpsSysSpecificData);
- gpsSysSpecificData->bConstraintNotificationsEnabled = IMG_FALSE;
- NotifyUnlock(gpsSysSpecificData);
-
-
- constraint_unregister_pre_notification(gpsSysSpecificData->pVdd2Handle, &sVDD2Pre,
- max_vdd2_opp+1);
-
- constraint_unregister_post_notification(gpsSysSpecificData->pVdd2Handle, &sVDD2Post,
- max_vdd2_opp+1);
-}
-#else
-static IMG_VOID RegisterConstraintNotifications(IMG_VOID)
-{
-}
-
-static IMG_VOID UnRegisterConstraintNotifications(IMG_VOID)
-{
-}
-#endif
-#endif
-
PVRSRV_ERROR EnableSGXClocks(SYS_DATA *psSysData)
{
#if !defined(NO_HARDWARE)
SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
- long lNewRate;
- long lRate;
IMG_INT res;
-
+ long lRate, lNewRate;
if (atomic_read(&psSysSpecData->sSGXClocksEnabled) != 0)
{
}
PVR_DPF((PVR_DBG_MESSAGE, "EnableSGXClocks: Enabling SGX Clocks"));
-
- res = clk_enable(psSysSpecData->psSGX_FCK);
+
+ res = clk_enable(psSysSpecData->psSGX_FCK);
if (res < 0)
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: Couldn't enable SGX functional clock (%d)", res));
- return PVRSRV_ERROR_UNABLE_TO_ENABLE_CLOCK;
- }
-
-#if 0
+ {
+ PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: Couldn't enable SGX functional clock (%d)", res));
+ return PVRSRV_ERROR_UNABLE_TO_ENABLE_CLOCK;
+ }
+
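+ /* Round SYS_SGX_CLOCK_SPEED + 1MHz to a rate the clock framework can
+  * provide, and only call clk_set_rate() if that differs from the rate
+  * currently in use. */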
lNewRate = clk_round_rate(psSysSpecData->psSGX_FCK, SYS_SGX_CLOCK_SPEED + ONE_MHZ);
- if (lNewRate <= 0)
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: Couldn't round SGX functional clock rate"));
- return PVRSRV_ERROR_UNABLE_TO_ROUND_CLOCK_RATE;
- }
+ if (lNewRate <= 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: Couldn't round SGX functional clock rate"));
+ return PVRSRV_ERROR_UNABLE_TO_ROUND_CLOCK_RATE;
+ }
+
+
+ lRate = clk_get_rate(psSysSpecData->psSGX_FCK);
+ if (lRate != lNewRate)
+ {
+ res = clk_set_rate(psSysSpecData->psSGX_FCK, lNewRate);
+ if (res < 0)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "EnableSGXClocks: Couldn't set SGX functional clock rate (%d)", res));
+ return PVRSRV_ERROR_UNABLE_TO_SET_CLOCK_RATE;
+ }
+ }
-
- lRate = clk_get_rate(psSysSpecData->psSGX_FCK);
- if (lRate != lNewRate)
- {
- res = clk_set_rate(psSysSpecData->psSGX_FCK, lNewRate);
- if (res < 0)
- {
- PVR_DPF((PVR_DBG_WARNING, "EnableSGXClocks: Couldn't set SGX functional clock rate (%d)", res));
- }
- }
+#if defined(DEBUG)
+ {
+ IMG_UINT32 rate = clk_get_rate(psSysSpecData->psSGX_FCK);
+ PVR_DPF((PVR_DBG_MESSAGE, "EnableSGXClocks: SGX Functional Clock is %dMhz", HZ_TO_MHZ(rate)));
+ }
#endif
-// IMG_UINT32 rate = clk_get_rate(psSysSpecData->psSGX_FCK);
-// PVR_TRACE((PVR_DBG_MESSAGE, "EnableSGXClocks: SGX Functional Clock is %dMhz", HZ_TO_MHZ(rate)));
-#if defined(DEBUG)
+
+
+#if defined(LDM_PLATFORM) && !defined(PVR_DRI_DRM_NOT_PCI)
{
- IMG_UINT32 rate = clk_get_rate(psSysSpecData->psSGX_FCK);
- PVR_DPF((PVR_DBG_MESSAGE, "EnableSGXClocks: SGX Functional Clock is %dMhz", HZ_TO_MHZ(rate)));
+
+// int res = pm_runtime_get_sync(&gpsPVRLDMDev->dev);
+// if (res < 0)
+// {
+// PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: pm_runtime_get_sync failed (%d)", -res));
+// return PVRSRV_ERROR_UNABLE_TO_ENABLE_CLOCK;
+// }
}
#endif
+// SysEnableSGXInterrupts(psSysData);
atomic_set(&psSysSpecData->sSGXClocksEnabled, 1);
#if !defined(NO_HARDWARE)
SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
-
if (atomic_read(&psSysSpecData->sSGXClocksEnabled) == 0)
{
return;
}
-
+
PVR_DPF((PVR_DBG_MESSAGE, "DisableSGXClocks: Disabling SGX Clocks"));
+
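+ /* Gate the SGX functional and interface clocks. */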
+ clk_disable(psSysSpecData->psSGX_FCK);
+
+ clk_disable(psSysSpecData->psSGX_ICK);
- if (psSysSpecData->psSGX_FCK)
+// SysDisableSGXInterrupts(psSysData);
+
+#if defined(LDM_PLATFORM) && !defined(PVR_DRI_DRM_NOT_PCI)
{
- clk_disable(psSysSpecData->psSGX_FCK);
+// int res = pm_runtime_put_sync(&gpsPVRLDMDev->dev);
+// if (res < 0)
+// {
+// PVR_DPF((PVR_DBG_ERROR, "DisableSGXClocks: pm_runtime_put_sync failed (%d)", -res));
+// }
}
+#endif
-
atomic_set(&psSysSpecData->sSGXClocksEnabled, 0);
#else
#endif
}
-PVRSRV_ERROR EnableSystemClocks(SYS_DATA *psSysData)
+#if (defined(DEBUG) || defined(TIMING)) && !defined(PVR_NO_OMAP_TIMER)
+#if defined(PVR_OMAP_USE_DM_TIMER_API)
+#define GPTIMER_TO_USE 11
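+
+/* DEBUG/TIMING builds: claim a GP timer through the dmtimer API, clock it
+ * from the system clock and leave it free-running for timing measurements. */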
+static PVRSRV_ERROR AcquireGPTimer(SYS_SPECIFIC_DATA *psSysSpecData)
{
- SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
- struct clk *psCLK;
- IMG_INT res;
- PVRSRV_ERROR eError;
- IMG_BOOL bPowerLock;
+ PVR_ASSERT(psSysSpecData->psGPTimer == NULL);
-#if defined(DEBUG) || defined(TIMING)
- IMG_INT rate;
- struct clk *sys_ck;
- IMG_CPU_PHYADDR TimerRegPhysBase;
- IMG_HANDLE hTimerEnable;
- IMG_UINT32 *pui32TimerEnable;
+
+ psSysSpecData->psGPTimer = omap_dm_timer_request_specific(GPTIMER_TO_USE);
+ if (psSysSpecData->psGPTimer == NULL)
+ {
+
+ PVR_DPF((PVR_DBG_WARNING, "%s: omap_dm_timer_request_specific failed", __FUNCTION__));
+ return PVRSRV_ERROR_CLOCK_REQUEST_FAILED;
+ }
-#endif
+
+ omap_dm_timer_set_source(psSysSpecData->psGPTimer, OMAP_TIMER_SRC_SYS_CLK);
+ omap_dm_timer_enable(psSysSpecData->psGPTimer);
- PVR_TRACE(("EnableSystemClocks: Enabling System Clocks"));
+
+ omap_dm_timer_set_load_start(psSysSpecData->psGPTimer, 1, 0);
- if (!psSysSpecData->bSysClocksOneTimeInit)
- {
- bPowerLock = IMG_FALSE;
+ omap_dm_timer_start(psSysSpecData->psGPTimer);
- spin_lock_init(&psSysSpecData->sPowerLock);
- atomic_set(&psSysSpecData->sPowerLockCPU, -1);
- spin_lock_init(&psSysSpecData->sNotifyLock);
- atomic_set(&psSysSpecData->sNotifyLockCPU, -1);
+
+ psSysSpecData->sTimerRegPhysBase.uiAddr = SYS_TI335x_GP7TIMER_REGS_SYS_PHYS_BASE;
- atomic_set(&psSysSpecData->sSGXClocksEnabled, 0);
-
- psCLK = clk_get(NULL, "sgx_ck");
-
- if (IS_ERR(psCLK))
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSsystemClocks: Couldn't get SGX Functional Clock"));
- goto ExitError;
- }
- if(clk_enable(psCLK) != 0)
- {
- printk("Could not enable SGX clock\n");
- goto ExitError;
- }
- psSysSpecData->psSGX_FCK = psCLK;
-
- psSysSpecData->bSysClocksOneTimeInit = IMG_TRUE;
- }
- else
- {
-
- bPowerLock = PowerLockWrappedOnCPU(psSysSpecData);
- if (bPowerLock)
- {
- PowerLockUnwrap(psSysSpecData);
- }
- }
+ return PVRSRV_OK;
+}
-#if defined(CONSTRAINT_NOTIFICATIONS)
- psSysSpecData->pVdd2Handle = constraint_get(PVRSRV_MODNAME, &cnstr_id_vdd2);
- if (IS_ERR(psSysSpecData->pVdd2Handle))
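+/* Undo AcquireGPTimer(): stop, disable and free the dmtimer, and clear the
+ * recorded timer register base. */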
+static void ReleaseGPTimer(SYS_SPECIFIC_DATA *psSysSpecData)
+{
+ if (psSysSpecData->psGPTimer != NULL)
{
- PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get VDD2 constraint handle"));
- goto ExitError;
+
+ (void) omap_dm_timer_stop(psSysSpecData->psGPTimer);
+
+ omap_dm_timer_disable(psSysSpecData->psGPTimer);
+
+ omap_dm_timer_free(psSysSpecData->psGPTimer);
+
+ psSysSpecData->sTimerRegPhysBase.uiAddr = 0;
+
+ psSysSpecData->psGPTimer = NULL;
}
- RegisterConstraintNotifications();
+}
+#else
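+/* Fallback path: drive the GP timer directly through its registers (posted
+ * mode, then enable), optionally taking the GPT clocks via the PRCM when
+ * PVR_OMAP3_TIMING_PRCM is defined. */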
+static PVRSRV_ERROR AcquireGPTimer(SYS_SPECIFIC_DATA *psSysSpecData)
+{
+#if defined(PVR_OMAP3_TIMING_PRCM)
+ struct clk *psCLK;
+ IMG_INT res;
+ struct clk *sys_ck;
+ IMG_INT rate;
+#endif
+ PVRSRV_ERROR eError;
+
+ IMG_CPU_PHYADDR sTimerRegPhysBase;
+ IMG_HANDLE hTimerEnable;
+ IMG_UINT32 *pui32TimerEnable;
+#if defined(PVR_OMAP_TIMER_BASE_IN_SYS_SPEC_DATA)
+ PVR_ASSERT(psSysSpecData->sTimerRegPhysBase.uiAddr == 0);
#endif
-#if defined(DEBUG) || defined(TIMING)
- psCLK = clk_get(NULL, "gpt7_fck");
+
+#if defined(PVR_OMAP3_TIMING_PRCM)
+
+ psCLK = clk_get(NULL, "gpt7_fck");
if (IS_ERR(psCLK))
{
PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get GPTIMER11 functional clock"));
- goto ExitUnRegisterConstraintNotifications;
+ goto ExitError;
}
psSysSpecData->psGPT11_FCK = psCLK;
- psCLK = clk_get(NULL, "gpt7_ick");
+ psCLK = clk_get(NULL, "gpt7_ick");
if (IS_ERR(psCLK))
{
PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get GPTIMER11 interface clock"));
- goto ExitUnRegisterConstraintNotifications;
+ goto ExitError;
}
psSysSpecData->psGPT11_ICK = psCLK;
if (res < 0)
{
PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't enable GPTIMER11 functional clock (%d)", res));
- goto ExitUnRegisterConstraintNotifications;
+ goto ExitError;
}
res = clk_enable(psSysSpecData->psGPT11_ICK);
PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't enable GPTIMER11 interface clock (%d)", res));
goto ExitDisableGPT11FCK;
}
+#endif
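+
+ /* Map the timer's TSICR register and switch the timer to posted mode if
+  * it is not already set. */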
- TimerRegPhysBase.uiAddr = SYS_TI335x_GP7TIMER_TSICR_SYS_PHYS_BASE;
- pui32TimerEnable = OSMapPhysToLin(TimerRegPhysBase,
+ sTimerRegPhysBase.uiAddr = SYS_TI335x_GP7TIMER_TSICR_SYS_PHYS_BASE;
+ pui32TimerEnable = OSMapPhysToLin(sTimerRegPhysBase,
4,
PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
&hTimerEnable);
goto ExitDisableGPT11ICK;
}
- rate = *pui32TimerEnable;
- if(!(rate & 4))
+ if(!(*pui32TimerEnable & 4))
{
PVR_TRACE(("Setting GPTIMER11 mode to posted (currently is non-posted)"));
- *pui32TimerEnable = rate | 4;
+ *pui32TimerEnable |= 4;
}
OSUnMapPhysToLin(pui32TimerEnable,
hTimerEnable);
- TimerRegPhysBase.uiAddr = SYS_TI335x_GP7TIMER_ENABLE_SYS_PHYS_BASE;
- pui32TimerEnable = OSMapPhysToLin(TimerRegPhysBase,
+ sTimerRegPhysBase.uiAddr = SYS_TI335x_GP7TIMER_ENABLE_SYS_PHYS_BASE;
+ pui32TimerEnable = OSMapPhysToLin(sTimerRegPhysBase,
4,
PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
&hTimerEnable);
goto ExitDisableGPT11ICK;
}
-
*pui32TimerEnable = 3;
OSUnMapPhysToLin(pui32TimerEnable,
4,
PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
hTimerEnable);
-
-#endif
-
-#if defined(PDUMP) && !defined(NO_HARDWARE) && defined(CONSTRAINT_NOTIFICATIONS)
- PVR_TRACE(("EnableSystemClocks: Setting SGX OPP constraint"));
-
-
- res = constraint_set(psSysSpecData->pVdd2Handle, max_vdd2_opp);
- if (res != 0)
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: constraint_set failed (%d)", res));
- goto ExitConstraintSetFailed;
- }
+#if defined(PVR_OMAP_TIMER_BASE_IN_SYS_SPEC_DATA)
+ psSysSpecData->sTimerRegPhysBase = sTimerRegPhysBase;
#endif
+
eError = PVRSRV_OK;
+
goto Exit;
-#if defined(PDUMP) && !defined(NO_HARDWARE) && defined(CONSTRAINT_NOTIFICATIONS)
-ExitConstraintSetFailed:
-#endif
-#if defined(DEBUG) || defined(TIMING)
ExitDisableGPT11ICK:
+#if defined(PVR_OMAP3_TIMING_PRCM)
clk_disable(psSysSpecData->psGPT11_ICK);
ExitDisableGPT11FCK:
clk_disable(psSysSpecData->psGPT11_FCK);
-ExitUnRegisterConstraintNotifications:
+ExitError:
#endif
-#if defined(CONSTRAINT_NOTIFICATIONS)
- UnRegisterConstraintNotifications();
- constraint_put(psSysSpecData->pVdd2Handle);
-#endif
+ eError = PVRSRV_ERROR_CLOCK_REQUEST_FAILED;
Exit:
- if (bPowerLock)
- {
- PowerLockWrap(psSysSpecData);
- }
-
-ExitError:
- eError = PVRSRV_ERROR_DISABLE_CLOCK_FAILURE;
return eError;
}
-IMG_VOID DisableSystemClocks(SYS_DATA *psSysData)
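+/* Undo AcquireGPTimer() for the register-level path: clear the timer enable
+ * register and release the GPT clocks when PVR_OMAP3_TIMING_PRCM is used. */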
+static void ReleaseGPTimer(SYS_SPECIFIC_DATA *psSysSpecData)
{
- SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
- IMG_BOOL bPowerLock;
-#if defined(DEBUG) || defined(TIMING)
- IMG_CPU_PHYADDR TimerRegPhysBase;
IMG_HANDLE hTimerDisable;
IMG_UINT32 *pui32TimerDisable;
-#endif
-
- PVR_TRACE(("DisableSystemClocks: Disabling System Clocks"));
-
-
- DisableSGXClocks(psSysData);
-
- bPowerLock = PowerLockWrappedOnCPU(psSysSpecData);
- if (bPowerLock)
- {
-
- PowerLockUnwrap(psSysSpecData);
- }
-
-#if defined(PDUMP) && !defined(NO_HARDWARE) && defined(CONSTRAINT_NOTIFICATIONS)
+ IMG_CPU_PHYADDR TimerRegPhysBase;
+#if defined(PVR_OMAP_TIMER_BASE_IN_SYS_SPEC_DATA)
+ if (psSysSpecData->sTimerRegPhysBase.uiAddr == 0)
{
- int res;
-
- PVR_TRACE(("DisableSystemClocks: Removing SGX OPP constraint"));
-
-
- res = constraint_remove(psSysSpecData->pVdd2Handle);
- if (res != 0)
- {
- PVR_DPF((PVR_DBG_WARNING, "DisableSystemClocks: constraint_remove failed (%d)", res));
- }
+ return;
}
#endif
-
-#if defined(CONSTRAINT_NOTIFICATIONS)
- UnRegisterConstraintNotifications();
-#endif
-
-#if defined(DEBUG) || defined(TIMING)
-
- TimerRegPhysBase.uiAddr = SYS_TI335x_GP7TIMER_ENABLE_SYS_PHYS_BASE;
+ TimerRegPhysBase.uiAddr = SYS_TI335x_GP7TIMER_ENABLE_SYS_PHYS_BASE;
pui32TimerDisable = OSMapPhysToLin(TimerRegPhysBase,
4,
PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
hTimerDisable);
}
+#if defined(PVR_OMAP_TIMER_BASE_IN_SYS_SPEC_DATA)
+ psSysSpecData->sTimerRegPhysBase.uiAddr = 0;
+#endif
+
+#if defined(PVR_OMAP3_TIMING_PRCM)
clk_disable(psSysSpecData->psGPT11_ICK);
clk_disable(psSysSpecData->psGPT11_FCK);
-
+#endif
+}
+#endif
+#else
+static PVRSRV_ERROR AcquireGPTimer(SYS_SPECIFIC_DATA *psSysSpecData)
+{
+ PVR_UNREFERENCED_PARAMETER(psSysSpecData);
+ return PVRSRV_OK;
+}
+static void ReleaseGPTimer(SYS_SPECIFIC_DATA *psSysSpecData)
+{
+ PVR_UNREFERENCED_PARAMETER(psSysSpecData);
+}
#endif
-#if defined(CONSTRAINT_NOTIFICATIONS)
- constraint_put(psSysSpecData->pVdd2Handle);
-#endif
- if (bPowerLock)
+
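+/* One-time initialisation of the power lock mutex and lookup of the SGX
+ * functional clock, then acquisition of the GP timer (a no-op unless DEBUG
+ * or TIMING is defined). */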
+PVRSRV_ERROR EnableSystemClocks(SYS_DATA *psSysData)
+{
+ SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
+ struct clk *psCLK;
+ PVR_TRACE(("EnableSystemClocks: Enabling System Clocks"));
+
+ if (!psSysSpecData->bSysClocksOneTimeInit)
{
- PowerLockWrap(psSysSpecData);
+ mutex_init(&psSysSpecData->sPowerLock);
+
+ atomic_set(&psSysSpecData->sSGXClocksEnabled, 0);
+
+
+ psCLK = clk_get(NULL, "sgx_ck");
+ if (IS_ERR(psCLK))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get SGX Functional Clock"));
+ return PVRSRV_ERROR_UNABLE_TO_GET_CLOCK;
+ }
+ psSysSpecData->psSGX_FCK = psCLK;
+
+ psSysSpecData->bSysClocksOneTimeInit = IMG_TRUE;
}
+
+ return AcquireGPTimer(psSysSpecData);
+}
+
+IMG_VOID DisableSystemClocks(SYS_DATA *psSysData)
+{
+ SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
+ PVR_TRACE(("DisableSystemClocks: Disabling System Clocks"));
+
+ DisableSGXClocks(psSysData);
+
+ ReleaseGPTimer(psSysSpecData);
+
+}
+
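+/* Runtime PM hooks for the LDM platform device. The pm_runtime calls are
+ * currently commented out, so both functions are placeholders that always
+ * return PVRSRV_OK. */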
+PVRSRV_ERROR SysPMRuntimeRegister(void)
+{
+#if defined(LDM_PLATFORM) && !defined(PVR_DRI_DRM_NOT_PCI)
+// pm_runtime_enable(&gpsPVRLDMDev->dev);
+#endif
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR SysPMRuntimeUnregister(void)
+{
+#if defined(LDM_PLATFORM) && !defined(PVR_DRI_DRM_NOT_PCI)
+// pm_runtime_disable(&gpsPVRLDMDev->dev);
+#endif
+ return PVRSRV_OK;
}
+++ /dev/null
-/**********************************************************************
- *
- * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful but, except
- * as otherwise stated in writing, without any warranty; without even the
- * implied warranty of merchantability or fitness for a particular purpose.
- * See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Imagination Technologies Ltd. <gpl-support@imgtec.com>
- * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
- *
- ******************************************************************************/
-
-#include <linux/version.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/hardirq.h>
-#include <linux/mutex.h>
-
-#include "sgxdefs.h"
-#include "services_headers.h"
-#include "sysinfo.h"
-#include "sgxapi_km.h"
-#include "sysconfig.h"
-#include "sgxinfokm.h"
-#include "syslocal.h"
-
-#if !defined(PVR_LINUX_USING_WORKQUEUES)
-#error "PVR_LINUX_USING_WORKQUEUES must be defined"
-#endif
-
-#define ONE_MHZ 1000000
-#define HZ_TO_MHZ(m) ((m) / ONE_MHZ)
-
-#if defined(SUPPORT_OMAP3430_SGXFCLK_96M)
-#define SGX_PARENT_CLOCK "cm_96m_fck"
-#else
-#define SGX_PARENT_CLOCK "core_ck"
-#endif
-
-static PVRSRV_ERROR PowerLockWrap(SYS_SPECIFIC_DATA *psSysSpecData, IMG_BOOL bTryLock)
-{
- if (!in_interrupt())
- {
- if (bTryLock)
- {
- int locked = mutex_trylock(&psSysSpecData->sPowerLock);
- if (locked == 0)
- {
- return PVRSRV_ERROR_RETRY;
- }
- }
- else
- {
- mutex_lock(&psSysSpecData->sPowerLock);
- }
- }
-
- return PVRSRV_OK;
-}
-
-static IMG_VOID PowerLockUnwrap(SYS_SPECIFIC_DATA *psSysSpecData)
-{
- if (!in_interrupt())
- {
- mutex_unlock(&psSysSpecData->sPowerLock);
- }
-}
-
-PVRSRV_ERROR SysPowerLockWrap(IMG_BOOL bTryLock)
-{
- SYS_DATA *psSysData;
-
- SysAcquireData(&psSysData);
-
- return PowerLockWrap(psSysData->pvSysSpecificData, bTryLock);
-}
-
-IMG_VOID SysPowerLockUnwrap(IMG_VOID)
-{
- SYS_DATA *psSysData;
-
- SysAcquireData(&psSysData);
-
- PowerLockUnwrap(psSysData->pvSysSpecificData);
-}
-
-/*
- * This function should be called to unwrap the Services power lock, prior
- * to calling any function that might sleep.
- * This function shouldn't be called prior to calling EnableSystemClocks
- * or DisableSystemClocks, as those functions perform their own power lock
- * unwrapping.
- * If the function returns IMG_TRUE, UnwrapSystemPowerChange must be
- * called to rewrap the power lock, prior to returning to Services.
- */
-IMG_BOOL WrapSystemPowerChange(SYS_SPECIFIC_DATA *psSysSpecData)
-{
- return IMG_TRUE;
-}
-
-IMG_VOID UnwrapSystemPowerChange(SYS_SPECIFIC_DATA *psSysSpecData)
-{
-}
-
-static inline IMG_UINT32 scale_by_rate(IMG_UINT32 val, IMG_UINT32 rate1, IMG_UINT32 rate2)
-{
- if (rate1 >= rate2)
- {
- return val * (rate1 / rate2);
- }
-
- return val / (rate2 / rate1);
-}
-
-static inline IMG_UINT32 scale_prop_to_SGX_clock(IMG_UINT32 val, IMG_UINT32 rate)
-{
- return scale_by_rate(val, rate, SYS_SGX_CLOCK_SPEED);
-}
-
-static inline IMG_UINT32 scale_inv_prop_to_SGX_clock(IMG_UINT32 val, IMG_UINT32 rate)
-{
- return scale_by_rate(val, SYS_SGX_CLOCK_SPEED, rate);
-}
-
-IMG_VOID SysGetSGXTimingInformation(SGX_TIMING_INFORMATION *psTimingInfo)
-{
- IMG_UINT32 rate;
-
-#if defined(NO_HARDWARE)
- rate = SYS_SGX_CLOCK_SPEED;
-
-#else
- PVR_ASSERT(atomic_read(&gpsSysSpecificData->sSGXClocksEnabled) != 0);
-
- rate = clk_get_rate(gpsSysSpecificData->psSGX_FCK);
- PVR_ASSERT(rate != 0);
-#endif
- psTimingInfo->ui32CoreClockSpeed = rate;
- psTimingInfo->ui32HWRecoveryFreq = scale_prop_to_SGX_clock(SYS_SGX_HWRECOVERY_TIMEOUT_FREQ, rate);
- psTimingInfo->ui32uKernelFreq = scale_prop_to_SGX_clock(SYS_SGX_PDS_TIMER_FREQ, rate);
-#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
- psTimingInfo->bEnableActivePM = IMG_TRUE;
-#else
- psTimingInfo->bEnableActivePM = IMG_FALSE;
-#endif
- psTimingInfo->ui32ActivePowManLatencyms = SYS_SGX_ACTIVE_POWER_LATENCY_MS;
-}
-
-PVRSRV_ERROR EnableSGXClocks(SYS_DATA *psSysData)
-{
-#if !defined(NO_HARDWARE)
- SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
-/*
- long lNewRate;
- long lRate;
-*/
- IMG_INT res;
-
-
- if (atomic_read(&psSysSpecData->sSGXClocksEnabled) != 0)
- {
- return PVRSRV_OK;
- }
-
- PVR_DPF((PVR_DBG_MESSAGE, "EnableSGXClocks: Enabling SGX Clocks"));
-
-#if defined(DEBUG)
- {
-
- IMG_UINT32 rate = clk_get_rate(psSysSpecData->psMPU_CK);
- PVR_DPF((PVR_DBG_MESSAGE, "EnableSGXClocks: CPU Clock is %dMhz", HZ_TO_MHZ(rate)));
- }
-#endif
-
- res = clk_enable(psSysSpecData->psSGX_FCK);
- if (res < 0)
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: Couldn't enable SGX functional clock (%d)", res));
- return PVRSRV_ERROR_UNABLE_TO_ENABLE_CLOCK;
- }
-// res = clk_get_rate(psSysSpecData->psSGX_FCK);
-// PVR_TRACE(("Default SGX clock rate is %dMHz", HZ_TO_MHZ(res)));
-
- res = clk_set_rate(psSysSpecData->psSGX_FCK,SYS_SGX_CLOCK_SPEED);
-
- if(res != 0)
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: Couldn't set SGX Functional Clock rate"));
- return PVRSRV_ERROR_UNABLE_TO_SET_CLOCK_RATE;
- }
-
- else
- {
- res = clk_get_rate(psSysSpecData->psSGX_FCK);
-// PVR_TRACE(("SGX clock rate after set_clk_rate is %dMHz", HZ_TO_MHZ(res)));
- }
-
-
-/*
- res = clk_enable(psSysSpecData->psSGX_ICK);
- if (res < 0)
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: Couldn't enable SGX interface clock (%d)", res));
-
- clk_disable(psSysSpecData->psSGX_FCK);
- return PVRSRV_ERROR_UNABLE_TO_ENABLE_CLOCK;
- }
-
- lNewRate = clk_round_rate(psSysSpecData->psSGX_FCK, SYS_SGX_CLOCK_SPEED + ONE_MHZ);
- if (lNewRate <= 0)
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: Couldn't round SGX functional clock rate"));
- return PVRSRV_ERROR_UNABLE_TO_ROUND_CLOCK_RATE;
- }
-
-
- lRate = clk_get_rate(psSysSpecData->psSGX_FCK);
- if (lRate != lNewRate)
- {
- res = clk_set_rate(psSysSpecData->psSGX_FCK, lNewRate);
- if (res < 0)
- {
- PVR_DPF((PVR_DBG_WARNING, "EnableSGXClocks: Couldn't set SGX functional clock rate (%d)", res));
- }
- }
-*/
-#if defined(DEBUG)
- {
- IMG_UINT32 rate = clk_get_rate(psSysSpecData->psSGX_FCK);
- PVR_DPF((PVR_DBG_MESSAGE, "EnableSGXClocks: SGX Functional Clock is %dMhz", HZ_TO_MHZ(rate)));
- }
-#endif
-
-
- atomic_set(&psSysSpecData->sSGXClocksEnabled, 1);
-
-#else
- PVR_UNREFERENCED_PARAMETER(psSysData);
-#endif
- return PVRSRV_OK;
-}
-
-
-IMG_VOID DisableSGXClocks(SYS_DATA *psSysData)
-{
-#if !defined(NO_HARDWARE)
- SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
-
-
- if (atomic_read(&psSysSpecData->sSGXClocksEnabled) == 0)
- {
- return;
- }
-
- PVR_DPF((PVR_DBG_MESSAGE, "DisableSGXClocks: Disabling SGX Clocks"));
-
-/* if (psSysSpecData->psSGX_ICK)
- {
- clk_disable(psSysSpecData->psSGX_ICK);
- }
-*/
- if (psSysSpecData->psSGX_FCK)
- {
- clk_disable(psSysSpecData->psSGX_FCK);
- }
-
-
- atomic_set(&psSysSpecData->sSGXClocksEnabled, 0);
-
-#else
- PVR_UNREFERENCED_PARAMETER(psSysData);
-#endif
-}
-
-PVRSRV_ERROR EnableSystemClocks(SYS_DATA *psSysData)
-{
- SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
- struct clk *psCLK;
-// IMG_INT res;
-// IMG_BOOL bPowerLock;
- PVRSRV_ERROR eError;
-
-#if defined(DEBUG) || defined(TIMING)
- IMG_INT res;
- IMG_INT rate;
- struct clk *sys_ck;
- IMG_CPU_PHYADDR TimerRegPhysBase;
- IMG_HANDLE hTimerEnable;
- IMG_UINT32 *pui32TimerEnable;
-
-#endif
-
- PVR_TRACE(("EnableSystemClocks: Enabling System Clocks"));
-
- if (!psSysSpecData->bSysClocksOneTimeInit)
- {
- mutex_init(&psSysSpecData->sPowerLock);
-
- atomic_set(&psSysSpecData->sSGXClocksEnabled, 0);
-
- psCLK = clk_get(NULL, "sgx_ck");
- if (IS_ERR(psCLK))
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSsystemClocks: Couldn't get SGX Functional Clock"));
- goto ExitError;
- }
-/* if(clk_enable(psCLK) != 0)
- {
- printk("Could not enable SGX clock\n");
- goto ExitError;
- }
-*/
- psSysSpecData->psSGX_FCK = psCLK;
-
- psSysSpecData->bSysClocksOneTimeInit = IMG_TRUE;
- }
-
- /* psCLK = clk_get(NULL, "sgx_ck");
- if (IS_ERR(psCLK))
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSsystemClocks: Couldn't get SGX Functional Clock"));
- goto ExitError;
- }
- if(clk_enable(psCLK) != 0)
- {
- printk("Could not enable SGX clock\n");
- goto ExitError;
- }
-
- psSysSpecData->psSGX_FCK = psCLK; */
-/* else
- {
-
- bPowerLock = PowerLockWrappedOnCPU(psSysSpecData);
- if (bPowerLock)
- {
- PowerLockUnwrap(psSysSpecData);
- }
- }
-
-*/
-
-/*
- psCLK = clk_get(NULL, SGX_PARENT_CLOCK);
- if (IS_ERR(psCLK))
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSsystemClocks: Couldn't get Core Clock"));
- goto ExitError;
- }
- psSysSpecData->psCORE_CK = psCLK;
-
- psCLK = clk_get(NULL, "sgx_fck");
- if (IS_ERR(psCLK))
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSsystemClocks: Couldn't get SGX Functional Clock"));
- goto ExitError;
- }
- psSysSpecData->psSGX_FCK = psCLK;
-
- psCLK = clk_get(NULL, "sgx_ick");
- if (IS_ERR(psCLK))
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get SGX Interface Clock"));
- goto ExitError;
- }
- psSysSpecData->psSGX_ICK = psCLK;
-
-#if defined(DEBUG)
- psCLK = clk_get(NULL, "mpu_ck");
- if (IS_ERR(psCLK))
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get MPU Clock"));
- goto ExitError;
- }
- psSysSpecData->psMPU_CK = psCLK;
-#endif
- res = clk_set_parent(psSysSpecData->psSGX_FCK, psSysSpecData->psCORE_CK);
- if (res < 0)
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't set SGX parent clock (%d)", res));
- goto ExitError;
- }
-
- psSysSpecData->bSysClocksOneTimeInit = IMG_TRUE;
- }
-*/
-#if defined(DEBUG) || defined(TIMING)
- psCLK = clk_get(NULL, "gpt7_fck");
- if (IS_ERR(psCLK))
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get GPTIMER11 functional clock"));
- goto ExitUnRegisterConstraintNotifications;
- }
- psSysSpecData->psGPT11_FCK = psCLK;
-
- psCLK = clk_get(NULL, "gpt7_ick");
- if (IS_ERR(psCLK))
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get GPTIMER11 interface clock"));
- goto ExitUnRegisterConstraintNotifications;
- }
- psSysSpecData->psGPT11_ICK = psCLK;
-/*
- sys_ck = clk_get(NULL, "sys_ck");
- if (IS_ERR(sys_ck))
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get System clock"));
- goto ExitUnRegisterConstraintNotifications;
- }
-
- if(clk_get_parent(psSysSpecData->psGPT11_FCK) != sys_ck)
- {
- PVR_TRACE(("Setting GPTIMER11 parent to System Clock"));
- res = clk_set_parent(psSysSpecData->psGPT11_FCK, sys_ck);
- if (res < 0)
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't set GPTIMER11 parent clock (%d)", res));
- goto ExitUnRegisterConstraintNotifications;
- }
- }
-*/
- rate = clk_get_rate(psSysSpecData->psGPT11_FCK);
- PVR_TRACE(("GPTIMER11 clock is %dMHz", HZ_TO_MHZ(rate)));
-
- res = clk_enable(psSysSpecData->psGPT11_FCK);
- if (res < 0)
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't enable GPTIMER11 functional clock (%d)", res));
- goto ExitUnRegisterConstraintNotifications;
- }
-
- res = clk_enable(psSysSpecData->psGPT11_ICK);
- if (res < 0)
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't enable GPTIMER11 interface clock (%d)", res));
- goto ExitDisableGPT11FCK;
- }
-
-
- TimerRegPhysBase.uiAddr = SYS_TI335x_GP7TIMER_TSICR_SYS_PHYS_BASE;
- pui32TimerEnable = OSMapPhysToLin(TimerRegPhysBase,
- 4,
- PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
- &hTimerEnable);
-
- if (pui32TimerEnable == IMG_NULL)
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: OSMapPhysToLin failed"));
- goto ExitDisableGPT11ICK;
- }
-
- rate = *pui32TimerEnable;
- if(!(rate & 4))
- {
- PVR_TRACE(("Setting GPTIMER11 mode to posted (currently is non-posted)"));
-
-
- *pui32TimerEnable = rate | 4;
- }
-
- OSUnMapPhysToLin(pui32TimerEnable,
- 4,
- PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
- hTimerEnable);
-
-
- TimerRegPhysBase.uiAddr = SYS_TI335x_GP7TIMER_ENABLE_SYS_PHYS_BASE;
- pui32TimerEnable = OSMapPhysToLin(TimerRegPhysBase,
- 4,
- PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
- &hTimerEnable);
-
- if (pui32TimerEnable == IMG_NULL)
- {
- PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: OSMapPhysToLin failed"));
- goto ExitDisableGPT11ICK;
- }
-
-
- *pui32TimerEnable = 3;
-
- OSUnMapPhysToLin(pui32TimerEnable,
- 4,
- PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
- hTimerEnable);
-
-#endif
-
- eError = PVRSRV_OK;
- goto Exit;
-
-#if defined(DEBUG) || defined(TIMING)
-ExitDisableGPT11ICK:
- clk_disable(psSysSpecData->psGPT11_ICK);
-ExitDisableGPT11FCK:
- clk_disable(psSysSpecData->psGPT11_FCK);
-ExitUnRegisterConstraintNotifications:
-#endif
-ExitError:
- eError = PVRSRV_ERROR_DISABLE_CLOCK_FAILURE;
-Exit:
- return eError;
-}
-
-IMG_VOID DisableSystemClocks(SYS_DATA *psSysData)
-{
-#if defined(DEBUG) || defined(TIMING)
- SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
- IMG_CPU_PHYADDR TimerRegPhysBase;
- IMG_HANDLE hTimerDisable;
- IMG_UINT32 *pui32TimerDisable;
-#endif
-
- PVR_TRACE(("DisableSystemClocks: Disabling System Clocks"));
-
-
- DisableSGXClocks(psSysData);
-
-#if defined(DEBUG) || defined(TIMING)
-
- TimerRegPhysBase.uiAddr = SYS_TI335x_GP7TIMER_ENABLE_SYS_PHYS_BASE;
- pui32TimerDisable = OSMapPhysToLin(TimerRegPhysBase,
- 4,
- PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
- &hTimerDisable);
-
- if (pui32TimerDisable == IMG_NULL)
- {
- PVR_DPF((PVR_DBG_ERROR, "DisableSystemClocks: OSMapPhysToLin failed"));
- }
- else
- {
- *pui32TimerDisable = 0;
-
- OSUnMapPhysToLin(pui32TimerDisable,
- 4,
- PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
- hTimerDisable);
- }
-
- clk_disable(psSysSpecData->psGPT11_ICK);
-
- clk_disable(psSysSpecData->psGPT11_FCK);
-
-#endif
-}