}
static int
-FlushCacheDRI(IMG_UINT32 ui32Type, IMG_UINT32 ui32Virt, IMG_UINT32 ui32Length)
+FlushCacheDRI(IMG_UINT32 ui32Type, IMG_VOID *pvVirt, IMG_UINT32 ui32Length)
{
switch (ui32Type) {
case DRM_PVR2D_CFLUSH_FROM_GPU:
PVR_DPF((PVR_DBG_MESSAGE,
"DRM_PVR2D_CFLUSH_FROM_GPU 0x%08x, length 0x%08x\n",
- ui32Virt, ui32Length));
+ pvVirt, ui32Length));
#ifdef CONFIG_ARM
- dmac_inv_range((const void *)ui32Virt,
- (const void *)(ui32Virt + ui32Length));
+ dmac_inv_range((const void *)pvVirt,
+ (const void *)(pvVirt + ui32Length));
#endif
return 0;
case DRM_PVR2D_CFLUSH_TO_GPU:
PVR_DPF((PVR_DBG_MESSAGE,
"DRM_PVR2D_CFLUSH_TO_GPU 0x%08x, length 0x%08x\n",
- ui32Virt, ui32Length));
+ pvVirt, ui32Length));
#ifdef CONFIG_ARM
- dmac_clean_range((const void *)ui32Virt,
- (const void *)(ui32Virt + ui32Length));
+ dmac_clean_range((const void *)pvVirt,
+ (const void *)(pvVirt + ui32Length));
#endif
return 0;
default:
return 0;
}
+/*
+ * PVRSRVIsWrappedExtMemoryBW - bridge wrapper: resolve the per-process
+ * device-node handle from the flush request and ask the KM layer
+ * whether the request's user virtual range is already wrapped.
+ *
+ * Returns the error from handle lookup or from
+ * PVRSRVIsWrappedExtMemoryKM.
+ */
+PVRSRV_ERROR
+PVRSRVIsWrappedExtMemoryBW(PVRSRV_PER_PROCESS_DATA *psPerProc,
+			   PVRSRV_BRIDGE_IN_CACHEFLUSHDRMFROMUSER *psCacheFlushIN)
+{
+	PVRSRV_ERROR eError;
+	IMG_HANDLE hDevCookieInt;
+
+	/*
+	 * Check the lookup result: on failure hDevCookieInt would be
+	 * uninitialised, so bail out instead of passing garbage on to
+	 * PVRSRVIsWrappedExtMemoryKM().
+	 */
+	eError = PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
+				    psCacheFlushIN->hDevCookie,
+				    PVRSRV_HANDLE_TYPE_DEV_NODE);
+	if (eError != PVRSRV_OK)
+		return eError;
+
+	eError = PVRSRVIsWrappedExtMemoryKM(
+			hDevCookieInt,
+			psPerProc,
+			&(psCacheFlushIN->ui32Length),
+			&(psCacheFlushIN->pvVirt));
+
+	return eError;
+}
+
static int
PVRSRVCacheFlushDRIBW(IMG_UINT32 ui32BridgeID,
		      PVRSRV_BRIDGE_IN_CACHEFLUSHDRMFROMUSER * psCacheFlushIN,
		      PVRSRV_BRIDGE_RETURN * psRetOUT,
		      PVRSRV_PER_PROCESS_DATA * psPerProc)
{
+	PVRSRV_ERROR eError;
	PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CACHE_FLUSH_DRM);
-	psRetOUT->eError = FlushCacheDRI(psCacheFlushIN->ui32Type,
-					 psCacheFlushIN->ui32Virt,
-					 psCacheFlushIN->ui32Length);
+	/*
+	 * Hold mmap_sem across both the wrap check and the flush so the
+	 * user mapping cannot change between the two operations; the KM
+	 * check below is called with bUseLock = IMG_FALSE for this reason.
+	 */
+	down_read(&current->mm->mmap_sem);
+
+	eError = PVRSRVIsWrappedExtMemoryBW(psPerProc, psCacheFlushIN);
+
+	if (eError == PVRSRV_OK) {
+		/* Range is wrapped - safe to perform the CPU cache op. */
+		psRetOUT->eError = FlushCacheDRI(psCacheFlushIN->ui32Type,
+						 psCacheFlushIN->pvVirt,
+						 psCacheFlushIN->ui32Length);
+	} else {
+		printk(KERN_WARNING
+		       ": PVRSRVCacheFlushDRIBW: Start address 0x%08x and length 0x%08x not wrapped \n",
+		       (unsigned int)(psCacheFlushIN->pvVirt),
+		       (unsigned int)(psCacheFlushIN->ui32Length));
+	}
+	up_read(&current->mm->mmap_sem);
	return 0;
}
IMG_HANDLE hDevCookieInt;
PVRSRV_KERNEL_MEM_INFO *psMemInfo;
IMG_UINT32 ui32PageTableSize = 0;
- IMG_SYS_PHYADDR *psSysPAddr = IMG_NULL;;
+ IMG_SYS_PHYADDR *psSysPAddr = IMG_NULL;
PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_WRAP_EXT_MEMORY);
#include "ra.h"
#include "pdump_km.h"
+#include <linux/kernel.h>
+
#define MIN(a,b) (a > b ? b : a)
static IMG_BOOL
return pBuf != IMG_NULL;
}
+/*
+ * BM_IsWrappedCheckSize - test whether the system physical address
+ * (sSysAddr.uiAddr + ui32Offset) is already wrapped into the given
+ * heap's buffer-manager context, AND whether the existing mapping is
+ * at least ui32ByteSize bytes long.
+ *
+ * Returns IMG_TRUE only when a buffer is found in the context's hash
+ * and its mapping size covers ui32ByteSize; IMG_FALSE otherwise
+ * (not found, or found but too small).
+ */
+IMG_BOOL
+BM_IsWrappedCheckSize(IMG_HANDLE hDevMemHeap,
+		      IMG_UINT32 ui32Offset,
+		      IMG_SYS_PHYADDR sSysAddr,
+		      IMG_UINT32 ui32ByteSize)
+{
+	BM_BUF *pBuf;
+	BM_CONTEXT *psBMContext;
+	BM_HEAP *psBMHeap;
+
+	IMG_BOOL ret = IMG_FALSE;
+
+	psBMHeap = (BM_HEAP *) hDevMemHeap;
+	psBMContext = psBMHeap->pBMContext;
+	/* sSysAddr is passed by value, so biasing it here is local only. */
+	sSysAddr.uiAddr += ui32Offset;
+	pBuf = (BM_BUF *) HASH_Retrieve(psBMContext->pBufferHash,
+					(IMG_UINTPTR_T) sSysAddr.uiAddr);
+
+	if (pBuf != NULL) {
+		/* Found a wrapped buffer - accept it only if large enough. */
+		if (pBuf->pMapping->uSize >= ui32ByteSize)
+			ret = IMG_TRUE;
+		else
+			ret = IMG_FALSE;
+	}
+
+	return ret;
+}
+
IMG_BOOL
BM_Wrap(IMG_HANDLE hDevMemHeap,
IMG_UINT32 ui32Size,
BM_IsWrapped(IMG_HANDLE hDevMemHeap,
IMG_UINT32 ui32Offset, IMG_SYS_PHYADDR sSysAddr);
+ IMG_BOOL
+ BM_IsWrappedCheckSize(IMG_HANDLE hDevMemHeap,
+ IMG_UINT32 ui32Offset,
+ IMG_SYS_PHYADDR sSysAddr,
+ IMG_UINT32 ui32ByteSize);
+
IMG_BOOL
BM_Wrap(IMG_HANDLE hDevMemHeap,
IMG_UINT32 ui32Size,
#include "pvr_bridge_km.h"
#include "linux/kernel.h"
+#include "linux/pagemap.h"
static PVRSRV_ERROR AllocDeviceMem(IMG_HANDLE hDevCookie,
IMG_HANDLE hDevMemHeap,
}
if (hOSWrapMem) {
- OSReleasePhysPageAddr(hOSWrapMem);
+ OSReleasePhysPageAddr(hOSWrapMem, IMG_TRUE);
}
return eError;
}
+/*
+ * PVRSRVIsWrappedExtMemoryKM - check whether the user virtual range
+ * (*pvLinAddr, *pui32ByteSize) is already wrapped into one of the
+ * device mapping heaps (the general mapping heap first, then the
+ * alternative heap).
+ *
+ * In/out parameter side effects:
+ *  - *pvLinAddr is rounded down to a page boundary;
+ *  - *pui32ByteSize is grown by the in-page offset. Note that the
+ *    store back through pui32ByteSize sits after the ErrorExitPhase1
+ *    label, so it happens on the error path as well.
+ *
+ * Returns PVRSRV_OK when the range is wrapped with a large-enough
+ * mapping, PVRSRV_ERROR_BAD_MAPPING when it is not wrapped, and
+ * PVRSRV_ERROR_OUT_OF_MEMORY when the page address cannot be acquired
+ * (this is also the value returned when pvLinAddr is NULL, since
+ * eError keeps its initial value in that case).
+ *
+ * NOTE(review): OSAcquirePhysPageAddr is called with bUseLock =
+ * IMG_FALSE, so the caller is presumably expected to already hold
+ * current->mm->mmap_sem - confirm against PVRSRVCacheFlushDRIBW.
+ */
+IMG_EXPORT
+    PVRSRV_ERROR IMG_CALLCONV PVRSRVIsWrappedExtMemoryKM(IMG_HANDLE hDevCookie,
+							 PVRSRV_PER_PROCESS_DATA
+							 *psPerProc,
+							 IMG_UINT32
+							 *pui32ByteSize,
+							 IMG_VOID
+							 **pvLinAddr)
+{
+	DEVICE_MEMORY_INFO *psDevMemoryInfo;
+	IMG_UINT32 ui32HostPageSize = HOST_PAGESIZE();
+	PVRSRV_DEVICE_NODE *psDeviceNode;
+	PVRSRV_ERROR eError;
+	IMG_SYS_PHYADDR sIntSysPAddr;
+	IMG_HANDLE hOSWrapMem = IMG_NULL;
+	IMG_HANDLE hDevMemHeap;
+	IMG_UINT32 ui32PageOffset = 0;
+
+	IMG_UINT32 ui32ReturnedByteSize = *pui32ByteSize;
+
+	eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+
+	psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevCookie;
+	PVR_ASSERT(psDeviceNode != IMG_NULL);
+	psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
+
+	hDevMemHeap = psDevMemoryInfo->psDeviceMemoryHeap[SGX_GENERAL_MAPPING_HEAP_ID].hDevMemHeap;
+
+	if (pvLinAddr) {
+		/* Split the address into page base + in-page offset. */
+		ui32PageOffset = ((IMG_UINT32)*pvLinAddr) & ~PAGE_MASK;
+		*pvLinAddr = (IMG_VOID *)((IMG_UINT32)*pvLinAddr & PAGE_MASK);
+		ui32ReturnedByteSize += ui32PageOffset;
+
+		/* let's start by getting the address of the first page */
+		eError = OSAcquirePhysPageAddr(*pvLinAddr,
+					       ui32HostPageSize,
+					       &sIntSysPAddr,
+					       &hOSWrapMem,
+					       IMG_FALSE);
+		if (eError != PVRSRV_OK) {
+			PVR_DPF((PVR_DBG_ERROR, "PVRSRVIsWrappedExtMemoryKM: Failed to alloc memory for block"));
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto ErrorExitPhase1;
+		}
+
+		/*
+		 * Drop the page reference straight away; only the physical
+		 * address in sIntSysPAddr is needed for the lookups below.
+		 */
+		OSReleasePhysPageAddr(hOSWrapMem, IMG_FALSE);
+		hOSWrapMem = IMG_NULL;
+
+		/* now check if this memory address is already wrapped */
+		if (BM_IsWrappedCheckSize(hDevMemHeap,
+					  ui32PageOffset,
+					  sIntSysPAddr,
+					  *pui32ByteSize)) {
+			/* already wrapped */
+			eError = PVRSRV_OK;
+		} else {
+			/* not mapped in this heap */
+			/* try the alternative heap */
+			hDevMemHeap = psDevMemoryInfo->psDeviceMemoryHeap[SGX_ALT_MAPPING_HEAP_ID].hDevMemHeap;
+
+			if (BM_IsWrappedCheckSize(hDevMemHeap,
+						  ui32PageOffset,
+						  sIntSysPAddr,
+						  *pui32ByteSize)) {
+				/* already wrapped */
+				eError = PVRSRV_OK;
+			} else {
+				eError = PVRSRV_ERROR_BAD_MAPPING;
+			}
+		}
+	}
+
+ErrorExitPhase1:
+
+	/* Written back on success AND failure (see function comment). */
+	*pui32ByteSize = ui32ReturnedByteSize;
+
+	return eError;
+}
+
IMG_EXPORT
PVRSRV_ERROR IMG_CALLCONV PVRSRVWrapExtMemoryKM(IMG_HANDLE hDevCookie,
PVRSRV_PER_PROCESS_DATA *
IMG_SYS_PHYADDR *pPageList = psExtSysPAddr;
IMG_UINT32 ui32PageCount;
+ IMG_UINT32 ui32CalculatedPageOffset = ((IMG_UINT32)pvLinAddr) & ~PAGE_MASK;
+ if (ui32CalculatedPageOffset != ui32PageOffset) {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVWrapExtMemoryKM: offset from address not match offset param"));
+ return PVRSRV_ERROR_BAD_MAPPING;
+ }
+
psDeviceNode = (PVRSRV_DEVICE_NODE *) hDevCookie;
PVR_ASSERT(psDeviceNode != IMG_NULL);
psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
/* let's start by getting the address of the first page */
eError = OSAcquirePhysPageAddr(pvPageAlignedCPUVAddr,
ui32HostPageSize,
- psIntSysPAddr, &hOSWrapMem);
+ psIntSysPAddr, &hOSWrapMem, IMG_TRUE);
if (eError != PVRSRV_OK) {
PVR_DPF((PVR_DBG_ERROR,
"PVRSRVWrapExtMemoryKM: Failed to alloc memory for block"));
/* now check if this memory address is already wrapped */
if (BM_IsWrapped(hDevMemHeap, ui32PageOffset, psIntSysPAddr[0])) {
/* already wrapped */
- OSReleasePhysPageAddr(hOSWrapMem);
+ OSReleasePhysPageAddr(hOSWrapMem, IMG_TRUE);
hOSWrapMem = IMG_NULL;
} else if (ui32PageCount > 1) {
- OSReleasePhysPageAddr(hOSWrapMem);
+ OSReleasePhysPageAddr(hOSWrapMem, IMG_TRUE);
hOSWrapMem = IMG_NULL;
/* the memory is going to wrapped for the first time,
* so we need full page list */
ui32PageCount *
ui32HostPageSize,
psIntSysPAddr,
- &hOSWrapMem);
+ &hOSWrapMem,
+ IMG_TRUE);
if (eError != PVRSRV_OK) {
PVR_DPF((PVR_DBG_ERROR,
"PVRSRVWrapExtMemoryKM: Failed to alloc memory for block"));
}
if (hOSWrapMem)
- OSReleasePhysPageAddr(hOSWrapMem);
+ OSReleasePhysPageAddr(hOSWrapMem, IMG_TRUE);
ErrorExitPhase1:
if (psIntSysPAddr) {
OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
psTimerCBData->bActive = IMG_TRUE;
+ psTimerCBData->sTimer.expires = psTimerCBData->ui32Delay + jiffies;
add_timer(&psTimerCBData->sTimer);
return PVRSRV_OK;
return psPage;
}
-PVRSRV_ERROR OSReleasePhysPageAddr(IMG_HANDLE hOSWrapMem)
+PVRSRV_ERROR OSReleasePhysPageAddr(IMG_HANDLE hOSWrapMem,
+ IMG_BOOL bUseLock)
{
sWrapMemInfo *psInfo = (sWrapMemInfo *) hOSWrapMem;
unsigned ui;
{
struct vm_area_struct *psVMArea;
-	down_read(&current->mm->mmap_sem);
+	if (bUseLock)
+		down_read(&current->mm->mmap_sem);
psVMArea = find_vma(current->mm, psInfo->ulStartAddr);
if (psVMArea == NULL) {
": OSCpuVToPageListRelease: Couldn't find memory region containing start address %lx",
psInfo->ulStartAddr);
-	up_read(&current->mm->mmap_sem);
+	if (bUseLock)
+		up_read(&current->mm->mmap_sem);
+
break;
}
psVMArea->vm_flags);
}
-	up_read(&current->mm->mmap_sem);
+	if (bUseLock)
+		up_read(&current->mm->mmap_sem);
+
break;
}
default:
return PVRSRV_OK;
}
-PVRSRV_ERROR OSAcquirePhysPageAddr(IMG_VOID * pvCPUVAddr,
+PVRSRV_ERROR OSAcquirePhysPageAddr(IMG_VOID *pvCPUVAddr,
IMG_UINT32 ui32Bytes,
- IMG_SYS_PHYADDR * psSysPAddr,
- IMG_HANDLE * phOSWrapMem)
+ IMG_SYS_PHYADDR *psSysPAddr,
+ IMG_HANDLE *phOSWrapMem,
+ IMG_BOOL bUseLock)
{
unsigned long ulStartAddrOrig = (unsigned long)pvCPUVAddr;
unsigned long ulAddrRangeOrig = (unsigned long)ui32Bytes;
psInfo = kmalloc(sizeof(*psInfo), GFP_KERNEL);
if (psInfo == NULL) {
printk(KERN_WARNING
- ": OSCpuVToPageList: Couldn't allocate information structure");
+ ": OSCpuVToPageList: Couldn't allocate information structure\n");
return PVRSRV_ERROR_OUT_OF_MEMORY;
}
memset(psInfo, 0, sizeof(*psInfo));
GFP_KERNEL);
if (psInfo->psPhysAddr == NULL) {
printk(KERN_WARNING
- ": OSCpuVToPageList: Couldn't allocate page array");
+ ": OSCpuVToPageList: Couldn't allocate page array\n");
goto error_free;
}
kmalloc(psInfo->iNumPages * sizeof(*psInfo->ppsPages), GFP_KERNEL);
if (psInfo->ppsPages == NULL) {
printk(KERN_WARNING
- ": OSCpuVToPageList: Couldn't allocate page array");
+ ": OSCpuVToPageList: Couldn't allocate page array\n");
goto error_free;
}
-	down_read(&current->mm->mmap_sem);
+	if (bUseLock)
+		down_read(&current->mm->mmap_sem);
+
iNumPagesMapped =
get_user_pages(current, current->mm, ulStartAddr, psInfo->iNumPages,
1, 0, psInfo->ppsPages, NULL);
-	up_read(&current->mm->mmap_sem);
+
+	if (bUseLock)
+		up_read(&current->mm->mmap_sem);
+
if (iNumPagesMapped >= 0) {
if (iNumPagesMapped != psInfo->iNumPages) {
printk(KERN_WARNING
- ": OSCpuVToPageList: Couldn't map all the pages needed (wanted: %d, got %d)",
+			       ": OSCpuVToPageList: Couldn't map all the pages needed (wanted: %d, got %d)\n",
psInfo->iNumPages, iNumPagesMapped);
for (ui = 0; ui < iNumPagesMapped; ui++) {
}
printk(KERN_WARNING
- ": OSCpuVToPageList: get_user_pages failed (%d), trying something else",
+ ": OSCpuVToPageList: get_user_pages failed (%d), trying something else \n",
iNumPagesMapped);
-	down_read(&current->mm->mmap_sem);
+	if (bUseLock)
+		down_read(&current->mm->mmap_sem);
psVMArea = find_vma(current->mm, ulStartAddrOrig);
if (psVMArea == NULL) {
printk(KERN_WARNING
- ": OSCpuVToPageList: Couldn't find memory region containing start address %lx",
+ ": OSCpuVToPageList: Couldn't find memory region containing start address %lx \n",
ulStartAddrOrig);
goto error_release_mmap_sem;
if (ulStartAddrOrig < psVMArea->vm_start) {
printk(KERN_WARNING
- ": OSCpuVToPageList: Start address %lx is outside of the region returned by find_vma",
+ ": OSCpuVToPageList: Start address %lx is outside of the region returned by find_vma\n",
ulStartAddrOrig);
goto error_release_mmap_sem;
}
if (ulBeyondEndAddrOrig > psVMArea->vm_end) {
printk(KERN_WARNING
- ": OSCpuVToPageList: End address %lx is outside of the region returned by find_vma",
+ ": OSCpuVToPageList: End address %lx is outside of the region returned by find_vma\n",
ulBeyondEndAddrOrig);
goto error_release_mmap_sem;
}
if ((psVMArea->vm_flags & (VM_IO | VM_RESERVED)) !=
(VM_IO | VM_RESERVED)) {
printk(KERN_WARNING
- ": OSCpuVToPageList: Memory region does not represent memory mapped I/O (VMA flags: 0x%lx)",
+ ": OSCpuVToPageList: Memory region does not represent memory mapped I/O (VMA flags: 0x%lx)\n",
psVMArea->vm_flags);
goto error_release_mmap_sem;
}
if ((psVMArea->vm_flags & (VM_READ | VM_WRITE)) != (VM_READ | VM_WRITE)) {
printk(KERN_WARNING
- ": OSCpuVToPageList: No read/write access to memory region (VMA flags: 0x%lx)",
+ ": OSCpuVToPageList: No read/write access to memory region (VMA flags: 0x%lx)\n",
psVMArea->vm_flags);
goto error_release_mmap_sem;
}
unsigned uj;
printk(KERN_WARNING
- ": OSCpuVToPageList: Couldn't lookup page structure for address 0x%lx, trying something else",
+ ": OSCpuVToPageList: Couldn't lookup page structure for address 0x%lx, trying something else\n",
ulAddr);
for (uj = 0; uj < ui; uj++) {
if ((psVMArea->vm_flags & VM_PFNMAP) == 0) {
printk(KERN_WARNING
- ": OSCpuVToPageList: Region isn't a raw PFN mapping. Giving up.");
+ ": OSCpuVToPageList: Region isn't a raw PFN mapping. Giving up.\n");
goto error_release_mmap_sem;
}
psInfo->eType = WRAP_TYPE_FIND_VMA_PFN;
printk(KERN_WARNING
- ": OSCpuVToPageList: Region can't be locked down");
+ ": OSCpuVToPageList: Region can't be locked down\n");
}
-	up_read(&current->mm->mmap_sem);
+	if (bUseLock)
+		up_read(&current->mm->mmap_sem);
exit_check:
CheckPagesContiguous(psInfo);
return PVRSRV_OK;
error_release_mmap_sem:
-	up_read(&current->mm->mmap_sem);
+	if (bUseLock)
+		up_read(&current->mm->mmap_sem);
+
error_free:
psInfo->eType = WRAP_TYPE_CLEANUP;
- OSReleasePhysPageAddr((IMG_HANDLE) psInfo);
+ OSReleasePhysPageAddr((IMG_HANDLE) psInfo, bUseLock);
return PVRSRV_ERROR_GENERIC;
}
PVRSRV_ERROR OSAcquirePhysPageAddr(IMG_VOID * pvCPUVAddr,
IMG_UINT32 ui32Bytes,
IMG_SYS_PHYADDR * psSysPAddr,
- IMG_HANDLE * phOSWrapMem);
- PVRSRV_ERROR OSReleasePhysPageAddr(IMG_HANDLE hOSWrapMem);
+ IMG_HANDLE * phOSWrapMem,
+ IMG_BOOL bUseLock);
+ PVRSRV_ERROR OSReleasePhysPageAddr(IMG_HANDLE hOSWrapMem,
+ IMG_BOOL bUseLock);
#endif
mutex_unlock(&hPowerAndFreqLock);
}
+/*
+ * IsPowerLocked - IMG_TRUE when the power/frequency mutex is currently
+ * held or a DVFS transition is in progress. Used by the timer path to
+ * back off (return PVRSRV_ERROR_RETRY) instead of blocking.
+ */
+static IMG_BOOL IsPowerLocked(void)
+{
+	return mutex_is_locked(&hPowerAndFreqLock) || gbDvfsActive;
+}
+
IMG_EXPORT
PVRSRV_ERROR PVRSRVPowerLock(IMG_UINT32 ui32CallerID,
IMG_BOOL bSystemPowerEvent)
{
- if ((ui32CallerID == TIMER_ID) &&
- (mutex_is_locked(&hPowerAndFreqLock) || gbDvfsActive))
+ if ((ui32CallerID == TIMER_ID) && IsPowerLocked())
return PVRSRV_ERROR_RETRY;
mutex_lock(&hPowerAndFreqLock);
while (gbDvfsActive) {
return IMG_FALSE;
}
- if (OSIsResourceLocked(&psSysData->sPowerStateChangeResource, KERNEL_ID)
- || OSIsResourceLocked(&psSysData->sPowerStateChangeResource,
- ISR_ID)) {
+ if (IsPowerLocked())
return IMG_FALSE;
- }
psPowerDevice = psSysData->psPowerDeviceList;
while (psPowerDevice) {
typedef struct PVRSRV_BRIDGE_IN_CACHEFLUSHDRMFROMUSER_TAG {
IMG_UINT32 ui32BridgeFlags;
+ IMG_HANDLE hDevCookie;
IMG_UINT32 ui32Type;
- IMG_UINT32 ui32Virt;
+ IMG_VOID *pvVirt;
IMG_UINT32 ui32Length;
} PVRSRV_BRIDGE_IN_CACHEFLUSHDRMFROMUSER;
PVRSRV_KERNEL_MEM_INFO
** ppsMemInfo);
+ IMG_IMPORT
+ PVRSRV_ERROR IMG_CALLCONV
+ PVRSRVIsWrappedExtMemoryKM(IMG_HANDLE hDevCookie,
+ PVRSRV_PER_PROCESS_DATA *psPerProc,
+ IMG_UINT32 *pui32ByteSize,
+ IMG_VOID **pvLinAddr);
+
IMG_IMPORT
PVRSRV_ERROR IMG_CALLCONV
PVRSRVUnwrapExtMemoryKM(PVRSRV_KERNEL_MEM_INFO * psMemInfo);
#define SGX_GENERAL_HEAP_SIZE (0x05000000-0x00401000)
#define SGX_GENERAL_MAPPING_HEAP_BASE 0x05000000
-#define SGX_GENERAL_MAPPING_HEAP_SIZE (0x06C00000-0x05001000)
+#define SGX_GENERAL_MAPPING_HEAP_SIZE (0x06800000-0x05001000)
-#define SGX_FB_MAPPING_HEAP_BASE 0x06C00000
-#define SGX_FB_MAPPING_HEAP_SIZE (0x07000000-0x06C01000)
+#define SGX_FB_MAPPING_HEAP_BASE 0x06800000
+#define SGX_FB_MAPPING_HEAP_SIZE (0x07000000-0x06801000)
#define SGX_TADATA_HEAP_BASE 0x07000000
#define SGX_TADATA_HEAP_SIZE (0x01000000-0x00001000)
}
if (psDevInfo->hTimer == IMG_NULL) {
+ /*
+ * the magic calculation below sets the hardware lock-up
+ * detection and recovery timer interval to ~150msecs
+ */
psDevInfo->hTimer = OSAddTimer(SGXOSTimer, psDeviceNode,
- 1000 * 50 /
+ 1000 * 150 /
psSGXTimingInfo->
ui32uKernelFreq);
if (psDevInfo->hTimer == IMG_NULL) {
#define SYS_SGX_CLOCK_SPEED 110666666
#define SYS_SGX_HWRECOVERY_TIMEOUT_FREQ (100)
#define SYS_SGX_PDS_TIMER_FREQ (1000)
-#define SYS_SGX_ACTIVE_POWER_LATENCY_MS (500)
+#define SYS_SGX_ACTIVE_POWER_LATENCY_MS (100)
#define SYS_OMAP3430_VDD2_OPP3_SGX_CLOCK_SPEED SYS_SGX_CLOCK_SPEED
#define SYS_OMAP3430_VDD2_OPP2_SGX_CLOCK_SPEED (SYS_SGX_CLOCK_SPEED / 2)