1 /**********************************************************************
3 * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
9 * This program is distributed in the hope it will be useful but, except
10 * as otherwise stated in writing, without any warranty; without even the
11 * implied warranty of merchantability or fitness for a particular purpose.
12 * See the GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 * The full GNU General Public License is included in this distribution in
19 * the file called "COPYING".
21 * Contact Information:
22 * Imagination Technologies Ltd. <gpl-support@imgtec.com>
23 * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
25 ******************************************************************************/
29 #include "services_headers.h"
30 #include "buffer_manager.h"
33 #include "pvr_pdump.h"
34 #include "sgxapi_km.h"
35 #include "sgx_bridge_km.h"
37 #include "sgxinfokm.h"
40 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_PVR_DEBUG)
41 #include "pvr_debugfs.h"
45 #define UINT32_MAX_VALUE 0xFFFFFFFFUL
/*
 * Struct member fragments (the enclosing "struct ... {" lines are not
 * visible in this sampled listing).
 *
 * NOTE(review): based on how these fields are used later in the file,
 * the first two presumably belong to struct MMU_PT_INFO (per-page-table
 * bookkeeping), the next group to struct MMU_CONTEXT (one page directory
 * per context), and the last group to struct MMU_HEAP (per-heap arena
 * state) -- confirm against the full source.
 */
/* MMU_PT_INFO: OS handle for the page-table page allocation. */
48 void *hPTPageOSMemHandle;
/* MMU_PT_INFO: number of valid PTEs currently set in this table. */
50 u32 ui32ValidPTECount;
/* MMU_CONTEXT: owning device node. */
54 struct PVRSRV_DEVICE_NODE *psDeviceNode;
/* MMU_CONTEXT: device-physical address of the page directory. */
56 struct IMG_DEV_PHYADDR sPDDevPAddr;
/* MMU_CONTEXT: one MMU_PT_INFO slot per page-directory entry (1024 PDEs). */
58 struct MMU_PT_INFO *apsPTInfoList[1024];
58 struct MMU_PT_INFO *apsPTInfoList[1024];
59 struct PVRSRV_SGXDEV_INFO *psDevInfo;
/* MMU_CONTEXT: singly-linked list of all contexts on the device. */
60 struct MMU_CONTEXT *psNext;
/* MMU_HEAP: context this heap's page tables live in. */
64 struct MMU_CONTEXT *psMMUContext;
/* MMU_HEAP: resource arena managing the heap's device-virtual range. */
70 struct RA_ARENA *psVMArena;
/* MMU_HEAP: descriptor of the device memory arena backing this heap. */
72 struct DEV_ARENA_DESCRIPTOR *psDevArena;
/*
 * MMU_InvalidateDirectoryCache - flag the SGX BIF page-directory cache
 * for invalidation.
 *
 * Sets SGX_BIF_INVALIDATE_PDCACHE in the device's cache-control word;
 * presumably acted upon by the hardware/microkernel at the next suitable
 * point -- TODO confirm against the cache-control consumer.
 */
78 void MMU_InvalidateDirectoryCache(struct PVRSRV_SGXDEV_INFO *psDevInfo)
80 psDevInfo->ui32CacheControl |= SGX_BIF_INVALIDATE_PDCACHE;
/*
 * MMU_InvalidatePageTableCache - flag the SGX BIF page-table cache for
 * invalidation (SGX_BIF_INVALIDATE_PTCACHE in the cache-control word).
 */
83 static void MMU_InvalidatePageTableCache(struct PVRSRV_SGXDEV_INFO *psDevInfo)
85 psDevInfo->ui32CacheControl |= SGX_BIF_INVALIDATE_PTCACHE;
/*
 * _AllocPageTables - compute the page-table bookkeeping for a heap.
 *
 * Derives from the heap's device arena: the total number of PT entries,
 * the base PT index of the arena's base device-virtual address, and the
 * number of page-table pages needed (rounded up by SGX_MMU_PT_SIZE - 1).
 * No page-table memory is allocated here; that happens lazily in
 * _DeferredAllocPagetables.
 *
 * NOTE(review): sampled listing -- braces, the error-path return after
 * the NULL check, shift amounts and the final return are not visible.
 */
88 static IMG_BOOL _AllocPageTables(struct MMU_HEAP *pMMUHeap)
90 PVR_DPF(PVR_DBG_MESSAGE, "_AllocPageTables()");
92 PVR_ASSERT(pMMUHeap != NULL);
/* Page tables are allocated one host page at a time. */
93 PVR_ASSERT(HOST_PAGESIZE() == SGX_MMU_PAGE_SIZE);
95 if (pMMUHeap == NULL) {
96 PVR_DPF(PVR_DBG_ERROR, "_AllocPageTables: invalid parameter");
/* One PTE per device page covered by the arena. */
100 pMMUHeap->ui32PTEntryCount =
101 pMMUHeap->psDevArena->ui32Size >> SGX_MMU_PAGE_SHIFT;
/* PT index of the arena base within the combined PD|PT address field. */
103 pMMUHeap->ui32PTBaseIndex =
104 (pMMUHeap->psDevArena->BaseDevVAddr.
105 uiAddr & (SGX_MMU_PD_MASK | SGX_MMU_PT_MASK)) >>
/* Round PT entry count up to whole page-table pages. */
108 pMMUHeap->ui32PTPageCount =
109 (pMMUHeap->ui32PTEntryCount + SGX_MMU_PT_SIZE - 1) >>
/*
 * _DeferredFreePageTable - tear down one page table of a heap.
 *
 * Clears the corresponding page-directory entry (in every MMU context
 * for shared heaps, or just the heap's own context otherwise), then
 * releases the page-table page: via OSFreePages when there is no local
 * device memory arena (UMA path), or unmap + RA_Free on the LMA path.
 * Finally frees the MMU_PT_INFO tracking structure and adjusts the
 * heap's outstanding PT-entry count.
 *
 * NOTE(review): sampled listing -- braces, loop headers and several
 * intermediate lines are missing; do not treat the visible statement
 * order as complete.
 */
115 static void _DeferredFreePageTable(struct MMU_HEAP *pMMUHeap, u32 ui32PTIndex)
120 struct SYS_DATA *psSysData;
121 struct MMU_PT_INFO **ppsPTInfoList;
123 if (SysAcquireData(&psSysData) != PVRSRV_OK) {
124 PVR_DPF(PVR_DBG_ERROR, "_DeferredFreePageTables: "
125 "ERROR call to SysAcquireData failed");
/* PD index of the heap base; ui32PTIndex is relative to this. */
130 pMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT +
133 ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
/* The table should be absent or have a known valid-PTE count. */
136 PVR_ASSERT(ppsPTInfoList[ui32PTIndex] == NULL ||
137 ppsPTInfoList[ui32PTIndex]->ui32ValidPTECount ==
141 PDUMPCOMMENT("Free page table (page count == %08X)",
142 pMMUHeap->ui32PTPageCount);
143 if (ppsPTInfoList[ui32PTIndex]
144 && ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr)
145 PDUMPFREEPAGETABLE(ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr);
/* Clear the PDE(s) that referenced this page table. */
147 switch (pMMUHeap->psDevArena->DevMemHeapType) {
148 case DEVICE_MEMORY_HEAP_SHARED:
149 case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
/* Shared heaps: the PDE exists in every MMU context on the device. */
151 struct MMU_CONTEXT *psMMUContext =
152 (struct MMU_CONTEXT *)
153 pMMUHeap->psMMUContext->psDevInfo->pvMMUContextList;
155 while (psMMUContext) {
157 (u32 *) psMMUContext->pvPDCpuVAddr;
158 pui32PDEntry += ui32PDIndex;
159 pui32PDEntry[ui32PTIndex] = 0;
160 PDUMPPAGETABLE((void *) &pui32PDEntry
162 sizeof(u32), IMG_FALSE,
165 psMMUContext = psMMUContext->psNext;
169 case DEVICE_MEMORY_HEAP_PERCONTEXT:
170 case DEVICE_MEMORY_HEAP_KERNEL:
/* Per-context/kernel heaps: only this context's PD is touched. */
174 (u32 *) pMMUHeap->psMMUContext->pvPDCpuVAddr;
175 pui32PDEntry += ui32PDIndex;
176 pui32PDEntry[ui32PTIndex] = 0;
177 PDUMPPAGETABLE((void *) &pui32PDEntry[ui32PTIndex],
178 sizeof(u32), IMG_FALSE,
179 PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
184 PVR_DPF(PVR_DBG_ERROR,
185 "_DeferredFreePagetable: ERROR invalid heap type");
/* Release the page-table page itself, if one was ever allocated. */
190 if (ppsPTInfoList[ui32PTIndex] != NULL) {
191 if (ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr != NULL) {
195 (u32 *) ppsPTInfoList[ui32PTIndex]->
199 (i < pMMUHeap->ui32PTEntryCount) && (i < 1024);
/* UMA path: page came straight from the OS allocator. */
203 if (pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->
204 psLocalDevMemArena == NULL) {
205 OSFreePages(PVRSRV_HAP_WRITECOMBINE |
206 PVRSRV_HAP_KERNEL_ONLY,
208 ppsPTInfoList[ui32PTIndex]->
210 ppsPTInfoList[ui32PTIndex]->
/* LMA path: unmap the CPU mapping, then return it to the arena. */
213 struct IMG_SYS_PHYADDR sSysPAddr;
214 struct IMG_CPU_PHYADDR sCpuPAddr;
217 OSMapLinToCPUPhys(ppsPTInfoList
220 sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);
222 OSUnMapPhysToLin((void __force __iomem *)
223 ppsPTInfoList[ui32PTIndex]->
226 PVRSRV_HAP_WRITECOMBINE |
227 PVRSRV_HAP_KERNEL_ONLY,
228 ppsPTInfoList[ui32PTIndex]->
231 RA_Free(pMMUHeap->psDevArena->
232 psDeviceMemoryHeapInfo->
234 sSysPAddr.uiAddr, IMG_FALSE);
237 pMMUHeap->ui32PTEntryCount -= i;
239 pMMUHeap->ui32PTEntryCount -= 1024;
/* Free the tracking structure and clear the list slot. */
242 OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
243 sizeof(struct MMU_PT_INFO),
244 ppsPTInfoList[ui32PTIndex], NULL);
245 ppsPTInfoList[ui32PTIndex] = NULL;
247 pMMUHeap->ui32PTEntryCount -= 1024;
250 PDUMPCOMMENT("Finished free page table (page count == %08X)",
251 pMMUHeap->ui32PTPageCount);
/*
 * _DeferredFreePageTables - free every page table of a heap and then
 * request a page-directory cache invalidation.
 */
254 static void _DeferredFreePageTables(struct MMU_HEAP *pMMUHeap)
258 for (i = 0; i < pMMUHeap->ui32PTPageCount; i++)
259 _DeferredFreePageTable(pMMUHeap, i);
260 MMU_InvalidateDirectoryCache(pMMUHeap->psMMUContext->psDevInfo);
/*
 * _DeferredAllocPagetables - lazily allocate the page tables covering
 * [DevVAddr, DevVAddr + ui32Size).
 *
 * Computes the PD index range touched by the request (clamping the high
 * address at UINT32_MAX_VALUE on 32-bit overflow), then for each PD slot
 * without a page table: allocates an MMU_PT_INFO, obtains a zeroed
 * page-table page (OSAllocPages on the UMA path, RA_Alloc + map on the
 * LMA path), and writes the new PDE -- into every context for shared
 * heaps, or just this heap's context otherwise.
 *
 * NOTE(review): sampled listing -- braces, error-path returns, some
 * PDE flag constants and loop bodies are not visible here.
 */
263 static IMG_BOOL _DeferredAllocPagetables(struct MMU_HEAP *pMMUHeap,
264 struct IMG_DEV_VIRTADDR DevVAddr, u32 ui32Size)
270 struct MMU_PT_INFO **ppsPTInfoList;
271 struct SYS_DATA *psSysData;
272 struct IMG_DEV_VIRTADDR sHighDevVAddr;
274 PVR_ASSERT(DevVAddr.uiAddr < (1 << SGX_FEATURE_ADDRESS_SPACE_SIZE));
276 if (SysAcquireData(&psSysData) != PVRSRV_OK)
280 DevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
/* Clamp the (exclusive) high address when the range would wrap 32 bits. */
282 if ((UINT32_MAX_VALUE - DevVAddr.uiAddr) <
283 (ui32Size + (1 << (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT)) - 1)) {
285 sHighDevVAddr.uiAddr = UINT32_MAX_VALUE;
287 sHighDevVAddr.uiAddr = DevVAddr.uiAddr + ui32Size +
288 (1 << (SGX_MMU_PAGE_SHIFT +
289 SGX_MMU_PT_SHIFT)) - 1;
293 sHighDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
/* Number of page tables spanned by the request. */
295 ui32PTPageCount -= ui32PDIndex;
297 pui32PDEntry = (u32 *) pMMUHeap->psMMUContext->pvPDCpuVAddr;
298 pui32PDEntry += ui32PDIndex;
300 ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
302 PDUMPCOMMENT("Alloc page table (page count == %08X)", ui32PTPageCount);
303 PDUMPCOMMENT("Page directory mods (page count == %08X)",
306 for (i = 0; i < ui32PTPageCount; i++) {
/* Create tracking info for this PD slot if it has none yet. */
307 if (ppsPTInfoList[i] == NULL) {
308 if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
309 sizeof(struct MMU_PT_INFO),
310 (void **) &ppsPTInfoList[i], NULL)
312 PVR_DPF(PVR_DBG_ERROR,
313 "_DeferredAllocPagetables: "
314 "ERROR call to OSAllocMem failed");
317 OSMemSet(ppsPTInfoList[i], 0,
318 sizeof(struct MMU_PT_INFO));
/* Allocate the backing page-table page if not already present. */
321 if (ppsPTInfoList[i]->hPTPageOSMemHandle == NULL &&
322 ppsPTInfoList[i]->PTPageCpuVAddr == NULL) {
323 struct IMG_CPU_PHYADDR sCpuPAddr;
324 struct IMG_DEV_PHYADDR sDevPAddr;
326 PVR_ASSERT(pui32PDEntry[i] == 0);
/* UMA path: allocate a page directly from the OS. */
328 if (pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->
329 psLocalDevMemArena == NULL) {
330 if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE |
331 PVRSRV_HAP_KERNEL_ONLY,
334 (void **)&ppsPTInfoList[i]->
337 hPTPageOSMemHandle) !=
339 PVR_DPF(PVR_DBG_ERROR,
340 "_DeferredAllocPagetables: "
341 "ERROR call to OSAllocPages failed");
345 if (ppsPTInfoList[i]->PTPageCpuVAddr) {
347 OSMapLinToCPUPhys(ppsPTInfoList[i]->
351 OSMemHandleToCpuPAddr(
357 SysCpuPAddrToDevPAddr
358 (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
/* LMA path: carve a page from the local arena and map it. */
360 struct IMG_SYS_PHYADDR sSysPAddr;
362 if (RA_Alloc(pMMUHeap->psDevArena->
363 psDeviceMemoryHeapInfo->psLocalDevMemArena,
364 SGX_MMU_PAGE_SIZE, NULL, 0,
366 &(sSysPAddr.uiAddr)) != IMG_TRUE) {
367 PVR_DPF(PVR_DBG_ERROR,
368 "_DeferredAllocPagetables: "
369 "ERROR call to RA_Alloc failed");
373 sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
374 ppsPTInfoList[i]->PTPageCpuVAddr =
376 OSMapPhysToLin(sCpuPAddr, SGX_MMU_PAGE_SIZE,
377 PVRSRV_HAP_WRITECOMBINE |
378 PVRSRV_HAP_KERNEL_ONLY,
381 if (!ppsPTInfoList[i]->PTPageCpuVAddr) {
382 PVR_DPF(PVR_DBG_ERROR,
383 "_DeferredAllocPagetables: "
384 "ERROR failed to map page tables");
388 sDevPAddr = SysCpuPAddrToDevPAddr
389 (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
/* New page table starts with all entries invalid. */
394 OSMemSet(ppsPTInfoList[i]->PTPageCpuVAddr, 0,
397 PDUMPMALLOCPAGETABLE(ppsPTInfoList[i]->PTPageCpuVAddr,
400 PDUMPPAGETABLE(ppsPTInfoList[i]->PTPageCpuVAddr,
401 SGX_MMU_PAGE_SIZE, IMG_TRUE,
402 PDUMP_PT_UNIQUETAG, PDUMP_PT_UNIQUETAG);
/* Publish the new PDE; shared heaps update every context's PD. */
404 switch (pMMUHeap->psDevArena->DevMemHeapType) {
405 case DEVICE_MEMORY_HEAP_SHARED:
406 case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
408 struct MMU_CONTEXT *psMMUContext =
409 (struct MMU_CONTEXT *)pMMUHeap->
410 psMMUContext->psDevInfo->
413 while (psMMUContext) {
415 (u32 *)psMMUContext->
417 pui32PDEntry += ui32PDIndex;
424 ((void *)&pui32PDEntry[i],
425 sizeof(u32), IMG_FALSE,
430 psMMUContext->psNext;
434 case DEVICE_MEMORY_HEAP_PERCONTEXT:
435 case DEVICE_MEMORY_HEAP_KERNEL:
437 pui32PDEntry[i] = sDevPAddr.uiAddr |
440 PDUMPPAGETABLE((void *)&pui32PDEntry[i],
441 sizeof(u32), IMG_FALSE,
449 PVR_DPF(PVR_DBG_ERROR,
450 "_DeferredAllocPagetables: "
451 "ERROR invalid heap type");
/* PD contents changed: invalidate the directory cache. */
457 MMU_InvalidateDirectoryCache(pMMUHeap->psMMUContext->
461 PVR_ASSERT(pui32PDEntry[i] != 0);
/*
 * MMU_Initialise - create a new MMU context with an empty page directory.
 *
 * Allocates and zeroes a MMU_CONTEXT, then obtains one page-directory
 * page: OSAllocPages on the UMA path (no local device memory arena), or
 * RA_Alloc + OSMapPhysToLin on the LMA path. The PD is recorded in the
 * context, the context is linked onto the device's context list, and
 * the context pointer plus PD device-physical address are returned via
 * ppsMMUContext / psPDDevPAddr.
 *
 * Returns PVRSRV_OK on success, PVRSRV_ERROR_GENERIC on any failure
 * (visible error paths unwind the allocations made so far).
 *
 * NOTE(review): sampled listing -- braces, some error labels and the
 * PD-entry initialisation body of the for loop are not visible.
 */
468 enum PVRSRV_ERROR MMU_Initialise(struct PVRSRV_DEVICE_NODE *psDeviceNode,
469 struct MMU_CONTEXT **ppsMMUContext,
470 struct IMG_DEV_PHYADDR *psPDDevPAddr)
475 struct IMG_DEV_PHYADDR sPDDevPAddr;
476 struct IMG_CPU_PHYADDR sCpuPAddr;
477 struct IMG_SYS_PHYADDR sSysPAddr;
478 struct MMU_CONTEXT *psMMUContext;
479 void *hPDOSMemHandle;
480 struct SYS_DATA *psSysData;
481 struct PVRSRV_SGXDEV_INFO *psDevInfo;
483 PVR_DPF(PVR_DBG_MESSAGE, "MMU_Initialise");
485 if (SysAcquireData(&psSysData) != PVRSRV_OK) {
486 PVR_DPF(PVR_DBG_ERROR,
487 "MMU_Initialise: ERROR call to SysAcquireData failed");
488 return PVRSRV_ERROR_GENERIC;
491 if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
492 sizeof(struct MMU_CONTEXT), (void **) &psMMUContext, NULL)
494 PVR_DPF(PVR_DBG_ERROR,
495 "MMU_Initialise: ERROR call to OSAllocMem failed");
496 return PVRSRV_ERROR_GENERIC;
498 OSMemSet(psMMUContext, 0, sizeof(struct MMU_CONTEXT));
500 psDevInfo = (struct PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
501 psMMUContext->psDevInfo = psDevInfo;
503 psMMUContext->psDeviceNode = psDeviceNode;
/* UMA path: page directory comes straight from the OS allocator. */
505 if (psDeviceNode->psLocalDevMemArena == NULL) {
507 (PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
508 SGX_MMU_PAGE_SIZE, SGX_MMU_PAGE_SIZE, &pvPDCpuVAddr,
509 &hPDOSMemHandle) != PVRSRV_OK) {
510 PVR_DPF(PVR_DBG_ERROR, "MMU_Initialise: "
511 "ERROR call to OSAllocPages failed");
516 sCpuPAddr = OSMapLinToCPUPhys(pvPDCpuVAddr);
518 sCpuPAddr = OSMemHandleToCpuPAddr(hPDOSMemHandle, 0);
520 SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
/* LMA path: carve a PD page from the local arena and map it. */
522 if (RA_Alloc(psDeviceNode->psLocalDevMemArena,
523 SGX_MMU_PAGE_SIZE, NULL, 0, SGX_MMU_PAGE_SIZE,
524 &(sSysPAddr.uiAddr)) != IMG_TRUE) {
525 PVR_DPF(PVR_DBG_ERROR, "MMU_Initialise: "
526 "ERROR call to RA_Alloc failed");
531 sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
533 SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysPAddr);
534 pvPDCpuVAddr = (void __force *)
535 OSMapPhysToLin(sCpuPAddr, SGX_MMU_PAGE_SIZE,
536 PVRSRV_HAP_WRITECOMBINE |
537 PVRSRV_HAP_KERNEL_ONLY, &hPDOSMemHandle);
539 PVR_DPF(PVR_DBG_ERROR, "MMU_Initialise: "
540 "ERROR failed to map page tables");
546 PDUMPCOMMENT("Alloc page directory");
548 PDUMPMALLOCPAGETABLE(pvPDCpuVAddr, PDUMP_PD_UNIQUETAG);
551 pui32Tmp = (u32 *) pvPDCpuVAddr;
553 PVR_DPF(PVR_DBG_ERROR,
554 "MMU_Initialise: pvPDCpuVAddr invalid");
/* Initialise all PD entries (loop body sampled out). */
558 for (i = 0; i < SGX_MMU_PD_SIZE; i++)
561 PDUMPCOMMENT("Page directory contents");
562 PDUMPPAGETABLE(pvPDCpuVAddr, SGX_MMU_PAGE_SIZE, IMG_TRUE,
563 PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
565 psMMUContext->pvPDCpuVAddr = pvPDCpuVAddr;
566 psMMUContext->sPDDevPAddr = sPDDevPAddr;
567 psMMUContext->hPDOSMemHandle = hPDOSMemHandle;
569 *ppsMMUContext = psMMUContext;
571 *psPDDevPAddr = sPDDevPAddr;
/* Link the new context onto the device-wide context list. */
573 psMMUContext->psNext = (struct MMU_CONTEXT *)
574 psDevInfo->pvMMUContextList;
575 psDevInfo->pvMMUContextList = (void *) psMMUContext;
/* Error unwind: release PD mapping/pages/arena block and the context. */
580 if (psDeviceNode->psLocalDevMemArena)
581 OSUnMapPhysToLin((void __iomem __force *)pvPDCpuVAddr,
582 SGX_MMU_PAGE_SIZE, PVRSRV_HAP_WRITECOMBINE |
583 PVRSRV_HAP_KERNEL_ONLY,
586 if (!psDeviceNode->psLocalDevMemArena)
587 OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
588 SGX_MMU_PAGE_SIZE, pvPDCpuVAddr, hPDOSMemHandle);
590 RA_Free(psDeviceNode->psLocalDevMemArena,
591 sSysPAddr.uiAddr, IMG_FALSE);
593 OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct MMU_CONTEXT),
596 return PVRSRV_ERROR_GENERIC;
/*
 * MMU_Finalise - destroy an MMU context created by MMU_Initialise.
 *
 * Releases the page-directory page (OSFreePages on the UMA path;
 * unmap + RA_Free on the LMA path), unlinks the context from the
 * device's context list, and frees the MMU_CONTEXT structure.
 *
 * NOTE(review): sampled listing -- braces and the PD-clearing loop body
 * are not visible.
 */
599 void MMU_Finalise(struct MMU_CONTEXT *psMMUContext)
602 struct SYS_DATA *psSysData;
603 struct MMU_CONTEXT **ppsMMUContext;
605 if (SysAcquireData(&psSysData) != PVRSRV_OK) {
606 PVR_DPF(PVR_DBG_ERROR,
607 "MMU_Finalise: ERROR call to SysAcquireData failed");
611 PDUMPCOMMENT("Free page directory");
612 PDUMPFREEPAGETABLE(psMMUContext->pvPDCpuVAddr);
614 pui32Tmp = (u32 *) psMMUContext->pvPDCpuVAddr;
/* Clear PD entries before freeing (loop body sampled out). */
616 for (i = 0; i < SGX_MMU_PD_SIZE; i++)
/* UMA path: the PD was an OS page allocation. */
619 if (psMMUContext->psDeviceNode->psLocalDevMemArena == NULL) {
620 OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
622 psMMUContext->pvPDCpuVAddr,
623 psMMUContext->hPDOSMemHandle);
/* LMA path: unmap the PD and return the page to the arena. */
626 struct IMG_SYS_PHYADDR sSysPAddr;
627 struct IMG_CPU_PHYADDR sCpuPAddr;
629 sCpuPAddr = OSMapLinToCPUPhys(psMMUContext->pvPDCpuVAddr);
630 sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);
632 OSUnMapPhysToLin((void __iomem __force *)
633 psMMUContext->pvPDCpuVAddr,
635 PVRSRV_HAP_WRITECOMBINE |
636 PVRSRV_HAP_KERNEL_ONLY,
637 psMMUContext->hPDOSMemHandle);
639 RA_Free(psMMUContext->psDeviceNode->psLocalDevMemArena,
640 sSysPAddr.uiAddr, IMG_FALSE);
644 PVR_DPF(PVR_DBG_MESSAGE, "MMU_Finalise");
/* Unlink this context from the device's singly-linked context list. */
647 (struct MMU_CONTEXT **) &psMMUContext->psDevInfo->pvMMUContextList;
648 while (*ppsMMUContext) {
649 if (*ppsMMUContext == psMMUContext) {
651 *ppsMMUContext = psMMUContext->psNext;
655 ppsMMUContext = &((*ppsMMUContext)->psNext);
658 OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct MMU_CONTEXT),
/*
 * MMU_InsertHeap - copy a shared heap's page-directory entries from the
 * heap's (kernel) context into another context's page directory.
 *
 * Offsets both PD pointers to the heap's base PD index, copies
 * ui32PTPageCount entries, and invalidates the directory cache if any
 * non-zero entry was copied. Destination slots are asserted empty.
 *
 * NOTE(review): sampled listing -- braces and shift constants missing.
 */
662 void MMU_InsertHeap(struct MMU_CONTEXT *psMMUContext,
663 struct MMU_HEAP *psMMUHeap)
665 u32 *pui32PDCpuVAddr = (u32 *)psMMUContext->pvPDCpuVAddr;
666 u32 *pui32KernelPDCpuVAddr = (u32 *)
667 psMMUHeap->psMMUContext->pvPDCpuVAddr;
669 IMG_BOOL bInvalidateDirectoryCache = IMG_FALSE;
/* Advance both PDs to the heap's first PD entry. */
672 psMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT +
674 pui32KernelPDCpuVAddr +=
675 psMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT +
678 PDUMPCOMMENT("Page directory shared heap range copy");
680 for (ui32PDEntry = 0; ui32PDEntry < psMMUHeap->ui32PTPageCount;
/* Destination must not already map this range. */
683 PVR_ASSERT(pui32PDCpuVAddr[ui32PDEntry] == 0);
685 pui32PDCpuVAddr[ui32PDEntry] =
686 pui32KernelPDCpuVAddr[ui32PDEntry];
687 if (pui32PDCpuVAddr[ui32PDEntry]) {
688 PDUMPPAGETABLE((void *) &pui32PDCpuVAddr[ui32PDEntry],
689 sizeof(u32), IMG_FALSE,
690 PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
692 bInvalidateDirectoryCache = IMG_TRUE;
696 if (bInvalidateDirectoryCache)
697 MMU_InvalidateDirectoryCache(psMMUContext->psDevInfo);
/*
 * MMU_PDumpPageTables - emit PDUMP records for the page-table entries
 * covering [DevVAddr, DevVAddr + uSize).
 *
 * Walks the affected page tables, dumping up to a full table
 * (1024 - ui32PTIndex entries) per iteration. bForUnmap only changes
 * the PDUMP comment text.
 *
 * NOTE(review): sampled listing -- braces and the PT-index reset at the
 * end of each loop iteration are not visible.
 */
701 static void MMU_PDumpPageTables(struct MMU_HEAP *pMMUHeap,
702 struct IMG_DEV_VIRTADDR DevVAddr,
703 size_t uSize, IMG_BOOL bForUnmap, void *hUniqueTag)
705 u32 ui32NumPTEntries;
709 struct MMU_PT_INFO **ppsPTInfoList;
/* Round byte size up to whole pages -> number of PTEs. */
714 (uSize + SGX_MMU_PAGE_SIZE - 1) >> SGX_MMU_PAGE_SHIFT;
717 DevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
719 ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
721 ui32PTIndex = (DevVAddr.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
723 PDUMPCOMMENT("Page table mods (num entries == %08X) %s",
724 ui32NumPTEntries, bForUnmap ? "(for unmap)" : "");
726 while (ui32NumPTEntries > 0) {
727 struct MMU_PT_INFO *psPTInfo = *ppsPTInfoList++;
/* Dump either the remaining entries or the rest of this table. */
729 if (ui32NumPTEntries <= 1024 - ui32PTIndex)
730 ui32PTDumpCount = ui32NumPTEntries;
732 ui32PTDumpCount = 1024 - ui32PTIndex;
735 pui32PTEntry = (u32 *)psPTInfo->PTPageCpuVAddr;
736 PDUMPPAGETABLE((void *)&pui32PTEntry[ui32PTIndex],
737 ui32PTDumpCount * sizeof(u32), IMG_FALSE,
738 PDUMP_PT_UNIQUETAG, hUniqueTag);
741 ui32NumPTEntries -= ui32PTDumpCount;
746 PDUMPCOMMENT("Finished page table mods %s",
747 bForUnmap ? "(for unmap)" : "");
/*
 * MMU_UnmapPagesAndFreePTs - invalidate ui32PageCount PTEs starting at
 * sDevVAddr and free any page table whose valid-PTE count drops to zero.
 *
 * Missing page tables and already-invalid PTEs are reported at message
 * level and skipped. Afterwards the PD and PT caches are invalidated
 * if needed, and the modified range is PDUMPed as an unmap.
 *
 * NOTE(review): sampled listing -- braces and the condition guarding
 * the _DeferredFreePageTable call are partially missing.
 */
751 static void MMU_UnmapPagesAndFreePTs(struct MMU_HEAP *psMMUHeap,
752 struct IMG_DEV_VIRTADDR sDevVAddr,
753 u32 ui32PageCount, void *hUniqueTag)
755 u32 uPageSize = HOST_PAGESIZE();
756 struct IMG_DEV_VIRTADDR sTmpDevVAddr;
761 IMG_BOOL bInvalidateDirectoryCache = IMG_FALSE;
764 PVR_UNREFERENCED_PARAMETER(hUniqueTag);
767 sTmpDevVAddr = sDevVAddr;
769 for (i = 0; i < ui32PageCount; i++) {
770 struct MMU_PT_INFO **ppsPTInfoList;
773 sTmpDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT +
777 &psMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
780 ui32PTIndex = (sTmpDevVAddr.uiAddr & SGX_MMU_PT_MASK)
781 >> SGX_MMU_PAGE_SHIFT;
/* No page table here: log and move to the next page. */
783 if (!ppsPTInfoList[0]) {
784 PVR_DPF(PVR_DBG_MESSAGE,
785 "MMU_UnmapPagesAndFreePTs: "
786 "Invalid PT for alloc at VAddr:0x%08lX "
787 "(VaddrIni:0x%08lX AllocPage:%u) "
789 sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr,
790 i, ui32PDIndex, ui32PTIndex);
792 sTmpDevVAddr.uiAddr += uPageSize;
797 pui32Tmp = (u32 *)ppsPTInfoList[0]->PTPageCpuVAddr;
/* Only decrement the valid count for PTEs that were actually valid. */
802 if (pui32Tmp[ui32PTIndex] & SGX_MMU_PTE_VALID) {
803 ppsPTInfoList[0]->ui32ValidPTECount--;
805 PVR_DPF(PVR_DBG_MESSAGE,
806 "MMU_UnmapPagesAndFreePTs: "
807 "Page is already invalid for alloc at "
809 "(VAddrIni:0x%08lX AllocPage:%u) "
811 sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr,
812 i, ui32PDIndex, ui32PTIndex);
815 PVR_ASSERT((s32)ppsPTInfoList[0]->ui32ValidPTECount >=
817 pui32Tmp[ui32PTIndex] = 0;
/* Free the page table once it holds no valid entries. */
821 && ppsPTInfoList[0]->ui32ValidPTECount == 0) {
822 _DeferredFreePageTable(psMMUHeap,
823 ui32PDIndex - (psMMUHeap->
826 bInvalidateDirectoryCache = IMG_TRUE;
829 sTmpDevVAddr.uiAddr += uPageSize;
832 if (bInvalidateDirectoryCache) {
833 MMU_InvalidateDirectoryCache(psMMUHeap->psMMUContext->
836 MMU_InvalidatePageTableCache(psMMUHeap->psMMUContext->
841 MMU_PDumpPageTables(psMMUHeap, sDevVAddr, uPageSize * ui32PageCount,
842 IMG_TRUE, hUniqueTag);
/*
 * MMU_FreePageTables - RA import-free callback (registered in
 * MMU_Create via RA_Create): unmaps and frees the page tables covering
 * the device-virtual range [ui32Start, ui32End).
 */
846 static void MMU_FreePageTables(void *pvMMUHeap, u32 ui32Start, u32 ui32End,
849 struct MMU_HEAP *pMMUHeap = (struct MMU_HEAP *)pvMMUHeap;
850 struct IMG_DEV_VIRTADDR Start;
852 Start.uiAddr = ui32Start;
854 MMU_UnmapPagesAndFreePTs(pMMUHeap, Start,
855 (ui32End - ui32Start) / SGX_MMU_PAGE_SIZE,
/*
 * MMU_Create - create an MMU heap over a device memory arena.
 *
 * Allocates the MMU_HEAP, computes its page-table bookkeeping via
 * _AllocPageTables, and creates the device-virtual RA arena with
 * MMU_FreePageTables as the free callback. On failure the heap is
 * unwound and (by the visible error paths) NULL is presumably returned
 * -- the return statements are sampled out.
 *
 * Returns the new heap; *ppsVMArena receives the heap's VM arena.
 */
859 struct MMU_HEAP *MMU_Create(struct MMU_CONTEXT *psMMUContext,
860 struct DEV_ARENA_DESCRIPTOR *psDevArena,
861 struct RA_ARENA **ppsVMArena)
863 struct MMU_HEAP *pMMUHeap;
866 PVR_ASSERT(psDevArena != NULL);
868 if (psDevArena == NULL) {
869 PVR_DPF(PVR_DBG_ERROR, "MMU_Create: invalid parameter");
873 if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
874 sizeof(struct MMU_HEAP), (void **)&pMMUHeap, NULL)
876 PVR_DPF(PVR_DBG_ERROR,
877 "MMU_Create: ERROR call to OSAllocMem failed");
881 pMMUHeap->psMMUContext = psMMUContext;
882 pMMUHeap->psDevArena = psDevArena;
884 bRes = _AllocPageTables(pMMUHeap);
886 PVR_DPF(PVR_DBG_ERROR,
887 "MMU_Create: ERROR call to _AllocPageTables failed");
888 OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct MMU_HEAP),
/* The RA manages the heap's device-virtual space in page quanta. */
893 pMMUHeap->psVMArena = RA_Create(psDevArena->pszName,
894 psDevArena->BaseDevVAddr.uiAddr,
895 psDevArena->ui32Size, NULL,
896 SGX_MMU_PAGE_SIZE, NULL, NULL,
897 MMU_FreePageTables, pMMUHeap);
899 if (pMMUHeap->psVMArena == NULL) {
900 PVR_DPF(PVR_DBG_ERROR,
901 "MMU_Create: ERROR call to RA_Create failed");
902 _DeferredFreePageTables(pMMUHeap);
903 OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct MMU_HEAP),
908 *ppsVMArena = pMMUHeap->psVMArena;
/*
 * MMU_Delete - destroy a heap created by MMU_Create: delete its VM
 * arena, free all its page tables, then free the MMU_HEAP itself.
 * NULL input is tolerated.
 */
913 void MMU_Delete(struct MMU_HEAP *pMMUHeap)
915 if (pMMUHeap != NULL) {
916 PVR_DPF(PVR_DBG_MESSAGE, "MMU_Delete");
918 if (pMMUHeap->psVMArena)
919 RA_Delete(pMMUHeap->psVMArena);
920 _DeferredFreePageTables(pMMUHeap);
922 OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct MMU_HEAP),
/*
 * MMU_Alloc - reserve device-virtual space in a heap and ensure page
 * tables exist for it.
 *
 * Unless the caller supplied its own device-virtual address
 * (PVRSRV_MEM_USER_SUPPLIED_DEVVADDR), the address is allocated from
 * the heap's VM arena. Page tables covering the range are then created
 * by _DeferredAllocPagetables; on failure any RA allocation made here
 * is released again.
 *
 * NOTE(review): sampled listing -- braces and final return missing.
 */
927 IMG_BOOL MMU_Alloc(struct MMU_HEAP *pMMUHeap, size_t uSize, u32 uFlags,
928 u32 uDevVAddrAlignment, struct IMG_DEV_VIRTADDR *psDevVAddr)
932 PVR_DPF(PVR_DBG_MESSAGE,
933 "MMU_Alloc: uSize=0x%x, flags=0x%x, align=0x%x",
934 uSize, uFlags, uDevVAddrAlignment);
936 if ((uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) == 0) {
937 bStatus = RA_Alloc(pMMUHeap->psVMArena, uSize, NULL, 0,
938 uDevVAddrAlignment, &(psDevVAddr->uiAddr));
940 PVR_DPF(PVR_DBG_ERROR,
941 "MMU_Alloc: RA_Alloc of VMArena failed");
946 bStatus = _DeferredAllocPagetables(pMMUHeap, *psDevVAddr, uSize);
950 PVR_DPF(PVR_DBG_ERROR,
951 "MMU_Alloc: _DeferredAllocPagetables failed");
/* Roll back the VM-arena reservation we made above. */
952 if ((uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) == 0)
953 RA_Free(pMMUHeap->psVMArena, psDevVAddr->uiAddr,
/*
 * MMU_Free - return a device-virtual range to the heap's VM arena.
 *
 * Validates that [DevVAddr, DevVAddr + ui32Size] lies inside the heap's
 * device arena before calling RA_Free; out-of-range addresses are
 * reported as errors.
 */
960 void MMU_Free(struct MMU_HEAP *pMMUHeap, struct IMG_DEV_VIRTADDR DevVAddr,
963 PVR_ASSERT(pMMUHeap != NULL);
965 if (pMMUHeap == NULL) {
966 PVR_DPF(PVR_DBG_ERROR, "MMU_Free: invalid parameter");
970 PVR_DPF(PVR_DBG_MESSAGE,
971 "MMU_Free: mmu=%08X, dev_vaddr=%08X", pMMUHeap,
974 if ((DevVAddr.uiAddr >= pMMUHeap->psDevArena->BaseDevVAddr.uiAddr) &&
975 (DevVAddr.uiAddr + ui32Size <=
976 pMMUHeap->psDevArena->BaseDevVAddr.uiAddr +
977 pMMUHeap->psDevArena->ui32Size)) {
978 RA_Free(pMMUHeap->psVMArena, DevVAddr.uiAddr, IMG_TRUE);
984 PVR_DPF(PVR_DBG_ERROR,
985 "MMU_Free: Couldn't find DevVAddr %08X in a DevArena",
/* MMU_Enable - no-op on this platform; parameter intentionally unused. */
989 void MMU_Enable(struct MMU_HEAP *pMMUHeap)
991 PVR_UNREFERENCED_PARAMETER(pMMUHeap);
/* MMU_Disable - no-op on this platform; parameter intentionally unused. */
995 void MMU_Disable(struct MMU_HEAP *pMMUHeap)
997 PVR_UNREFERENCED_PARAMETER(pMMUHeap);
/*
 * MMU_MapPage - write one page-table entry mapping DevVAddr to DevPAddr.
 *
 * Translates PVRSRV_MEM_* memory flags into SGX PTE flags (read-only /
 * write-only / cache-consistent / EDM-protect), locates the page table
 * via the PD index, warns if the slot is already valid, bumps the
 * table's valid-PTE count and writes the PTE.
 *
 * NOTE(review): sampled listing -- the read+write flag case body and
 * braces are not visible.
 */
1001 static void MMU_MapPage(struct MMU_HEAP *pMMUHeap,
1002 struct IMG_DEV_VIRTADDR DevVAddr,
1003 struct IMG_DEV_PHYADDR DevPAddr, u32 ui32MemFlags)
1007 u32 ui32MMUFlags = 0;
1008 struct MMU_PT_INFO **ppsPTInfoList;
/* Read+write case body sampled out; single-permission cases follow. */
1010 if (((PVRSRV_MEM_READ | PVRSRV_MEM_WRITE) & ui32MemFlags) ==
1011 (PVRSRV_MEM_READ | PVRSRV_MEM_WRITE))
1013 else if (PVRSRV_MEM_READ & ui32MemFlags)
1014 ui32MMUFlags |= SGX_MMU_PTE_READONLY;
1015 else if (PVRSRV_MEM_WRITE & ui32MemFlags)
1016 ui32MMUFlags |= SGX_MMU_PTE_WRITEONLY;
1018 if (PVRSRV_MEM_CACHE_CONSISTENT & ui32MemFlags)
1019 ui32MMUFlags |= SGX_MMU_PTE_CACHECONSISTENT;
1021 if (PVRSRV_MEM_EDM_PROTECT & ui32MemFlags)
1022 ui32MMUFlags |= SGX_MMU_PTE_EDMPROTECT;
/* ui32Index is reused: first as PD index, then as PT index. */
1024 ui32Index = DevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
1026 ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32Index];
1028 ui32Index = (DevVAddr.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
1030 pui32Tmp = (u32 *) ppsPTInfoList[0]->PTPageCpuVAddr;
/* Double-mapping indicates a bookkeeping bug; report and assert. */
1033 if (pui32Tmp[ui32Index] & SGX_MMU_PTE_VALID)
1034 PVR_DPF(PVR_DBG_ERROR,
1036 "Page is already valid for alloc at "
1037 "VAddr:0x%08lX PDIdx:%u PTIdx:%u",
1039 DevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT +
1040 SGX_MMU_PT_SHIFT), ui32Index);
1042 PVR_ASSERT((pui32Tmp[ui32Index] & SGX_MMU_PTE_VALID) == 0);
1044 ppsPTInfoList[0]->ui32ValidPTECount++;
1046 pui32Tmp[ui32Index] = (DevPAddr.uiAddr & SGX_MMU_PTE_ADDR_MASK)
1047 | SGX_MMU_PTE_VALID | ui32MMUFlags;
/*
 * MMU_MapScatter - map a scatter list of system-physical pages to a
 * contiguous device-virtual range starting at DevVAddr.
 *
 * One MMU_MapPage call per SGX_MMU_PAGE_SIZE chunk of uSize; the
 * resulting PTEs are PDUMPed at the end.
 */
1050 void MMU_MapScatter(struct MMU_HEAP *pMMUHeap, struct IMG_DEV_VIRTADDR DevVAddr,
1051 struct IMG_SYS_PHYADDR *psSysAddr, size_t uSize,
1052 u32 ui32MemFlags, void *hUniqueTag)
1055 struct IMG_DEV_VIRTADDR MapBaseDevVAddr;
1058 struct IMG_DEV_PHYADDR DevPAddr;
1060 PVR_ASSERT(pMMUHeap != NULL);
/* Remember the range base for the final PDUMP. */
1063 MapBaseDevVAddr = DevVAddr;
1065 PVR_UNREFERENCED_PARAMETER(hUniqueTag);
1068 for (i = 0, uCount = 0; uCount < uSize;
1069 i++, uCount += SGX_MMU_PAGE_SIZE) {
1070 struct IMG_SYS_PHYADDR sSysAddr;
1072 sSysAddr = psSysAddr[i];
1075 SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysAddr);
1077 MMU_MapPage(pMMUHeap, DevVAddr, DevPAddr, ui32MemFlags);
1078 DevVAddr.uiAddr += SGX_MMU_PAGE_SIZE;
1080 PVR_DPF(PVR_DBG_MESSAGE, "MMU_MapScatter: "
1081 "devVAddr=%08X, SysAddr=%08X, size=0x%x/0x%x",
1082 DevVAddr.uiAddr, sSysAddr.uiAddr, uCount, uSize);
1086 MMU_PDumpPageTables(pMMUHeap, MapBaseDevVAddr, uSize, IMG_FALSE,
/*
 * MMU_MapPages - map a physically contiguous region (SysPAddr, uSize)
 * to the device-virtual range starting at DevVAddr.
 *
 * Both addresses normally advance by a page per iteration; for
 * PVRSRV_MEM_DUMMY allocations the physical advance is presumably
 * zeroed so every virtual page aliases one dummy page -- the statement
 * under that check is sampled out, TODO confirm.
 */
1091 void MMU_MapPages(struct MMU_HEAP *pMMUHeap, struct IMG_DEV_VIRTADDR DevVAddr,
1092 struct IMG_SYS_PHYADDR SysPAddr, size_t uSize,
1093 u32 ui32MemFlags, void *hUniqueTag)
1095 struct IMG_DEV_PHYADDR DevPAddr;
1097 struct IMG_DEV_VIRTADDR MapBaseDevVAddr;
1100 u32 ui32VAdvance = SGX_MMU_PAGE_SIZE;
1101 u32 ui32PAdvance = SGX_MMU_PAGE_SIZE;
1103 PVR_ASSERT(pMMUHeap != NULL);
1105 PVR_DPF(PVR_DBG_MESSAGE, "MMU_MapPages: "
1106 "mmu=%08X, devVAddr=%08X, SysPAddr=%08X, size=0x%x",
1107 pMMUHeap, DevVAddr.uiAddr, SysPAddr.uiAddr, uSize);
1110 MapBaseDevVAddr = DevVAddr;
1112 PVR_UNREFERENCED_PARAMETER(hUniqueTag);
1115 DevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, SysPAddr);
/* Dummy-memory handling (body sampled out). */
1117 if (ui32MemFlags & PVRSRV_MEM_DUMMY)
1120 for (uCount = 0; uCount < uSize; uCount += ui32VAdvance) {
1121 MMU_MapPage(pMMUHeap, DevVAddr, DevPAddr, ui32MemFlags);
1122 DevVAddr.uiAddr += ui32VAdvance;
1123 DevPAddr.uiAddr += ui32PAdvance;
1127 MMU_PDumpPageTables(pMMUHeap, MapBaseDevVAddr, uSize, IMG_FALSE,
/*
 * MMU_MapShadow - map a CPU-side allocation (CpuVAddr / hOSMemHandle)
 * into the device address space at MapBaseDevVAddr.
 *
 * Walks the allocation page by page, resolving each page's CPU-physical
 * address (from the linear address when available, otherwise from the
 * OS memory handle + offset), converting to a device-physical address
 * and mapping it. *pDevVAddr is set to the base device address.
 * CpuVAddr and uByteSize must be page-aligned (asserted).
 *
 * NOTE(review): sampled listing -- PVRSRV_MEM_DUMMY handling body and
 * braces are not visible.
 */
1132 void MMU_MapShadow(struct MMU_HEAP *pMMUHeap,
1133 struct IMG_DEV_VIRTADDR MapBaseDevVAddr,
1134 size_t uByteSize, void *CpuVAddr, void *hOSMemHandle,
1135 struct IMG_DEV_VIRTADDR *pDevVAddr, u32 ui32MemFlags,
1140 struct IMG_DEV_VIRTADDR MapDevVAddr;
1141 u32 ui32VAdvance = SGX_MMU_PAGE_SIZE;
1142 u32 ui32PAdvance = SGX_MMU_PAGE_SIZE;
1145 PVR_UNREFERENCED_PARAMETER(hUniqueTag);
1148 PVR_DPF(PVR_DBG_MESSAGE,
1149 "MMU_MapShadow: %08X, 0x%x, %08X",
1150 MapBaseDevVAddr.uiAddr, uByteSize, CpuVAddr);
1152 PVR_ASSERT(((u32) CpuVAddr & (SGX_MMU_PAGE_SIZE - 1)) == 0);
1153 PVR_ASSERT(((u32) uByteSize & (SGX_MMU_PAGE_SIZE - 1)) == 0);
1154 pDevVAddr->uiAddr = MapBaseDevVAddr.uiAddr;
/* Dummy-memory handling (body sampled out). */
1156 if (ui32MemFlags & PVRSRV_MEM_DUMMY)
1159 MapDevVAddr = MapBaseDevVAddr;
1160 for (i = 0; i < uByteSize; i += ui32VAdvance) {
1161 struct IMG_CPU_PHYADDR CpuPAddr;
1162 struct IMG_DEV_PHYADDR DevPAddr;
/* Resolve this page's CPU-physical address. */
1166 OSMapLinToCPUPhys((void *)((u32)CpuVAddr +
1169 CpuPAddr = OSMemHandleToCpuPAddr(hOSMemHandle, uOffset);
1171 SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, CpuPAddr);
1173 PVR_DPF(PVR_DBG_MESSAGE, "0x%x: CpuVAddr=%08X, "
1174 "CpuPAddr=%08X, DevVAddr=%08X, DevPAddr=%08X",
1175 uOffset, (u32)CpuVAddr + uOffset, CpuPAddr.uiAddr,
1176 MapDevVAddr.uiAddr, DevPAddr.uiAddr);
1178 MMU_MapPage(pMMUHeap, MapDevVAddr, DevPAddr, ui32MemFlags);
1180 MapDevVAddr.uiAddr += ui32VAdvance;
1181 uOffset += ui32PAdvance;
1185 MMU_PDumpPageTables(pMMUHeap, MapBaseDevVAddr, uByteSize, IMG_FALSE,
/*
 * MMU_UnmapPages - invalidate ui32PageCount PTEs starting at sDevVAddr.
 *
 * Like MMU_UnmapPagesAndFreePTs but without freeing emptied page
 * tables; missing tables and already-invalid PTEs are reported as
 * errors and skipped. Ends with a PT-cache invalidation and an unmap
 * PDUMP of the whole range.
 *
 * NOTE(review): sampled listing -- braces and some shift constants are
 * not visible.
 */
1190 void MMU_UnmapPages(struct MMU_HEAP *psMMUHeap,
1191 struct IMG_DEV_VIRTADDR sDevVAddr, u32 ui32PageCount,
1194 u32 uPageSize = HOST_PAGESIZE();
1195 struct IMG_DEV_VIRTADDR sTmpDevVAddr;
1202 PVR_UNREFERENCED_PARAMETER(hUniqueTag);
1205 sTmpDevVAddr = sDevVAddr;
1207 for (i = 0; i < ui32PageCount; i++) {
1208 struct MMU_PT_INFO **ppsPTInfoList;
1210 ui32PDIndex = sTmpDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT +
1213 ppsPTInfoList = &psMMUHeap->psMMUContext->
1214 apsPTInfoList[ui32PDIndex];
1216 ui32PTIndex = (sTmpDevVAddr.uiAddr & SGX_MMU_PT_MASK) >>
/* No page table for this address: report and skip the page. */
1219 if (!ppsPTInfoList[0]) {
1220 PVR_DPF(PVR_DBG_ERROR,
1222 "ERROR Invalid PT for alloc at VAddr:0x%08lX "
1223 "(VaddrIni:0x%08lX AllocPage:%u) PDIdx:%u "
1225 sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr, i,
1226 ui32PDIndex, ui32PTIndex);
1228 sTmpDevVAddr.uiAddr += uPageSize;
1233 pui32Tmp = (u32 *)ppsPTInfoList[0]->PTPageCpuVAddr;
1235 if (pui32Tmp[ui32PTIndex] & SGX_MMU_PTE_VALID)
1236 ppsPTInfoList[0]->ui32ValidPTECount--;
1238 PVR_DPF(PVR_DBG_ERROR,
1239 "MMU_UnmapPages: Page is already invalid "
1240 "for alloc at VAddr:0x%08lX "
1241 "(VAddrIni:0x%08lX AllocPage:%u) "
1242 "PDIdx:%u PTIdx:%u",
1243 sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr, i,
1244 ui32PDIndex, ui32PTIndex);
1246 PVR_ASSERT((s32) ppsPTInfoList[0]->ui32ValidPTECount >= 0);
1248 pui32Tmp[ui32PTIndex] = 0;
1250 sTmpDevVAddr.uiAddr += uPageSize;
1253 MMU_InvalidatePageTableCache(psMMUHeap->psMMUContext->psDevInfo);
1256 MMU_PDumpPageTables(psMMUHeap, sDevVAddr, uPageSize * ui32PageCount,
1257 IMG_TRUE, hUniqueTag);
/*
 * MMU_GetPhysPageAddr - look up the device-physical page address mapped
 * at device-virtual page sDevVPageAddr.
 *
 * Walks PD index -> page table -> PTE and masks out the PTE flag bits.
 * Returns a zero address (with an error message) when no page table
 * exists for the address.
 */
1261 struct IMG_DEV_PHYADDR MMU_GetPhysPageAddr(struct MMU_HEAP *pMMUHeap,
1262 struct IMG_DEV_VIRTADDR sDevVPageAddr)
1264 u32 *pui32PageTable;
1266 struct IMG_DEV_PHYADDR sDevPAddr;
1267 struct MMU_PT_INFO **ppsPTInfoList;
/* ui32Index is reused: first as PD index, then as PT index. */
1269 ui32Index = sDevVPageAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT +
1272 ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32Index];
1273 if (!ppsPTInfoList[0]) {
1274 PVR_DPF(PVR_DBG_ERROR,
1275 "MMU_GetPhysPageAddr: Not mapped in at 0x%08x",
1276 sDevVPageAddr.uiAddr);
1277 sDevPAddr.uiAddr = 0;
1282 (sDevVPageAddr.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
1284 pui32PageTable = (u32 *) ppsPTInfoList[0]->PTPageCpuVAddr;
1286 sDevPAddr.uiAddr = pui32PageTable[ui32Index];
/* Strip PTE flag bits, leaving only the physical page address. */
1288 sDevPAddr.uiAddr &= SGX_MMU_PTE_ADDR_MASK;
/*
 * MMU_GetPDDevPAddr - return the device-physical address of a context's
 * page directory.
 */
1293 struct IMG_DEV_PHYADDR MMU_GetPDDevPAddr(struct MMU_CONTEXT *pMMUContext)
1295 return pMMUContext->sPDDevPAddr;
/*
 * SGXGetPhysPageAddrKM - bridge entry: resolve a device-virtual address
 * to its physical page address via the heap's MMU.
 *
 * Both *pDevPAddr and *pCpuPAddr receive the same device-physical value
 * (on this platform they are evidently interchangeable -- TODO confirm).
 * Returns PVRSRV_ERROR_INVALID_PARAMS when the address is not mapped
 * (lookup yields 0), PVRSRV_OK otherwise.
 */
1298 enum PVRSRV_ERROR SGXGetPhysPageAddrKM(void *hDevMemHeap,
1299 struct IMG_DEV_VIRTADDR sDevVAddr,
1300 struct IMG_DEV_PHYADDR *pDevPAddr,
1301 struct IMG_CPU_PHYADDR *pCpuPAddr)
1303 struct MMU_HEAP *pMMUHeap;
1304 struct IMG_DEV_PHYADDR DevPAddr;
1306 pMMUHeap = (struct MMU_HEAP *)BM_GetMMUHeap(hDevMemHeap);
1308 DevPAddr = MMU_GetPhysPageAddr(pMMUHeap, sDevVAddr);
1309 pCpuPAddr->uiAddr = DevPAddr.uiAddr;
1310 pDevPAddr->uiAddr = DevPAddr.uiAddr;
1312 return (pDevPAddr->uiAddr != 0) ?
1313 PVRSRV_OK : PVRSRV_ERROR_INVALID_PARAMS;
/*
 * SGXGetMMUPDAddrKM - bridge entry: return the page-directory
 * device-physical address of a device memory context.
 * Validates all handles/pointers before dereferencing.
 */
1316 enum PVRSRV_ERROR SGXGetMMUPDAddrKM(void *hDevCookie,
1317 void *hDevMemContext,
1318 struct IMG_DEV_PHYADDR *psPDDevPAddr)
1320 if (!hDevCookie || !hDevMemContext || !psPDDevPAddr)
1321 return PVRSRV_ERROR_INVALID_PARAMS;
1324 ((struct BM_CONTEXT *)hDevMemContext)->psMMUContext->sPDDevPAddr;
/*
 * MMU_BIFResetPDAlloc - allocate the three-page block used to reset the
 * SGX BIF: a dummy page directory, a dummy page table, and a data page.
 *
 * UMA path uses OSAllocPages; LMA path allocates from the first local
 * device memory arena and maps it. The three device-physical addresses
 * and the PD/PT CPU pointers are stored in psDevInfo; the PD and PT
 * pages are zeroed and the data page is filled with the 0xDB marker.
 *
 * NOTE(review): sampled listing -- braces, some error returns and the
 * final success return are not visible.
 */
1329 enum PVRSRV_ERROR MMU_BIFResetPDAlloc(struct PVRSRV_SGXDEV_INFO *psDevInfo)
1331 enum PVRSRV_ERROR eError;
1332 struct SYS_DATA *psSysData;
1333 struct RA_ARENA *psLocalDevMemArena;
1334 void *hOSMemHandle = NULL;
1335 u8 *pui8MemBlock = NULL;
1336 struct IMG_SYS_PHYADDR sMemBlockSysPAddr;
1337 struct IMG_CPU_PHYADDR sMemBlockCpuPAddr;
1339 eError = SysAcquireData(&psSysData);
1340 if (eError != PVRSRV_OK) {
1341 PVR_DPF(PVR_DBG_ERROR,
1342 "MMU_BIFResetPDAlloc: ERROR call to SysAcquireData failed");
1346 psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];
/* UMA path: three pages straight from the OS allocator. */
1348 if (psLocalDevMemArena == NULL) {
1351 OSAllocPages(PVRSRV_HAP_WRITECOMBINE |
1352 PVRSRV_HAP_KERNEL_ONLY, 3 * SGX_MMU_PAGE_SIZE,
1353 SGX_MMU_PAGE_SIZE, (void **)&pui8MemBlock,
1355 if (eError != PVRSRV_OK) {
1356 PVR_DPF(PVR_DBG_ERROR, "MMU_BIFResetPDAlloc: "
1357 "ERROR call to OSAllocPages failed");
1360 sMemBlockCpuPAddr = OSMapLinToCPUPhys(pui8MemBlock);
/* LMA path: carve three pages from the local arena and map them. */
1362 if (RA_Alloc(psLocalDevMemArena, 3 * SGX_MMU_PAGE_SIZE,
1363 NULL, 0, SGX_MMU_PAGE_SIZE,
1364 &(sMemBlockSysPAddr.uiAddr)) != IMG_TRUE) {
1365 PVR_DPF(PVR_DBG_ERROR, "MMU_BIFResetPDAlloc: "
1366 "ERROR call to RA_Alloc failed");
1367 return PVRSRV_ERROR_OUT_OF_MEMORY;
1370 sMemBlockCpuPAddr = SysSysPAddrToCpuPAddr(sMemBlockSysPAddr);
1371 pui8MemBlock = (void __force *)OSMapPhysToLin(sMemBlockCpuPAddr,
1372 SGX_MMU_PAGE_SIZE * 3,
1373 PVRSRV_HAP_WRITECOMBINE |
1374 PVRSRV_HAP_KERNEL_ONLY,
1376 if (!pui8MemBlock) {
1377 PVR_DPF(PVR_DBG_ERROR, "MMU_BIFResetPDAlloc: "
1378 "ERROR failed to map page tables");
1379 return PVRSRV_ERROR_BAD_MAPPING;
/* Record the three consecutive pages: PD, PT, then data page. */
1383 psDevInfo->hBIFResetPDOSMemHandle = hOSMemHandle;
1384 psDevInfo->sBIFResetPDDevPAddr =
1385 SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sMemBlockCpuPAddr);
1386 psDevInfo->sBIFResetPTDevPAddr.uiAddr =
1387 psDevInfo->sBIFResetPDDevPAddr.uiAddr + SGX_MMU_PAGE_SIZE;
1388 psDevInfo->sBIFResetPageDevPAddr.uiAddr =
1389 psDevInfo->sBIFResetPTDevPAddr.uiAddr + SGX_MMU_PAGE_SIZE;
1390 psDevInfo->pui32BIFResetPD = (u32 *) pui8MemBlock;
1391 psDevInfo->pui32BIFResetPT =
1392 (u32 *) (pui8MemBlock + SGX_MMU_PAGE_SIZE);
1394 OSMemSet(psDevInfo->pui32BIFResetPD, 0, SGX_MMU_PAGE_SIZE);
1395 OSMemSet(psDevInfo->pui32BIFResetPT, 0, SGX_MMU_PAGE_SIZE);
/* Fill the data page with a recognisable poison pattern. */
1397 OSMemSet(pui8MemBlock + (2 * SGX_MMU_PAGE_SIZE), 0xDB,
/*
 * MMU_BIFResetPDFree - release the three-page BIF-reset block allocated
 * by MMU_BIFResetPDAlloc (OSFreePages on the UMA path; unmap + RA_Free
 * of the arena block on the LMA path).
 */
1403 void MMU_BIFResetPDFree(struct PVRSRV_SGXDEV_INFO *psDevInfo)
1405 enum PVRSRV_ERROR eError;
1406 struct SYS_DATA *psSysData;
1407 struct RA_ARENA *psLocalDevMemArena;
1408 struct IMG_SYS_PHYADDR sPDSysPAddr;
1410 eError = SysAcquireData(&psSysData);
1411 if (eError != PVRSRV_OK) {
1412 PVR_DPF(PVR_DBG_ERROR, "MMU_BIFResetPDFree: "
1413 "ERROR call to SysAcquireData failed");
1417 psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];
/* UMA path: free the OS page allocation. */
1419 if (psLocalDevMemArena == NULL) {
1420 OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
1421 3 * SGX_MMU_PAGE_SIZE,
1422 psDevInfo->pui32BIFResetPD,
1423 psDevInfo->hBIFResetPDOSMemHandle);
/* LMA path: unmap, then return the block to the arena. */
1425 OSUnMapPhysToLin((void __force __iomem *)
1426 psDevInfo->pui32BIFResetPD,
1427 3 * SGX_MMU_PAGE_SIZE,
1428 PVRSRV_HAP_WRITECOMBINE |
1429 PVRSRV_HAP_KERNEL_ONLY,
1430 psDevInfo->hBIFResetPDOSMemHandle);
1433 SysDevPAddrToSysPAddr(PVRSRV_DEVICE_TYPE_SGX,
1434 psDevInfo->sBIFResetPDDevPAddr);
1435 RA_Free(psLocalDevMemArena, sPDSysPAddr.uiAddr, IMG_FALSE);
/*
 * mmu_get_page_dir - return the raw device-physical address of a
 * context's page directory as a u32.
 */
1439 u32 mmu_get_page_dir(struct MMU_CONTEXT *psMMUContext)
1441 return psMMUContext->sPDDevPAddr.uiAddr;
1445 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_PVR_DEBUG)
/*
 * hwrec_mem_dump_page - debugfs HW-recovery helper: ioremap one device
 * page and dump its contents wrapped in <PAGE>...</PAGE> markers.
 * Return type and error/unmap lines are sampled out of this listing.
 */
1448 hwrec_mem_dump_page(u32 dev_p_addr)
1452 page = ioremap_nocache(dev_p_addr, SGX_MMU_PAGE_SIZE);
1456 /* Loop through all the pages and dump them */
1457 hwrec_mem_print("<PAGE PA:0x%08X>\n", dev_p_addr);
1458 hwrec_mem_write((void __force *) page, PAGE_SIZE);
1459 hwrec_mem_print("</PAGE>\n");
/*
 * hwrec_mem_dump_table - debugfs HW-recovery helper: dump one page
 * table (1024 entries) wrapped in <TABLE>...</TABLE>, then recurse
 * into every valid entry's page via hwrec_mem_dump_page.
 * Return type and iounmap/error lines are sampled out of this listing.
 */
1467 hwrec_mem_dump_table(u32 dev_p_addr)
1472 pt = ioremap_nocache(dev_p_addr, SGX_MMU_PAGE_SIZE);
1476 /* Loop through all the page tables and dump them */
1477 hwrec_mem_print("<TABLE PA:0x%08X>\n", dev_p_addr);
1478 for (i = 0 ; i < 1024 ; i++)
1479 hwrec_mem_print("0x%08X\n", readl(pt + 4 * i));
1480 hwrec_mem_print("</TABLE>\n");
/* Second pass: follow each valid entry to dump the page it maps. */
1482 for (i = 0; i < 1024; i++) {
1483 u32 addr = readl(pt + 4 * i);
1485 if (addr & SGX_MMU_PDE_VALID)
1486 hwrec_mem_dump_page(addr & SGX_MMU_PDE_ADDR_MASK);
/*
 * hwrec_mem_dump_dir - debugfs HW-recovery helper: dump a context's
 * page directory (1024 PDEs) wrapped in <DIR>...</DIR>, then recurse
 * into every valid PDE's page table via hwrec_mem_dump_table.
 * Return type is sampled out of this listing.
 */
1495 hwrec_mem_dump_dir(struct MMU_CONTEXT *context)
1497 void __iomem *pd = (void __force __iomem *) context->pvPDCpuVAddr;
1501 hwrec_mem_print("<DIR PA:0x%08X>\n", context->sPDDevPAddr);
1503 for (i = 0; i < 1024; i++)
1504 hwrec_mem_print("0x%08X\n", readl(pd + 4 * i));
1506 hwrec_mem_print("</DIR>\n");
/* Second pass: follow each valid PDE into its page table. */
1508 for (i = 0; i < 1024; i++) {
1509 u32 addr = readl(pd + 4 * i);
1511 if (addr & SGX_MMU_PDE_VALID)
1512 hwrec_mem_dump_table(addr & SGX_MMU_PDE_ADDR_MASK);
/*
 * mmu_hwrec_mem_dump - debugfs HW-recovery entry point: read the active
 * page-directory base from the BIF register, find the MMU context whose
 * PD matches it, and dump that context's full translation hierarchy.
 * Logs an error when no context matches. Return type and the list-walk
 * loop header are sampled out of this listing.
 */
1519 mmu_hwrec_mem_dump(struct PVRSRV_SGXDEV_INFO *psDevInfo)
1521 struct MMU_CONTEXT *context = psDevInfo->pvMMUContextList;
/* Hardware's current PD base, straight from the BIF register. */
1524 page_dir = readl(psDevInfo->pvRegsBaseKM + EUR_CR_BIF_DIR_LIST_BASE0);
1527 if (context->sPDDevPAddr.uiAddr == page_dir)
1530 context = context->psNext;
1534 pr_err("Unable to find matching context for page directory"
1535 " 0x%08X\n", page_dir);
1539 return hwrec_mem_dump_dir(context);
1542 #endif /* CONFIG_DEBUG_FS && CONFIG_PVR_DEBUG */