1 /**********************************************************************
3 * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
9 * This program is distributed in the hope it will be useful but, except
10 * as otherwise stated in writing, without any warranty; without even the
11 * implied warranty of merchantability or fitness for a particular purpose.
12 * See the GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 * The full GNU General Public License is included in this distribution in
19 * the file called "COPYING".
21 * Contact Information:
22 * Imagination Technologies Ltd. <gpl-support@imgtec.com>
23 * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
25 ******************************************************************************/
29 #include "services_headers.h"
30 #include "buffer_manager.h"
33 #include "pvr_pdump.h"
34 #include "sgxapi_km.h"
35 #include "sgx_bridge_km.h"
37 #include "sgxinfokm.h"
40 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_PVR_DEBUG)
41 #include "pvr_debugfs.h"
45 #define UINT32_MAX_VALUE 0xFFFFFFFFUL
/* NOTE(review): the member declarations below belong to struct definitions
 * (apparently MMU_PT_INFO, MMU_CONTEXT and MMU_HEAP, judging by how they are
 * accessed later in this file) whose opening lines are not visible in this
 * excerpt — confirm against the full source before relying on grouping. */
void *hPTPageOSMemHandle;	/* OS mem handle backing one page-table page */
u32 ui32ValidPTECount;		/* number of currently-valid PTEs in that page */
struct PVRSRV_DEVICE_NODE *psDeviceNode;
struct IMG_DEV_PHYADDR sPDDevPAddr;	/* device-physical addr of the page directory */
struct MMU_PT_INFO *apsPTInfoList[1024];	/* one slot per page-directory entry */
struct PVRSRV_SGXDEV_INFO *psDevInfo;
struct MMU_CONTEXT *psNext;	/* next context in the device-wide MMU context list */
struct MMU_CONTEXT *psMMUContext;	/* context owning this heap */
struct RA_ARENA *psVMArena;	/* resource arena managing the heap's device VA space */
struct DEV_ARENA_DESCRIPTOR *psDevArena;	/* heap base/size/type descriptor */
/*
 * MMU_InvalidateDirectoryCache - request invalidation of the SGX BIF
 * page-directory cache.  Only sets a flag in ui32CacheControl; the actual
 * hardware flush is presumably performed later when the flag is consumed
 * (not visible in this excerpt).
 */
void MMU_InvalidateDirectoryCache(struct PVRSRV_SGXDEV_INFO *psDevInfo)
psDevInfo->ui32CacheControl |= SGX_BIF_INVALIDATE_PDCACHE;
/*
 * MMU_InvalidatePageTableCache - request invalidation of the SGX BIF
 * page-table cache.  Like MMU_InvalidateDirectoryCache, this only records
 * the request in ui32CacheControl for a later flush.
 */
static void MMU_InvalidatePageTableCache(struct PVRSRV_SGXDEV_INFO *psDevInfo)
psDevInfo->ui32CacheControl |= SGX_BIF_INVALIDATE_PTCACHE;
/*
 * _AllocPageTables - set up the page-table bookkeeping for a heap.
 *
 * Derives from the heap's device arena: the total PT entry count, the base
 * PT index of the heap within the page directory, and the number of
 * page-table pages spanned.  The page-table pages themselves are allocated
 * lazily (see _DeferredAllocPagetables).
 */
static IMG_BOOL _AllocPageTables(struct MMU_HEAP *pMMUHeap)
PVR_DPF(PVR_DBG_MESSAGE, "_AllocPageTables()");
PVR_ASSERT(pMMUHeap != NULL);
/* the code assumes device and host page sizes agree */
PVR_ASSERT(HOST_PAGESIZE() == SGX_MMU_PAGE_SIZE);
if (pMMUHeap == NULL) {
PVR_DPF(PVR_DBG_ERROR, "_AllocPageTables: invalid parameter");
/* one PT entry per device page in the arena */
pMMUHeap->ui32PTEntryCount =
pMMUHeap->psDevArena->ui32Size >> SGX_MMU_PAGE_SHIFT;
/* first PT entry index covered by the heap's base device VA */
pMMUHeap->ui32PTBaseIndex =
(pMMUHeap->psDevArena->BaseDevVAddr.
uiAddr & (SGX_MMU_PD_MASK | SGX_MMU_PT_MASK)) >>
/* number of page-table pages, rounded up to a whole table */
pMMUHeap->ui32PTPageCount =
(pMMUHeap->ui32PTEntryCount + SGX_MMU_PT_SIZE - 1) >>
/*
 * _DeferredFreePageTable - free one page-table page of a heap.
 *
 * @pMMUHeap:    heap owning the page table
 * @ui32PTIndex: index of the page table relative to the heap's base PD slot
 *
 * Clears the corresponding page-directory entry: for SHARED heaps in every
 * MMU context on the device's context list, for PERCONTEXT/KERNEL heaps in
 * the owning context only.  The PT page memory is then returned either to
 * the OS (no local dev-mem arena) or to the local arena via RA_Free, and
 * the MMU_PT_INFO record is freed.  Also emits pdump records for the frees.
 */
static void _DeferredFreePageTable(struct MMU_HEAP *pMMUHeap, u32 ui32PTIndex)
struct SYS_DATA *psSysData;
struct MMU_PT_INFO **ppsPTInfoList;
if (SysAcquireData(&psSysData) != PVRSRV_OK) {
PVR_DPF(PVR_DBG_ERROR, "_DeferredFreePageTables: "
"ERROR call to SysAcquireData failed");
/* PD slot of the heap's base address (continuation of an elided stmt) */
pMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT +
ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
/* a PT may only be freed when absent or when no PTE in it is valid */
PVR_ASSERT(ppsPTInfoList[ui32PTIndex] == NULL ||
ppsPTInfoList[ui32PTIndex]->ui32ValidPTECount ==
PDUMPCOMMENT("Free page table (page count == %08X)",
pMMUHeap->ui32PTPageCount);
if (ppsPTInfoList[ui32PTIndex]
&& ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr)
PDUMPFREEPAGETABLE(ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr);
switch (pMMUHeap->psDevArena->DevMemHeapType) {
case DEVICE_MEMORY_HEAP_SHARED:
case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
/* shared heap: scrub the PDE from every MMU context */
struct MMU_CONTEXT *psMMUContext =
(struct MMU_CONTEXT *)
pMMUHeap->psMMUContext->psDevInfo->pvMMUContextList;
while (psMMUContext) {
(u32 *) psMMUContext->pvPDCpuVAddr;
pui32PDEntry += ui32PDIndex;
pui32PDEntry[ui32PTIndex] = 0;
PDUMPPAGETABLE((void *) &pui32PDEntry
sizeof(u32), IMG_FALSE,
psMMUContext = psMMUContext->psNext;
case DEVICE_MEMORY_HEAP_PERCONTEXT:
case DEVICE_MEMORY_HEAP_KERNEL:
/* per-context/kernel heap: scrub only the owning context's PDE */
(u32 *) pMMUHeap->psMMUContext->pvPDCpuVAddr;
pui32PDEntry += ui32PDIndex;
pui32PDEntry[ui32PTIndex] = 0;
PDUMPPAGETABLE((void *) &pui32PDEntry[ui32PTIndex],
sizeof(u32), IMG_FALSE,
PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
PVR_DPF(PVR_DBG_ERROR,
"_DeferredFreePagetable: ERROR invalid heap type");
if (ppsPTInfoList[ui32PTIndex] != NULL) {
if (ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr != NULL) {
(u32 *) ppsPTInfoList[ui32PTIndex]->
(i < pMMUHeap->ui32PTEntryCount) && (i < 1024);
/* no local dev-mem arena: PT page came straight from the OS */
if (pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->
psLocalDevMemArena == NULL) {
OSFreePages(PVRSRV_HAP_WRITECOMBINE |
PVRSRV_HAP_KERNEL_ONLY,
ppsPTInfoList[ui32PTIndex]->
ppsPTInfoList[ui32PTIndex]->
struct IMG_SYS_PHYADDR sSysPAddr;
struct IMG_CPU_PHYADDR sCpuPAddr;
/* local arena: unmap the kernel mapping, then return the
 * physical page to the arena */
OSMapLinToCPUPhys(ppsPTInfoList
sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);
OSUnMapPhysToLin((void __force __iomem *)
ppsPTInfoList[ui32PTIndex]->
PVRSRV_HAP_WRITECOMBINE |
PVRSRV_HAP_KERNEL_ONLY,
ppsPTInfoList[ui32PTIndex]->
RA_Free(pMMUHeap->psDevArena->
psDeviceMemoryHeapInfo->
sSysPAddr.uiAddr, IMG_FALSE);
pMMUHeap->ui32PTEntryCount -= i;
pMMUHeap->ui32PTEntryCount -= 1024;
OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
sizeof(struct MMU_PT_INFO),
ppsPTInfoList[ui32PTIndex], NULL);
ppsPTInfoList[ui32PTIndex] = NULL;
pMMUHeap->ui32PTEntryCount -= 1024;
PDUMPCOMMENT("Finished free page table (page count == %08X)",
pMMUHeap->ui32PTPageCount);
/*
 * _DeferredFreePageTables - free every page-table page of a heap, then
 * request a page-directory cache invalidate so the hardware no longer
 * caches the cleared PDEs.
 */
static void _DeferredFreePageTables(struct MMU_HEAP *pMMUHeap)
for (i = 0; i < pMMUHeap->ui32PTPageCount; i++)
_DeferredFreePageTable(pMMUHeap, i);
MMU_InvalidateDirectoryCache(pMMUHeap->psMMUContext->psDevInfo);
/*
 * _DeferredAllocPagetables - ensure page tables exist for a device VA range.
 *
 * @pMMUHeap: heap the range belongs to
 * @DevVAddr: start of the device virtual range
 * @ui32Size: size of the range in bytes
 *
 * Walks every page-directory slot covered by [DevVAddr, DevVAddr+ui32Size),
 * allocating an MMU_PT_INFO record and a zeroed page-table page for each
 * slot that has none.  PT pages come from the OS when there is no local
 * dev-mem arena, otherwise from the arena via RA_Alloc plus a kernel
 * mapping.  New PDEs are written into every context for SHARED heaps, or
 * only into the owning context for PERCONTEXT/KERNEL heaps, followed by a
 * PD cache invalidate.  Returns IMG_BOOL success (exact failure paths are
 * partly elided in this excerpt).
 */
static IMG_BOOL _DeferredAllocPagetables(struct MMU_HEAP *pMMUHeap,
struct IMG_DEV_VIRTADDR DevVAddr, u32 ui32Size)
struct MMU_PT_INFO **ppsPTInfoList;
struct SYS_DATA *psSysData;
struct IMG_DEV_VIRTADDR sHighDevVAddr;
PVR_ASSERT(DevVAddr.uiAddr < (1 << SGX_FEATURE_ADDRESS_SPACE_SIZE));
if (SysAcquireData(&psSysData) != PVRSRV_OK)
/* first PD slot covered by the range (continuation of elided stmt) */
DevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
/* clamp the (rounded-up) end address so it cannot wrap past 2^32-1 */
if ((UINT32_MAX_VALUE - DevVAddr.uiAddr) <
(ui32Size + (1 << (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT)) - 1)) {
sHighDevVAddr.uiAddr = UINT32_MAX_VALUE;
sHighDevVAddr.uiAddr = DevVAddr.uiAddr + ui32Size +
(1 << (SGX_MMU_PAGE_SHIFT +
SGX_MMU_PT_SHIFT)) - 1;
sHighDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
/* convert absolute end slot into a count of PD slots to visit */
ui32PTPageCount -= ui32PDIndex;
pui32PDEntry = (u32 *) pMMUHeap->psMMUContext->pvPDCpuVAddr;
pui32PDEntry += ui32PDIndex;
ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
PDUMPCOMMENT("Alloc page table (page count == %08X)", ui32PTPageCount);
PDUMPCOMMENT("Page directory mods (page count == %08X)",
for (i = 0; i < ui32PTPageCount; i++) {
/* allocate the bookkeeping record if this slot has never had one */
if (ppsPTInfoList[i] == NULL) {
if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
sizeof(struct MMU_PT_INFO),
(void **) &ppsPTInfoList[i], NULL)
PVR_DPF(PVR_DBG_ERROR,
"_DeferredAllocPagetables: "
"ERROR call to OSAllocMem failed");
OSMemSet(ppsPTInfoList[i], 0,
sizeof(struct MMU_PT_INFO));
/* allocate the actual PT page if the slot has none mapped */
if (ppsPTInfoList[i]->hPTPageOSMemHandle == NULL &&
ppsPTInfoList[i]->PTPageCpuVAddr == NULL) {
struct IMG_CPU_PHYADDR sCpuPAddr;
struct IMG_DEV_PHYADDR sDevPAddr;
PVR_ASSERT(pui32PDEntry[i] == 0);
if (pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->
psLocalDevMemArena == NULL) {
/* UMA path: PT page straight from the OS */
if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE |
PVRSRV_HAP_KERNEL_ONLY,
(void **)&ppsPTInfoList[i]->
hPTPageOSMemHandle) !=
PVR_DPF(PVR_DBG_ERROR,
"_DeferredAllocPagetables: "
"ERROR call to OSAllocPages failed");
if (ppsPTInfoList[i]->PTPageCpuVAddr) {
OSMapLinToCPUPhys(ppsPTInfoList[i]->
OSMemHandleToCpuPAddr(
SysCpuPAddrToDevPAddr
(PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
struct IMG_SYS_PHYADDR sSysPAddr;
/* LMA path: PT page from the local arena + kernel map */
if (RA_Alloc(pMMUHeap->psDevArena->
psDeviceMemoryHeapInfo->psLocalDevMemArena,
SGX_MMU_PAGE_SIZE, NULL, 0,
&(sSysPAddr.uiAddr)) != IMG_TRUE) {
PVR_DPF(PVR_DBG_ERROR,
"_DeferredAllocPagetables: "
"ERROR call to RA_Alloc failed");
sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
ppsPTInfoList[i]->PTPageCpuVAddr =
OSMapPhysToLin(sCpuPAddr, SGX_MMU_PAGE_SIZE,
PVRSRV_HAP_WRITECOMBINE |
PVRSRV_HAP_KERNEL_ONLY,
if (!ppsPTInfoList[i]->PTPageCpuVAddr) {
PVR_DPF(PVR_DBG_ERROR,
"_DeferredAllocPagetables: "
"ERROR failed to map page tables");
sDevPAddr = SysCpuPAddrToDevPAddr
(PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
/* new PT must start with all entries invalid */
OSMemSet(ppsPTInfoList[i]->PTPageCpuVAddr, 0,
PDUMPMALLOCPAGETABLE(ppsPTInfoList[i]->PTPageCpuVAddr,
PDUMPPAGETABLE(ppsPTInfoList[i]->PTPageCpuVAddr,
SGX_MMU_PAGE_SIZE, IMG_TRUE,
PDUMP_PT_UNIQUETAG, PDUMP_PT_UNIQUETAG);
switch (pMMUHeap->psDevArena->DevMemHeapType) {
case DEVICE_MEMORY_HEAP_SHARED:
case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
/* shared heap: publish the new PDE to every context */
struct MMU_CONTEXT *psMMUContext =
(struct MMU_CONTEXT *)pMMUHeap->
psMMUContext->psDevInfo->
while (psMMUContext) {
(u32 *)psMMUContext->
pui32PDEntry += ui32PDIndex;
((void *)&pui32PDEntry[i],
sizeof(u32), IMG_FALSE,
psMMUContext->psNext;
case DEVICE_MEMORY_HEAP_PERCONTEXT:
case DEVICE_MEMORY_HEAP_KERNEL:
/* private heap: PDE goes into the owning context only */
pui32PDEntry[i] = sDevPAddr.uiAddr |
PDUMPPAGETABLE((void *)&pui32PDEntry[i],
sizeof(u32), IMG_FALSE,
PVR_DPF(PVR_DBG_ERROR,
"_DeferredAllocPagetables: "
"ERROR invalid heap type");
MMU_InvalidateDirectoryCache(pMMUHeap->psMMUContext->
PVR_ASSERT(pui32PDEntry[i] != 0);
/*
 * MMU_Initialise - create an MMU context with a fresh page directory.
 *
 * @psDeviceNode:  device the context belongs to
 * @ppsMMUContext: out: the newly allocated MMU_CONTEXT
 * @psPDDevPAddr:  out: device-physical address of the page directory
 *
 * Allocates and zeroes the MMU_CONTEXT, then allocates one page-directory
 * page either from the OS (no local dev-mem arena) or from the local arena
 * plus a kernel mapping, zero/initialises the PD entries, emits pdump
 * records, and links the context at the head of the device's MMU context
 * list.  Returns PVRSRV_OK or PVRSRV_ERROR_GENERIC.
 *
 * NOTE(review): several early-exit paths leak psMMUContext (no OSFreeMem
 * before "return PVRSRV_ERROR_GENERIC") — worth confirming against the
 * full, unelided source.
 */
enum PVRSRV_ERROR MMU_Initialise(struct PVRSRV_DEVICE_NODE *psDeviceNode,
struct MMU_CONTEXT **ppsMMUContext,
struct IMG_DEV_PHYADDR *psPDDevPAddr)
struct IMG_DEV_PHYADDR sPDDevPAddr;
struct IMG_CPU_PHYADDR sCpuPAddr;
struct MMU_CONTEXT *psMMUContext;
void *hPDOSMemHandle;
struct SYS_DATA *psSysData;
struct PVRSRV_SGXDEV_INFO *psDevInfo;
PVR_DPF(PVR_DBG_MESSAGE, "MMU_Initialise");
if (SysAcquireData(&psSysData) != PVRSRV_OK) {
PVR_DPF(PVR_DBG_ERROR,
"MMU_Initialise: ERROR call to SysAcquireData failed");
return PVRSRV_ERROR_GENERIC;
if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
sizeof(struct MMU_CONTEXT), (void **) &psMMUContext, NULL)
PVR_DPF(PVR_DBG_ERROR,
"MMU_Initialise: ERROR call to OSAllocMem failed");
return PVRSRV_ERROR_GENERIC;
OSMemSet(psMMUContext, 0, sizeof(struct MMU_CONTEXT));
psDevInfo = (struct PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
psMMUContext->psDevInfo = psDevInfo;
psMMUContext->psDeviceNode = psDeviceNode;
/* UMA path: page directory allocated directly from the OS */
if (psDeviceNode->psLocalDevMemArena == NULL) {
(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
SGX_MMU_PAGE_SIZE, SGX_MMU_PAGE_SIZE, &pvPDCpuVAddr,
&hPDOSMemHandle) != PVRSRV_OK) {
PVR_DPF(PVR_DBG_ERROR, "MMU_Initialise: "
"ERROR call to OSAllocPages failed");
return PVRSRV_ERROR_GENERIC;
sCpuPAddr = OSMapLinToCPUPhys(pvPDCpuVAddr);
sCpuPAddr = OSMemHandleToCpuPAddr(hPDOSMemHandle, 0);
SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
/* LMA path: page directory from the local arena + kernel map */
struct IMG_SYS_PHYADDR sSysPAddr;
if (RA_Alloc(psDeviceNode->psLocalDevMemArena,
SGX_MMU_PAGE_SIZE, NULL, 0, SGX_MMU_PAGE_SIZE,
&(sSysPAddr.uiAddr)) != IMG_TRUE) {
PVR_DPF(PVR_DBG_ERROR, "MMU_Initialise: "
"ERROR call to RA_Alloc failed");
return PVRSRV_ERROR_GENERIC;
sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysPAddr);
pvPDCpuVAddr = (void __force *)
OSMapPhysToLin(sCpuPAddr, SGX_MMU_PAGE_SIZE,
PVRSRV_HAP_WRITECOMBINE |
PVRSRV_HAP_KERNEL_ONLY, &hPDOSMemHandle);
PVR_DPF(PVR_DBG_ERROR, "MMU_Initialise: "
"ERROR failed to map page tables");
return PVRSRV_ERROR_GENERIC;
PDUMPCOMMENT("Alloc page directory");
PDUMPMALLOCPAGETABLE(pvPDCpuVAddr, PDUMP_PD_UNIQUETAG);
pui32Tmp = (u32 *) pvPDCpuVAddr;
PVR_DPF(PVR_DBG_ERROR,
"MMU_Initialise: pvPDCpuVAddr invalid");
return PVRSRV_ERROR_GENERIC;
/* initialise every PD entry (assignment elided in this excerpt) */
for (i = 0; i < SGX_MMU_PD_SIZE; i++)
PDUMPCOMMENT("Page directory contents");
PDUMPPAGETABLE(pvPDCpuVAddr, SGX_MMU_PAGE_SIZE, IMG_TRUE,
PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
psMMUContext->pvPDCpuVAddr = pvPDCpuVAddr;
psMMUContext->sPDDevPAddr = sPDDevPAddr;
psMMUContext->hPDOSMemHandle = hPDOSMemHandle;
*ppsMMUContext = psMMUContext;
*psPDDevPAddr = sPDDevPAddr;
/* push the new context onto the head of the device's context list */
psMMUContext->psNext = (struct MMU_CONTEXT *)
psDevInfo->pvMMUContextList;
psDevInfo->pvMMUContextList = (void *) psMMUContext;
/*
 * MMU_Finalise - tear down an MMU context created by MMU_Initialise.
 *
 * Clears the page directory, returns the PD page to the OS or (via unmap +
 * RA_Free) to the local dev-mem arena, unlinks the context from the
 * device's MMU context list, and frees the MMU_CONTEXT structure.
 */
void MMU_Finalise(struct MMU_CONTEXT *psMMUContext)
struct SYS_DATA *psSysData;
struct MMU_CONTEXT **ppsMMUContext;
if (SysAcquireData(&psSysData) != PVRSRV_OK) {
PVR_DPF(PVR_DBG_ERROR,
"MMU_Finalise: ERROR call to SysAcquireData failed");
PDUMPCOMMENT("Free page directory");
PDUMPFREEPAGETABLE(psMMUContext->pvPDCpuVAddr);
pui32Tmp = (u32 *) psMMUContext->pvPDCpuVAddr;
/* scrub the PD entries before releasing the page (body elided) */
for (i = 0; i < SGX_MMU_PD_SIZE; i++)
if (psMMUContext->psDeviceNode->psLocalDevMemArena == NULL) {
/* UMA path: PD page was OS-allocated */
OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
psMMUContext->pvPDCpuVAddr,
psMMUContext->hPDOSMemHandle);
struct IMG_SYS_PHYADDR sSysPAddr;
struct IMG_CPU_PHYADDR sCpuPAddr;
/* LMA path: recover the phys addr, unmap, return to arena */
sCpuPAddr = OSMapLinToCPUPhys(psMMUContext->pvPDCpuVAddr);
sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);
OSUnMapPhysToLin((void __iomem __force *)
psMMUContext->pvPDCpuVAddr,
PVRSRV_HAP_WRITECOMBINE |
PVRSRV_HAP_KERNEL_ONLY,
psMMUContext->hPDOSMemHandle);
RA_Free(psMMUContext->psDeviceNode->psLocalDevMemArena,
sSysPAddr.uiAddr, IMG_FALSE);
PVR_DPF(PVR_DBG_MESSAGE, "MMU_Finalise");
/* unlink this context from the device's singly-linked context list */
(struct MMU_CONTEXT **) &psMMUContext->psDevInfo->pvMMUContextList;
while (*ppsMMUContext) {
if (*ppsMMUContext == psMMUContext) {
*ppsMMUContext = psMMUContext->psNext;
ppsMMUContext = &((*ppsMMUContext)->psNext);
OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct MMU_CONTEXT),
/*
 * MMU_InsertHeap - copy a shared heap's page-directory entries from the
 * heap's owning (kernel) context into another context's page directory, so
 * both contexts see the same page tables for that VA range.  Invalidates
 * the PD cache if any entry was actually copied.
 */
void MMU_InsertHeap(struct MMU_CONTEXT *psMMUContext,
struct MMU_HEAP *psMMUHeap)
u32 *pui32PDCpuVAddr = (u32 *)psMMUContext->pvPDCpuVAddr;
u32 *pui32KernelPDCpuVAddr = (u32 *)
psMMUHeap->psMMUContext->pvPDCpuVAddr;
IMG_BOOL bInvalidateDirectoryCache = IMG_FALSE;
/* advance both PD pointers to the heap's first PD slot */
psMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT +
pui32KernelPDCpuVAddr +=
psMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT +
PDUMPCOMMENT("Page directory shared heap range copy");
for (ui32PDEntry = 0; ui32PDEntry < psMMUHeap->ui32PTPageCount;
/* destination slot must be empty before the copy */
PVR_ASSERT(pui32PDCpuVAddr[ui32PDEntry] == 0);
pui32PDCpuVAddr[ui32PDEntry] =
pui32KernelPDCpuVAddr[ui32PDEntry];
if (pui32PDCpuVAddr[ui32PDEntry]) {
PDUMPPAGETABLE((void *) &pui32PDCpuVAddr[ui32PDEntry],
sizeof(u32), IMG_FALSE,
PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
bInvalidateDirectoryCache = IMG_TRUE;
if (bInvalidateDirectoryCache)
MMU_InvalidateDirectoryCache(psMMUContext->psDevInfo);
/*
 * MMU_PDumpPageTables - emit pdump records for the page-table entries
 * covering a device VA range.
 *
 * @pMMUHeap:   heap the range belongs to
 * @DevVAddr:   start of the range
 * @uSize:      size in bytes
 * @bForUnmap:  annotates the pdump comments only
 * @hUniqueTag: pdump tag for the dumped data
 *
 * Walks the PTs in 1024-entry chunks, dumping up to (1024 - ui32PTIndex)
 * entries per page table until all entries for the range are covered.
 */
static void MMU_PDumpPageTables(struct MMU_HEAP *pMMUHeap,
struct IMG_DEV_VIRTADDR DevVAddr,
size_t uSize, IMG_BOOL bForUnmap, void *hUniqueTag)
u32 ui32NumPTEntries;
struct MMU_PT_INFO **ppsPTInfoList;
/* round the byte size up to whole PT entries */
(uSize + SGX_MMU_PAGE_SIZE - 1) >> SGX_MMU_PAGE_SHIFT;
DevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
ui32PTIndex = (DevVAddr.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
PDUMPCOMMENT("Page table mods (num entries == %08X) %s",
ui32NumPTEntries, bForUnmap ? "(for unmap)" : "");
while (ui32NumPTEntries > 0) {
struct MMU_PT_INFO *psPTInfo = *ppsPTInfoList++;
/* dump at most the remainder of the current page table */
if (ui32NumPTEntries <= 1024 - ui32PTIndex)
ui32PTDumpCount = ui32NumPTEntries;
ui32PTDumpCount = 1024 - ui32PTIndex;
pui32PTEntry = (u32 *)psPTInfo->PTPageCpuVAddr;
PDUMPPAGETABLE((void *)&pui32PTEntry[ui32PTIndex],
ui32PTDumpCount * sizeof(u32), IMG_FALSE,
PDUMP_PT_UNIQUETAG, hUniqueTag);
ui32NumPTEntries -= ui32PTDumpCount;
PDUMPCOMMENT("Finished page table mods %s",
bForUnmap ? "(for unmap)" : "");
/*
 * MMU_UnmapPagesAndFreePTs - invalidate a run of PTEs and free any page
 * table that becomes completely empty.
 *
 * @psMMUHeap:     heap owning the pages
 * @sDevVAddr:     first device virtual page address
 * @ui32PageCount: number of pages to unmap
 * @hUniqueTag:    pdump tag
 *
 * For each page: locate its PD slot and PT entry; if no PT exists, log and
 * skip; otherwise clear the PTE, decrement the PT's valid-entry count, and
 * (when the count hits zero) free the whole PT via _DeferredFreePageTable.
 * Finishes with PD/PT cache invalidation requests and a pdump of the range.
 */
static void MMU_UnmapPagesAndFreePTs(struct MMU_HEAP *psMMUHeap,
struct IMG_DEV_VIRTADDR sDevVAddr,
u32 ui32PageCount, void *hUniqueTag)
u32 uPageSize = HOST_PAGESIZE();
struct IMG_DEV_VIRTADDR sTmpDevVAddr;
IMG_BOOL bInvalidateDirectoryCache = IMG_FALSE;
PVR_UNREFERENCED_PARAMETER(hUniqueTag);
sTmpDevVAddr = sDevVAddr;
for (i = 0; i < ui32PageCount; i++) {
struct MMU_PT_INFO **ppsPTInfoList;
sTmpDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT +
&psMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
ui32PTIndex = (sTmpDevVAddr.uiAddr & SGX_MMU_PT_MASK)
>> SGX_MMU_PAGE_SHIFT;
/* no page table here: nothing to unmap, move to the next page */
if (!ppsPTInfoList[0]) {
PVR_DPF(PVR_DBG_MESSAGE,
"MMU_UnmapPagesAndFreePTs: "
"Invalid PT for alloc at VAddr:0x%08lX "
"(VaddrIni:0x%08lX AllocPage:%u) "
sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr,
i, ui32PDIndex, ui32PTIndex);
sTmpDevVAddr.uiAddr += uPageSize;
pui32Tmp = (u32 *)ppsPTInfoList[0]->PTPageCpuVAddr;
if (pui32Tmp[ui32PTIndex] & SGX_MMU_PTE_VALID) {
ppsPTInfoList[0]->ui32ValidPTECount--;
PVR_DPF(PVR_DBG_MESSAGE,
"MMU_UnmapPagesAndFreePTs: "
"Page is already invalid for alloc at "
"(VAddrIni:0x%08lX AllocPage:%u) "
sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr,
i, ui32PDIndex, ui32PTIndex);
PVR_ASSERT((s32)ppsPTInfoList[0]->ui32ValidPTECount >=
pui32Tmp[ui32PTIndex] = 0;
/* PT is now empty: release it and remember to flush the PD cache */
&& ppsPTInfoList[0]->ui32ValidPTECount == 0) {
_DeferredFreePageTable(psMMUHeap,
ui32PDIndex - (psMMUHeap->
bInvalidateDirectoryCache = IMG_TRUE;
sTmpDevVAddr.uiAddr += uPageSize;
if (bInvalidateDirectoryCache) {
MMU_InvalidateDirectoryCache(psMMUHeap->psMMUContext->
MMU_InvalidatePageTableCache(psMMUHeap->psMMUContext->
MMU_PDumpPageTables(psMMUHeap, sDevVAddr, uPageSize * ui32PageCount,
IMG_TRUE, hUniqueTag);
/*
 * MMU_FreePageTables - RA import-free callback (registered in MMU_Create).
 * Converts the [ui32Start, ui32End) byte range into a device VA + page
 * count and delegates to MMU_UnmapPagesAndFreePTs.
 */
static void MMU_FreePageTables(void *pvMMUHeap, u32 ui32Start, u32 ui32End,
struct MMU_HEAP *pMMUHeap = (struct MMU_HEAP *)pvMMUHeap;
struct IMG_DEV_VIRTADDR Start;
Start.uiAddr = ui32Start;
MMU_UnmapPagesAndFreePTs(pMMUHeap, Start,
(ui32End - ui32Start) / SGX_MMU_PAGE_SIZE,
/*
 * MMU_Create - create an MMU heap over a device arena.
 *
 * @psMMUContext: owning MMU context
 * @psDevArena:   descriptor of the device VA range (base, size, type)
 * @ppsVMArena:   out: the RA arena managing the heap's VA space
 *
 * Allocates the MMU_HEAP, sets up PT bookkeeping via _AllocPageTables, and
 * creates an RA arena over the arena's VA range with MMU_FreePageTables as
 * the import-free callback.  On failure all partial allocations are undone
 * and (presumably) NULL is returned — the return statements are elided in
 * this excerpt.
 */
struct MMU_HEAP *MMU_Create(struct MMU_CONTEXT *psMMUContext,
struct DEV_ARENA_DESCRIPTOR *psDevArena,
struct RA_ARENA **ppsVMArena)
struct MMU_HEAP *pMMUHeap;
PVR_ASSERT(psDevArena != NULL);
if (psDevArena == NULL) {
PVR_DPF(PVR_DBG_ERROR, "MMU_Create: invalid parameter");
if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
sizeof(struct MMU_HEAP), (void **)&pMMUHeap, NULL)
PVR_DPF(PVR_DBG_ERROR,
"MMU_Create: ERROR call to OSAllocMem failed");
pMMUHeap->psMMUContext = psMMUContext;
pMMUHeap->psDevArena = psDevArena;
bRes = _AllocPageTables(pMMUHeap);
PVR_DPF(PVR_DBG_ERROR,
"MMU_Create: ERROR call to _AllocPageTables failed");
OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct MMU_HEAP),
/* VA allocator over the heap range; frees flow back through
 * MMU_FreePageTables so unused PTs get reclaimed */
pMMUHeap->psVMArena = RA_Create(psDevArena->pszName,
psDevArena->BaseDevVAddr.uiAddr,
psDevArena->ui32Size, NULL,
SGX_MMU_PAGE_SIZE, NULL, NULL,
MMU_FreePageTables, pMMUHeap);
if (pMMUHeap->psVMArena == NULL) {
PVR_DPF(PVR_DBG_ERROR,
"MMU_Create: ERROR call to RA_Create failed");
_DeferredFreePageTables(pMMUHeap);
OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct MMU_HEAP),
*ppsVMArena = pMMUHeap->psVMArena;
/*
 * MMU_Delete - destroy an MMU heap created by MMU_Create: delete its VA
 * arena, free all of its page tables, and free the heap structure itself.
 * Safe to call with NULL (no-op).
 */
void MMU_Delete(struct MMU_HEAP *pMMUHeap)
if (pMMUHeap != NULL) {
PVR_DPF(PVR_DBG_MESSAGE, "MMU_Delete");
if (pMMUHeap->psVMArena)
RA_Delete(pMMUHeap->psVMArena);
_DeferredFreePageTables(pMMUHeap);
OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct MMU_HEAP),
/*
 * MMU_Alloc - allocate device virtual address space from a heap.
 *
 * @pMMUHeap:           heap to allocate from
 * @uSize:              allocation size in bytes
 * @uFlags:             PVRSRV_MEM_* flags; USER_SUPPLIED_DEVVADDR means
 *                      *psDevVAddr is an input, not allocated here
 * @uDevVAddrAlignment: required alignment of the device VA
 * @psDevVAddr:         in/out device virtual address
 *
 * Unless the caller supplied the VA, grabs one from the heap's RA arena,
 * then ensures page tables cover the range via _DeferredAllocPagetables.
 * On PT-allocation failure the freshly allocated VA is released again.
 */
IMG_BOOL MMU_Alloc(struct MMU_HEAP *pMMUHeap, size_t uSize, u32 uFlags,
u32 uDevVAddrAlignment, struct IMG_DEV_VIRTADDR *psDevVAddr)
PVR_DPF(PVR_DBG_MESSAGE,
"MMU_Alloc: uSize=0x%x, flags=0x%x, align=0x%x",
uSize, uFlags, uDevVAddrAlignment);
if ((uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) == 0) {
bStatus = RA_Alloc(pMMUHeap->psVMArena, uSize, NULL, 0,
uDevVAddrAlignment, &(psDevVAddr->uiAddr));
PVR_DPF(PVR_DBG_ERROR,
"MMU_Alloc: RA_Alloc of VMArena failed");
bStatus = _DeferredAllocPagetables(pMMUHeap, *psDevVAddr, uSize);
PVR_DPF(PVR_DBG_ERROR,
"MMU_Alloc: _DeferredAllocPagetables failed");
/* undo the VA allocation we made above */
if ((uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) == 0)
RA_Free(pMMUHeap->psVMArena, psDevVAddr->uiAddr,
/*
 * MMU_Free - return a device virtual range to the heap's VA arena.
 * The range must lie entirely within the heap's device arena; otherwise an
 * error is logged.  The IMG_TRUE passed to RA_Free requests immediate
 * reclaim (which can trigger MMU_FreePageTables via the arena callback).
 */
void MMU_Free(struct MMU_HEAP *pMMUHeap, struct IMG_DEV_VIRTADDR DevVAddr,
PVR_ASSERT(pMMUHeap != NULL);
if (pMMUHeap == NULL) {
PVR_DPF(PVR_DBG_ERROR, "MMU_Free: invalid parameter");
PVR_DPF(PVR_DBG_MESSAGE,
"MMU_Free: mmu=%08X, dev_vaddr=%08X", pMMUHeap,
/* only free if [DevVAddr, DevVAddr+ui32Size) is inside the arena */
if ((DevVAddr.uiAddr >= pMMUHeap->psDevArena->BaseDevVAddr.uiAddr) &&
(DevVAddr.uiAddr + ui32Size <=
pMMUHeap->psDevArena->BaseDevVAddr.uiAddr +
pMMUHeap->psDevArena->ui32Size)) {
RA_Free(pMMUHeap->psVMArena, DevVAddr.uiAddr, IMG_TRUE);
PVR_DPF(PVR_DBG_ERROR,
"MMU_Free: Couldn't find DevVAddr %08X in a DevArena",
/* MMU_Enable - intentionally a no-op; kept to satisfy the MMU interface. */
void MMU_Enable(struct MMU_HEAP *pMMUHeap)
PVR_UNREFERENCED_PARAMETER(pMMUHeap);
/* MMU_Disable - intentionally a no-op; kept to satisfy the MMU interface. */
void MMU_Disable(struct MMU_HEAP *pMMUHeap)
PVR_UNREFERENCED_PARAMETER(pMMUHeap);
/*
 * MMU_MapPage - write a single page-table entry.
 *
 * @pMMUHeap:     heap the VA belongs to (its PTs must already exist)
 * @DevVAddr:     device virtual address of the page
 * @DevPAddr:     device physical address to map it to
 * @ui32MemFlags: PVRSRV_MEM_* flags, translated to SGX PTE bits
 *
 * Translates READ/WRITE/CACHE_CONSISTENT/EDM_PROTECT flags into PTE flag
 * bits, locates the PTE via the PD slot then the PT index, warns (and
 * asserts) if the entry is already valid, bumps the PT's valid-entry
 * count, and writes addr | VALID | flags.
 */
static void MMU_MapPage(struct MMU_HEAP *pMMUHeap,
struct IMG_DEV_VIRTADDR DevVAddr,
struct IMG_DEV_PHYADDR DevPAddr, u32 ui32MemFlags)
u32 ui32MMUFlags = 0;
struct MMU_PT_INFO **ppsPTInfoList;
/* read+write is the default PTE state (no extra bits set here) */
if (((PVRSRV_MEM_READ | PVRSRV_MEM_WRITE) & ui32MemFlags) ==
(PVRSRV_MEM_READ | PVRSRV_MEM_WRITE))
else if (PVRSRV_MEM_READ & ui32MemFlags)
ui32MMUFlags |= SGX_MMU_PTE_READONLY;
else if (PVRSRV_MEM_WRITE & ui32MemFlags)
ui32MMUFlags |= SGX_MMU_PTE_WRITEONLY;
if (PVRSRV_MEM_CACHE_CONSISTENT & ui32MemFlags)
ui32MMUFlags |= SGX_MMU_PTE_CACHECONSISTENT;
if (PVRSRV_MEM_EDM_PROTECT & ui32MemFlags)
ui32MMUFlags |= SGX_MMU_PTE_EDMPROTECT;
/* ui32Index is first the PD slot, then reused as the PT index */
ui32Index = DevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32Index];
ui32Index = (DevVAddr.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
pui32Tmp = (u32 *) ppsPTInfoList[0]->PTPageCpuVAddr;
/* double-mapping the same page is a bug — complain loudly */
if (pui32Tmp[ui32Index] & SGX_MMU_PTE_VALID)
PVR_DPF(PVR_DBG_ERROR,
"Page is already valid for alloc at "
"VAddr:0x%08lX PDIdx:%u PTIdx:%u",
DevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT +
SGX_MMU_PT_SHIFT), ui32Index);
PVR_ASSERT((pui32Tmp[ui32Index] & SGX_MMU_PTE_VALID) == 0);
ppsPTInfoList[0]->ui32ValidPTECount++;
pui32Tmp[ui32Index] = (DevPAddr.uiAddr & SGX_MMU_PTE_ADDR_MASK)
| SGX_MMU_PTE_VALID | ui32MMUFlags;
/*
 * MMU_MapScatter - map an array of (possibly discontiguous) system-physical
 * pages to a contiguous device virtual range.
 *
 * @psSysAddr: array of per-page system physical addresses (one per
 *             SGX_MMU_PAGE_SIZE step of uSize)
 *
 * Each page is converted to a device physical address and written via
 * MMU_MapPage; the whole range is then pdumped.
 */
void MMU_MapScatter(struct MMU_HEAP *pMMUHeap, struct IMG_DEV_VIRTADDR DevVAddr,
struct IMG_SYS_PHYADDR *psSysAddr, size_t uSize,
u32 ui32MemFlags, void *hUniqueTag)
struct IMG_DEV_VIRTADDR MapBaseDevVAddr;
struct IMG_DEV_PHYADDR DevPAddr;
PVR_ASSERT(pMMUHeap != NULL);
MapBaseDevVAddr = DevVAddr;
PVR_UNREFERENCED_PARAMETER(hUniqueTag);
for (i = 0, uCount = 0; uCount < uSize;
i++, uCount += SGX_MMU_PAGE_SIZE) {
struct IMG_SYS_PHYADDR sSysAddr;
sSysAddr = psSysAddr[i];
SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysAddr);
MMU_MapPage(pMMUHeap, DevVAddr, DevPAddr, ui32MemFlags);
DevVAddr.uiAddr += SGX_MMU_PAGE_SIZE;
PVR_DPF(PVR_DBG_MESSAGE, "MMU_MapScatter: "
"devVAddr=%08X, SysAddr=%08X, size=0x%x/0x%x",
DevVAddr.uiAddr, sSysAddr.uiAddr, uCount, uSize);
MMU_PDumpPageTables(pMMUHeap, MapBaseDevVAddr, uSize, IMG_FALSE,
/*
 * MMU_MapPages - map a physically contiguous range to a contiguous device
 * virtual range.
 *
 * With PVRSRV_MEM_DUMMY the physical advance is altered (the assignment is
 * elided here — presumably ui32PAdvance becomes 0 so every VA maps the same
 * dummy page; confirm against the full source).  Otherwise both VA and PA
 * step by one page per iteration.  The range is pdumped at the end.
 */
void MMU_MapPages(struct MMU_HEAP *pMMUHeap, struct IMG_DEV_VIRTADDR DevVAddr,
struct IMG_SYS_PHYADDR SysPAddr, size_t uSize,
u32 ui32MemFlags, void *hUniqueTag)
struct IMG_DEV_PHYADDR DevPAddr;
struct IMG_DEV_VIRTADDR MapBaseDevVAddr;
u32 ui32VAdvance = SGX_MMU_PAGE_SIZE;
u32 ui32PAdvance = SGX_MMU_PAGE_SIZE;
PVR_ASSERT(pMMUHeap != NULL);
PVR_DPF(PVR_DBG_MESSAGE, "MMU_MapPages: "
"mmu=%08X, devVAddr=%08X, SysPAddr=%08X, size=0x%x",
pMMUHeap, DevVAddr.uiAddr, SysPAddr.uiAddr, uSize);
MapBaseDevVAddr = DevVAddr;
PVR_UNREFERENCED_PARAMETER(hUniqueTag);
DevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, SysPAddr);
if (ui32MemFlags & PVRSRV_MEM_DUMMY)
for (uCount = 0; uCount < uSize; uCount += ui32VAdvance) {
MMU_MapPage(pMMUHeap, DevVAddr, DevPAddr, ui32MemFlags);
DevVAddr.uiAddr += ui32VAdvance;
DevPAddr.uiAddr += ui32PAdvance;
MMU_PDumpPageTables(pMMUHeap, MapBaseDevVAddr, uSize, IMG_FALSE,
/*
 * MMU_MapShadow - map CPU-allocated ("shadow") memory into the device's
 * address space, one page at a time.
 *
 * For each page the CPU physical address is obtained either from the
 * kernel virtual address (OSMapLinToCPUPhys) or from the OS mem handle
 * (OSMemHandleToCpuPAddr), converted to a device physical address, and
 * written via MMU_MapPage.  *pDevVAddr is set to the base of the mapping.
 * Both CpuVAddr and uByteSize must be page-aligned (asserted).
 */
void MMU_MapShadow(struct MMU_HEAP *pMMUHeap,
struct IMG_DEV_VIRTADDR MapBaseDevVAddr,
size_t uByteSize, void *CpuVAddr, void *hOSMemHandle,
struct IMG_DEV_VIRTADDR *pDevVAddr, u32 ui32MemFlags,
struct IMG_DEV_VIRTADDR MapDevVAddr;
u32 ui32VAdvance = SGX_MMU_PAGE_SIZE;
u32 ui32PAdvance = SGX_MMU_PAGE_SIZE;
PVR_UNREFERENCED_PARAMETER(hUniqueTag);
PVR_DPF(PVR_DBG_MESSAGE,
"MMU_MapShadow: %08X, 0x%x, %08X",
MapBaseDevVAddr.uiAddr, uByteSize, CpuVAddr);
PVR_ASSERT(((u32) CpuVAddr & (SGX_MMU_PAGE_SIZE - 1)) == 0);
PVR_ASSERT(((u32) uByteSize & (SGX_MMU_PAGE_SIZE - 1)) == 0);
pDevVAddr->uiAddr = MapBaseDevVAddr.uiAddr;
/* DUMMY mapping alters the physical advance (assignment elided) */
if (ui32MemFlags & PVRSRV_MEM_DUMMY)
MapDevVAddr = MapBaseDevVAddr;
for (i = 0; i < uByteSize; i += ui32VAdvance) {
struct IMG_CPU_PHYADDR CpuPAddr;
struct IMG_DEV_PHYADDR DevPAddr;
OSMapLinToCPUPhys((void *)((u32)CpuVAddr +
CpuPAddr = OSMemHandleToCpuPAddr(hOSMemHandle, uOffset);
SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, CpuPAddr);
PVR_DPF(PVR_DBG_MESSAGE, "0x%x: CpuVAddr=%08X, "
"CpuPAddr=%08X, DevVAddr=%08X, DevPAddr=%08X",
uOffset, (u32)CpuVAddr + uOffset, CpuPAddr.uiAddr,
MapDevVAddr.uiAddr, DevPAddr.uiAddr);
MMU_MapPage(pMMUHeap, MapDevVAddr, DevPAddr, ui32MemFlags);
MapDevVAddr.uiAddr += ui32VAdvance;
uOffset += ui32PAdvance;
MMU_PDumpPageTables(pMMUHeap, MapBaseDevVAddr, uByteSize, IMG_FALSE,
/*
 * MMU_UnmapPages - invalidate the PTEs for a run of device pages without
 * freeing the page tables themselves (contrast MMU_UnmapPagesAndFreePTs).
 *
 * For each page: if no PT exists, log an error and skip; otherwise clear
 * the PTE and decrement the PT's valid count (logging if the entry was
 * already invalid).  Requests a PT cache invalidate and pdumps the range.
 */
void MMU_UnmapPages(struct MMU_HEAP *psMMUHeap,
struct IMG_DEV_VIRTADDR sDevVAddr, u32 ui32PageCount,
u32 uPageSize = HOST_PAGESIZE();
struct IMG_DEV_VIRTADDR sTmpDevVAddr;
PVR_UNREFERENCED_PARAMETER(hUniqueTag);
sTmpDevVAddr = sDevVAddr;
for (i = 0; i < ui32PageCount; i++) {
struct MMU_PT_INFO **ppsPTInfoList;
ui32PDIndex = sTmpDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT +
ppsPTInfoList = &psMMUHeap->psMMUContext->
apsPTInfoList[ui32PDIndex];
ui32PTIndex = (sTmpDevVAddr.uiAddr & SGX_MMU_PT_MASK) >>
/* no PT for this slot: report and advance to the next page */
if (!ppsPTInfoList[0]) {
PVR_DPF(PVR_DBG_ERROR,
"ERROR Invalid PT for alloc at VAddr:0x%08lX "
"(VaddrIni:0x%08lX AllocPage:%u) PDIdx:%u "
sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr, i,
ui32PDIndex, ui32PTIndex);
sTmpDevVAddr.uiAddr += uPageSize;
pui32Tmp = (u32 *)ppsPTInfoList[0]->PTPageCpuVAddr;
if (pui32Tmp[ui32PTIndex] & SGX_MMU_PTE_VALID)
ppsPTInfoList[0]->ui32ValidPTECount--;
PVR_DPF(PVR_DBG_ERROR,
"MMU_UnmapPages: Page is already invalid "
"for alloc at VAddr:0x%08lX "
"(VAddrIni:0x%08lX AllocPage:%u) "
"PDIdx:%u PTIdx:%u",
sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr, i,
ui32PDIndex, ui32PTIndex);
PVR_ASSERT((s32) ppsPTInfoList[0]->ui32ValidPTECount >= 0);
pui32Tmp[ui32PTIndex] = 0;
sTmpDevVAddr.uiAddr += uPageSize;
MMU_InvalidatePageTableCache(psMMUHeap->psMMUContext->psDevInfo);
MMU_PDumpPageTables(psMMUHeap, sDevVAddr, uPageSize * ui32PageCount,
IMG_TRUE, hUniqueTag);
/*
 * MMU_GetPhysPageAddr - translate a device virtual page address to its
 * device physical address by walking the software PT structures.
 * Returns a zero address if no page table covers the VA.
 */
struct IMG_DEV_PHYADDR MMU_GetPhysPageAddr(struct MMU_HEAP *pMMUHeap,
struct IMG_DEV_VIRTADDR sDevVPageAddr)
u32 *pui32PageTable;
struct IMG_DEV_PHYADDR sDevPAddr;
struct MMU_PT_INFO **ppsPTInfoList;
/* ui32Index: first the PD slot, then reused as the PT index */
ui32Index = sDevVPageAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT +
ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32Index];
if (!ppsPTInfoList[0]) {
PVR_DPF(PVR_DBG_ERROR,
"MMU_GetPhysPageAddr: Not mapped in at 0x%08x",
sDevVPageAddr.uiAddr);
sDevPAddr.uiAddr = 0;
(sDevVPageAddr.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
pui32PageTable = (u32 *) ppsPTInfoList[0]->PTPageCpuVAddr;
sDevPAddr.uiAddr = pui32PageTable[ui32Index];
/* strip the VALID/flag bits, keep only the physical address */
sDevPAddr.uiAddr &= SGX_MMU_PTE_ADDR_MASK;
/* MMU_GetPDDevPAddr - return the device-physical address of the context's
 * page directory. */
struct IMG_DEV_PHYADDR MMU_GetPDDevPAddr(struct MMU_CONTEXT *pMMUContext)
return pMMUContext->sPDDevPAddr;
/*
 * SGXGetPhysPageAddrKM - bridge entry: look up the physical address backing
 * a device virtual address in a device memory heap.
 *
 * Writes the same device-physical value into both *pCpuPAddr and
 * *pDevPAddr (the two address spaces coincide on this platform as written
 * — NOTE(review): confirm this equivalence is intended).  Returns
 * PVRSRV_ERROR_INVALID_PARAMS when the VA is not mapped (lookup returned
 * address 0).
 */
enum PVRSRV_ERROR SGXGetPhysPageAddrKM(void *hDevMemHeap,
struct IMG_DEV_VIRTADDR sDevVAddr,
struct IMG_DEV_PHYADDR *pDevPAddr,
struct IMG_CPU_PHYADDR *pCpuPAddr)
struct MMU_HEAP *pMMUHeap;
struct IMG_DEV_PHYADDR DevPAddr;
pMMUHeap = (struct MMU_HEAP *)BM_GetMMUHeap(hDevMemHeap);
DevPAddr = MMU_GetPhysPageAddr(pMMUHeap, sDevVAddr);
pCpuPAddr->uiAddr = DevPAddr.uiAddr;
pDevPAddr->uiAddr = DevPAddr.uiAddr;
return (pDevPAddr->uiAddr != 0) ?
PVRSRV_OK : PVRSRV_ERROR_INVALID_PARAMS;
/*
 * SGXGetMMUPDAddrKM - bridge entry: return the page-directory device
 * physical address of a device memory context.  hDevCookie is validated
 * but otherwise unused in the visible code.
 */
enum PVRSRV_ERROR SGXGetMMUPDAddrKM(void *hDevCookie,
void *hDevMemContext,
struct IMG_DEV_PHYADDR *psPDDevPAddr)
if (!hDevCookie || !hDevMemContext || !psPDDevPAddr)
return PVRSRV_ERROR_INVALID_PARAMS;
((struct BM_CONTEXT *)hDevMemContext)->psMMUContext->sPDDevPAddr;
/*
 * MMU_BIFResetPDAlloc - allocate the three-page scratch block used while
 * resetting the SGX BIF: a dummy page directory, a dummy page table, and a
 * data page (filled with 0xDB poison).
 *
 * The block comes from the OS when there is no local dev-mem arena,
 * otherwise from arena[0] via RA_Alloc plus a kernel mapping.  Records the
 * handle, the device physical addresses of all three pages (PD, PD+4K,
 * PD+8K), and kernel pointers to the PD/PT in psDevInfo, then zeroes the
 * PD and PT.
 *
 * NOTE(review): the visible LMA error paths return without releasing
 * earlier allocations — confirm against the full source.
 */
enum PVRSRV_ERROR MMU_BIFResetPDAlloc(struct PVRSRV_SGXDEV_INFO *psDevInfo)
enum PVRSRV_ERROR eError;
struct SYS_DATA *psSysData;
struct RA_ARENA *psLocalDevMemArena;
void *hOSMemHandle = NULL;
u8 *pui8MemBlock = NULL;
struct IMG_SYS_PHYADDR sMemBlockSysPAddr;
struct IMG_CPU_PHYADDR sMemBlockCpuPAddr;
eError = SysAcquireData(&psSysData);
if (eError != PVRSRV_OK) {
PVR_DPF(PVR_DBG_ERROR,
"MMU_BIFResetPDAlloc: ERROR call to SysAcquireData failed");
psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];
if (psLocalDevMemArena == NULL) {
/* UMA path: three pages straight from the OS */
OSAllocPages(PVRSRV_HAP_WRITECOMBINE |
PVRSRV_HAP_KERNEL_ONLY, 3 * SGX_MMU_PAGE_SIZE,
SGX_MMU_PAGE_SIZE, (void **)&pui8MemBlock,
if (eError != PVRSRV_OK) {
PVR_DPF(PVR_DBG_ERROR, "MMU_BIFResetPDAlloc: "
"ERROR call to OSAllocPages failed");
sMemBlockCpuPAddr = OSMapLinToCPUPhys(pui8MemBlock);
/* LMA path: three pages from the local arena + kernel map */
if (RA_Alloc(psLocalDevMemArena, 3 * SGX_MMU_PAGE_SIZE,
NULL, 0, SGX_MMU_PAGE_SIZE,
&(sMemBlockSysPAddr.uiAddr)) != IMG_TRUE) {
PVR_DPF(PVR_DBG_ERROR, "MMU_BIFResetPDAlloc: "
"ERROR call to RA_Alloc failed");
return PVRSRV_ERROR_OUT_OF_MEMORY;
sMemBlockCpuPAddr = SysSysPAddrToCpuPAddr(sMemBlockSysPAddr);
pui8MemBlock = (void __force *)OSMapPhysToLin(sMemBlockCpuPAddr,
SGX_MMU_PAGE_SIZE * 3,
PVRSRV_HAP_WRITECOMBINE |
PVRSRV_HAP_KERNEL_ONLY,
if (!pui8MemBlock) {
PVR_DPF(PVR_DBG_ERROR, "MMU_BIFResetPDAlloc: "
"ERROR failed to map page tables");
return PVRSRV_ERROR_BAD_MAPPING;
psDevInfo->hBIFResetPDOSMemHandle = hOSMemHandle;
/* the three consecutive pages: PD, then PT, then the data page */
psDevInfo->sBIFResetPDDevPAddr =
SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sMemBlockCpuPAddr);
psDevInfo->sBIFResetPTDevPAddr.uiAddr =
psDevInfo->sBIFResetPDDevPAddr.uiAddr + SGX_MMU_PAGE_SIZE;
psDevInfo->sBIFResetPageDevPAddr.uiAddr =
psDevInfo->sBIFResetPTDevPAddr.uiAddr + SGX_MMU_PAGE_SIZE;
psDevInfo->pui32BIFResetPD = (u32 *) pui8MemBlock;
psDevInfo->pui32BIFResetPT =
(u32 *) (pui8MemBlock + SGX_MMU_PAGE_SIZE);
OSMemSet(psDevInfo->pui32BIFResetPD, 0, SGX_MMU_PAGE_SIZE);
OSMemSet(psDevInfo->pui32BIFResetPT, 0, SGX_MMU_PAGE_SIZE);
/* poison the data page so stray reads are recognisable */
OSMemSet(pui8MemBlock + (2 * SGX_MMU_PAGE_SIZE), 0xDB,
/*
 * MMU_BIFResetPDFree - release the three-page BIF-reset block allocated by
 * MMU_BIFResetPDAlloc: OSFreePages on the UMA path, or unmap + RA_Free
 * back to local arena[0] on the LMA path.
 */
void MMU_BIFResetPDFree(struct PVRSRV_SGXDEV_INFO *psDevInfo)
enum PVRSRV_ERROR eError;
struct SYS_DATA *psSysData;
struct RA_ARENA *psLocalDevMemArena;
struct IMG_SYS_PHYADDR sPDSysPAddr;
eError = SysAcquireData(&psSysData);
if (eError != PVRSRV_OK) {
PVR_DPF(PVR_DBG_ERROR, "MMU_BIFResetPDFree: "
"ERROR call to SysAcquireData failed");
psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];
if (psLocalDevMemArena == NULL) {
OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
3 * SGX_MMU_PAGE_SIZE,
psDevInfo->pui32BIFResetPD,
psDevInfo->hBIFResetPDOSMemHandle);
OSUnMapPhysToLin((void __force __iomem *)
psDevInfo->pui32BIFResetPD,
3 * SGX_MMU_PAGE_SIZE,
PVRSRV_HAP_WRITECOMBINE |
PVRSRV_HAP_KERNEL_ONLY,
psDevInfo->hBIFResetPDOSMemHandle);
SysDevPAddrToSysPAddr(PVRSRV_DEVICE_TYPE_SGX,
psDevInfo->sBIFResetPDDevPAddr);
RA_Free(psLocalDevMemArena, sPDSysPAddr.uiAddr, IMG_FALSE);
/* mmu_get_page_dir - raw u32 accessor for the context's page-directory
 * device physical address. */
u32 mmu_get_page_dir(struct MMU_CONTEXT *psMMUContext)
return psMMUContext->sPDDevPAddr.uiAddr;
1426 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_PVR_DEBUG)
/*
 * hwrec_mem_dump_page - HW-recovery debugfs helper: ioremap one device
 * page and emit its contents between <PAGE>...</PAGE> markers.
 * (Return type and unmap are elided in this excerpt.)
 */
hwrec_mem_dump_page(u32 dev_p_addr)
page = ioremap_nocache(dev_p_addr, SGX_MMU_PAGE_SIZE);
/* Loop through all the pages and dump them */
hwrec_mem_print("<PAGE PA:0x%08X>\n", dev_p_addr);
hwrec_mem_write((void __force *) page, PAGE_SIZE);
hwrec_mem_print("</PAGE>\n");
/*
 * hwrec_mem_dump_table - HW-recovery debugfs helper: ioremap a page table,
 * print all 1024 entries between <TABLE>...</TABLE> markers, then recurse
 * into every valid entry via hwrec_mem_dump_page.
 */
hwrec_mem_dump_table(u32 dev_p_addr)
pt = ioremap_nocache(dev_p_addr, SGX_MMU_PAGE_SIZE);
/* Loop through all the page tables and dump them */
hwrec_mem_print("<TABLE PA:0x%08X>\n", dev_p_addr);
for (i = 0 ; i < 1024 ; i++)
hwrec_mem_print("0x%08X\n", readl(pt + 4 * i));
hwrec_mem_print("</TABLE>\n");
/* second pass: descend into each mapped page */
for (i = 0; i < 1024; i++) {
u32 addr = readl(pt + 4 * i);
if (addr & SGX_MMU_PDE_VALID)
hwrec_mem_dump_page(addr & SGX_MMU_PDE_ADDR_MASK);
/*
 * hwrec_mem_dump_dir - HW-recovery debugfs helper: print all 1024 page-
 * directory entries of a context between <DIR>...</DIR> markers, then
 * recurse into every valid PDE via hwrec_mem_dump_table.
 */
hwrec_mem_dump_dir(struct MMU_CONTEXT *context)
void __iomem *pd = (void __force __iomem *) context->pvPDCpuVAddr;
hwrec_mem_print("<DIR PA:0x%08X>\n", context->sPDDevPAddr);
for (i = 0; i < 1024; i++)
hwrec_mem_print("0x%08X\n", readl(pd + 4 * i));
hwrec_mem_print("</DIR>\n");
/* second pass: descend into each valid page table */
for (i = 0; i < 1024; i++) {
u32 addr = readl(pd + 4 * i);
if (addr & SGX_MMU_PDE_VALID)
hwrec_mem_dump_table(addr & SGX_MMU_PDE_ADDR_MASK);
/*
 * mmu_hwrec_mem_dump - HW-recovery debugfs entry point: read the active
 * page-directory base from EUR_CR_BIF_DIR_LIST_BASE0, find the matching
 * MMU context on the device's context list, and dump its full directory.
 * Logs an error if no context matches the hardware register.
 */
mmu_hwrec_mem_dump(struct PVRSRV_SGXDEV_INFO *psDevInfo)
struct MMU_CONTEXT *context = psDevInfo->pvMMUContextList;
page_dir = readl(psDevInfo->pvRegsBaseKM + EUR_CR_BIF_DIR_LIST_BASE0);
if (context->sPDDevPAddr.uiAddr == page_dir)
context = context->psNext;
pr_err("Unable to find matching context for page directory"
" 0x%08X\n", page_dir);
return hwrec_mem_dump_dir(context);
1523 #endif /* CONFIG_DEBUG_FS && CONFIG_PVR_DEBUG */