1 /**********************************************************************
3 * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
9 * This program is distributed in the hope it will be useful but, except
10 * as otherwise stated in writing, without any warranty; without even the
11 * implied warranty of merchantability or fitness for a particular purpose.
12 * See the GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 * The full GNU General Public License is included in this distribution in
19 * the file called "COPYING".
21 * Contact Information:
22 * Imagination Technologies Ltd. <gpl-support@imgtec.com>
23 * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
25 ******************************************************************************/
29 #include "services_headers.h"
30 #include "buffer_manager.h"
34 #include "sgxapi_km.h"
35 #include "sgx_bridge_km.h"
37 #include "sgxinfokm.h"
40 #define UINT32_MAX_VALUE 0xFFFFFFFFUL
43 void *hPTPageOSMemHandle;
45 u32 ui32ValidPTECount;
49 struct PVRSRV_DEVICE_NODE *psDeviceNode;
51 struct IMG_DEV_PHYADDR sPDDevPAddr;
53 struct MMU_PT_INFO *apsPTInfoList[1024];
54 struct PVRSRV_SGXDEV_INFO *psDevInfo;
55 struct MMU_CONTEXT *psNext;
59 struct MMU_CONTEXT *psMMUContext;
65 struct RA_ARENA *psVMArena;
67 struct DEV_ARENA_DESCRIPTOR *psDevArena;
72 static void MMU_PDumpPageTables(struct MMU_HEAP *pMMUHeap,
73 struct IMG_DEV_VIRTADDR DevVAddr, size_t uSize,
74 IMG_BOOL bForUnmap, void *hUniqueTag);
80 void MMU_InvalidateDirectoryCache(struct PVRSRV_SGXDEV_INFO *psDevInfo)
82 psDevInfo->ui32CacheControl |= SGX_BIF_INVALIDATE_PDCACHE;
85 static void MMU_InvalidatePageTableCache(struct PVRSRV_SGXDEV_INFO *psDevInfo)
87 psDevInfo->ui32CacheControl |= SGX_BIF_INVALIDATE_PTCACHE;
/*
 * _AllocPageTables - size the page-table bookkeeping for a new MMU heap.
 * Computes how many PT entries / PT pages the heap's device arena needs.
 * NOTE(review): this listing has elided lines (braces, the shift amounts
 * at the end of two expressions, and the return path); comments below
 * describe only what the visible lines establish.
 */
90 static IMG_BOOL _AllocPageTables(struct MMU_HEAP *pMMUHeap)
92 PVR_DPF(PVR_DBG_MESSAGE, "_AllocPageTables()");
94 PVR_ASSERT(pMMUHeap != NULL);
/* The MMU code assumes host pages and SGX MMU pages are the same size. */
95 PVR_ASSERT(HOST_PAGESIZE() == SGX_MMU_PAGE_SIZE);
97 if (pMMUHeap == NULL) {
98 PVR_DPF(PVR_DBG_ERROR, "_AllocPageTables: invalid parameter");
/* One PT entry per device page in the arena. */
102 pMMUHeap->ui32PTEntryCount =
103 pMMUHeap->psDevArena->ui32Size >> SGX_MMU_PAGE_SHIFT;
/* Index of the first PT entry covering the arena's base device address. */
105 pMMUHeap->ui32PTBaseIndex =
106 (pMMUHeap->psDevArena->BaseDevVAddr.
107 uiAddr & (SGX_MMU_PD_MASK | SGX_MMU_PT_MASK)) >>
/* Round the entry count up to whole page-table pages. */
110 pMMUHeap->ui32PTPageCount =
111 (pMMUHeap->ui32PTEntryCount + SGX_MMU_PT_SIZE - 1) >>
/*
 * _DeferredFreePageTable - tear down one page table of a heap.
 * @ui32PTIndex: PT page index relative to the heap's first PD entry.
 *
 * Clears the corresponding page-directory entry (in every MMU context for
 * shared heaps, or just this context for per-context/kernel heaps), then
 * releases the PT page's backing memory and its MMU_PT_INFO record.
 * NOTE(review): the listing elides several lines (PD-index computation
 * shift, some branch/brace structure); comments are hedged accordingly.
 */
117 static void _DeferredFreePageTable(struct MMU_HEAP *pMMUHeap, u32 ui32PTIndex)
122 struct SYS_DATA *psSysData;
123 struct MMU_PT_INFO **ppsPTInfoList;
125 if (SysAcquireData(&psSysData) != PVRSRV_OK) {
126 PVR_DPF(PVR_DBG_ERROR, "_DeferredFreePageTables: "
127 "ERROR call to SysAcquireData failed");
/* PD index of the heap's base address (shift amount elided in listing). */
132 pMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT +
135 ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
/* A PT should only be freed when absent or when no valid PTEs remain. */
138 PVR_ASSERT(ppsPTInfoList[ui32PTIndex] == NULL ||
139 ppsPTInfoList[ui32PTIndex]->ui32ValidPTECount ==
143 PDUMPCOMMENT("Free page table (page count == %08X)",
144 pMMUHeap->ui32PTPageCount);
145 if (ppsPTInfoList[ui32PTIndex]
146 && ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr)
147 PDUMPFREEPAGETABLE(PVRSRV_DEVICE_TYPE_SGX,
148 ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr,
149 SGX_MMU_PAGE_SIZE, PDUMP_PT_UNIQUETAG)
151 switch (pMMUHeap->psDevArena->DevMemHeapType) {
152 case DEVICE_MEMORY_HEAP_SHARED:
153 case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
/* Shared heaps appear in every MMU context: walk the context list and
 * zero this PD entry in each context's page directory. */
155 struct MMU_CONTEXT *psMMUContext =
156 (struct MMU_CONTEXT *)
157 pMMUHeap->psMMUContext->psDevInfo->pvMMUContextList;
159 while (psMMUContext) {
161 (u32 *) psMMUContext->pvPDCpuVAddr;
162 pui32PDEntry += ui32PDIndex;
163 pui32PDEntry[ui32PTIndex] = 0;
164 PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX,
166 pui32PDEntry[ui32PTIndex],
167 sizeof(u32), 0, IMG_FALSE,
170 psMMUContext = psMMUContext->psNext;
174 case DEVICE_MEMORY_HEAP_PERCONTEXT:
175 case DEVICE_MEMORY_HEAP_KERNEL:
/* Per-context/kernel heaps: only this context's PD entry is cleared. */
179 (u32 *) pMMUHeap->psMMUContext->pvPDCpuVAddr;
180 pui32PDEntry += ui32PDIndex;
181 pui32PDEntry[ui32PTIndex] = 0;
182 PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX,
183 (void *) &pui32PDEntry[ui32PTIndex],
184 sizeof(u32), 0, IMG_FALSE,
185 PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
190 PVR_DPF(PVR_DBG_ERROR,
191 "_DeferredFreePagetable: ERROR invalid heap type");
/* Release the PT page itself and its MMU_PT_INFO bookkeeping. */
196 if (ppsPTInfoList[ui32PTIndex] != NULL) {
197 if (ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr != NULL) {
201 (u32 *) ppsPTInfoList[ui32PTIndex]->
/* Cap at 1024 entries — one PT page's worth (see apsPTInfoList[1024]). */
205 (i < pMMUHeap->ui32PTEntryCount) && (i < 1024);
/* No local arena => PT was OSAllocPages'd; free it the same way. */
209 if (pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->
210 psLocalDevMemArena == NULL) {
211 OSFreePages(PVRSRV_HAP_WRITECOMBINE |
212 PVRSRV_HAP_KERNEL_ONLY,
214 ppsPTInfoList[ui32PTIndex]->
216 ppsPTInfoList[ui32PTIndex]->
/* Otherwise: unmap the kernel mapping and return the sys-phys page
 * to the local device memory arena. */
219 struct IMG_SYS_PHYADDR sSysPAddr;
220 struct IMG_CPU_PHYADDR sCpuPAddr;
223 OSMapLinToCPUPhys(ppsPTInfoList
226 sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);
228 OSUnMapPhysToLin((void __force __iomem *)
229 ppsPTInfoList[ui32PTIndex]->
232 PVRSRV_HAP_WRITECOMBINE |
233 PVRSRV_HAP_KERNEL_ONLY,
234 ppsPTInfoList[ui32PTIndex]->
237 RA_Free(pMMUHeap->psDevArena->
238 psDeviceMemoryHeapInfo->
240 sSysPAddr.uiAddr, IMG_FALSE);
/* Keep the heap's outstanding-entry count in sync with what was freed. */
243 pMMUHeap->ui32PTEntryCount -= i;
245 pMMUHeap->ui32PTEntryCount -= 1024;
248 OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
249 sizeof(struct MMU_PT_INFO),
250 ppsPTInfoList[ui32PTIndex], NULL);
251 ppsPTInfoList[ui32PTIndex] = NULL;
253 pMMUHeap->ui32PTEntryCount -= 1024;
256 PDUMPCOMMENT("Finished free page table (page count == %08X)",
257 pMMUHeap->ui32PTPageCount);
260 static void _DeferredFreePageTables(struct MMU_HEAP *pMMUHeap)
264 for (i = 0; i < pMMUHeap->ui32PTPageCount; i++)
265 _DeferredFreePageTable(pMMUHeap, i);
266 MMU_InvalidateDirectoryCache(pMMUHeap->psMMUContext->psDevInfo);
/*
 * _DeferredAllocPagetables - ensure page tables exist for a VA range.
 * @DevVAddr: start of the device-virtual range being mapped.
 * @ui32Size: size of the range in bytes.
 *
 * For each page-directory slot covered by [DevVAddr, DevVAddr+ui32Size),
 * allocates (if absent) an MMU_PT_INFO record and a backing PT page,
 * zeroes the PT, and wires it into the page directory — for every MMU
 * context when the heap is shared, otherwise only this context.
 * NOTE(review): the listing elides lines (error returns, some PD-entry
 * flag ORs, closing braces); hedged comments only.
 */
269 static IMG_BOOL _DeferredAllocPagetables(struct MMU_HEAP *pMMUHeap,
270 struct IMG_DEV_VIRTADDR DevVAddr, u32 ui32Size)
276 struct MMU_PT_INFO **ppsPTInfoList;
277 struct SYS_DATA *psSysData;
278 struct IMG_DEV_VIRTADDR sHighDevVAddr;
280 PVR_ASSERT(DevVAddr.uiAddr < (1 << SGX_FEATURE_ADDRESS_SPACE_SIZE));
282 if (SysAcquireData(&psSysData) != PVRSRV_OK)
/* First PD index covered by the range. */
286 DevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
/* Guard the end-address computation against 32-bit wrap: clamp to
 * UINT32_MAX_VALUE instead of overflowing. */
288 if ((UINT32_MAX_VALUE - DevVAddr.uiAddr) <
289 (ui32Size + (1 << (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT)) - 1)) {
291 sHighDevVAddr.uiAddr = UINT32_MAX_VALUE;
293 sHighDevVAddr.uiAddr = DevVAddr.uiAddr + ui32Size +
294 (1 << (SGX_MMU_PAGE_SHIFT +
295 SGX_MMU_PT_SHIFT)) - 1;
299 sHighDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
/* Convert the inclusive high index into a count of PD slots to visit. */
301 ui32PTPageCount -= ui32PDIndex;
303 pui32PDEntry = (u32 *) pMMUHeap->psMMUContext->pvPDCpuVAddr;
304 pui32PDEntry += ui32PDIndex;
306 ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
308 PDUMPCOMMENT("Alloc page table (page count == %08X)", ui32PTPageCount);
309 PDUMPCOMMENT("Page directory mods (page count == %08X)",
312 for (i = 0; i < ui32PTPageCount; i++) {
/* Lazily create the per-PT bookkeeping record. */
313 if (ppsPTInfoList[i] == NULL) {
314 OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
315 sizeof(struct MMU_PT_INFO),
316 (void **) &ppsPTInfoList[i], NULL);
317 if (ppsPTInfoList[i] == NULL) {
318 PVR_DPF(PVR_DBG_ERROR,
319 "_DeferredAllocPagetables: "
320 "ERROR call to OSAllocMem failed");
323 OSMemSet(ppsPTInfoList[i], 0,
324 sizeof(struct MMU_PT_INFO));
/* No backing page yet: allocate and map one. */
327 if (ppsPTInfoList[i]->hPTPageOSMemHandle == NULL &&
328 ppsPTInfoList[i]->PTPageCpuVAddr == NULL) {
329 struct IMG_CPU_PHYADDR sCpuPAddr;
330 struct IMG_DEV_PHYADDR sDevPAddr;
332 PVR_ASSERT(pui32PDEntry[i] == 0);
/* No local arena => take the PT page from OS pages ... */
334 if (pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->
335 psLocalDevMemArena == NULL) {
336 if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE |
337 PVRSRV_HAP_KERNEL_ONLY,
340 (void **)&ppsPTInfoList[i]->
343 hPTPageOSMemHandle) !=
345 PVR_DPF(PVR_DBG_ERROR,
346 "_DeferredAllocPagetables: "
347 "ERROR call to OSAllocPages failed");
351 if (ppsPTInfoList[i]->PTPageCpuVAddr) {
353 OSMapLinToCPUPhys(ppsPTInfoList[i]->
357 OSMemHandleToCpuPAddr(
363 SysCpuPAddrToDevPAddr
364 (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
/* ... otherwise carve it out of the local device memory arena and
 * map it into the kernel for CPU-side PT writes. */
366 struct IMG_SYS_PHYADDR sSysPAddr;
368 if (RA_Alloc(pMMUHeap->psDevArena->
369 psDeviceMemoryHeapInfo->psLocalDevMemArena,
370 SGX_MMU_PAGE_SIZE, NULL, 0,
372 &(sSysPAddr.uiAddr)) != IMG_TRUE) {
373 PVR_DPF(PVR_DBG_ERROR,
374 "_DeferredAllocPagetables: "
375 "ERROR call to RA_Alloc failed");
379 sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
380 ppsPTInfoList[i]->PTPageCpuVAddr =
382 OSMapPhysToLin(sCpuPAddr, SGX_MMU_PAGE_SIZE,
383 PVRSRV_HAP_WRITECOMBINE |
384 PVRSRV_HAP_KERNEL_ONLY,
387 if (!ppsPTInfoList[i]->PTPageCpuVAddr) {
388 PVR_DPF(PVR_DBG_ERROR,
389 "_DeferredAllocPagetables: "
390 "ERROR failed to map page tables");
394 sDevPAddr = SysCpuPAddrToDevPAddr
395 (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
/* Fresh PT: all entries start invalid (zero). */
400 OSMemSet(ppsPTInfoList[i]->PTPageCpuVAddr, 0,
403 PDUMPMALLOCPAGETABLE(PVRSRV_DEVICE_TYPE_SGX,
404 ppsPTInfoList[i]->PTPageCpuVAddr,
408 PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX,
409 ppsPTInfoList[i]->PTPageCpuVAddr,
410 SGX_MMU_PAGE_SIZE, 0, IMG_TRUE,
411 PDUMP_PT_UNIQUETAG, PDUMP_PT_UNIQUETAG);
/* Publish the new PT in the page directory / directories. */
413 switch (pMMUHeap->psDevArena->DevMemHeapType) {
414 case DEVICE_MEMORY_HEAP_SHARED:
415 case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
/* Shared heap: write the PD entry into every MMU context. */
417 struct MMU_CONTEXT *psMMUContext =
418 (struct MMU_CONTEXT *)pMMUHeap->
419 psMMUContext->psDevInfo->
422 while (psMMUContext) {
424 (u32 *)psMMUContext->
426 pui32PDEntry += ui32PDIndex;
433 (PVRSRV_DEVICE_TYPE_SGX,
434 (void *)&pui32PDEntry[i],
441 psMMUContext->psNext;
445 case DEVICE_MEMORY_HEAP_PERCONTEXT:
446 case DEVICE_MEMORY_HEAP_KERNEL:
/* PD entry = PT device-physical address plus flags (flag OR is on an
 * elided line — presumably the PDE-valid bit; confirm in full source). */
448 pui32PDEntry[i] = sDevPAddr.uiAddr |
451 PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX,
452 (void *)&pui32PDEntry[i],
454 IMG_FALSE, PDUMP_PD_UNIQUETAG,
461 PVR_DPF(PVR_DBG_ERROR,
462 "_DeferredAllocPagetables: "
463 "ERROR invalid heap type");
/* PD contents changed: schedule a directory-cache invalidate. */
469 MMU_InvalidateDirectoryCache(pMMUHeap->psMMUContext->
/* Already-present PT must have a non-zero PD entry. */
473 PVR_ASSERT(pui32PDEntry[i] != 0);
/*
 * MMU_Initialise - create an MMU context with a fresh page directory.
 * @psDeviceNode:  device the context belongs to.
 * @ppsMMUContext: out — the newly allocated context.
 * @psPDDevPAddr:  out — device-physical address of the page directory.
 *
 * Allocates and zero-links a MMU_CONTEXT, obtains one page for the page
 * directory (OS pages when there is no local device memory arena,
 * otherwise an RA_Alloc from the local arena mapped via OSMapPhysToLin),
 * and pushes the context onto the device's context list.
 * Returns PVRSRV_ERROR_GENERIC on any allocation/mapping failure.
 * NOTE(review): elided lines include the PD-entry init loop body and
 * some error-branch braces; comments are limited to visible code.
 * NOTE(review): error paths return without freeing psMMUContext — the
 * elided lines may or may not handle this; verify against full source.
 */
480 enum PVRSRV_ERROR MMU_Initialise(struct PVRSRV_DEVICE_NODE *psDeviceNode,
481 struct MMU_CONTEXT **ppsMMUContext,
482 struct IMG_DEV_PHYADDR *psPDDevPAddr)
487 struct IMG_DEV_PHYADDR sPDDevPAddr;
488 struct IMG_CPU_PHYADDR sCpuPAddr;
489 struct MMU_CONTEXT *psMMUContext;
490 void *hPDOSMemHandle;
491 struct SYS_DATA *psSysData;
492 struct PVRSRV_SGXDEV_INFO *psDevInfo;
494 PVR_DPF(PVR_DBG_MESSAGE, "MMU_Initialise");
496 if (SysAcquireData(&psSysData) != PVRSRV_OK) {
497 PVR_DPF(PVR_DBG_ERROR,
498 "MMU_Initialise: ERROR call to SysAcquireData failed");
499 return PVRSRV_ERROR_GENERIC;
502 if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
503 sizeof(struct MMU_CONTEXT), (void **) &psMMUContext, NULL)
505 PVR_DPF(PVR_DBG_ERROR,
506 "MMU_Initialise: ERROR call to OSAllocMem failed");
507 return PVRSRV_ERROR_GENERIC;
509 OSMemSet(psMMUContext, 0, sizeof(struct MMU_CONTEXT));
511 psDevInfo = (struct PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
512 psMMUContext->psDevInfo = psDevInfo;
514 psMMUContext->psDeviceNode = psDeviceNode;
/* Page-directory page: OS allocation when no local arena exists... */
516 if (psDeviceNode->psLocalDevMemArena == NULL) {
518 (PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
519 SGX_MMU_PAGE_SIZE, SGX_MMU_PAGE_SIZE, &pvPDCpuVAddr,
520 &hPDOSMemHandle) != PVRSRV_OK) {
521 PVR_DPF(PVR_DBG_ERROR, "MMU_Initialise: "
522 "ERROR call to OSAllocPages failed");
523 return PVRSRV_ERROR_GENERIC;
527 sCpuPAddr = OSMapLinToCPUPhys(pvPDCpuVAddr);
529 sCpuPAddr = OSMemHandleToCpuPAddr(hPDOSMemHandle, 0);
531 SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
/* ...otherwise carve it from the local device memory arena and map it. */
533 struct IMG_SYS_PHYADDR sSysPAddr;
535 if (RA_Alloc(psDeviceNode->psLocalDevMemArena,
536 SGX_MMU_PAGE_SIZE, NULL, 0, SGX_MMU_PAGE_SIZE,
537 &(sSysPAddr.uiAddr)) != IMG_TRUE) {
538 PVR_DPF(PVR_DBG_ERROR, "MMU_Initialise: "
539 "ERROR call to RA_Alloc failed");
540 return PVRSRV_ERROR_GENERIC;
543 sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
545 SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysPAddr);
546 pvPDCpuVAddr = (void __force *)
547 OSMapPhysToLin(sCpuPAddr, SGX_MMU_PAGE_SIZE,
548 PVRSRV_HAP_WRITECOMBINE |
549 PVRSRV_HAP_KERNEL_ONLY, &hPDOSMemHandle);
551 PVR_DPF(PVR_DBG_ERROR, "MMU_Initialise: "
552 "ERROR failed to map page tables");
553 return PVRSRV_ERROR_GENERIC;
557 PDUMPCOMMENT("Alloc page directory");
559 PDUMPMALLOCPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, pvPDCpuVAddr,
560 SGX_MMU_PAGE_SIZE, PDUMP_PD_UNIQUETAG);
563 pui32Tmp = (u32 *) pvPDCpuVAddr;
565 PVR_DPF(PVR_DBG_ERROR,
566 "MMU_Initialise: pvPDCpuVAddr invalid");
567 return PVRSRV_ERROR_GENERIC;
/* Initialise all PD entries (loop body elided in this listing —
 * presumably zero/invalid; confirm in full source). */
570 for (i = 0; i < SGX_MMU_PD_SIZE; i++)
573 PDUMPCOMMENT("Page directory contents");
574 PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, pvPDCpuVAddr, SGX_MMU_PAGE_SIZE, 0,
575 IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
/* Record PD addresses/handle and hand the context back to the caller. */
577 psMMUContext->pvPDCpuVAddr = pvPDCpuVAddr;
578 psMMUContext->sPDDevPAddr = sPDDevPAddr;
579 psMMUContext->hPDOSMemHandle = hPDOSMemHandle;
581 *ppsMMUContext = psMMUContext;
583 *psPDDevPAddr = sPDDevPAddr;
/* Push onto the head of the device's MMU-context list. */
585 psMMUContext->psNext = (struct MMU_CONTEXT *)
586 psDevInfo->pvMMUContextList;
587 psDevInfo->pvMMUContextList = (void *) psMMUContext;
/*
 * MMU_Finalise - destroy an MMU context created by MMU_Initialise.
 *
 * Clears the page-directory entries, releases the PD page back to the OS
 * or the local device memory arena (mirroring the allocation path),
 * unlinks the context from the device's context list, then frees the
 * MMU_CONTEXT structure itself.
 * NOTE(review): the PD-clearing loop body and some braces are elided in
 * this listing; comments reflect only the visible lines.
 */
593 void MMU_Finalise(struct MMU_CONTEXT *psMMUContext)
596 struct SYS_DATA *psSysData;
597 struct MMU_CONTEXT **ppsMMUContext;
599 if (SysAcquireData(&psSysData) != PVRSRV_OK) {
600 PVR_DPF(PVR_DBG_ERROR,
601 "MMU_Finalise: ERROR call to SysAcquireData failed");
605 PDUMPCOMMENT("Free page directory");
606 PDUMPFREEPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, psMMUContext->pvPDCpuVAddr,
607 SGX_MMU_PAGE_SIZE, PDUMP_PT_UNIQUETAG);
609 pui32Tmp = (u32 *) psMMUContext->pvPDCpuVAddr;
/* Reset every PD entry before freeing (loop body elided in listing). */
611 for (i = 0; i < SGX_MMU_PD_SIZE; i++)
/* Free the PD page the same way it was allocated. */
614 if (psMMUContext->psDeviceNode->psLocalDevMemArena == NULL) {
615 OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
617 psMMUContext->pvPDCpuVAddr,
618 psMMUContext->hPDOSMemHandle);
621 struct IMG_SYS_PHYADDR sSysPAddr;
622 struct IMG_CPU_PHYADDR sCpuPAddr;
624 sCpuPAddr = OSMapLinToCPUPhys(psMMUContext->pvPDCpuVAddr);
625 sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);
627 OSUnMapPhysToLin((void __iomem __force *)
628 psMMUContext->pvPDCpuVAddr,
630 PVRSRV_HAP_WRITECOMBINE |
631 PVRSRV_HAP_KERNEL_ONLY,
632 psMMUContext->hPDOSMemHandle);
634 RA_Free(psMMUContext->psDeviceNode->psLocalDevMemArena,
635 sSysPAddr.uiAddr, IMG_FALSE);
639 PVR_DPF(PVR_DBG_MESSAGE, "MMU_Finalise");
/* Unlink this context from the device's singly-linked context list. */
642 (struct MMU_CONTEXT **) &psMMUContext->psDevInfo->pvMMUContextList;
643 while (*ppsMMUContext) {
644 if (*ppsMMUContext == psMMUContext) {
646 *ppsMMUContext = psMMUContext->psNext;
650 ppsMMUContext = &((*ppsMMUContext)->psNext);
653 OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct MMU_CONTEXT),
/*
 * MMU_InsertHeap - copy a shared heap's PD entries into another context.
 * @psMMUContext: destination context receiving the entries.
 * @psMMUHeap:    heap (owned by another context) whose PD range is copied.
 *
 * Copies the page-directory entries covering the heap's VA range from the
 * heap's own ("kernel") page directory into psMMUContext's directory, so
 * the shared heap is visible in that context. Requests a PD-cache
 * invalidate if any entry actually changed.
 */
657 void MMU_InsertHeap(struct MMU_CONTEXT *psMMUContext,
658 struct MMU_HEAP *psMMUHeap)
660 u32 *pui32PDCpuVAddr = (u32 *)psMMUContext->pvPDCpuVAddr;
661 u32 *pui32KernelPDCpuVAddr = (u32 *)
662 psMMUHeap->psMMUContext->pvPDCpuVAddr;
664 IMG_BOOL bInvalidateDirectoryCache = IMG_FALSE;
/* Advance both PD pointers to the first entry of the heap's VA range
 * (the shift's second term is elided in this listing). */
667 psMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT +
669 pui32KernelPDCpuVAddr +=
670 psMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT +
673 PDUMPCOMMENT("Page directory shared heap range copy");
675 for (ui32PDEntry = 0; ui32PDEntry < psMMUHeap->ui32PTPageCount;
/* Destination slots must be empty before the copy. */
678 PVR_ASSERT(pui32PDCpuVAddr[ui32PDEntry] == 0);
680 pui32PDCpuVAddr[ui32PDEntry] =
681 pui32KernelPDCpuVAddr[ui32PDEntry];
682 if (pui32PDCpuVAddr[ui32PDEntry]) {
683 PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX,
684 (void *) &pui32PDCpuVAddr[ui32PDEntry],
685 sizeof(u32), 0, IMG_FALSE,
686 PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
688 bInvalidateDirectoryCache = IMG_TRUE;
/* Only flush when at least one non-zero entry was written. */
692 if (bInvalidateDirectoryCache)
693 MMU_InvalidateDirectoryCache(psMMUContext->psDevInfo);
/*
 * MMU_UnmapPagesAndFreePTs - invalidate a VA range and reclaim empty PTs.
 * @sDevVAddr:     first device-virtual page to unmap.
 * @ui32PageCount: number of pages to unmap.
 * @hUniqueTag:    PDUMP correlation tag (unused outside PDUMP builds).
 *
 * Walks the range page by page, zeroing each valid PTE and decrementing
 * the owning PT's valid-entry count; when a PT drops to zero valid
 * entries it is freed via _DeferredFreePageTable. Missing PTs are logged
 * and skipped (not an error for sparse allocations).
 */
696 static void MMU_UnmapPagesAndFreePTs(struct MMU_HEAP *psMMUHeap,
697 struct IMG_DEV_VIRTADDR sDevVAddr,
698 u32 ui32PageCount, void *hUniqueTag)
700 u32 uPageSize = HOST_PAGESIZE();
701 struct IMG_DEV_VIRTADDR sTmpDevVAddr;
706 IMG_BOOL bInvalidateDirectoryCache = IMG_FALSE;
709 PVR_UNREFERENCED_PARAMETER(hUniqueTag);
712 sTmpDevVAddr = sDevVAddr;
714 for (i = 0; i < ui32PageCount; i++) {
715 struct MMU_PT_INFO **ppsPTInfoList;
/* Page-directory slot and page-table slot for this VA. */
718 sTmpDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT +
722 &psMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
725 ui32PTIndex = (sTmpDevVAddr.uiAddr & SGX_MMU_PT_MASK)
726 >> SGX_MMU_PAGE_SHIFT;
/* No PT here: nothing mapped at this VA, move to the next page. */
728 if (!ppsPTInfoList[0]) {
729 PVR_DPF(PVR_DBG_MESSAGE,
730 "MMU_UnmapPagesAndFreePTs: "
731 "Invalid PT for alloc at VAddr:0x%08lX "
732 "(VaddrIni:0x%08lX AllocPage:%u) "
734 sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr,
735 i, ui32PDIndex, ui32PTIndex);
737 sTmpDevVAddr.uiAddr += uPageSize;
742 pui32Tmp = (u32 *)ppsPTInfoList[0]->PTPageCpuVAddr;
/* Only a valid PTE contributes to the PT's valid count. */
747 if (pui32Tmp[ui32PTIndex] & SGX_MMU_PTE_VALID) {
748 ppsPTInfoList[0]->ui32ValidPTECount--;
750 PVR_DPF(PVR_DBG_MESSAGE,
751 "MMU_UnmapPagesAndFreePTs: "
752 "Page is already invalid for alloc at "
754 "(VAddrIni:0x%08lX AllocPage:%u) "
756 sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr,
757 i, ui32PDIndex, ui32PTIndex);
/* Signed compare catches underflow of the valid-entry counter. */
760 PVR_ASSERT((s32)ppsPTInfoList[0]->ui32ValidPTECount >=
762 pui32Tmp[ui32PTIndex] = 0;
/* PT fully empty: release it (index rebased to the heap's first PD
 * slot — the subtrahend expression continues on an elided line). */
766 && ppsPTInfoList[0]->ui32ValidPTECount == 0) {
767 _DeferredFreePageTable(psMMUHeap,
768 ui32PDIndex - (psMMUHeap->
771 bInvalidateDirectoryCache = IMG_TRUE;
774 sTmpDevVAddr.uiAddr += uPageSize;
/* PD flush only if a PT was freed; PT flush otherwise (else-branch
 * structure partially elided in this listing). */
777 if (bInvalidateDirectoryCache) {
778 MMU_InvalidateDirectoryCache(psMMUHeap->psMMUContext->
781 MMU_InvalidatePageTableCache(psMMUHeap->psMMUContext->
786 MMU_PDumpPageTables(psMMUHeap, sDevVAddr, uPageSize * ui32PageCount,
787 IMG_TRUE, hUniqueTag);
/*
 * MMU_FreePageTables - RA import-free callback for the heap's VM arena.
 * Registered with RA_Create (see MMU_Create); converts the [start, end)
 * byte range into a page count and delegates to MMU_UnmapPagesAndFreePTs.
 * NOTE(review): the signature's final parameter line and the tag passed
 * to the callee are elided in this listing — presumably a unique-tag
 * argument; confirm against the full source.
 */
791 static void MMU_FreePageTables(void *pvMMUHeap, u32 ui32Start, u32 ui32End,
794 struct MMU_HEAP *pMMUHeap = (struct MMU_HEAP *)pvMMUHeap;
795 struct IMG_DEV_VIRTADDR Start;
797 Start.uiAddr = ui32Start;
799 MMU_UnmapPagesAndFreePTs(pMMUHeap, Start,
800 (ui32End - ui32Start) / SGX_MMU_PAGE_SIZE,
/*
 * MMU_Create - create an MMU heap over a device arena.
 * @psMMUContext: owning MMU context.
 * @psDevArena:   arena describing the heap's device-VA range.
 * @ppsVMArena:   out — the RA arena managing VA allocations in the heap.
 *
 * Allocates the MMU_HEAP, sizes its page-table bookkeeping
 * (_AllocPageTables), and creates an RA arena over the arena's VA range
 * with MMU_FreePageTables as the free callback. On failure frees what was
 * allocated and (per the visible cleanup paths) returns NULL — the
 * return statements themselves are elided in this listing.
 */
804 struct MMU_HEAP *MMU_Create(struct MMU_CONTEXT *psMMUContext,
805 struct DEV_ARENA_DESCRIPTOR *psDevArena,
806 struct RA_ARENA **ppsVMArena)
808 struct MMU_HEAP *pMMUHeap;
811 PVR_ASSERT(psDevArena != NULL);
813 if (psDevArena == NULL) {
814 PVR_DPF(PVR_DBG_ERROR, "MMU_Create: invalid parameter");
818 if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
819 sizeof(struct MMU_HEAP), (void **)&pMMUHeap, NULL)
821 PVR_DPF(PVR_DBG_ERROR,
822 "MMU_Create: ERROR call to OSAllocMem failed");
826 pMMUHeap->psMMUContext = psMMUContext;
827 pMMUHeap->psDevArena = psDevArena;
829 bRes = _AllocPageTables(pMMUHeap);
831 PVR_DPF(PVR_DBG_ERROR,
832 "MMU_Create: ERROR call to _AllocPageTables failed");
833 OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct MMU_HEAP),
/* VA allocator for this heap; frees flow back through
 * MMU_FreePageTables so empty PTs can be reclaimed. */
838 pMMUHeap->psVMArena = RA_Create(psDevArena->pszName,
839 psDevArena->BaseDevVAddr.uiAddr,
840 psDevArena->ui32Size, NULL,
841 SGX_MMU_PAGE_SIZE, NULL, NULL,
842 MMU_FreePageTables, pMMUHeap);
844 if (pMMUHeap->psVMArena == NULL) {
845 PVR_DPF(PVR_DBG_ERROR,
846 "MMU_Create: ERROR call to RA_Create failed");
847 _DeferredFreePageTables(pMMUHeap);
848 OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct MMU_HEAP),
853 *ppsVMArena = pMMUHeap->psVMArena;
/*
 * MMU_Delete - destroy a heap created by MMU_Create.
 * Deletes the VA arena (if any), frees all page tables, then frees the
 * MMU_HEAP itself (the OSFreeMem argument list continues on a line
 * elided from this listing). NULL input is silently ignored.
 */
858 void MMU_Delete(struct MMU_HEAP *pMMUHeap)
860 if (pMMUHeap != NULL) {
861 PVR_DPF(PVR_DBG_MESSAGE, "MMU_Delete");
863 if (pMMUHeap->psVMArena)
864 RA_Delete(pMMUHeap->psVMArena);
865 _DeferredFreePageTables(pMMUHeap);
867 OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct MMU_HEAP),
/*
 * MMU_Alloc - reserve device VA in a heap and back it with page tables.
 * @uSize:             bytes to reserve.
 * @uFlags:            PVRSRV_MEM_* flags; USER_SUPPLIED_DEVVADDR means
 *                     psDevVAddr is an input (no arena allocation).
 * @uDevVAddrAlignment: alignment for arena-allocated addresses.
 * @psDevVAddr:        in/out device virtual address.
 *
 * Returns IMG_BOOL status (exact return statements elided in this
 * listing). On page-table allocation failure, VA taken from the arena is
 * released again so the heap is left unchanged.
 */
872 IMG_BOOL MMU_Alloc(struct MMU_HEAP *pMMUHeap, size_t uSize, u32 uFlags,
873 u32 uDevVAddrAlignment, struct IMG_DEV_VIRTADDR *psDevVAddr)
877 PVR_DPF(PVR_DBG_MESSAGE,
878 "MMU_Alloc: uSize=0x%x, flags=0x%x, align=0x%x",
879 uSize, uFlags, uDevVAddrAlignment);
/* Caller-supplied addresses skip the arena allocation entirely. */
881 if ((uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) == 0) {
882 bStatus = RA_Alloc(pMMUHeap->psVMArena, uSize, NULL, 0,
883 uDevVAddrAlignment, &(psDevVAddr->uiAddr));
885 PVR_DPF(PVR_DBG_ERROR,
886 "MMU_Alloc: RA_Alloc of VMArena failed");
/* Ensure page tables exist for the chosen range. */
891 bStatus = _DeferredAllocPagetables(pMMUHeap, *psDevVAddr, uSize);
895 PVR_DPF(PVR_DBG_ERROR,
896 "MMU_Alloc: _DeferredAllocPagetables failed");
/* Roll back the arena reservation on PT allocation failure. */
897 if ((uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) == 0)
898 RA_Free(pMMUHeap->psVMArena, psDevVAddr->uiAddr,
/*
 * MMU_Free - release a device-VA reservation made by MMU_Alloc.
 * @DevVAddr: base address of the reservation.
 * @ui32Size: size in bytes (used only for the range check; RA_Free
 *            tracks the span itself).
 *
 * Validates the address lies within the heap's arena before freeing;
 * logs an error for out-of-range addresses (the format-argument line of
 * that final PVR_DPF is elided in this listing).
 */
905 void MMU_Free(struct MMU_HEAP *pMMUHeap, struct IMG_DEV_VIRTADDR DevVAddr,
908 PVR_ASSERT(pMMUHeap != NULL);
910 if (pMMUHeap == NULL) {
911 PVR_DPF(PVR_DBG_ERROR, "MMU_Free: invalid parameter");
915 PVR_DPF(PVR_DBG_MESSAGE,
916 "MMU_Free: mmu=%08X, dev_vaddr=%08X", pMMUHeap,
/* Bounds-check [DevVAddr, DevVAddr + ui32Size) against the arena. */
919 if ((DevVAddr.uiAddr >= pMMUHeap->psDevArena->BaseDevVAddr.uiAddr) &&
920 (DevVAddr.uiAddr + ui32Size <=
921 pMMUHeap->psDevArena->BaseDevVAddr.uiAddr +
922 pMMUHeap->psDevArena->ui32Size)) {
923 RA_Free(pMMUHeap->psVMArena, DevVAddr.uiAddr, IMG_TRUE);
929 PVR_DPF(PVR_DBG_ERROR,
930 "MMU_Free: Couldn't find DevVAddr %08X in a DevArena",
/*
 * MMU_Enable - per-heap MMU enable hook.
 *
 * Intentionally a no-op in this implementation; the parameter is
 * referenced only to silence unused-parameter warnings.
 *
 * NOTE(review): braces restored; the numbered listing had elided them.
 */
void MMU_Enable(struct MMU_HEAP *pMMUHeap)
{
	PVR_UNREFERENCED_PARAMETER(pMMUHeap);
}
/*
 * MMU_Disable - per-heap MMU disable hook.
 *
 * Intentionally a no-op in this implementation; the parameter is
 * referenced only to silence unused-parameter warnings.
 *
 * NOTE(review): braces restored; the numbered listing had elided them.
 */
void MMU_Disable(struct MMU_HEAP *pMMUHeap)
{
	PVR_UNREFERENCED_PARAMETER(pMMUHeap);
}
/*
 * MMU_PDumpPageTables - emit PDUMP records for the PTEs covering a range.
 * @DevVAddr:  start of the range.
 * @uSize:     size in bytes.
 * @bForUnmap: annotates the PDUMP comments only.
 * @hUniqueTag: correlation tag passed through to PDUMPMEM2.
 *
 * Walks whole page tables (1024 entries each) across the range, dumping
 * the slice of each PT that the range touches. Compiled usefully only in
 * PDUMP builds; PT-advance/index-reset lines are elided in this listing.
 */
947 static void MMU_PDumpPageTables(struct MMU_HEAP *pMMUHeap,
948 struct IMG_DEV_VIRTADDR DevVAddr,
949 size_t uSize, IMG_BOOL bForUnmap, void *hUniqueTag)
951 u32 ui32NumPTEntries;
955 struct MMU_PT_INFO **ppsPTInfoList;
/* Total PTEs touched, rounding the byte size up to whole pages. */
960 (uSize + SGX_MMU_PAGE_SIZE - 1) >> SGX_MMU_PAGE_SHIFT;
963 DevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
965 ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
967 ui32PTIndex = (DevVAddr.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
969 PDUMPCOMMENT("Page table mods (num entries == %08X) %s",
970 ui32NumPTEntries, bForUnmap ? "(for unmap)" : "");
972 while (ui32NumPTEntries > 0) {
973 struct MMU_PT_INFO *psPTInfo = *ppsPTInfoList++;
/* Dump either the remaining entries or to the end of this PT (1024
 * entries per PT page), whichever is smaller. */
975 if (ui32NumPTEntries <= 1024 - ui32PTIndex)
976 ui32PTDumpCount = ui32NumPTEntries;
978 ui32PTDumpCount = 1024 - ui32PTIndex;
981 pui32PTEntry = (u32 *)psPTInfo->PTPageCpuVAddr;
982 PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX,
983 (void *)&pui32PTEntry[ui32PTIndex],
984 ui32PTDumpCount * sizeof(u32), 0,
985 IMG_FALSE, PDUMP_PT_UNIQUETAG, hUniqueTag);
988 ui32NumPTEntries -= ui32PTDumpCount;
993 PDUMPCOMMENT("Finished page table mods %s",
994 bForUnmap ? "(for unmap)" : "");
/*
 * MMU_MapPage - write one PTE mapping DevVAddr -> DevPAddr.
 * @ui32MemFlags: PVRSRV_MEM_* flags, translated to SGX PTE flag bits.
 *
 * Assumes the page table for DevVAddr already exists (callers go through
 * MMU_Alloc/_DeferredAllocPagetables first). Warns if the slot is
 * already valid, then installs the entry and bumps the PT's valid count.
 */
998 static void MMU_MapPage(struct MMU_HEAP *pMMUHeap,
999 struct IMG_DEV_VIRTADDR DevVAddr,
1000 struct IMG_DEV_PHYADDR DevPAddr, u32 ui32MemFlags)
1004 u32 ui32MMUFlags = 0;
1005 struct MMU_PT_INFO **ppsPTInfoList;
/* R+W is the default PTE state (the flag OR for that branch is on an
 * elided line); otherwise pick the single-direction protection bit. */
1007 if (((PVRSRV_MEM_READ | PVRSRV_MEM_WRITE) & ui32MemFlags) ==
1008 (PVRSRV_MEM_READ | PVRSRV_MEM_WRITE))
1010 else if (PVRSRV_MEM_READ & ui32MemFlags)
1011 ui32MMUFlags |= SGX_MMU_PTE_READONLY;
1012 else if (PVRSRV_MEM_WRITE & ui32MemFlags)
1013 ui32MMUFlags |= SGX_MMU_PTE_WRITEONLY;
1015 if (PVRSRV_MEM_CACHE_CONSISTENT & ui32MemFlags)
1016 ui32MMUFlags |= SGX_MMU_PTE_CACHECONSISTENT;
1018 if (PVRSRV_MEM_EDM_PROTECT & ui32MemFlags)
1019 ui32MMUFlags |= SGX_MMU_PTE_EDMPROTECT;
/* ui32Index is reused: first as the PD index, then as the PT index. */
1021 ui32Index = DevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
1023 ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32Index];
1025 ui32Index = (DevVAddr.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
1027 pui32Tmp = (u32 *) ppsPTInfoList[0]->PTPageCpuVAddr;
/* Double-mapping the same slot indicates a caller bug; report it. */
1030 if (pui32Tmp[ui32Index] & SGX_MMU_PTE_VALID)
1031 PVR_DPF(PVR_DBG_ERROR,
1033 "Page is already valid for alloc at "
1034 "VAddr:0x%08lX PDIdx:%u PTIdx:%u",
1036 DevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT +
1037 SGX_MMU_PT_SHIFT), ui32Index);
1039 PVR_ASSERT((pui32Tmp[ui32Index] & SGX_MMU_PTE_VALID) == 0);
1041 ppsPTInfoList[0]->ui32ValidPTECount++;
/* Install PTE: page-aligned device physical address | valid | flags. */
1043 pui32Tmp[ui32Index] = (DevPAddr.uiAddr & SGX_MMU_PTE_ADDR_MASK)
1044 | SGX_MMU_PTE_VALID | ui32MMUFlags;
/*
 * MMU_MapScatter - map a scatter list of system-physical pages.
 * @psSysAddr: array of per-page system physical addresses.
 * @uSize:     total bytes to map (one array element per MMU page).
 *
 * Maps each page in sequence at consecutive device VAs, then dumps the
 * affected page tables for PDUMP builds.
 */
1047 void MMU_MapScatter(struct MMU_HEAP *pMMUHeap, struct IMG_DEV_VIRTADDR DevVAddr,
1048 struct IMG_SYS_PHYADDR *psSysAddr, size_t uSize,
1049 u32 ui32MemFlags, void *hUniqueTag)
1052 struct IMG_DEV_VIRTADDR MapBaseDevVAddr;
1055 struct IMG_DEV_PHYADDR DevPAddr;
1057 PVR_ASSERT(pMMUHeap != NULL);
/* Remember the base for the post-loop PDUMP pass. */
1060 MapBaseDevVAddr = DevVAddr;
1062 PVR_UNREFERENCED_PARAMETER(hUniqueTag);
/* i indexes the scatter array; uCount tracks bytes mapped so far. */
1065 for (i = 0, uCount = 0; uCount < uSize;
1066 i++, uCount += SGX_MMU_PAGE_SIZE) {
1067 struct IMG_SYS_PHYADDR sSysAddr;
1069 sSysAddr = psSysAddr[i];
1072 SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysAddr);
1074 MMU_MapPage(pMMUHeap, DevVAddr, DevPAddr, ui32MemFlags);
1075 DevVAddr.uiAddr += SGX_MMU_PAGE_SIZE;
1077 PVR_DPF(PVR_DBG_MESSAGE, "MMU_MapScatter: "
1078 "devVAddr=%08X, SysAddr=%08X, size=0x%x/0x%x",
1079 DevVAddr.uiAddr, sSysAddr.uiAddr, uCount, uSize);
1083 MMU_PDumpPageTables(pMMUHeap, MapBaseDevVAddr, uSize, IMG_FALSE,
/*
 * MMU_MapPages - map a physically contiguous region into a heap.
 * @SysPAddr: base system physical address of the region.
 * @uSize:    size in bytes.
 *
 * Walks the region one MMU page at a time. With PVRSRV_MEM_DUMMY set,
 * the advance values are presumably adjusted so every VA maps the same
 * dummy physical page (the assignment is on an elided line — confirm).
 */
1088 void MMU_MapPages(struct MMU_HEAP *pMMUHeap, struct IMG_DEV_VIRTADDR DevVAddr,
1089 struct IMG_SYS_PHYADDR SysPAddr, size_t uSize,
1090 u32 ui32MemFlags, void *hUniqueTag)
1092 struct IMG_DEV_PHYADDR DevPAddr;
1094 struct IMG_DEV_VIRTADDR MapBaseDevVAddr;
/* Per-iteration increments for the virtual and physical cursors. */
1097 u32 ui32VAdvance = SGX_MMU_PAGE_SIZE;
1098 u32 ui32PAdvance = SGX_MMU_PAGE_SIZE;
1100 PVR_ASSERT(pMMUHeap != NULL);
1102 PVR_DPF(PVR_DBG_MESSAGE, "MMU_MapPages: "
1103 "mmu=%08X, devVAddr=%08X, SysPAddr=%08X, size=0x%x",
1104 pMMUHeap, DevVAddr.uiAddr, SysPAddr.uiAddr, uSize);
1107 MapBaseDevVAddr = DevVAddr;
1109 PVR_UNREFERENCED_PARAMETER(hUniqueTag);
1112 DevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, SysPAddr);
/* Dummy-page handling (body elided in this listing). */
1114 if (ui32MemFlags & PVRSRV_MEM_DUMMY)
1117 for (uCount = 0; uCount < uSize; uCount += ui32VAdvance) {
1118 MMU_MapPage(pMMUHeap, DevVAddr, DevPAddr, ui32MemFlags);
1119 DevVAddr.uiAddr += ui32VAdvance;
1120 DevPAddr.uiAddr += ui32PAdvance;
1124 MMU_PDumpPageTables(pMMUHeap, MapBaseDevVAddr, uSize, IMG_FALSE,
/*
 * MMU_MapShadow - map a CPU-side allocation into device VA space.
 * @MapBaseDevVAddr: device VA at which to start the mapping.
 * @uByteSize:       size in bytes (must be page aligned — asserted).
 * @CpuVAddr:        kernel virtual address of the allocation (page
 *                   aligned — asserted), may be backed by hOSMemHandle.
 * @pDevVAddr:       out — set to MapBaseDevVAddr.
 *
 * Per page: resolve the CPU physical address (from the linear address
 * when available, else from the OS memory handle), convert to a device
 * physical address, and install the PTE via MMU_MapPage.
 */
1129 void MMU_MapShadow(struct MMU_HEAP *pMMUHeap,
1130 struct IMG_DEV_VIRTADDR MapBaseDevVAddr,
1131 size_t uByteSize, void *CpuVAddr, void *hOSMemHandle,
1132 struct IMG_DEV_VIRTADDR *pDevVAddr, u32 ui32MemFlags,
1137 struct IMG_DEV_VIRTADDR MapDevVAddr;
1138 u32 ui32VAdvance = SGX_MMU_PAGE_SIZE;
1139 u32 ui32PAdvance = SGX_MMU_PAGE_SIZE;
1142 PVR_UNREFERENCED_PARAMETER(hUniqueTag);
1145 PVR_DPF(PVR_DBG_MESSAGE,
1146 "MMU_MapShadow: %08X, 0x%x, %08X",
1147 MapBaseDevVAddr.uiAddr, uByteSize, CpuVAddr);
/* Both the CPU address and size must be MMU-page aligned. */
1149 PVR_ASSERT(((u32) CpuVAddr & (SGX_MMU_PAGE_SIZE - 1)) == 0);
1150 PVR_ASSERT(((u32) uByteSize & (SGX_MMU_PAGE_SIZE - 1)) == 0);
1151 pDevVAddr->uiAddr = MapBaseDevVAddr.uiAddr;
/* Dummy-page handling (body elided in this listing). */
1153 if (ui32MemFlags & PVRSRV_MEM_DUMMY)
1156 MapDevVAddr = MapBaseDevVAddr;
1157 for (i = 0; i < uByteSize; i += ui32VAdvance) {
1158 struct IMG_CPU_PHYADDR CpuPAddr;
1159 struct IMG_DEV_PHYADDR DevPAddr;
/* Prefer translating the linear address; fall back to the handle
 * (the branch condition is on an elided line). */
1163 OSMapLinToCPUPhys((void *)((u32)CpuVAddr +
1166 CpuPAddr = OSMemHandleToCpuPAddr(hOSMemHandle, uOffset);
1168 SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, CpuPAddr);
1170 PVR_DPF(PVR_DBG_MESSAGE, "0x%x: CpuVAddr=%08X, "
1171 "CpuPAddr=%08X, DevVAddr=%08X, DevPAddr=%08X",
1172 uOffset, (u32)CpuVAddr + uOffset, CpuPAddr.uiAddr,
1173 MapDevVAddr.uiAddr, DevPAddr.uiAddr);
1175 MMU_MapPage(pMMUHeap, MapDevVAddr, DevPAddr, ui32MemFlags);
1177 MapDevVAddr.uiAddr += ui32VAdvance;
1178 uOffset += ui32PAdvance;
1182 MMU_PDumpPageTables(pMMUHeap, MapBaseDevVAddr, uByteSize, IMG_FALSE,
/*
 * MMU_UnmapPages - invalidate the PTEs for a device-VA range.
 * @sDevVAddr:     first page to unmap.
 * @ui32PageCount: number of pages.
 *
 * Like MMU_UnmapPagesAndFreePTs but never frees page tables: PTEs are
 * zeroed and valid counts decremented, then the PT cache is flagged for
 * invalidation. A missing PT here is reported as an error (unlike the
 * freeing variant, which treats it as a message-level event).
 */
1187 void MMU_UnmapPages(struct MMU_HEAP *psMMUHeap,
1188 struct IMG_DEV_VIRTADDR sDevVAddr, u32 ui32PageCount,
1191 u32 uPageSize = HOST_PAGESIZE();
1192 struct IMG_DEV_VIRTADDR sTmpDevVAddr;
1199 PVR_UNREFERENCED_PARAMETER(hUniqueTag);
1202 sTmpDevVAddr = sDevVAddr;
1204 for (i = 0; i < ui32PageCount; i++) {
1205 struct MMU_PT_INFO **ppsPTInfoList;
/* PD slot and PT slot for this VA. */
1207 ui32PDIndex = sTmpDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT +
1210 ppsPTInfoList = &psMMUHeap->psMMUContext->
1211 apsPTInfoList[ui32PDIndex];
1213 ui32PTIndex = (sTmpDevVAddr.uiAddr & SGX_MMU_PT_MASK) >>
1216 if (!ppsPTInfoList[0]) {
1217 PVR_DPF(PVR_DBG_ERROR,
1219 "ERROR Invalid PT for alloc at VAddr:0x%08lX "
1220 "(VaddrIni:0x%08lX AllocPage:%u) PDIdx:%u "
1222 sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr, i,
1223 ui32PDIndex, ui32PTIndex);
/* Skip unmapped slots and continue with the next page. */
1225 sTmpDevVAddr.uiAddr += uPageSize;
1230 pui32Tmp = (u32 *)ppsPTInfoList[0]->PTPageCpuVAddr;
/* Valid entry: account for its removal; otherwise report the
 * double-unmap (else-branch brace elided in this listing). */
1232 if (pui32Tmp[ui32PTIndex] & SGX_MMU_PTE_VALID)
1233 ppsPTInfoList[0]->ui32ValidPTECount--;
1235 PVR_DPF(PVR_DBG_ERROR,
1236 "MMU_UnmapPages: Page is already invalid "
1237 "for alloc at VAddr:0x%08lX "
1238 "(VAddrIni:0x%08lX AllocPage:%u) "
1239 "PDIdx:%u PTIdx:%u",
1240 sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr, i,
1241 ui32PDIndex, ui32PTIndex);
/* Signed compare catches counter underflow. */
1243 PVR_ASSERT((s32) ppsPTInfoList[0]->ui32ValidPTECount >= 0);
1245 pui32Tmp[ui32PTIndex] = 0;
1247 sTmpDevVAddr.uiAddr += uPageSize;
1250 MMU_InvalidatePageTableCache(psMMUHeap->psMMUContext->psDevInfo);
1253 MMU_PDumpPageTables(psMMUHeap, sDevVAddr, uPageSize * ui32PageCount,
1254 IMG_TRUE, hUniqueTag);
/*
 * MMU_GetPhysPageAddr - translate a device-virtual page address to a
 * device-physical address by walking this heap's page tables.
 *
 * Returns the masked physical address from the PTE; an address of 0 is
 * used as the "not mapped" sentinel when no page table covers the VA
 * (the return statements themselves are elided in this listing).
 */
1258 struct IMG_DEV_PHYADDR MMU_GetPhysPageAddr(struct MMU_HEAP *pMMUHeap,
1259 struct IMG_DEV_VIRTADDR sDevVPageAddr)
1261 u32 *pui32PageTable;
1263 struct IMG_DEV_PHYADDR sDevPAddr;
1264 struct MMU_PT_INFO **ppsPTInfoList;
/* ui32Index is reused: first as the PD index, then as the PT index. */
1266 ui32Index = sDevVPageAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT +
1269 ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32Index];
1270 if (!ppsPTInfoList[0]) {
1271 PVR_DPF(PVR_DBG_ERROR,
1272 "MMU_GetPhysPageAddr: Not mapped in at 0x%08x",
1273 sDevVPageAddr.uiAddr);
1274 sDevPAddr.uiAddr = 0;
1279 (sDevVPageAddr.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
1281 pui32PageTable = (u32 *) ppsPTInfoList[0]->PTPageCpuVAddr;
/* Strip the PTE flag bits, leaving the page-aligned physical address. */
1283 sDevPAddr.uiAddr = pui32PageTable[ui32Index];
1285 sDevPAddr.uiAddr &= SGX_MMU_PTE_ADDR_MASK;
1290 struct IMG_DEV_PHYADDR MMU_GetPDDevPAddr(struct MMU_CONTEXT *pMMUContext)
1292 return pMMUContext->sPDDevPAddr;
1295 enum PVRSRV_ERROR SGXGetPhysPageAddrKM(void *hDevMemHeap,
1296 struct IMG_DEV_VIRTADDR sDevVAddr,
1297 struct IMG_DEV_PHYADDR *pDevPAddr,
1298 struct IMG_CPU_PHYADDR *pCpuPAddr)
1300 struct MMU_HEAP *pMMUHeap;
1301 struct IMG_DEV_PHYADDR DevPAddr;
1303 pMMUHeap = (struct MMU_HEAP *)BM_GetMMUHeap(hDevMemHeap);
1305 DevPAddr = MMU_GetPhysPageAddr(pMMUHeap, sDevVAddr);
1306 pCpuPAddr->uiAddr = DevPAddr.uiAddr;
1307 pDevPAddr->uiAddr = DevPAddr.uiAddr;
1309 return (pDevPAddr->uiAddr != 0) ?
1310 PVRSRV_OK : PVRSRV_ERROR_INVALID_PARAMS;
1313 enum PVRSRV_ERROR SGXGetMMUPDAddrKM(void *hDevCookie,
1314 void *hDevMemContext,
1315 struct IMG_DEV_PHYADDR *psPDDevPAddr)
1317 if (!hDevCookie || !hDevMemContext || !psPDDevPAddr)
1318 return PVRSRV_ERROR_INVALID_PARAMS;
1321 ((struct BM_CONTEXT *)hDevMemContext)->psMMUContext->sPDDevPAddr;
/*
 * MMU_BIFResetPDAlloc - allocate the 3-page BIF-reset block (PD, PT,
 * data page) used while resetting the SGX BIF.
 *
 * Allocates three contiguous MMU pages either from OS pages (no local
 * arena) or from the local device memory arena mapped via OSMapPhysToLin,
 * records their device-physical addresses in psDevInfo, zeroes the PD/PT
 * pages and fills the third page with a 0xDB poison pattern.
 * NOTE(review): error paths after a successful allocation return without
 * releasing it in the visible lines; the elided lines may clean up —
 * verify against the full source.
 */
1326 enum PVRSRV_ERROR MMU_BIFResetPDAlloc(struct PVRSRV_SGXDEV_INFO *psDevInfo)
1328 enum PVRSRV_ERROR eError;
1329 struct SYS_DATA *psSysData;
1330 struct RA_ARENA *psLocalDevMemArena;
1331 void *hOSMemHandle = NULL;
1332 u8 *pui8MemBlock = NULL;
1333 struct IMG_SYS_PHYADDR sMemBlockSysPAddr;
1334 struct IMG_CPU_PHYADDR sMemBlockCpuPAddr;
1336 eError = SysAcquireData(&psSysData);
1337 if (eError != PVRSRV_OK) {
1338 PVR_DPF(PVR_DBG_ERROR,
1339 "MMU_BIFResetPDAlloc: ERROR call to SysAcquireData failed");
1343 psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];
/* No local arena: take the three pages directly from the OS. */
1345 if (psLocalDevMemArena == NULL) {
1348 OSAllocPages(PVRSRV_HAP_WRITECOMBINE |
1349 PVRSRV_HAP_KERNEL_ONLY, 3 * SGX_MMU_PAGE_SIZE,
1350 SGX_MMU_PAGE_SIZE, (void **)&pui8MemBlock,
1352 if (eError != PVRSRV_OK) {
1353 PVR_DPF(PVR_DBG_ERROR, "MMU_BIFResetPDAlloc: "
1354 "ERROR call to OSAllocPages failed");
1357 sMemBlockCpuPAddr = OSMapLinToCPUPhys(pui8MemBlock);
/* Local arena path: allocate sys-phys pages, then map them for CPU use. */
1359 if (RA_Alloc(psLocalDevMemArena, 3 * SGX_MMU_PAGE_SIZE,
1360 NULL, 0, SGX_MMU_PAGE_SIZE,
1361 &(sMemBlockSysPAddr.uiAddr)) != IMG_TRUE) {
1362 PVR_DPF(PVR_DBG_ERROR, "MMU_BIFResetPDAlloc: "
1363 "ERROR call to RA_Alloc failed");
1364 return PVRSRV_ERROR_OUT_OF_MEMORY;
1367 sMemBlockCpuPAddr = SysSysPAddrToCpuPAddr(sMemBlockSysPAddr);
1368 pui8MemBlock = (void __force *)OSMapPhysToLin(sMemBlockCpuPAddr,
1369 SGX_MMU_PAGE_SIZE * 3,
1370 PVRSRV_HAP_WRITECOMBINE |
1371 PVRSRV_HAP_KERNEL_ONLY,
1373 if (!pui8MemBlock) {
1374 PVR_DPF(PVR_DBG_ERROR, "MMU_BIFResetPDAlloc: "
1375 "ERROR failed to map page tables");
1376 return PVRSRV_ERROR_BAD_MAPPING;
/* Publish the block: page 0 = PD, page 1 = PT, page 2 = data page,
 * laid out contiguously in device-physical space. */
1380 psDevInfo->hBIFResetPDOSMemHandle = hOSMemHandle;
1381 psDevInfo->sBIFResetPDDevPAddr =
1382 SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sMemBlockCpuPAddr);
1383 psDevInfo->sBIFResetPTDevPAddr.uiAddr =
1384 psDevInfo->sBIFResetPDDevPAddr.uiAddr + SGX_MMU_PAGE_SIZE;
1385 psDevInfo->sBIFResetPageDevPAddr.uiAddr =
1386 psDevInfo->sBIFResetPTDevPAddr.uiAddr + SGX_MMU_PAGE_SIZE;
1387 psDevInfo->pui32BIFResetPD = (u32 *) pui8MemBlock;
1388 psDevInfo->pui32BIFResetPT =
1389 (u32 *) (pui8MemBlock + SGX_MMU_PAGE_SIZE);
1391 OSMemSet(psDevInfo->pui32BIFResetPD, 0, SGX_MMU_PAGE_SIZE);
1392 OSMemSet(psDevInfo->pui32BIFResetPT, 0, SGX_MMU_PAGE_SIZE);
/* Poison the data page so stray BIF accesses are recognisable. */
1394 OSMemSet(pui8MemBlock + (2 * SGX_MMU_PAGE_SIZE), 0xDB,
/*
 * MMU_BIFResetPDFree - release the 3-page BIF-reset block allocated by
 * MMU_BIFResetPDAlloc, mirroring its allocation path (OS pages vs.
 * local device memory arena).
 */
1400 void MMU_BIFResetPDFree(struct PVRSRV_SGXDEV_INFO *psDevInfo)
1402 enum PVRSRV_ERROR eError;
1403 struct SYS_DATA *psSysData;
1404 struct RA_ARENA *psLocalDevMemArena;
1405 struct IMG_SYS_PHYADDR sPDSysPAddr;
1407 eError = SysAcquireData(&psSysData);
1408 if (eError != PVRSRV_OK) {
1409 PVR_DPF(PVR_DBG_ERROR, "MMU_BIFResetPDFree: "
1410 "ERROR call to SysAcquireData failed");
1414 psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];
/* OS-allocated block: free the pages directly... */
1416 if (psLocalDevMemArena == NULL) {
1417 OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
1418 3 * SGX_MMU_PAGE_SIZE,
1419 psDevInfo->pui32BIFResetPD,
1420 psDevInfo->hBIFResetPDOSMemHandle);
/* ...arena-allocated block: unmap the kernel mapping, then return the
 * sys-phys range (derived from the PD device address) to the arena. */
1422 OSUnMapPhysToLin((void __force __iomem *)
1423 psDevInfo->pui32BIFResetPD,
1424 3 * SGX_MMU_PAGE_SIZE,
1425 PVRSRV_HAP_WRITECOMBINE |
1426 PVRSRV_HAP_KERNEL_ONLY,
1427 psDevInfo->hBIFResetPDOSMemHandle);
1430 SysDevPAddrToSysPAddr(PVRSRV_DEVICE_TYPE_SGX,
1431 psDevInfo->sBIFResetPDDevPAddr);
1432 RA_Free(psLocalDevMemArena, sPDSysPAddr.uiAddr, IMG_FALSE);
1436 u32 mmu_get_page_dir(struct MMU_CONTEXT *psMMUContext)
1438 return psMMUContext->sPDDevPAddr.uiAddr;