[sgx.git] / pvr / mmu.c
1 /**********************************************************************
2  *
3  * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope it will be useful but, except
10  * as otherwise stated in writing, without any warranty; without even the
11  * implied warranty of merchantability or fitness for a particular purpose.
12  * See the GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License along with
15  * this program; if not, write to the Free Software Foundation, Inc.,
16  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17  *
18  * The full GNU General Public License is included in this distribution in
19  * the file called "COPYING".
20  *
21  * Contact Information:
22  * Imagination Technologies Ltd. <gpl-support@imgtec.com>
23  * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
24  *
25  ******************************************************************************/
26
27 #include "sgxdefs.h"
28 #include "sgxmmu.h"
29 #include "services_headers.h"
30 #include "buffer_manager.h"
31 #include "hash.h"
32 #include "ra.h"
33 #include "pvr_pdump.h"
34 #include "sgxapi_km.h"
35 #include "sgx_bridge_km.h"
36 #include "sgxinfo.h"
37 #include "sgxinfokm.h"
38 #include "mmu.h"
39
40 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_PVR_DEBUG)
41 #include "pvr_debugfs.h"
42 #include <linux/io.h>
43 #endif
44
45 #define UINT32_MAX_VALUE        0xFFFFFFFFUL
46
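/*
 * Bookkeeping for one page-table page: its kernel mapping, the OS memory
 * handle it was allocated with, and how many of its PTEs are currently
 * valid (a table whose count reaches zero is a candidate for being freed).
 */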
47 struct MMU_PT_INFO {
48         void *hPTPageOSMemHandle;
49         void *PTPageCpuVAddr;
50         u32 ui32ValidPTECount;
51 };
52
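/*
 * One MMU context: a page directory (CPU mapping, device physical address
 * and OS handle), one PT-info slot per page-directory entry, and a link
 * into the device-wide context list kept in psDevInfo->pvMMUContextList.
 */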
53 struct MMU_CONTEXT {
54         struct PVRSRV_DEVICE_NODE *psDeviceNode;
55         void *pvPDCpuVAddr;
56         struct IMG_DEV_PHYADDR sPDDevPAddr;
57         void *hPDOSMemHandle;
58         struct MMU_PT_INFO *apsPTInfoList[1024];
59         struct PVRSRV_SGXDEV_INFO *psDevInfo;
60         struct MMU_CONTEXT *psNext;
61 };
62
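/*
 * Per-heap state: the owning MMU context, the heap's extent in page-table
 * terms (base PT index, PT page count, PTE count), the RA arena used for
 * device virtual address allocation, and the device arena descriptor.
 */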
63 struct MMU_HEAP {
64         struct MMU_CONTEXT *psMMUContext;
65
66         u32 ui32PTBaseIndex;
67         u32 ui32PTPageCount;
68         u32 ui32PTEntryCount;
69
70         struct RA_ARENA *psVMArena;
71
72         struct DEV_ARENA_DESCRIPTOR *psDevArena;
73 };
74
75 #define PAGE_TEST                                       0
76
77
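/*
 * The two cache-invalidate helpers below only record the request by setting
 * bits in psDevInfo->ui32CacheControl; nothing in this file performs the
 * actual BIF cache invalidation, which is presumably done wherever the
 * flags are consumed elsewhere in the driver.
 */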
78 void MMU_InvalidateDirectoryCache(struct PVRSRV_SGXDEV_INFO *psDevInfo)
79 {
80         psDevInfo->ui32CacheControl |= SGX_BIF_INVALIDATE_PDCACHE;
81 }
82
83 static void MMU_InvalidatePageTableCache(struct PVRSRV_SGXDEV_INFO *psDevInfo)
84 {
85         psDevInfo->ui32CacheControl |= SGX_BIF_INVALIDATE_PTCACHE;
86 }
87
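/*
 * Despite its name this does not allocate any page-table pages: it only
 * derives the heap's PT bookkeeping (PTE count, base index into the page
 * directory, number of PT pages) from the device arena.  The PT pages
 * themselves are allocated lazily by _DeferredAllocPagetables().
 */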
88 static IMG_BOOL _AllocPageTables(struct MMU_HEAP *pMMUHeap)
89 {
90         PVR_DPF(PVR_DBG_MESSAGE, "_AllocPageTables()");
91
92         PVR_ASSERT(pMMUHeap != NULL);
93         PVR_ASSERT(HOST_PAGESIZE() == SGX_MMU_PAGE_SIZE);
94
95         if (pMMUHeap == NULL) {
96                 PVR_DPF(PVR_DBG_ERROR, "_AllocPageTables: invalid parameter");
97                 return IMG_FALSE;
98         }
99
100         pMMUHeap->ui32PTEntryCount =
101             pMMUHeap->psDevArena->ui32Size >> SGX_MMU_PAGE_SHIFT;
102
103         pMMUHeap->ui32PTBaseIndex =
104             (pMMUHeap->psDevArena->BaseDevVAddr.
105              uiAddr & (SGX_MMU_PD_MASK | SGX_MMU_PT_MASK)) >>
106                                                         SGX_MMU_PAGE_SHIFT;
107
108         pMMUHeap->ui32PTPageCount =
109             (pMMUHeap->ui32PTEntryCount + SGX_MMU_PT_SIZE - 1) >>
110                                                         SGX_MMU_PT_SHIFT;
111
112         return IMG_TRUE;
113 }
114
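/*
 * Free one page-table page of the heap (ui32PTIndex is relative to the
 * heap's first page-directory entry): clear the corresponding PD entry in
 * every MMU context for shared heaps (or just the owning context otherwise),
 * zero and release the PT page, and adjust the heap's PTE count.
 */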
115 static void _DeferredFreePageTable(struct MMU_HEAP *pMMUHeap, u32 ui32PTIndex)
116 {
117         u32 *pui32PDEntry;
118         u32 i;
119         u32 ui32PDIndex;
120         struct SYS_DATA *psSysData;
121         struct MMU_PT_INFO **ppsPTInfoList;
122
123         if (SysAcquireData(&psSysData) != PVRSRV_OK) {
124                 PVR_DPF(PVR_DBG_ERROR, "_DeferredFreePageTable: "
125                                         "ERROR call to SysAcquireData failed");
126                 return;
127         }
128
129         ui32PDIndex =
130             pMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT +
131                                                           SGX_MMU_PT_SHIFT);
132
133         ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
134
135         {
136                 PVR_ASSERT(ppsPTInfoList[ui32PTIndex] == NULL ||
137                            ppsPTInfoList[ui32PTIndex]->ui32ValidPTECount ==
138                                                                          0);
139         }
140
141         PDUMPCOMMENT("Free page table (page count == %08X)",
142                      pMMUHeap->ui32PTPageCount);
143         if (ppsPTInfoList[ui32PTIndex]
144             && ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr)
145                 PDUMPFREEPAGETABLE(ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr);
146
147         switch (pMMUHeap->psDevArena->DevMemHeapType) {
148         case DEVICE_MEMORY_HEAP_SHARED:
149         case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
150                 {
151                         struct MMU_CONTEXT *psMMUContext =
152                           (struct MMU_CONTEXT *)
153                             pMMUHeap->psMMUContext->psDevInfo->pvMMUContextList;
154
155                         while (psMMUContext) {
156                                 pui32PDEntry =
157                                     (u32 *) psMMUContext->pvPDCpuVAddr;
158                                 pui32PDEntry += ui32PDIndex;
159                                 pui32PDEntry[ui32PTIndex] = 0;
160                                 PDUMPPAGETABLE((void *) &pui32PDEntry
161                                                [ui32PTIndex],
162                                                sizeof(u32), IMG_FALSE,
163                                                PDUMP_PT_UNIQUETAG,
164                                                PDUMP_PT_UNIQUETAG);
165                                 psMMUContext = psMMUContext->psNext;
166                         }
167                         break;
168                 }
169         case DEVICE_MEMORY_HEAP_PERCONTEXT:
170         case DEVICE_MEMORY_HEAP_KERNEL:
171                 {
172
173                         pui32PDEntry =
174                             (u32 *) pMMUHeap->psMMUContext->pvPDCpuVAddr;
175                         pui32PDEntry += ui32PDIndex;
176                         pui32PDEntry[ui32PTIndex] = 0;
177                         PDUMPPAGETABLE((void *) &pui32PDEntry[ui32PTIndex],
178                                        sizeof(u32), IMG_FALSE,
179                                        PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
180                         break;
181                 }
182         default:
183                 {
184                         PVR_DPF(PVR_DBG_ERROR,
185                         "_DeferredFreePageTable: ERROR invalid heap type");
186                         return;
187                 }
188         }
189
190         if (ppsPTInfoList[ui32PTIndex] != NULL) {
191                 if (ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr != NULL) {
192                         u32 *pui32Tmp;
193
194                         pui32Tmp =
195                             (u32 *) ppsPTInfoList[ui32PTIndex]->
196                             PTPageCpuVAddr;
197
198                         for (i = 0;
199                              (i < pMMUHeap->ui32PTEntryCount) && (i < 1024);
200                              i++)
201                                 pui32Tmp[i] = 0;
202
203                         if (pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->
204                             psLocalDevMemArena == NULL) {
205                                 OSFreePages(PVRSRV_HAP_WRITECOMBINE |
206                                             PVRSRV_HAP_KERNEL_ONLY,
207                                             SGX_MMU_PAGE_SIZE,
208                                             ppsPTInfoList[ui32PTIndex]->
209                                                     PTPageCpuVAddr,
210                                             ppsPTInfoList[ui32PTIndex]->
211                                                     hPTPageOSMemHandle);
212                         } else {
213                                 struct IMG_SYS_PHYADDR sSysPAddr;
214                                 struct IMG_CPU_PHYADDR sCpuPAddr;
215
216                                 sCpuPAddr =
217                                     OSMapLinToCPUPhys(ppsPTInfoList
218                                                       [ui32PTIndex]->
219                                                       PTPageCpuVAddr);
220                                 sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);
221
222                                 OSUnMapPhysToLin((void __force __iomem *)
223                                                    ppsPTInfoList[ui32PTIndex]->
224                                                         PTPageCpuVAddr,
225                                                  SGX_MMU_PAGE_SIZE,
226                                                  PVRSRV_HAP_WRITECOMBINE |
227                                                          PVRSRV_HAP_KERNEL_ONLY,
228                                                  ppsPTInfoList[ui32PTIndex]->
229                                                          hPTPageOSMemHandle);
230
231                                 RA_Free(pMMUHeap->psDevArena->
232                                                 psDeviceMemoryHeapInfo->
233                                                         psLocalDevMemArena,
234                                         sSysPAddr.uiAddr, IMG_FALSE);
235                         }
236
237                         pMMUHeap->ui32PTEntryCount -= i;
238                 } else {
239                         pMMUHeap->ui32PTEntryCount -= 1024;
240                 }
241
242                 OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
243                           sizeof(struct MMU_PT_INFO),
244                           ppsPTInfoList[ui32PTIndex], NULL);
245                 ppsPTInfoList[ui32PTIndex] = NULL;
246         } else {
247                 pMMUHeap->ui32PTEntryCount -= 1024;
248         }
249
250         PDUMPCOMMENT("Finished free page table (page count == %08X)",
251                      pMMUHeap->ui32PTPageCount);
252 }
253
254 static void _DeferredFreePageTables(struct MMU_HEAP *pMMUHeap)
255 {
256         u32 i;
257
258         for (i = 0; i < pMMUHeap->ui32PTPageCount; i++)
259                 _DeferredFreePageTable(pMMUHeap, i);
260         MMU_InvalidateDirectoryCache(pMMUHeap->psMMUContext->psDevInfo);
261 }
262
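/*
 * Make sure page tables exist for [DevVAddr, DevVAddr + ui32Size): for each
 * page-directory entry in the range that has no page table yet, allocate a
 * PT page (from OS pages or from the local device memory arena), zero it
 * and write the PD entry; for shared heaps the PD entry is written into
 * every MMU context on the device.
 */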
263 static IMG_BOOL _DeferredAllocPagetables(struct MMU_HEAP *pMMUHeap,
264                                 struct IMG_DEV_VIRTADDR DevVAddr, u32 ui32Size)
265 {
266         u32 ui32PTPageCount;
267         u32 ui32PDIndex;
268         u32 i;
269         u32 *pui32PDEntry;
270         struct MMU_PT_INFO **ppsPTInfoList;
271         struct SYS_DATA *psSysData;
272         struct IMG_DEV_VIRTADDR sHighDevVAddr;
273
274         PVR_ASSERT(DevVAddr.uiAddr < (1 << SGX_FEATURE_ADDRESS_SPACE_SIZE));
275
276         if (SysAcquireData(&psSysData) != PVRSRV_OK)
277                 return IMG_FALSE;
278
279         ui32PDIndex =
280             DevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
281
282         if ((UINT32_MAX_VALUE - DevVAddr.uiAddr) <
283             (ui32Size + (1 << (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT)) - 1)) {
284
285                 sHighDevVAddr.uiAddr = UINT32_MAX_VALUE;
286         } else {
287                 sHighDevVAddr.uiAddr = DevVAddr.uiAddr + ui32Size +
288                                         (1 << (SGX_MMU_PAGE_SHIFT +
289                                                SGX_MMU_PT_SHIFT)) - 1;
290         }
291
292         ui32PTPageCount =
293             sHighDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
294
295         ui32PTPageCount -= ui32PDIndex;
296
297         pui32PDEntry = (u32 *) pMMUHeap->psMMUContext->pvPDCpuVAddr;
298         pui32PDEntry += ui32PDIndex;
299
300         ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
301
302         PDUMPCOMMENT("Alloc page table (page count == %08X)", ui32PTPageCount);
303         PDUMPCOMMENT("Page directory mods (page count == %08X)",
304                      ui32PTPageCount);
305
306         for (i = 0; i < ui32PTPageCount; i++) {
307                 if (ppsPTInfoList[i] == NULL) {
308                         if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
309                                    sizeof(struct MMU_PT_INFO),
310                                    (void **) &ppsPTInfoList[i], NULL)
311                                         != PVRSRV_OK) {
312                                 PVR_DPF(PVR_DBG_ERROR,
313                                         "_DeferredAllocPagetables: "
314                                         "ERROR call to OSAllocMem failed");
315                                 return IMG_FALSE;
316                         }
317                         OSMemSet(ppsPTInfoList[i], 0,
318                                  sizeof(struct MMU_PT_INFO));
319                 }
320
321                 if (ppsPTInfoList[i]->hPTPageOSMemHandle == NULL &&
322                     ppsPTInfoList[i]->PTPageCpuVAddr == NULL) {
323                         struct IMG_CPU_PHYADDR sCpuPAddr;
324                         struct IMG_DEV_PHYADDR sDevPAddr;
325
326                         PVR_ASSERT(pui32PDEntry[i] == 0);
327
328                         if (pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->
329                             psLocalDevMemArena == NULL) {
330                                 if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE |
331                                                      PVRSRV_HAP_KERNEL_ONLY,
332                                              SGX_MMU_PAGE_SIZE,
333                                              SGX_MMU_PAGE_SIZE,
334                                              (void **)&ppsPTInfoList[i]->
335                                                 PTPageCpuVAddr,
336                                              &ppsPTInfoList[i]->
337                                                 hPTPageOSMemHandle) !=
338                                     PVRSRV_OK) {
339                                         PVR_DPF(PVR_DBG_ERROR,
340                                            "_DeferredAllocPagetables: "
341                                            "ERROR call to OSAllocPages failed");
342                                         return IMG_FALSE;
343                                 }
344
345                                 if (ppsPTInfoList[i]->PTPageCpuVAddr) {
346                                         sCpuPAddr =
347                                             OSMapLinToCPUPhys(ppsPTInfoList[i]->
348                                                               PTPageCpuVAddr);
349                                 } else {
350                                         sCpuPAddr =
351                                             OSMemHandleToCpuPAddr(
352                                                 ppsPTInfoList[i]->
353                                                           hPTPageOSMemHandle,
354                                                 0);
355                                 }
356                                 sDevPAddr =
357                                     SysCpuPAddrToDevPAddr
358                                             (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
359                         } else {
360                                 struct IMG_SYS_PHYADDR sSysPAddr;
361
362                                 if (RA_Alloc(pMMUHeap->psDevArena->
363                                      psDeviceMemoryHeapInfo->psLocalDevMemArena,
364                                      SGX_MMU_PAGE_SIZE, NULL, 0,
365                                      SGX_MMU_PAGE_SIZE,
366                                      &(sSysPAddr.uiAddr)) != IMG_TRUE) {
367                                         PVR_DPF(PVR_DBG_ERROR,
368                                                "_DeferredAllocPagetables: "
369                                                "ERROR call to RA_Alloc failed");
370                                         return IMG_FALSE;
371                                 }
372
373                                 sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
374                                 ppsPTInfoList[i]->PTPageCpuVAddr =
375                                     (void __force *)
376                                     OSMapPhysToLin(sCpuPAddr, SGX_MMU_PAGE_SIZE,
377                                                    PVRSRV_HAP_WRITECOMBINE |
378                                                    PVRSRV_HAP_KERNEL_ONLY,
379                                                    &ppsPTInfoList[i]->
380                                                    hPTPageOSMemHandle);
381                                 if (!ppsPTInfoList[i]->PTPageCpuVAddr) {
382                                         PVR_DPF(PVR_DBG_ERROR,
383                                              "_DeferredAllocPagetables: "
384                                              "ERROR failed to map page tables");
385                                         return IMG_FALSE;
386                                 }
387
388                                 sDevPAddr = SysCpuPAddrToDevPAddr
389                                             (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
390
391                         }
392
393
394                         OSMemSet(ppsPTInfoList[i]->PTPageCpuVAddr, 0,
395                                  SGX_MMU_PAGE_SIZE);
396
397                         PDUMPMALLOCPAGETABLE(ppsPTInfoList[i]->PTPageCpuVAddr,
398                                              PDUMP_PT_UNIQUETAG);
399
400                         PDUMPPAGETABLE(ppsPTInfoList[i]->PTPageCpuVAddr,
401                                        SGX_MMU_PAGE_SIZE, IMG_TRUE,
402                                        PDUMP_PT_UNIQUETAG, PDUMP_PT_UNIQUETAG);
403
404                         switch (pMMUHeap->psDevArena->DevMemHeapType) {
405                         case DEVICE_MEMORY_HEAP_SHARED:
406                         case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
407                                 {
408                                         struct MMU_CONTEXT *psMMUContext =
409                                             (struct MMU_CONTEXT *)pMMUHeap->
410                                                     psMMUContext->psDevInfo->
411                                                             pvMMUContextList;
412
413                                         while (psMMUContext) {
414                                                 pui32PDEntry =
415                                                     (u32 *)psMMUContext->
416                                                                 pvPDCpuVAddr;
417                                                 pui32PDEntry += ui32PDIndex;
418
419                                                 pui32PDEntry[i] =
420                                                     sDevPAddr.uiAddr |
421                                                         SGX_MMU_PDE_VALID;
422
423                                                 PDUMPPAGETABLE
424                                                     ((void *)&pui32PDEntry[i],
425                                                      sizeof(u32), IMG_FALSE,
426                                                      PDUMP_PD_UNIQUETAG,
427                                                      PDUMP_PT_UNIQUETAG);
428
429                                                 psMMUContext =
430                                                     psMMUContext->psNext;
431                                         }
432                                         break;
433                                 }
434                         case DEVICE_MEMORY_HEAP_PERCONTEXT:
435                         case DEVICE_MEMORY_HEAP_KERNEL:
436                                 {
437                                         pui32PDEntry[i] = sDevPAddr.uiAddr |
438                                                              SGX_MMU_PDE_VALID;
439
440                                         PDUMPPAGETABLE((void *)&pui32PDEntry[i],
441                                                        sizeof(u32), IMG_FALSE,
442                                                        PDUMP_PD_UNIQUETAG,
443                                                        PDUMP_PT_UNIQUETAG);
444
445                                         break;
446                                 }
447                         default:
448                                 {
449                                         PVR_DPF(PVR_DBG_ERROR,
450                                                 "_DeferredAllocPagetables: "
451                                                 "ERROR invalid heap type");
452                                         return IMG_FALSE;
453                                 }
454                         }
455
456
457                         MMU_InvalidateDirectoryCache(pMMUHeap->psMMUContext->
458                                                      psDevInfo);
459                 } else {
460
461                         PVR_ASSERT(pui32PDEntry[i] != 0);
462                 }
463         }
464
465         return IMG_TRUE;
466 }
467
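/*
 * Create an MMU context: allocate and zero a page-directory page, record
 * its CPU mapping and device physical address, link the context into the
 * device's context list and return the PD device physical address.
 */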
468 enum PVRSRV_ERROR MMU_Initialise(struct PVRSRV_DEVICE_NODE *psDeviceNode,
469                             struct MMU_CONTEXT **ppsMMUContext,
470                             struct IMG_DEV_PHYADDR *psPDDevPAddr)
471 {
472         u32 *pui32Tmp;
473         u32 i;
474         void *pvPDCpuVAddr;
475         struct IMG_DEV_PHYADDR sPDDevPAddr;
476         struct IMG_CPU_PHYADDR sCpuPAddr;
477         struct MMU_CONTEXT *psMMUContext;
478         void *hPDOSMemHandle;
479         struct SYS_DATA *psSysData;
480         struct PVRSRV_SGXDEV_INFO *psDevInfo;
481
482         PVR_DPF(PVR_DBG_MESSAGE, "MMU_Initialise");
483
484         if (SysAcquireData(&psSysData) != PVRSRV_OK) {
485                 PVR_DPF(PVR_DBG_ERROR,
486                          "MMU_Initialise: ERROR call to SysAcquireData failed");
487                 return PVRSRV_ERROR_GENERIC;
488         }
489
490         if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
491                    sizeof(struct MMU_CONTEXT), (void **) &psMMUContext, NULL)
492                         != PVRSRV_OK) {
493                 PVR_DPF(PVR_DBG_ERROR,
494                          "MMU_Initialise: ERROR call to OSAllocMem failed");
495                 return PVRSRV_ERROR_GENERIC;
496         }
497         OSMemSet(psMMUContext, 0, sizeof(struct MMU_CONTEXT));
498
499         psDevInfo = (struct PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
500         psMMUContext->psDevInfo = psDevInfo;
501
502         psMMUContext->psDeviceNode = psDeviceNode;
503
504         if (psDeviceNode->psLocalDevMemArena == NULL) {
505                 if (OSAllocPages
506                     (PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
507                      SGX_MMU_PAGE_SIZE, SGX_MMU_PAGE_SIZE, &pvPDCpuVAddr,
508                      &hPDOSMemHandle) != PVRSRV_OK) {
509                         PVR_DPF(PVR_DBG_ERROR, "MMU_Initialise: "
510                                         "ERROR call to OSAllocPages failed");
511                         return PVRSRV_ERROR_GENERIC;
512                 }
513
514                 if (pvPDCpuVAddr)
515                         sCpuPAddr = OSMapLinToCPUPhys(pvPDCpuVAddr);
516                 else
517                         sCpuPAddr = OSMemHandleToCpuPAddr(hPDOSMemHandle, 0);
518                 sPDDevPAddr =
519                     SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
520         } else {
521                 struct IMG_SYS_PHYADDR sSysPAddr;
522
523                 if (RA_Alloc(psDeviceNode->psLocalDevMemArena,
524                              SGX_MMU_PAGE_SIZE, NULL, 0, SGX_MMU_PAGE_SIZE,
525                              &(sSysPAddr.uiAddr)) != IMG_TRUE) {
526                         PVR_DPF(PVR_DBG_ERROR, "MMU_Initialise: "
527                                         "ERROR call to RA_Alloc failed");
528                         return PVRSRV_ERROR_GENERIC;
529                 }
530
531                 sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
532                 sPDDevPAddr =
533                     SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysPAddr);
534                 pvPDCpuVAddr = (void __force *)
535                     OSMapPhysToLin(sCpuPAddr, SGX_MMU_PAGE_SIZE,
536                                    PVRSRV_HAP_WRITECOMBINE |
537                                    PVRSRV_HAP_KERNEL_ONLY, &hPDOSMemHandle);
538                 if (!pvPDCpuVAddr) {
539                         PVR_DPF(PVR_DBG_ERROR, "MMU_Initialise: "
540                                         "ERROR failed to map page tables");
541                         return PVRSRV_ERROR_GENERIC;
542                 }
543         }
544
545         PDUMPCOMMENT("Alloc page directory");
546
547         PDUMPMALLOCPAGETABLE(pvPDCpuVAddr, PDUMP_PD_UNIQUETAG);
548
549         if (pvPDCpuVAddr) {
550                 pui32Tmp = (u32 *) pvPDCpuVAddr;
551         } else {
552                 PVR_DPF(PVR_DBG_ERROR,
553                          "MMU_Initialise: pvPDCpuVAddr invalid");
554                 return PVRSRV_ERROR_GENERIC;
555         }
556
557         for (i = 0; i < SGX_MMU_PD_SIZE; i++)
558                 pui32Tmp[i] = 0;
559
560         PDUMPCOMMENT("Page directory contents");
561         PDUMPPAGETABLE(pvPDCpuVAddr, SGX_MMU_PAGE_SIZE, IMG_TRUE,
562                        PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
563
564         psMMUContext->pvPDCpuVAddr = pvPDCpuVAddr;
565         psMMUContext->sPDDevPAddr = sPDDevPAddr;
566         psMMUContext->hPDOSMemHandle = hPDOSMemHandle;
567
568         *ppsMMUContext = psMMUContext;
569
570         *psPDDevPAddr = sPDDevPAddr;
571
572         psMMUContext->psNext = (struct MMU_CONTEXT *)
573                                                 psDevInfo->pvMMUContextList;
574         psDevInfo->pvMMUContextList = (void *) psMMUContext;
575
576
577         return PVRSRV_OK;
578 }
579
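/*
 * Destroy an MMU context: zero and free the page directory and unlink the
 * context from the device's context list.
 */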
580 void MMU_Finalise(struct MMU_CONTEXT *psMMUContext)
581 {
582         u32 *pui32Tmp, i;
583         struct SYS_DATA *psSysData;
584         struct MMU_CONTEXT **ppsMMUContext;
585
586         if (SysAcquireData(&psSysData) != PVRSRV_OK) {
587                 PVR_DPF(PVR_DBG_ERROR,
588                          "MMU_Finalise: ERROR call to SysAcquireData failed");
589                 return;
590         }
591
592         PDUMPCOMMENT("Free page directory");
593         PDUMPFREEPAGETABLE(psMMUContext->pvPDCpuVAddr);
594
595         pui32Tmp = (u32 *) psMMUContext->pvPDCpuVAddr;
596
597         for (i = 0; i < SGX_MMU_PD_SIZE; i++)
598                 pui32Tmp[i] = 0;
599
600         if (psMMUContext->psDeviceNode->psLocalDevMemArena == NULL) {
601                 OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
602                             SGX_MMU_PAGE_SIZE,
603                             psMMUContext->pvPDCpuVAddr,
604                             psMMUContext->hPDOSMemHandle);
605
606         } else {
607                 struct IMG_SYS_PHYADDR sSysPAddr;
608                 struct IMG_CPU_PHYADDR sCpuPAddr;
609
610                 sCpuPAddr = OSMapLinToCPUPhys(psMMUContext->pvPDCpuVAddr);
611                 sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);
612
613                 OSUnMapPhysToLin((void __iomem __force *)
614                                         psMMUContext->pvPDCpuVAddr,
615                                  SGX_MMU_PAGE_SIZE,
616                                  PVRSRV_HAP_WRITECOMBINE |
617                                                 PVRSRV_HAP_KERNEL_ONLY,
618                                  psMMUContext->hPDOSMemHandle);
619
620                 RA_Free(psMMUContext->psDeviceNode->psLocalDevMemArena,
621                         sSysPAddr.uiAddr, IMG_FALSE);
622
623         }
624
625         PVR_DPF(PVR_DBG_MESSAGE, "MMU_Finalise");
626
627         ppsMMUContext =
628             (struct MMU_CONTEXT **) &psMMUContext->psDevInfo->pvMMUContextList;
629         while (*ppsMMUContext) {
630                 if (*ppsMMUContext == psMMUContext) {
631
632                         *ppsMMUContext = psMMUContext->psNext;
633                         break;
634                 }
635
636                 ppsMMUContext = &((*ppsMMUContext)->psNext);
637         }
638
639         OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct MMU_CONTEXT),
640                   psMMUContext, NULL);
641 }
642
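/*
 * Copy the page-directory entries covering a shared heap from the heap's
 * owning context into psMMUContext so its mappings become visible there,
 * invalidating the PD cache if anything was copied.
 */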
643 void MMU_InsertHeap(struct MMU_CONTEXT *psMMUContext,
644                     struct MMU_HEAP *psMMUHeap)
645 {
646         u32 *pui32PDCpuVAddr = (u32 *)psMMUContext->pvPDCpuVAddr;
647         u32 *pui32KernelPDCpuVAddr = (u32 *)
648                                         psMMUHeap->psMMUContext->pvPDCpuVAddr;
649         u32 ui32PDEntry;
650         IMG_BOOL bInvalidateDirectoryCache = IMG_FALSE;
651
652         pui32PDCpuVAddr +=
653             psMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT +
654                                                            SGX_MMU_PT_SHIFT);
655         pui32KernelPDCpuVAddr +=
656             psMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT +
657                                                            SGX_MMU_PT_SHIFT);
658
659         PDUMPCOMMENT("Page directory shared heap range copy");
660
661         for (ui32PDEntry = 0; ui32PDEntry < psMMUHeap->ui32PTPageCount;
662              ui32PDEntry++) {
663
664                 PVR_ASSERT(pui32PDCpuVAddr[ui32PDEntry] == 0);
665
666                 pui32PDCpuVAddr[ui32PDEntry] =
667                     pui32KernelPDCpuVAddr[ui32PDEntry];
668                 if (pui32PDCpuVAddr[ui32PDEntry]) {
669                         PDUMPPAGETABLE((void *) &pui32PDCpuVAddr[ui32PDEntry],
670                                        sizeof(u32), IMG_FALSE,
671                                        PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
672
673                         bInvalidateDirectoryCache = IMG_TRUE;
674                 }
675         }
676
677         if (bInvalidateDirectoryCache)
678                 MMU_InvalidateDirectoryCache(psMMUContext->psDevInfo);
679 }
680
681 #if defined(PDUMP)
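/*
 * Dump the page-table entries backing [DevVAddr, DevVAddr + uSize) to the
 * pdump stream, walking at most one page table (1024 entries) at a time.
 */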
682 static void MMU_PDumpPageTables(struct MMU_HEAP *pMMUHeap,
683                     struct IMG_DEV_VIRTADDR DevVAddr,
684                     size_t uSize, IMG_BOOL bForUnmap, void *hUniqueTag)
685 {
686         u32 ui32NumPTEntries;
687         u32 ui32PTIndex;
688         u32 *pui32PTEntry;
689
690         struct MMU_PT_INFO **ppsPTInfoList;
691         u32 ui32PDIndex;
692         u32 ui32PTDumpCount;
693
694         ui32NumPTEntries =
695             (uSize + SGX_MMU_PAGE_SIZE - 1) >> SGX_MMU_PAGE_SHIFT;
696
697         ui32PDIndex =
698             DevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
699
700         ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
701
702         ui32PTIndex = (DevVAddr.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
703
704         PDUMPCOMMENT("Page table mods (num entries == %08X) %s",
705                      ui32NumPTEntries, bForUnmap ? "(for unmap)" : "");
706
707         while (ui32NumPTEntries > 0) {
708                 struct MMU_PT_INFO *psPTInfo = *ppsPTInfoList++;
709
710                 if (ui32NumPTEntries <= 1024 - ui32PTIndex)
711                         ui32PTDumpCount = ui32NumPTEntries;
712                 else
713                         ui32PTDumpCount = 1024 - ui32PTIndex;
714
715                 if (psPTInfo) {
716                         pui32PTEntry = (u32 *)psPTInfo->PTPageCpuVAddr;
717                         PDUMPPAGETABLE((void *)&pui32PTEntry[ui32PTIndex],
718                                        ui32PTDumpCount * sizeof(u32), IMG_FALSE,
719                                        PDUMP_PT_UNIQUETAG, hUniqueTag);
720                 }
721
722                 ui32NumPTEntries -= ui32PTDumpCount;
723
724                 ui32PTIndex = 0;
725         }
726
727         PDUMPCOMMENT("Finished page table mods %s",
728                      bForUnmap ? "(for unmap)" : "");
729 }
730 #endif
731
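/*
 * Invalidate the PTEs for ui32PageCount pages starting at sDevVAddr and
 * free any page table whose valid-PTE count drops to zero.  This also backs
 * MMU_FreePageTables(), the callback handed to the heap's RA arena in
 * MMU_Create().
 */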
732 static void MMU_UnmapPagesAndFreePTs(struct MMU_HEAP *psMMUHeap,
733                          struct IMG_DEV_VIRTADDR sDevVAddr,
734                          u32 ui32PageCount, void *hUniqueTag)
735 {
736         u32 uPageSize = HOST_PAGESIZE();
737         struct IMG_DEV_VIRTADDR sTmpDevVAddr;
738         u32 i;
739         u32 ui32PDIndex;
740         u32 ui32PTIndex;
741         u32 *pui32Tmp;
742         IMG_BOOL bInvalidateDirectoryCache = IMG_FALSE;
743
744 #if !defined(PDUMP)
745         PVR_UNREFERENCED_PARAMETER(hUniqueTag);
746 #endif
747
748         sTmpDevVAddr = sDevVAddr;
749
750         for (i = 0; i < ui32PageCount; i++) {
751                 struct MMU_PT_INFO **ppsPTInfoList;
752
753                 ui32PDIndex =
754                     sTmpDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT +
755                                             SGX_MMU_PT_SHIFT);
756
757                 ppsPTInfoList =
758                     &psMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
759
760                 {
761                         ui32PTIndex = (sTmpDevVAddr.uiAddr & SGX_MMU_PT_MASK)
762                                                 >> SGX_MMU_PAGE_SHIFT;
763
764                         if (!ppsPTInfoList[0]) {
765                                 PVR_DPF(PVR_DBG_MESSAGE,
766                                         "MMU_UnmapPagesAndFreePTs: "
767                                         "Invalid PT for alloc at VAddr:0x%08X "
768                                         "(VAddrIni:0x%08X AllocPage:%u) "
769                                         "PDIdx:%u PTIdx:%u",
770                                          sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr,
771                                          i, ui32PDIndex, ui32PTIndex);
772
773                                 sTmpDevVAddr.uiAddr += uPageSize;
774
775                                 continue;
776                         }
777
778                         pui32Tmp = (u32 *)ppsPTInfoList[0]->PTPageCpuVAddr;
779
780                         if (!pui32Tmp)
781                                 continue;
782
783                         if (pui32Tmp[ui32PTIndex] & SGX_MMU_PTE_VALID) {
784                                 ppsPTInfoList[0]->ui32ValidPTECount--;
785                         } else {
786                                 PVR_DPF(PVR_DBG_MESSAGE,
787                                          "MMU_UnmapPagesAndFreePTs: "
788                                          "Page is already invalid for alloc at "
789                                          "VAddr:0x%08X "
790                                          "(VAddrIni:0x%08X AllocPage:%u) "
791                                          "PDIdx:%u PTIdx:%u",
792                                          sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr,
793                                          i, ui32PDIndex, ui32PTIndex);
794                         }
795
796                         PVR_ASSERT((s32)ppsPTInfoList[0]->ui32ValidPTECount >=
797                                                                         0);
798                         pui32Tmp[ui32PTIndex] = 0;
799                 }
800
801                 if (ppsPTInfoList[0]
802                     && ppsPTInfoList[0]->ui32ValidPTECount == 0) {
803                         _DeferredFreePageTable(psMMUHeap,
804                                                ui32PDIndex - (psMMUHeap->
805                                                    ui32PTBaseIndex >>
806                                                        SGX_MMU_PT_SHIFT));
807                         bInvalidateDirectoryCache = IMG_TRUE;
808                 }
809
810                 sTmpDevVAddr.uiAddr += uPageSize;
811         }
812
813         if (bInvalidateDirectoryCache) {
814                 MMU_InvalidateDirectoryCache(psMMUHeap->psMMUContext->
815                                                              psDevInfo);
816         } else {
817                 MMU_InvalidatePageTableCache(psMMUHeap->psMMUContext->
818                                                              psDevInfo);
819         }
820
821 #if defined(PDUMP)
822         MMU_PDumpPageTables(psMMUHeap, sDevVAddr, uPageSize * ui32PageCount,
823                             IMG_TRUE, hUniqueTag);
824 #endif
825 }
826
827 static void MMU_FreePageTables(void *pvMMUHeap, u32 ui32Start, u32 ui32End,
828                                void *hUniqueTag)
829 {
830         struct MMU_HEAP *pMMUHeap = (struct MMU_HEAP *)pvMMUHeap;
831         struct IMG_DEV_VIRTADDR Start;
832
833         Start.uiAddr = ui32Start;
834
835         MMU_UnmapPagesAndFreePTs(pMMUHeap, Start,
836                                  (ui32End - ui32Start) / SGX_MMU_PAGE_SIZE,
837                                  hUniqueTag);
838 }
839
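/*
 * Create an MMU heap over psDevArena: set up the PT bookkeeping via
 * _AllocPageTables() and create an RA arena spanning the heap's device
 * virtual range, with MMU_FreePageTables()/pMMUHeap registered as the
 * arena's callback so page tables can be reclaimed when spans are released.
 */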
840 struct MMU_HEAP *MMU_Create(struct MMU_CONTEXT *psMMUContext,
841                             struct DEV_ARENA_DESCRIPTOR *psDevArena,
842                             struct RA_ARENA **ppsVMArena)
843 {
844         struct MMU_HEAP *pMMUHeap;
845         IMG_BOOL bRes;
846
847         PVR_ASSERT(psDevArena != NULL);
848
849         if (psDevArena == NULL) {
850                 PVR_DPF(PVR_DBG_ERROR, "MMU_Create: invalid parameter");
851                 return NULL;
852         }
853
854         if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
855                    sizeof(struct MMU_HEAP), (void **)&pMMUHeap, NULL)
856                         != PVRSRV_OK) {
857                 PVR_DPF(PVR_DBG_ERROR,
858                          "MMU_Create: ERROR call to OSAllocMem failed");
859                 return NULL;
860         }
861
862         pMMUHeap->psMMUContext = psMMUContext;
863         pMMUHeap->psDevArena = psDevArena;
864
865         bRes = _AllocPageTables(pMMUHeap);
866         if (!bRes) {
867                 PVR_DPF(PVR_DBG_ERROR,
868                          "MMU_Create: ERROR call to _AllocPageTables failed");
869                 OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct MMU_HEAP),
870                           pMMUHeap, NULL);
871                 return NULL;
872         }
873
874         pMMUHeap->psVMArena = RA_Create(psDevArena->pszName,
875                                         psDevArena->BaseDevVAddr.uiAddr,
876                                         psDevArena->ui32Size, NULL,
877                                         SGX_MMU_PAGE_SIZE, NULL, NULL,
878                                         MMU_FreePageTables, pMMUHeap);
879
880         if (pMMUHeap->psVMArena == NULL) {
881                 PVR_DPF(PVR_DBG_ERROR,
882                          "MMU_Create: ERROR call to RA_Create failed");
883                 _DeferredFreePageTables(pMMUHeap);
884                 OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct MMU_HEAP),
885                           pMMUHeap, NULL);
886                 return NULL;
887         }
888
889         *ppsVMArena = pMMUHeap->psVMArena;
890
891         return pMMUHeap;
892 }
893
894 void MMU_Delete(struct MMU_HEAP *pMMUHeap)
895 {
896         if (pMMUHeap != NULL) {
897                 PVR_DPF(PVR_DBG_MESSAGE, "MMU_Delete");
898
899                 if (pMMUHeap->psVMArena)
900                         RA_Delete(pMMUHeap->psVMArena);
901                 _DeferredFreePageTables(pMMUHeap);
902
903                 OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct MMU_HEAP),
904                           pMMUHeap, NULL);
905         }
906 }
907
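/*
 * Reserve device virtual space for an allocation (from the heap's VM arena,
 * unless PVRSRV_MEM_USER_SUPPLIED_DEVVADDR says the caller supplied the
 * address) and ensure page tables exist for the range; the reservation is
 * rolled back if the page-table allocation fails.
 */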
908 IMG_BOOL MMU_Alloc(struct MMU_HEAP *pMMUHeap, size_t uSize, u32 uFlags,
909                    u32 uDevVAddrAlignment, struct IMG_DEV_VIRTADDR *psDevVAddr)
910 {
911         IMG_BOOL bStatus;
912
913         PVR_DPF(PVR_DBG_MESSAGE,
914                  "MMU_Alloc: uSize=0x%x, flags=0x%x, align=0x%x",
915                  uSize, uFlags, uDevVAddrAlignment);
916
917         if ((uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) == 0) {
918                 bStatus = RA_Alloc(pMMUHeap->psVMArena, uSize, NULL, 0,
919                                    uDevVAddrAlignment, &(psDevVAddr->uiAddr));
920                 if (!bStatus) {
921                         PVR_DPF(PVR_DBG_ERROR,
922                                  "MMU_Alloc: RA_Alloc of VMArena failed");
923                         return bStatus;
924                 }
925         }
926
927         bStatus = _DeferredAllocPagetables(pMMUHeap, *psDevVAddr, uSize);
928
929
930         if (!bStatus) {
931                 PVR_DPF(PVR_DBG_ERROR,
932                          "MMU_Alloc: _DeferredAllocPagetables failed");
933                 if ((uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) == 0)
934                         RA_Free(pMMUHeap->psVMArena, psDevVAddr->uiAddr,
935                                 IMG_FALSE);
936         }
937
938         return bStatus;
939 }
940
941 void MMU_Free(struct MMU_HEAP *pMMUHeap, struct IMG_DEV_VIRTADDR DevVAddr,
942               u32 ui32Size)
943 {
944         PVR_ASSERT(pMMUHeap != NULL);
945
946         if (pMMUHeap == NULL) {
947                 PVR_DPF(PVR_DBG_ERROR, "MMU_Free: invalid parameter");
948                 return;
949         }
950
951         PVR_DPF(PVR_DBG_MESSAGE,
952                  "MMU_Free: mmu=%p, dev_vaddr=%08X", pMMUHeap,
953                  DevVAddr.uiAddr);
954
955         if ((DevVAddr.uiAddr >= pMMUHeap->psDevArena->BaseDevVAddr.uiAddr) &&
956             (DevVAddr.uiAddr + ui32Size <=
957              pMMUHeap->psDevArena->BaseDevVAddr.uiAddr +
958              pMMUHeap->psDevArena->ui32Size)) {
959                 RA_Free(pMMUHeap->psVMArena, DevVAddr.uiAddr, IMG_TRUE);
960                 return;
961         }
962
963         PVR_DPF(PVR_DBG_ERROR,
964                  "MMU_Free: Couldn't find DevVAddr %08X in a DevArena",
965                  DevVAddr.uiAddr);
966 
967         BUG();
968 }
969
970 void MMU_Enable(struct MMU_HEAP *pMMUHeap)
971 {
972         PVR_UNREFERENCED_PARAMETER(pMMUHeap);
973
974 }
975
976 void MMU_Disable(struct MMU_HEAP *pMMUHeap)
977 {
978         PVR_UNREFERENCED_PARAMETER(pMMUHeap);
979
980 }
981
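/*
 * Write a single PTE: translate the PVRSRV_MEM_* flags into SGX PTE flags
 * (read+write maps to no restriction bits), warn if the slot is already
 * valid, bump the page table's valid-PTE count and store the page address
 * together with the flags and the valid bit.
 */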
982 static void MMU_MapPage(struct MMU_HEAP *pMMUHeap,
983             struct IMG_DEV_VIRTADDR DevVAddr,
984             struct IMG_DEV_PHYADDR DevPAddr, u32 ui32MemFlags)
985 {
986         u32 ui32Index;
987         u32 *pui32Tmp;
988         u32 ui32MMUFlags = 0;
989         struct MMU_PT_INFO **ppsPTInfoList;
990
991         if (((PVRSRV_MEM_READ | PVRSRV_MEM_WRITE) & ui32MemFlags) ==
992             (PVRSRV_MEM_READ | PVRSRV_MEM_WRITE))
993                 ui32MMUFlags = 0;
994         else if (PVRSRV_MEM_READ & ui32MemFlags)
995                 ui32MMUFlags |= SGX_MMU_PTE_READONLY;
996         else if (PVRSRV_MEM_WRITE & ui32MemFlags)
997                 ui32MMUFlags |= SGX_MMU_PTE_WRITEONLY;
998
999         if (PVRSRV_MEM_CACHE_CONSISTENT & ui32MemFlags)
1000                 ui32MMUFlags |= SGX_MMU_PTE_CACHECONSISTENT;
1001
1002         if (PVRSRV_MEM_EDM_PROTECT & ui32MemFlags)
1003                 ui32MMUFlags |= SGX_MMU_PTE_EDMPROTECT;
1004
1005         ui32Index = DevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
1006
1007         ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32Index];
1008
1009         ui32Index = (DevVAddr.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
1010
1011         pui32Tmp = (u32 *) ppsPTInfoList[0]->PTPageCpuVAddr;
1012
1013
1014         if (pui32Tmp[ui32Index] & SGX_MMU_PTE_VALID)
1015                 PVR_DPF(PVR_DBG_ERROR,
1016                                 "MMU_MapPage: "
1017                                 "Page is already valid for alloc at "
1018                                 "VAddr:0x%08X PDIdx:%u PTIdx:%u",
1019                          DevVAddr.uiAddr,
1020                          DevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT +
1021                                              SGX_MMU_PT_SHIFT), ui32Index);
1022
1023         PVR_ASSERT((pui32Tmp[ui32Index] & SGX_MMU_PTE_VALID) == 0);
1024
1025         ppsPTInfoList[0]->ui32ValidPTECount++;
1026
1027         pui32Tmp[ui32Index] = (DevPAddr.uiAddr & SGX_MMU_PTE_ADDR_MASK)
1028             | SGX_MMU_PTE_VALID | ui32MMUFlags;
1029 }
1030
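/*
 * Map an array of (not necessarily contiguous) system physical pages,
 * one PTE per SGX_MMU_PAGE_SIZE, starting at DevVAddr.
 */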
1031 void MMU_MapScatter(struct MMU_HEAP *pMMUHeap, struct IMG_DEV_VIRTADDR DevVAddr,
1032                     struct IMG_SYS_PHYADDR *psSysAddr, size_t uSize,
1033                     u32 ui32MemFlags, void *hUniqueTag)
1034 {
1035 #if defined(PDUMP)
1036         struct IMG_DEV_VIRTADDR MapBaseDevVAddr;
1037 #endif
1038         u32 uCount, i;
1039         struct IMG_DEV_PHYADDR DevPAddr;
1040
1041         PVR_ASSERT(pMMUHeap != NULL);
1042
1043 #if defined(PDUMP)
1044         MapBaseDevVAddr = DevVAddr;
1045 #else
1046         PVR_UNREFERENCED_PARAMETER(hUniqueTag);
1047 #endif
1048
1049         for (i = 0, uCount = 0; uCount < uSize;
1050              i++, uCount += SGX_MMU_PAGE_SIZE) {
1051                 struct IMG_SYS_PHYADDR sSysAddr;
1052
1053                 sSysAddr = psSysAddr[i];
1054
1055                 DevPAddr =
1056                     SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysAddr);
1057
1058                 MMU_MapPage(pMMUHeap, DevVAddr, DevPAddr, ui32MemFlags);
1059                 DevVAddr.uiAddr += SGX_MMU_PAGE_SIZE;
1060
1061                 PVR_DPF(PVR_DBG_MESSAGE, "MMU_MapScatter: "
1062                                 "devVAddr=%08X, SysAddr=%08X, size=0x%x/0x%x",
1063                          DevVAddr.uiAddr, sSysAddr.uiAddr, uCount, uSize);
1064         }
1065
1066 #if defined(PDUMP)
1067         MMU_PDumpPageTables(pMMUHeap, MapBaseDevVAddr, uSize, IMG_FALSE,
1068                             hUniqueTag);
1069 #endif
1070 }
1071
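/*
 * Map a physically contiguous range starting at SysPAddr.  With
 * PVRSRV_MEM_DUMMY the physical address is not advanced, so every device
 * page in the range maps to the same backing page.
 */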
1072 void MMU_MapPages(struct MMU_HEAP *pMMUHeap, struct IMG_DEV_VIRTADDR DevVAddr,
1073                   struct IMG_SYS_PHYADDR SysPAddr, size_t uSize,
1074                   u32 ui32MemFlags, void *hUniqueTag)
1075 {
1076         struct IMG_DEV_PHYADDR DevPAddr;
1077 #if defined(PDUMP)
1078         struct IMG_DEV_VIRTADDR MapBaseDevVAddr;
1079 #endif
1080         u32 uCount;
1081         u32 ui32VAdvance = SGX_MMU_PAGE_SIZE;
1082         u32 ui32PAdvance = SGX_MMU_PAGE_SIZE;
1083
1084         PVR_ASSERT(pMMUHeap != NULL);
1085
1086         PVR_DPF(PVR_DBG_MESSAGE, "MMU_MapPages: "
1087                  "mmu=%p, devVAddr=%08X, SysPAddr=%08X, size=0x%x",
1088                  pMMUHeap, DevVAddr.uiAddr, SysPAddr.uiAddr, uSize);
1089
1090 #if defined(PDUMP)
1091         MapBaseDevVAddr = DevVAddr;
1092 #else
1093         PVR_UNREFERENCED_PARAMETER(hUniqueTag);
1094 #endif
1095
1096         DevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, SysPAddr);
1097
1098         if (ui32MemFlags & PVRSRV_MEM_DUMMY)
1099                 ui32PAdvance = 0;
1100
1101         for (uCount = 0; uCount < uSize; uCount += ui32VAdvance) {
1102                 MMU_MapPage(pMMUHeap, DevVAddr, DevPAddr, ui32MemFlags);
1103                 DevVAddr.uiAddr += ui32VAdvance;
1104                 DevPAddr.uiAddr += ui32PAdvance;
1105         }
1106
1107 #if defined(PDUMP)
1108         MMU_PDumpPageTables(pMMUHeap, MapBaseDevVAddr, uSize, IMG_FALSE,
1109                             hUniqueTag);
1110 #endif
1111 }
1112
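/*
 * Map a CPU allocation (given either by kernel virtual address or by OS
 * memory handle) into the device at MapBaseDevVAddr, resolving each page's
 * CPU physical address and converting it to a device physical address.
 * PVRSRV_MEM_DUMMY again maps every device page onto the same backing page.
 */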
1113 void MMU_MapShadow(struct MMU_HEAP *pMMUHeap,
1114               struct IMG_DEV_VIRTADDR MapBaseDevVAddr,
1115               size_t uByteSize, void *CpuVAddr, void *hOSMemHandle,
1116               struct IMG_DEV_VIRTADDR *pDevVAddr, u32 ui32MemFlags,
1117               void *hUniqueTag)
1118 {
1119         u32 i;
1120         u32 uOffset = 0;
1121         struct IMG_DEV_VIRTADDR MapDevVAddr;
1122         u32 ui32VAdvance = SGX_MMU_PAGE_SIZE;
1123         u32 ui32PAdvance = SGX_MMU_PAGE_SIZE;
1124
1125 #if !defined(PDUMP)
1126         PVR_UNREFERENCED_PARAMETER(hUniqueTag);
1127 #endif
1128
1129         PVR_DPF(PVR_DBG_MESSAGE,
1130                  "MMU_MapShadow: %08X, 0x%x, %p",
1131                  MapBaseDevVAddr.uiAddr, uByteSize, CpuVAddr);
1132
1133         PVR_ASSERT(((u32) CpuVAddr & (SGX_MMU_PAGE_SIZE - 1)) == 0);
1134         PVR_ASSERT(((u32) uByteSize & (SGX_MMU_PAGE_SIZE - 1)) == 0);
1135         pDevVAddr->uiAddr = MapBaseDevVAddr.uiAddr;
1136
1137         if (ui32MemFlags & PVRSRV_MEM_DUMMY)
1138                 ui32PAdvance = 0;
1139
1140         MapDevVAddr = MapBaseDevVAddr;
1141         for (i = 0; i < uByteSize; i += ui32VAdvance) {
1142                 struct IMG_CPU_PHYADDR CpuPAddr;
1143                 struct IMG_DEV_PHYADDR DevPAddr;
1144
1145                 if (CpuVAddr)
1146                         CpuPAddr =
1147                             OSMapLinToCPUPhys((void *)((u32)CpuVAddr +
1148                                                                     uOffset));
1149                 else
1150                         CpuPAddr = OSMemHandleToCpuPAddr(hOSMemHandle, uOffset);
1151                 DevPAddr =
1152                     SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, CpuPAddr);
1153
1154                 PVR_DPF(PVR_DBG_MESSAGE, "0x%x: CpuVAddr=%08X, "
1155                                 "CpuPAddr=%08X, DevVAddr=%08X, DevPAddr=%08X",
1156                          uOffset, (u32)CpuVAddr + uOffset, CpuPAddr.uiAddr,
1157                          MapDevVAddr.uiAddr, DevPAddr.uiAddr);
1158
1159                 MMU_MapPage(pMMUHeap, MapDevVAddr, DevPAddr, ui32MemFlags);
1160
1161                 MapDevVAddr.uiAddr += ui32VAdvance;
1162                 uOffset += ui32PAdvance;
1163         }
1164
1165 #if defined(PDUMP)
1166         MMU_PDumpPageTables(pMMUHeap, MapBaseDevVAddr, uByteSize, IMG_FALSE,
1167                             hUniqueTag);
1168 #endif
1169 }
1170
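/*
 * Invalidate the PTEs for ui32PageCount pages starting at sDevVAddr.
 * Unlike MMU_UnmapPagesAndFreePTs() this never frees the page tables
 * themselves; it only flags the PT cache for invalidation.
 */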
1171 void MMU_UnmapPages(struct MMU_HEAP *psMMUHeap,
1172                    struct IMG_DEV_VIRTADDR sDevVAddr, u32 ui32PageCount,
1173                    void *hUniqueTag)
1174 {
1175         u32 uPageSize = HOST_PAGESIZE();
1176         struct IMG_DEV_VIRTADDR sTmpDevVAddr;
1177         u32 i;
1178         u32 ui32PDIndex;
1179         u32 ui32PTIndex;
1180         u32 *pui32Tmp;
1181
1182 #if !defined(PDUMP)
1183         PVR_UNREFERENCED_PARAMETER(hUniqueTag);
1184 #endif
1185
1186         sTmpDevVAddr = sDevVAddr;
1187
1188         for (i = 0; i < ui32PageCount; i++) {
1189                 struct MMU_PT_INFO **ppsPTInfoList;
1190
1191                 ui32PDIndex = sTmpDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT +
1192                                                       SGX_MMU_PT_SHIFT);
1193
1194                 ppsPTInfoList = &psMMUHeap->psMMUContext->
1195                                                 apsPTInfoList[ui32PDIndex];
1196
1197                 ui32PTIndex = (sTmpDevVAddr.uiAddr & SGX_MMU_PT_MASK) >>
1198                                                         SGX_MMU_PAGE_SHIFT;
1199
1200                 if (!ppsPTInfoList[0]) {
1201                         PVR_DPF(PVR_DBG_ERROR,
1202                                 "MMU_UnmapPages: "
1203                                 "ERROR Invalid PT for alloc at VAddr:0x%08X "
1204                                 "(VAddrIni:0x%08X AllocPage:%u) PDIdx:%u "
1205                                 "PTIdx:%u",
1206                                  sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr, i,
1207                                  ui32PDIndex, ui32PTIndex);
1208
1209                         sTmpDevVAddr.uiAddr += uPageSize;
1210
1211                         continue;
1212                 }
1213
1214                 pui32Tmp = (u32 *)ppsPTInfoList[0]->PTPageCpuVAddr;
1215
1216                 if (pui32Tmp[ui32PTIndex] & SGX_MMU_PTE_VALID)
1217                         ppsPTInfoList[0]->ui32ValidPTECount--;
1218                 else
1219                         PVR_DPF(PVR_DBG_ERROR,
1220                                 "MMU_UnmapPages: Page is already invalid "
1221                                 "for alloc at VAddr:0x%08X "
1222                                 "(VAddrIni:0x%08X AllocPage:%u) "
1223                                 "PDIdx:%u PTIdx:%u",
1224                                  sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr, i,
1225                                  ui32PDIndex, ui32PTIndex);
1226
1227                 PVR_ASSERT((s32) ppsPTInfoList[0]->ui32ValidPTECount >= 0);
1228
1229                 pui32Tmp[ui32PTIndex] = 0;
1230
1231                 sTmpDevVAddr.uiAddr += uPageSize;
1232         }
1233
1234         MMU_InvalidatePageTableCache(psMMUHeap->psMMUContext->psDevInfo);
1235
1236 #if defined(PDUMP)
1237         MMU_PDumpPageTables(psMMUHeap, sDevVAddr, uPageSize * ui32PageCount,
1238                             IMG_TRUE, hUniqueTag);
1239 #endif
1240 }
1241
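/*
 * Look up the device physical address backing sDevVPageAddr by walking the
 * context's page tables; returns 0 if no page table covers the address.
 */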
1242 struct IMG_DEV_PHYADDR MMU_GetPhysPageAddr(struct MMU_HEAP *pMMUHeap,
1243                                         struct IMG_DEV_VIRTADDR sDevVPageAddr)
1244 {
1245         u32 *pui32PageTable;
1246         u32 ui32Index;
1247         struct IMG_DEV_PHYADDR sDevPAddr;
1248         struct MMU_PT_INFO **ppsPTInfoList;
1249
1250         ui32Index = sDevVPageAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT +
1251                                              SGX_MMU_PT_SHIFT);
1252
1253         ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32Index];
1254         if (!ppsPTInfoList[0]) {
1255                 PVR_DPF(PVR_DBG_ERROR,
1256                          "MMU_GetPhysPageAddr: Not mapped in at 0x%08x",
1257                          sDevVPageAddr.uiAddr);
1258                 sDevPAddr.uiAddr = 0;
1259                 return sDevPAddr;
1260         }
1261
1262         ui32Index =
1263             (sDevVPageAddr.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
1264
1265         pui32PageTable = (u32 *) ppsPTInfoList[0]->PTPageCpuVAddr;
1266
1267         sDevPAddr.uiAddr = pui32PageTable[ui32Index];
1268
1269         sDevPAddr.uiAddr &= SGX_MMU_PTE_ADDR_MASK;
1270
1271         return sDevPAddr;
1272 }
1273
1274 struct IMG_DEV_PHYADDR MMU_GetPDDevPAddr(struct MMU_CONTEXT *pMMUContext)
1275 {
1276         return pMMUContext->sPDDevPAddr;
1277 }
1278
1279 enum PVRSRV_ERROR SGXGetPhysPageAddrKM(void *hDevMemHeap,
1280                                       struct IMG_DEV_VIRTADDR sDevVAddr,
1281                                       struct IMG_DEV_PHYADDR *pDevPAddr,
1282                                       struct IMG_CPU_PHYADDR *pCpuPAddr)
1283 {
1284         struct MMU_HEAP *pMMUHeap;
1285         struct IMG_DEV_PHYADDR DevPAddr;
1286
1287         pMMUHeap = (struct MMU_HEAP *)BM_GetMMUHeap(hDevMemHeap);
1288
1289         DevPAddr = MMU_GetPhysPageAddr(pMMUHeap, sDevVAddr);
1290         pCpuPAddr->uiAddr = DevPAddr.uiAddr;
1291         pDevPAddr->uiAddr = DevPAddr.uiAddr;
1292
1293         return (pDevPAddr->uiAddr != 0) ?
1294                 PVRSRV_OK : PVRSRV_ERROR_INVALID_PARAMS;
1295 }
1296
1297 enum PVRSRV_ERROR SGXGetMMUPDAddrKM(void *hDevCookie,
1298                                void *hDevMemContext,
1299                                struct IMG_DEV_PHYADDR *psPDDevPAddr)
1300 {
1301         if (!hDevCookie || !hDevMemContext || !psPDDevPAddr)
1302                 return PVRSRV_ERROR_INVALID_PARAMS;
1303
1304         *psPDDevPAddr =
1305             ((struct BM_CONTEXT *)hDevMemContext)->psMMUContext->sPDDevPAddr;
1306
1307         return PVRSRV_OK;
1308 }
1309
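/*
 * Allocate the three contiguous pages used while resetting the BIF: a dummy
 * page directory, a dummy page table and a data page filled with 0xDB.
 * Their device physical addresses and kernel mappings are stored in
 * psDevInfo, presumably for use by the BIF reset code elsewhere in the
 * driver.
 */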
1310 enum PVRSRV_ERROR MMU_BIFResetPDAlloc(struct PVRSRV_SGXDEV_INFO *psDevInfo)
1311 {
1312         enum PVRSRV_ERROR eError;
1313         struct SYS_DATA *psSysData;
1314         struct RA_ARENA *psLocalDevMemArena;
1315         void *hOSMemHandle = NULL;
1316         u8 *pui8MemBlock = NULL;
1317         struct IMG_SYS_PHYADDR sMemBlockSysPAddr;
1318         struct IMG_CPU_PHYADDR sMemBlockCpuPAddr;
1319
1320         eError = SysAcquireData(&psSysData);
1321         if (eError != PVRSRV_OK) {
1322                 PVR_DPF(PVR_DBG_ERROR,
1323                    "MMU_BIFResetPDAlloc: ERROR call to SysAcquireData failed");
1324                 return eError;
1325         }
1326
1327         psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];
1328
1329         if (psLocalDevMemArena == NULL) {
1330
1331                 eError =
1332                     OSAllocPages(PVRSRV_HAP_WRITECOMBINE |
1333                                  PVRSRV_HAP_KERNEL_ONLY, 3 * SGX_MMU_PAGE_SIZE,
1334                                  SGX_MMU_PAGE_SIZE, (void **)&pui8MemBlock,
1335                                  &hOSMemHandle);
1336                 if (eError != PVRSRV_OK) {
1337                         PVR_DPF(PVR_DBG_ERROR, "MMU_BIFResetPDAlloc: "
1338                                         "ERROR call to OSAllocPages failed");
1339                         return eError;
1340                 }
1341                 sMemBlockCpuPAddr = OSMapLinToCPUPhys(pui8MemBlock);
1342         } else {
1343                 if (RA_Alloc(psLocalDevMemArena, 3 * SGX_MMU_PAGE_SIZE,
1344                              NULL, 0, SGX_MMU_PAGE_SIZE,
1345                              &(sMemBlockSysPAddr.uiAddr)) != IMG_TRUE) {
1346                         PVR_DPF(PVR_DBG_ERROR, "MMU_BIFResetPDAlloc: "
1347                                         "ERROR call to RA_Alloc failed");
1348                         return PVRSRV_ERROR_OUT_OF_MEMORY;
1349                 }
1350
1351                 sMemBlockCpuPAddr = SysSysPAddrToCpuPAddr(sMemBlockSysPAddr);
1352                 pui8MemBlock = (void __force *)OSMapPhysToLin(sMemBlockCpuPAddr,
1353                                               SGX_MMU_PAGE_SIZE * 3,
1354                                               PVRSRV_HAP_WRITECOMBINE |
1355                                               PVRSRV_HAP_KERNEL_ONLY,
1356                                               &hOSMemHandle);
1357                 if (!pui8MemBlock) {
1358                         PVR_DPF(PVR_DBG_ERROR, "MMU_BIFResetPDAlloc: "
1359                                         "ERROR failed to map page tables");
1360                         return PVRSRV_ERROR_BAD_MAPPING;
1361                 }
1362         }
1363
1364         psDevInfo->hBIFResetPDOSMemHandle = hOSMemHandle;
1365         psDevInfo->sBIFResetPDDevPAddr =
1366             SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sMemBlockCpuPAddr);
1367         psDevInfo->sBIFResetPTDevPAddr.uiAddr =
1368             psDevInfo->sBIFResetPDDevPAddr.uiAddr + SGX_MMU_PAGE_SIZE;
1369         psDevInfo->sBIFResetPageDevPAddr.uiAddr =
1370             psDevInfo->sBIFResetPTDevPAddr.uiAddr + SGX_MMU_PAGE_SIZE;
1371         psDevInfo->pui32BIFResetPD = (u32 *) pui8MemBlock;
1372         psDevInfo->pui32BIFResetPT =
1373             (u32 *) (pui8MemBlock + SGX_MMU_PAGE_SIZE);
1374
1375         OSMemSet(psDevInfo->pui32BIFResetPD, 0, SGX_MMU_PAGE_SIZE);
1376         OSMemSet(psDevInfo->pui32BIFResetPT, 0, SGX_MMU_PAGE_SIZE);
1377
1378         OSMemSet(pui8MemBlock + (2 * SGX_MMU_PAGE_SIZE), 0xDB,
1379                  SGX_MMU_PAGE_SIZE);
1380
1381         return PVRSRV_OK;
1382 }
1383
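/* Release the BIF reset pages allocated by MMU_BIFResetPDAlloc(). */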
1384 void MMU_BIFResetPDFree(struct PVRSRV_SGXDEV_INFO *psDevInfo)
1385 {
1386         enum PVRSRV_ERROR eError;
1387         struct SYS_DATA *psSysData;
1388         struct RA_ARENA *psLocalDevMemArena;
1389         struct IMG_SYS_PHYADDR sPDSysPAddr;
1390
1391         eError = SysAcquireData(&psSysData);
1392         if (eError != PVRSRV_OK) {
1393                 PVR_DPF(PVR_DBG_ERROR, "MMU_BIFResetPDFree: "
1394                                 "ERROR call to SysAcquireData failed");
1395                 return;
1396         }
1397
1398         psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];
1399
1400         if (psLocalDevMemArena == NULL) {
1401                 OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
1402                             3 * SGX_MMU_PAGE_SIZE,
1403                             psDevInfo->pui32BIFResetPD,
1404                             psDevInfo->hBIFResetPDOSMemHandle);
1405         } else {
1406                 OSUnMapPhysToLin((void __force __iomem *)
1407                                         psDevInfo->pui32BIFResetPD,
1408                                  3 * SGX_MMU_PAGE_SIZE,
1409                                  PVRSRV_HAP_WRITECOMBINE |
1410                                         PVRSRV_HAP_KERNEL_ONLY,
1411                                  psDevInfo->hBIFResetPDOSMemHandle);
1412
1413                 sPDSysPAddr =
1414                     SysDevPAddrToSysPAddr(PVRSRV_DEVICE_TYPE_SGX,
1415                                           psDevInfo->sBIFResetPDDevPAddr);
1416                 RA_Free(psLocalDevMemArena, sPDSysPAddr.uiAddr, IMG_FALSE);
1417         }
1418 }
1419
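/* Return the page directory device physical address as a plain u32. */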
1420 u32 mmu_get_page_dir(struct MMU_CONTEXT *psMMUContext)
1421 {
1422         return psMMUContext->sPDDevPAddr.uiAddr;
1423 }
1424
1425
1426 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_PVR_DEBUG)
1427
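/*
 * HW recovery memory dump helpers (debugfs only): dump the page directory
 * the BIF is currently using, every valid page table it points at and
 * every valid page those tables map, via hwrec_mem_print() and
 * hwrec_mem_write().
 */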
1428 static int
1429 hwrec_mem_dump_page(u32 dev_p_addr)
1430 {
1431         void __iomem *page;
1432
1433         page = ioremap_nocache(dev_p_addr, SGX_MMU_PAGE_SIZE);
1434         if (!page)
1435                 return -EFAULT;
1436
1437         /* Dump the raw contents of this single page. */
1438         hwrec_mem_print("<PAGE PA:0x%08X>\n", dev_p_addr);
1439         hwrec_mem_write((void __force *) page, SGX_MMU_PAGE_SIZE);
1440         hwrec_mem_print("</PAGE>\n");
1441
1442         iounmap(page);
1443
1444         return 0;
1445 }
1446
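/*
 * Dump one page table, then every valid page it maps.  Note that the PDE
 * valid bit and address mask are reused for the PTEs here, which appears
 * to assume the PTE layout matches the PDE layout at those bit positions.
 */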
1447 static int
1448 hwrec_mem_dump_table(u32 dev_p_addr)
1449 {
1450         void __iomem *pt;
1451         u32 i;
1452
1453         pt = ioremap_nocache(dev_p_addr, SGX_MMU_PAGE_SIZE);
1454         if (!pt)
1455                 return -EFAULT;
1456
1457         /* Dump all 1024 entries of this page table. */
1458         hwrec_mem_print("<TABLE PA:0x%08X>\n", dev_p_addr);
1459         for (i = 0; i < 1024; i++)
1460                 hwrec_mem_print("0x%08X\n", readl(pt + 4 * i));
1461         hwrec_mem_print("</TABLE>\n");
1462
1463         for (i = 0; i < 1024; i++) {
1464                 u32 addr = readl(pt + 4 * i);
1465
1466                 if (addr & SGX_MMU_PDE_VALID)
1467                         hwrec_mem_dump_page(addr & SGX_MMU_PDE_ADDR_MASK);
1468         }
1469
1470         iounmap(pt);
1471
1472         return 0;
1473 }
1474
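/* Dump the page directory itself, then each valid page table it points at. */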
1475 static int
1476 hwrec_mem_dump_dir(struct MMU_CONTEXT *context)
1477 {
1478         void __iomem *pd = (void __force __iomem *) context->pvPDCpuVAddr;
1479
1480         int i;
1481
1482         hwrec_mem_print("<DIR PA:0x%08X>\n", context->sPDDevPAddr.uiAddr);
1483
1484         for (i = 0; i < 1024; i++)
1485                 hwrec_mem_print("0x%08X\n", readl(pd + 4 * i));
1486
1487         hwrec_mem_print("</DIR>\n");
1488
1489         for (i = 0; i < 1024; i++) {
1490                 u32 addr = readl(pd + 4 * i);
1491
1492                 if (addr & SGX_MMU_PDE_VALID)
1493                         hwrec_mem_dump_table(addr & SGX_MMU_PDE_ADDR_MASK);
1494         }
1495
1496         return 0;
1497 }
1498
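/*
 * Entry point for the HW recovery dump: read the page directory base that
 * the BIF is currently using and walk the context list for a matching MMU
 * context before dumping it.
 */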
1499 int
1500 mmu_hwrec_mem_dump(struct PVRSRV_SGXDEV_INFO *psDevInfo)
1501 {
1502         struct MMU_CONTEXT *context = psDevInfo->pvMMUContextList;
1503         u32 page_dir;
1504
1505         page_dir = readl(psDevInfo->pvRegsBaseKM + EUR_CR_BIF_DIR_LIST_BASE0);
1506
1507         while (context) {
1508                 if (context->sPDDevPAddr.uiAddr == page_dir)
1509                         break;
1510
1511                 context = context->psNext;
1512         }
1513
1514         if (!context) {
1515                 pr_err("Unable to find matching context for page directory 0x%08X\n",
1516                        page_dir);
1517                 return -EFAULT;
1518         }
1519
1520         return hwrec_mem_dump_dir(context);
1521 }
1522
1523 #endif /* CONFIG_DEBUG_FS && CONFIG_PVR_DEBUG */