fixes for bc_cat
[sgx.git] / pvr / mmu.c
1 /**********************************************************************
2  *
3  * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope it will be useful but, except
10  * as otherwise stated in writing, without any warranty; without even the
11  * implied warranty of merchantability or fitness for a particular purpose.
12  * See the GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License along with
15  * this program; if not, write to the Free Software Foundation, Inc.,
16  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17  *
18  * The full GNU General Public License is included in this distribution in
19  * the file called "COPYING".
20  *
21  * Contact Information:
22  * Imagination Technologies Ltd. <gpl-support@imgtec.com>
23  * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
24  *
25  ******************************************************************************/
26
27 #include "sgxdefs.h"
28 #include "sgxmmu.h"
29 #include "services_headers.h"
30 #include "buffer_manager.h"
31 #include "hash.h"
32 #include "ra.h"
33 #include "pvr_pdump.h"
34 #include "sgxapi_km.h"
35 #include "sgx_bridge_km.h"
36 #include "sgxinfo.h"
37 #include "sgxinfokm.h"
38 #include "mmu.h"
39
40 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_PVR_DEBUG)
41 #include "pvr_debugfs.h"
42 #include <linux/io.h>
43 #endif
44
45 #define UINT32_MAX_VALUE        0xFFFFFFFFUL
46
/* Bookkeeping for a single hardware page-table page. */
struct MMU_PT_INFO {
        /* OS memory handle backing the page-table page */
        void *hPTPageOSMemHandle;
        /* kernel CPU-virtual mapping of the page-table page */
        void *PTPageCpuVAddr;
        /* number of valid PTEs currently set in this page table */
        u32 ui32ValidPTECount;
};
52
/* One SGX MMU context: owns a page directory and its page tables. */
struct MMU_CONTEXT {
        /* device this context belongs to */
        struct PVRSRV_DEVICE_NODE *psDeviceNode;
        /* kernel CPU-virtual mapping of the page directory */
        void *pvPDCpuVAddr;
        /* device-physical address of the page directory */
        struct IMG_DEV_PHYADDR sPDDevPAddr;
        /* OS memory handle backing the page directory */
        void *hPDOSMemHandle;
        /* one slot per page-directory entry; NULL until a page table is
         * allocated for that slot (1024 slots — presumably equals
         * SGX_MMU_PD_SIZE; confirm against sgxmmu.h) */
        struct MMU_PT_INFO *apsPTInfoList[1024];
        struct PVRSRV_SGXDEV_INFO *psDevInfo;
        /* next context in the device-wide singly linked list
         * (head is psDevInfo->pvMMUContextList) */
        struct MMU_CONTEXT *psNext;
};
62
/* Bookkeeping for one device-virtual heap within an MMU context. */
struct MMU_HEAP {
        /* owning MMU context */
        struct MMU_CONTEXT *psMMUContext;

        /* index of the heap's first PTE, relative to the start of the
         * page directory's address space */
        u32 ui32PTBaseIndex;
        /* number of page tables spanning the heap */
        u32 ui32PTPageCount;
        /* number of PTEs covering the heap (heap size in device pages) */
        u32 ui32PTEntryCount;

        /* arena used for device-virtual address allocation */
        struct RA_ARENA *psVMArena;

        /* heap descriptor (base device-virtual address, size, type) */
        struct DEV_ARENA_DESCRIPTOR *psDevArena;
};
74
75 #define PAGE_TEST                                       0
76
77
78 void MMU_InvalidateDirectoryCache(struct PVRSRV_SGXDEV_INFO *psDevInfo)
79 {
80         psDevInfo->ui32CacheControl |= SGX_BIF_INVALIDATE_PDCACHE;
81 }
82
83 static void MMU_InvalidatePageTableCache(struct PVRSRV_SGXDEV_INFO *psDevInfo)
84 {
85         psDevInfo->ui32CacheControl |= SGX_BIF_INVALIDATE_PTCACHE;
86 }
87
88 static IMG_BOOL _AllocPageTables(struct MMU_HEAP *pMMUHeap)
89 {
90         PVR_DPF(PVR_DBG_MESSAGE, "_AllocPageTables()");
91
92         PVR_ASSERT(pMMUHeap != NULL);
93         PVR_ASSERT(HOST_PAGESIZE() == SGX_MMU_PAGE_SIZE);
94
95         if (pMMUHeap == NULL) {
96                 PVR_DPF(PVR_DBG_ERROR, "_AllocPageTables: invalid parameter");
97                 return IMG_FALSE;
98         }
99
100         pMMUHeap->ui32PTEntryCount =
101             pMMUHeap->psDevArena->ui32Size >> SGX_MMU_PAGE_SHIFT;
102
103         pMMUHeap->ui32PTBaseIndex =
104             (pMMUHeap->psDevArena->BaseDevVAddr.
105              uiAddr & (SGX_MMU_PD_MASK | SGX_MMU_PT_MASK)) >>
106                                                         SGX_MMU_PAGE_SHIFT;
107
108         pMMUHeap->ui32PTPageCount =
109             (pMMUHeap->ui32PTEntryCount + SGX_MMU_PT_SIZE - 1) >>
110                                                         SGX_MMU_PT_SHIFT;
111
112         return IMG_TRUE;
113 }
114
/*
 * Free one of the heap's page tables. ui32PTIndex is relative to the
 * heap's first page-directory slot. Clears the corresponding PD
 * entry/entries, zeroes the PTEs, releases the page-table page and
 * frees the MMU_PT_INFO record. Adjusts ui32PTEntryCount as it goes.
 */
static void _DeferredFreePageTable(struct MMU_HEAP *pMMUHeap, u32 ui32PTIndex)
{
        u32 *pui32PDEntry;
        u32 i;
        u32 ui32PDIndex;
        struct SYS_DATA *psSysData;
        struct MMU_PT_INFO **ppsPTInfoList;

        if (SysAcquireData(&psSysData) != PVRSRV_OK) {
                PVR_DPF(PVR_DBG_ERROR, "_DeferredFreePageTables: "
                                        "ERROR call to SysAcquireData failed");
                return;
        }

        /* PD slot of the heap's first page table; ui32PTIndex offsets
         * from here. */
        ui32PDIndex =
            pMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT +
                                                          SGX_MMU_PT_SHIFT);

        ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];

        {
                /* The table must be absent or hold no valid mappings. */
                PVR_ASSERT(ppsPTInfoList[ui32PTIndex] == NULL ||
                           ppsPTInfoList[ui32PTIndex]->ui32ValidPTECount ==
                                                                         0);
        }

        PDUMPCOMMENT("Free page table (page count == %08X)",
                     pMMUHeap->ui32PTPageCount);
        if (ppsPTInfoList[ui32PTIndex]
            && ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr)
                PDUMPFREEPAGETABLE(ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr);

        /* Clear the PD entry pointing at this table. Shared heaps are
         * mirrored in every context's page directory, so walk the whole
         * context list; per-context/kernel heaps touch only the owner. */
        switch (pMMUHeap->psDevArena->DevMemHeapType) {
        case DEVICE_MEMORY_HEAP_SHARED:
        case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
                {
                        struct MMU_CONTEXT *psMMUContext =
                          (struct MMU_CONTEXT *)
                            pMMUHeap->psMMUContext->psDevInfo->pvMMUContextList;

                        while (psMMUContext) {
                                pui32PDEntry =
                                    (u32 *) psMMUContext->pvPDCpuVAddr;
                                pui32PDEntry += ui32PDIndex;
                                pui32PDEntry[ui32PTIndex] = 0;
                                PDUMPPAGETABLE((void *) &pui32PDEntry
                                               [ui32PTIndex],
                                               sizeof(u32), IMG_FALSE,
                                               PDUMP_PT_UNIQUETAG,
                                               PDUMP_PT_UNIQUETAG);
                                psMMUContext = psMMUContext->psNext;
                        }
                        break;
                }
        case DEVICE_MEMORY_HEAP_PERCONTEXT:
        case DEVICE_MEMORY_HEAP_KERNEL:
                {

                        pui32PDEntry =
                            (u32 *) pMMUHeap->psMMUContext->pvPDCpuVAddr;
                        pui32PDEntry += ui32PDIndex;
                        pui32PDEntry[ui32PTIndex] = 0;
                        PDUMPPAGETABLE((void *) &pui32PDEntry[ui32PTIndex],
                                       sizeof(u32), IMG_FALSE,
                                       PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
                        break;
                }
        default:
                {
                        PVR_DPF(PVR_DBG_ERROR,
                        "_DeferredFreePagetable: ERROR invalid heap type");
                        return;
                }
        }

        if (ppsPTInfoList[ui32PTIndex] != NULL) {
                if (ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr != NULL) {
                        u32 *pui32Tmp;

                        pui32Tmp =
                            (u32 *) ppsPTInfoList[ui32PTIndex]->
                            PTPageCpuVAddr;

                        /* Zero the PTEs before the page is released; i
                         * ends up as the number of entries zeroed. */
                        for (i = 0;
                             (i < pMMUHeap->ui32PTEntryCount) && (i < 1024);
                             i++)
                                pui32Tmp[i] = 0;

                        /* Release the page the same way it was obtained:
                         * OS pages when there is no local device-memory
                         * arena, otherwise unmap and return to the arena. */
                        if (pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->
                            psLocalDevMemArena == NULL) {
                                OSFreePages(PVRSRV_HAP_WRITECOMBINE |
                                            PVRSRV_HAP_KERNEL_ONLY,
                                            SGX_MMU_PAGE_SIZE,
                                            ppsPTInfoList[ui32PTIndex]->
                                                    PTPageCpuVAddr,
                                            ppsPTInfoList[ui32PTIndex]->
                                                    hPTPageOSMemHandle);
                        } else {
                                struct IMG_SYS_PHYADDR sSysPAddr;
                                struct IMG_CPU_PHYADDR sCpuPAddr;

                                /* Recover the system-physical address
                                 * before tearing down the CPU mapping. */
                                sCpuPAddr =
                                    OSMapLinToCPUPhys(ppsPTInfoList
                                                      [ui32PTIndex]->
                                                      PTPageCpuVAddr);
                                sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);

                                OSUnMapPhysToLin((void __force __iomem *)
                                                   ppsPTInfoList[ui32PTIndex]->
                                                        PTPageCpuVAddr,
                                                 SGX_MMU_PAGE_SIZE,
                                                 PVRSRV_HAP_WRITECOMBINE |
                                                         PVRSRV_HAP_KERNEL_ONLY,
                                                 ppsPTInfoList[ui32PTIndex]->
                                                         hPTPageOSMemHandle);

                                RA_Free(pMMUHeap->psDevArena->
                                                psDeviceMemoryHeapInfo->
                                                        psLocalDevMemArena,
                                        sSysPAddr.uiAddr, IMG_FALSE);
                        }

                        /* NOTE(review): subtracts the zeroed-entry count,
                         * not a fixed table size — relies on the loop
                         * bound above matching this table's share of
                         * ui32PTEntryCount; confirm for partial tables. */
                        pMMUHeap->ui32PTEntryCount -= i;
                } else {
                        /* No CPU mapping: account a full table's worth. */
                        pMMUHeap->ui32PTEntryCount -= 1024;
                }

                OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
                          sizeof(struct MMU_PT_INFO),
                          ppsPTInfoList[ui32PTIndex], NULL);
                ppsPTInfoList[ui32PTIndex] = NULL;
        } else {
                /* Table never materialised; still account its entries. */
                pMMUHeap->ui32PTEntryCount -= 1024;
        }

        PDUMPCOMMENT("Finished free page table (page count == %08X)",
                     pMMUHeap->ui32PTPageCount);
}
253
254 static void _DeferredFreePageTables(struct MMU_HEAP *pMMUHeap)
255 {
256         u32 i;
257
258         for (i = 0; i < pMMUHeap->ui32PTPageCount; i++)
259                 _DeferredFreePageTable(pMMUHeap, i);
260         MMU_InvalidateDirectoryCache(pMMUHeap->psMMUContext->psDevInfo);
261 }
262
/*
 * Ensure page tables exist for the device-virtual range
 * [DevVAddr, DevVAddr + ui32Size). For each page-directory slot in the
 * range, lazily allocate a MMU_PT_INFO record and a zeroed page-table
 * page (from OS pages or the local device-memory arena), then point the
 * relevant page-directory entries at it. Returns IMG_FALSE on any
 * allocation/mapping failure (already-created tables are not rolled
 * back here — presumably freed later via _DeferredFreePageTables).
 */
static IMG_BOOL _DeferredAllocPagetables(struct MMU_HEAP *pMMUHeap,
                                struct IMG_DEV_VIRTADDR DevVAddr, u32 ui32Size)
{
        u32 ui32PTPageCount;
        u32 ui32PDIndex;
        u32 i;
        u32 *pui32PDEntry;
        struct MMU_PT_INFO **ppsPTInfoList;
        struct SYS_DATA *psSysData;
        struct IMG_DEV_VIRTADDR sHighDevVAddr;

        PVR_ASSERT(DevVAddr.uiAddr < (1 << SGX_FEATURE_ADDRESS_SPACE_SIZE));

        if (SysAcquireData(&psSysData) != PVRSRV_OK)
                return IMG_FALSE;

        /* First page-directory slot covered by the range. */
        ui32PDIndex =
            DevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);

        /* Round the end address up to a page-table boundary, clamping
         * at 2^32 - 1 so the addition cannot wrap. */
        if ((UINT32_MAX_VALUE - DevVAddr.uiAddr) <
            (ui32Size + (1 << (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT)) - 1)) {

                sHighDevVAddr.uiAddr = UINT32_MAX_VALUE;
        } else {
                sHighDevVAddr.uiAddr = DevVAddr.uiAddr + ui32Size +
                                        (1 << (SGX_MMU_PAGE_SHIFT +
                                               SGX_MMU_PT_SHIFT)) - 1;
        }

        /* Number of page tables needed to span the range. */
        ui32PTPageCount =
            sHighDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);

        ui32PTPageCount -= ui32PDIndex;

        pui32PDEntry = (u32 *) pMMUHeap->psMMUContext->pvPDCpuVAddr;
        pui32PDEntry += ui32PDIndex;

        ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];

        PDUMPCOMMENT("Alloc page table (page count == %08X)", ui32PTPageCount);
        PDUMPCOMMENT("Page directory mods (page count == %08X)",
                     ui32PTPageCount);

        for (i = 0; i < ui32PTPageCount; i++) {
                /* Lazily create the bookkeeping record for this slot. */
                if (ppsPTInfoList[i] == NULL) {
                        if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
                                   sizeof(struct MMU_PT_INFO),
                                   (void **) &ppsPTInfoList[i], NULL)
                                        != PVRSRV_OK) {
                                PVR_DPF(PVR_DBG_ERROR,
                                        "_DeferredAllocPagetables: "
                                        "ERROR call to OSAllocMem failed");
                                return IMG_FALSE;
                        }
                        OSMemSet(ppsPTInfoList[i], 0,
                                 sizeof(struct MMU_PT_INFO));
                }

                /* No backing page yet: allocate and wire one up. */
                if (ppsPTInfoList[i]->hPTPageOSMemHandle == NULL &&
                    ppsPTInfoList[i]->PTPageCpuVAddr == NULL) {
                        struct IMG_CPU_PHYADDR sCpuPAddr;
                        struct IMG_DEV_PHYADDR sDevPAddr;

                        /* PDE must be empty if no table was allocated. */
                        PVR_ASSERT(pui32PDEntry[i] == 0);

                        if (pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->
                            psLocalDevMemArena == NULL) {
                                /* OS-page path. */
                                if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE |
                                                     PVRSRV_HAP_KERNEL_ONLY,
                                             SGX_MMU_PAGE_SIZE,
                                             SGX_MMU_PAGE_SIZE,
                                             (void **)&ppsPTInfoList[i]->
                                                PTPageCpuVAddr,
                                             &ppsPTInfoList[i]->
                                                hPTPageOSMemHandle) !=
                                    PVRSRV_OK) {
                                        PVR_DPF(PVR_DBG_ERROR,
                                           "_DeferredAllocPagetables: "
                                           "ERROR call to OSAllocPages failed");
                                        return IMG_FALSE;
                                }

                                /* Derive the CPU-physical address from
                                 * whichever of the two results is valid. */
                                if (ppsPTInfoList[i]->PTPageCpuVAddr) {
                                        sCpuPAddr =
                                            OSMapLinToCPUPhys(ppsPTInfoList[i]->
                                                              PTPageCpuVAddr);
                                } else {
                                        sCpuPAddr =
                                            OSMemHandleToCpuPAddr(
                                                ppsPTInfoList[i]->
                                                          hPTPageOSMemHandle,
                                                0);
                                }
                                sDevPAddr =
                                    SysCpuPAddrToDevPAddr
                                            (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
                        } else {
                                /* Local device-memory arena path. */
                                struct IMG_SYS_PHYADDR sSysPAddr;

                                if (RA_Alloc(pMMUHeap->psDevArena->
                                     psDeviceMemoryHeapInfo->psLocalDevMemArena,
                                     SGX_MMU_PAGE_SIZE, NULL, 0,
                                     SGX_MMU_PAGE_SIZE,
                                     &(sSysPAddr.uiAddr)) != IMG_TRUE) {
                                        PVR_DPF(PVR_DBG_ERROR,
                                               "_DeferredAllocPagetables: "
                                               "ERROR call to RA_Alloc failed");
                                        return IMG_FALSE;
                                }

                                sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
                                ppsPTInfoList[i]->PTPageCpuVAddr =
                                    (void __force *)
                                    OSMapPhysToLin(sCpuPAddr, SGX_MMU_PAGE_SIZE,
                                                   PVRSRV_HAP_WRITECOMBINE |
                                                   PVRSRV_HAP_KERNEL_ONLY,
                                                   &ppsPTInfoList[i]->
                                                   hPTPageOSMemHandle);
                                if (!ppsPTInfoList[i]->PTPageCpuVAddr) {
                                        PVR_DPF(PVR_DBG_ERROR,
                                             "_DeferredAllocPagetables: "
                                             "ERROR failed to map page tables");
                                        return IMG_FALSE;
                                }

                                sDevPAddr = SysCpuPAddrToDevPAddr
                                            (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);

                        }

                        /* New tables start with all PTEs invalid. */
                        OSMemSet(ppsPTInfoList[i]->PTPageCpuVAddr, 0,
                                 SGX_MMU_PAGE_SIZE);

                        PDUMPMALLOCPAGETABLE(ppsPTInfoList[i]->PTPageCpuVAddr,
                                             PDUMP_PT_UNIQUETAG);

                        PDUMPPAGETABLE(ppsPTInfoList[i]->PTPageCpuVAddr,
                                       SGX_MMU_PAGE_SIZE, IMG_TRUE,
                                       PDUMP_PT_UNIQUETAG, PDUMP_PT_UNIQUETAG);

                        /* Publish the new table in the page directory:
                         * shared heaps in every context, others only in
                         * the owning context. */
                        switch (pMMUHeap->psDevArena->DevMemHeapType) {
                        case DEVICE_MEMORY_HEAP_SHARED:
                        case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
                                {
                                        struct MMU_CONTEXT *psMMUContext =
                                            (struct MMU_CONTEXT *)pMMUHeap->
                                                    psMMUContext->psDevInfo->
                                                            pvMMUContextList;

                                        while (psMMUContext) {
                                                pui32PDEntry =
                                                    (u32 *)psMMUContext->
                                                                pvPDCpuVAddr;
                                                pui32PDEntry += ui32PDIndex;

                                                pui32PDEntry[i] =
                                                    sDevPAddr.uiAddr |
                                                        SGX_MMU_PDE_VALID;

                                                PDUMPPAGETABLE
                                                    ((void *)&pui32PDEntry[i],
                                                     sizeof(u32), IMG_FALSE,
                                                     PDUMP_PD_UNIQUETAG,
                                                     PDUMP_PT_UNIQUETAG);

                                                psMMUContext =
                                                    psMMUContext->psNext;
                                        }
                                        break;
                                }
                        case DEVICE_MEMORY_HEAP_PERCONTEXT:
                        case DEVICE_MEMORY_HEAP_KERNEL:
                                {
                                        pui32PDEntry[i] = sDevPAddr.uiAddr |
                                                             SGX_MMU_PDE_VALID;

                                        PDUMPPAGETABLE((void *)&pui32PDEntry[i],
                                                       sizeof(u32), IMG_FALSE,
                                                       PDUMP_PD_UNIQUETAG,
                                                       PDUMP_PT_UNIQUETAG);

                                        break;
                                }
                        default:
                                {
                                        PVR_DPF(PVR_DBG_ERROR,
                                                "_DeferredAllocPagetables: "
                                                "ERROR invalid heap type");
                                        return IMG_FALSE;
                                }
                        }

                        /* Hardware must re-fetch the modified PDEs. */
                        MMU_InvalidateDirectoryCache(pMMUHeap->psMMUContext->
                                                     psDevInfo);
                } else {
                        /* Table already present: its PDE must be set. */
                        PVR_ASSERT(pui32PDEntry[i] != 0);
                }
        }

        return IMG_TRUE;
}
467
/*
 * Create a new MMU context: allocate a page directory (from OS pages or
 * the local device-memory arena), zero it, and link the context onto the
 * device's context list. On success returns PVRSRV_OK and passes back
 * the context and the device-physical address of its page directory.
 * All error paths unwind via the err1..err3 labels.
 */
enum PVRSRV_ERROR MMU_Initialise(struct PVRSRV_DEVICE_NODE *psDeviceNode,
                            struct MMU_CONTEXT **ppsMMUContext,
                            struct IMG_DEV_PHYADDR *psPDDevPAddr)
{
        u32 *pui32Tmp;
        u32 i;
        void *pvPDCpuVAddr;
        struct IMG_DEV_PHYADDR sPDDevPAddr;
        struct IMG_CPU_PHYADDR sCpuPAddr;
        struct IMG_SYS_PHYADDR sSysPAddr;
        struct MMU_CONTEXT *psMMUContext;
        void *hPDOSMemHandle;
        struct SYS_DATA *psSysData;
        struct PVRSRV_SGXDEV_INFO *psDevInfo;

        PVR_DPF(PVR_DBG_MESSAGE, "MMU_Initialise");

        if (SysAcquireData(&psSysData) != PVRSRV_OK) {
                PVR_DPF(PVR_DBG_ERROR,
                         "MMU_Initialise: ERROR call to SysAcquireData failed");
                return PVRSRV_ERROR_GENERIC;
        }

        if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
                   sizeof(struct MMU_CONTEXT), (void **) &psMMUContext, NULL)
                        != PVRSRV_OK) {
                PVR_DPF(PVR_DBG_ERROR,
                         "MMU_Initialise: ERROR call to OSAllocMem failed");
                return PVRSRV_ERROR_GENERIC;
        }
        OSMemSet(psMMUContext, 0, sizeof(struct MMU_CONTEXT));

        psDevInfo = (struct PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
        psMMUContext->psDevInfo = psDevInfo;

        psMMUContext->psDeviceNode = psDeviceNode;

        /* Page directory backing: OS pages when there is no local
         * device-memory arena, otherwise an arena page mapped into the
         * kernel. */
        if (psDeviceNode->psLocalDevMemArena == NULL) {
                if (OSAllocPages
                    (PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
                     SGX_MMU_PAGE_SIZE, SGX_MMU_PAGE_SIZE, &pvPDCpuVAddr,
                     &hPDOSMemHandle) != PVRSRV_OK) {
                        PVR_DPF(PVR_DBG_ERROR, "MMU_Initialise: "
                                        "ERROR call to OSAllocPages failed");
                        goto err1;
                }

                /* Derive the CPU-physical address from whichever of the
                 * two results is valid. */
                if (pvPDCpuVAddr)
                        sCpuPAddr = OSMapLinToCPUPhys(pvPDCpuVAddr);
                else
                        sCpuPAddr = OSMemHandleToCpuPAddr(hPDOSMemHandle, 0);
                sPDDevPAddr =
                    SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
        } else {
                if (RA_Alloc(psDeviceNode->psLocalDevMemArena,
                             SGX_MMU_PAGE_SIZE, NULL, 0, SGX_MMU_PAGE_SIZE,
                             &(sSysPAddr.uiAddr)) != IMG_TRUE) {
                        PVR_DPF(PVR_DBG_ERROR, "MMU_Initialise: "
                                        "ERROR call to RA_Alloc failed");

                        goto err1;
                }

                sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
                sPDDevPAddr =
                    SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysPAddr);
                pvPDCpuVAddr = (void __force *)
                    OSMapPhysToLin(sCpuPAddr, SGX_MMU_PAGE_SIZE,
                                   PVRSRV_HAP_WRITECOMBINE |
                                   PVRSRV_HAP_KERNEL_ONLY, &hPDOSMemHandle);
                if (!pvPDCpuVAddr) {
                        PVR_DPF(PVR_DBG_ERROR, "MMU_Initialise: "
                                        "ERROR failed to map page tables");

                        goto err2;
                }
        }

        PDUMPCOMMENT("Alloc page directory");

        PDUMPMALLOCPAGETABLE(pvPDCpuVAddr, PDUMP_PD_UNIQUETAG);

        if (pvPDCpuVAddr) {
                pui32Tmp = (u32 *) pvPDCpuVAddr;
        } else {
                PVR_DPF(PVR_DBG_ERROR,
                         "MMU_Initialise: pvPDCpuVAddr invalid");
                goto err3;
        }

        /* All PDEs start invalid. */
        for (i = 0; i < SGX_MMU_PD_SIZE; i++)
                pui32Tmp[i] = 0;

        PDUMPCOMMENT("Page directory contents");
        PDUMPPAGETABLE(pvPDCpuVAddr, SGX_MMU_PAGE_SIZE, IMG_TRUE,
                       PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);

        psMMUContext->pvPDCpuVAddr = pvPDCpuVAddr;
        psMMUContext->sPDDevPAddr = sPDDevPAddr;
        psMMUContext->hPDOSMemHandle = hPDOSMemHandle;

        *ppsMMUContext = psMMUContext;

        *psPDDevPAddr = sPDDevPAddr;

        /* Push onto the head of the device-wide context list. */
        psMMUContext->psNext = (struct MMU_CONTEXT *)
                                                psDevInfo->pvMMUContextList;
        psDevInfo->pvMMUContextList = (void *) psMMUContext;


        return PVRSRV_OK;
err3:
        /* Only the arena path has a mapping to undo here. */
        if (psDeviceNode->psLocalDevMemArena)
                OSUnMapPhysToLin((void __iomem __force *)pvPDCpuVAddr,
                                 SGX_MMU_PAGE_SIZE, PVRSRV_HAP_WRITECOMBINE |
                                        PVRSRV_HAP_KERNEL_ONLY,
                                 hPDOSMemHandle);
err2:
        /* Release the page directory page via the path that created it. */
        if (!psDeviceNode->psLocalDevMemArena)
                OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
                            SGX_MMU_PAGE_SIZE, pvPDCpuVAddr, hPDOSMemHandle);
        else
                RA_Free(psDeviceNode->psLocalDevMemArena,
                        sSysPAddr.uiAddr, IMG_FALSE);
err1:
        OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct MMU_CONTEXT),
                  psMMUContext, NULL);

        return PVRSRV_ERROR_GENERIC;
}
598
/*
 * Destroy an MMU context: zero and release its page directory (via the
 * same path that allocated it), unlink the context from the device-wide
 * list and free the context structure. Page tables are assumed to have
 * been freed already — TODO confirm against callers.
 */
void MMU_Finalise(struct MMU_CONTEXT *psMMUContext)
{
        u32 *pui32Tmp, i;
        struct SYS_DATA *psSysData;
        struct MMU_CONTEXT **ppsMMUContext;

        if (SysAcquireData(&psSysData) != PVRSRV_OK) {
                PVR_DPF(PVR_DBG_ERROR,
                         "MMU_Finalise: ERROR call to SysAcquireData failed");
                return;
        }

        PDUMPCOMMENT("Free page directory");
        PDUMPFREEPAGETABLE(psMMUContext->pvPDCpuVAddr);

        /* Invalidate all PDEs before releasing the page. */
        pui32Tmp = (u32 *) psMMUContext->pvPDCpuVAddr;

        for (i = 0; i < SGX_MMU_PD_SIZE; i++)
                pui32Tmp[i] = 0;

        if (psMMUContext->psDeviceNode->psLocalDevMemArena == NULL) {
                OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
                            SGX_MMU_PAGE_SIZE,
                            psMMUContext->pvPDCpuVAddr,
                            psMMUContext->hPDOSMemHandle);

        } else {
                struct IMG_SYS_PHYADDR sSysPAddr;
                struct IMG_CPU_PHYADDR sCpuPAddr;

                /* Recover the system-physical address before tearing
                 * down the CPU mapping, then return it to the arena. */
                sCpuPAddr = OSMapLinToCPUPhys(psMMUContext->pvPDCpuVAddr);
                sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);

                OSUnMapPhysToLin((void __iomem __force *)
                                        psMMUContext->pvPDCpuVAddr,
                                 SGX_MMU_PAGE_SIZE,
                                 PVRSRV_HAP_WRITECOMBINE |
                                                PVRSRV_HAP_KERNEL_ONLY,
                                 psMMUContext->hPDOSMemHandle);

                RA_Free(psMMUContext->psDeviceNode->psLocalDevMemArena,
                        sSysPAddr.uiAddr, IMG_FALSE);

        }

        PVR_DPF(PVR_DBG_MESSAGE, "MMU_Finalise");

        /* Unlink this context from the device's singly linked list. */
        ppsMMUContext =
            (struct MMU_CONTEXT **) &psMMUContext->psDevInfo->pvMMUContextList;
        while (*ppsMMUContext) {
                if (*ppsMMUContext == psMMUContext) {

                        *ppsMMUContext = psMMUContext->psNext;
                        break;
                }

                ppsMMUContext = &((*ppsMMUContext)->psNext);
        }

        OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct MMU_CONTEXT),
                  psMMUContext, NULL);
}
661
662 void MMU_InsertHeap(struct MMU_CONTEXT *psMMUContext,
663                     struct MMU_HEAP *psMMUHeap)
664 {
665         u32 *pui32PDCpuVAddr = (u32 *)psMMUContext->pvPDCpuVAddr;
666         u32 *pui32KernelPDCpuVAddr = (u32 *)
667                                         psMMUHeap->psMMUContext->pvPDCpuVAddr;
668         u32 ui32PDEntry;
669         IMG_BOOL bInvalidateDirectoryCache = IMG_FALSE;
670
671         pui32PDCpuVAddr +=
672             psMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT +
673                                                            SGX_MMU_PT_SHIFT);
674         pui32KernelPDCpuVAddr +=
675             psMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT +
676                                                            SGX_MMU_PT_SHIFT);
677
678         PDUMPCOMMENT("Page directory shared heap range copy");
679
680         for (ui32PDEntry = 0; ui32PDEntry < psMMUHeap->ui32PTPageCount;
681              ui32PDEntry++) {
682
683                 PVR_ASSERT(pui32PDCpuVAddr[ui32PDEntry] == 0);
684
685                 pui32PDCpuVAddr[ui32PDEntry] =
686                     pui32KernelPDCpuVAddr[ui32PDEntry];
687                 if (pui32PDCpuVAddr[ui32PDEntry]) {
688                         PDUMPPAGETABLE((void *) &pui32PDCpuVAddr[ui32PDEntry],
689                                        sizeof(u32), IMG_FALSE,
690                                        PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
691
692                         bInvalidateDirectoryCache = IMG_TRUE;
693                 }
694         }
695
696         if (bInvalidateDirectoryCache)
697                 MMU_InvalidateDirectoryCache(psMMUContext->psDevInfo);
698 }
699
#if defined(PDUMP)
/*
 * Emit PDUMP records for the page-table entries covering the range
 * [DevVAddr, DevVAddr + uSize), walking one page table at a time.
 * Tables with no MMU_PT_INFO record are skipped silently.
 */
static void MMU_PDumpPageTables(struct MMU_HEAP *pMMUHeap,
                    struct IMG_DEV_VIRTADDR DevVAddr,
                    size_t uSize, IMG_BOOL bForUnmap, void *hUniqueTag)
{
        u32 ui32EntriesLeft;
        u32 ui32PTEIndex;
        u32 ui32PDIndex;
        struct MMU_PT_INFO **ppsPTInfoList;

        /* Round the byte size up to whole device pages. */
        ui32EntriesLeft =
            (uSize + SGX_MMU_PAGE_SIZE - 1) >> SGX_MMU_PAGE_SHIFT;

        ui32PDIndex =
            DevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);

        ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];

        /* Offset of the first PTE within its page table. */
        ui32PTEIndex =
            (DevVAddr.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;

        PDUMPCOMMENT("Page table mods (num entries == %08X) %s",
                     ui32EntriesLeft, bForUnmap ? "(for unmap)" : "");

        while (ui32EntriesLeft > 0) {
                struct MMU_PT_INFO *psPTInfo = *ppsPTInfoList++;
                u32 ui32Chunk;

                /* Dump at most up to the end of the current table. */
                if (ui32EntriesLeft <= 1024 - ui32PTEIndex)
                        ui32Chunk = ui32EntriesLeft;
                else
                        ui32Chunk = 1024 - ui32PTEIndex;

                if (psPTInfo) {
                        u32 *pui32PT = (u32 *)psPTInfo->PTPageCpuVAddr;

                        PDUMPPAGETABLE((void *)&pui32PT[ui32PTEIndex],
                                       ui32Chunk * sizeof(u32), IMG_FALSE,
                                       PDUMP_PT_UNIQUETAG, hUniqueTag);
                }

                ui32EntriesLeft -= ui32Chunk;

                /* Subsequent tables are dumped from their first entry. */
                ui32PTEIndex = 0;
        }

        PDUMPCOMMENT("Finished page table mods %s",
                     bForUnmap ? "(for unmap)" : "");
}
#endif
750
751 static void MMU_UnmapPagesAndFreePTs(struct MMU_HEAP *psMMUHeap,
752                          struct IMG_DEV_VIRTADDR sDevVAddr,
753                          u32 ui32PageCount, void *hUniqueTag)
754 {
755         u32 uPageSize = HOST_PAGESIZE();
756         struct IMG_DEV_VIRTADDR sTmpDevVAddr;
757         u32 i;
758         u32 ui32PDIndex;
759         u32 ui32PTIndex;
760         u32 *pui32Tmp;
761         IMG_BOOL bInvalidateDirectoryCache = IMG_FALSE;
762
763 #if !defined(PDUMP)
764         PVR_UNREFERENCED_PARAMETER(hUniqueTag);
765 #endif
766
767         sTmpDevVAddr = sDevVAddr;
768
769         for (i = 0; i < ui32PageCount; i++) {
770                 struct MMU_PT_INFO **ppsPTInfoList;
771
772                 ui32PDIndex =
773                     sTmpDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT +
774                                             SGX_MMU_PT_SHIFT);
775
776                 ppsPTInfoList =
777                     &psMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
778
779                 {
780                         ui32PTIndex = (sTmpDevVAddr.uiAddr & SGX_MMU_PT_MASK)
781                                                 >> SGX_MMU_PAGE_SHIFT;
782
783                         if (!ppsPTInfoList[0]) {
784                                 PVR_DPF(PVR_DBG_MESSAGE,
785                                         "MMU_UnmapPagesAndFreePTs: "
786                                         "Invalid PT for alloc at VAddr:0x%08lX "
787                                         "(VaddrIni:0x%08lX AllocPage:%u) "
788                                         "PDIdx:%u PTIdx:%u",
789                                          sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr,
790                                          i, ui32PDIndex, ui32PTIndex);
791
792                                 sTmpDevVAddr.uiAddr += uPageSize;
793
794                                 continue;
795                         }
796
797                         pui32Tmp = (u32 *)ppsPTInfoList[0]->PTPageCpuVAddr;
798
799                         if (!pui32Tmp)
800                                 continue;
801
802                         if (pui32Tmp[ui32PTIndex] & SGX_MMU_PTE_VALID) {
803                                 ppsPTInfoList[0]->ui32ValidPTECount--;
804                         } else {
805                                 PVR_DPF(PVR_DBG_MESSAGE,
806                                          "MMU_UnmapPagesAndFreePTs: "
807                                          "Page is already invalid for alloc at "
808                                          "VAddr:0x%08lX "
809                                          "(VAddrIni:0x%08lX AllocPage:%u) "
810                                          "PDIdx:%u PTIdx:%u",
811                                          sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr,
812                                          i, ui32PDIndex, ui32PTIndex);
813                         }
814
815                         PVR_ASSERT((s32)ppsPTInfoList[0]->ui32ValidPTECount >=
816                                                                         0);
817                         pui32Tmp[ui32PTIndex] = 0;
818                 }
819
820                 if (ppsPTInfoList[0]
821                     && ppsPTInfoList[0]->ui32ValidPTECount == 0) {
822                         _DeferredFreePageTable(psMMUHeap,
823                                                ui32PDIndex - (psMMUHeap->
824                                                    ui32PTBaseIndex >>
825                                                        SGX_MMU_PT_SHIFT));
826                         bInvalidateDirectoryCache = IMG_TRUE;
827                 }
828
829                 sTmpDevVAddr.uiAddr += uPageSize;
830         }
831
832         if (bInvalidateDirectoryCache) {
833                 MMU_InvalidateDirectoryCache(psMMUHeap->psMMUContext->
834                                                              psDevInfo);
835         } else {
836                 MMU_InvalidatePageTableCache(psMMUHeap->psMMUContext->
837                                                              psDevInfo);
838         }
839
840 #if defined(PDUMP)
841         MMU_PDumpPageTables(psMMUHeap, sDevVAddr, uPageSize * ui32PageCount,
842                             IMG_TRUE, hUniqueTag);
843 #endif
844 }
845
846 static void MMU_FreePageTables(void *pvMMUHeap, u32 ui32Start, u32 ui32End,
847                                void *hUniqueTag)
848 {
849         struct MMU_HEAP *pMMUHeap = (struct MMU_HEAP *)pvMMUHeap;
850         struct IMG_DEV_VIRTADDR Start;
851
852         Start.uiAddr = ui32Start;
853
854         MMU_UnmapPagesAndFreePTs(pMMUHeap, Start,
855                                  (ui32End - ui32Start) / SGX_MMU_PAGE_SIZE,
856                                  hUniqueTag);
857 }
858
859 struct MMU_HEAP *MMU_Create(struct MMU_CONTEXT *psMMUContext,
860                             struct DEV_ARENA_DESCRIPTOR *psDevArena,
861                             struct RA_ARENA **ppsVMArena)
862 {
863         struct MMU_HEAP *pMMUHeap;
864         IMG_BOOL bRes;
865
866         PVR_ASSERT(psDevArena != NULL);
867
868         if (psDevArena == NULL) {
869                 PVR_DPF(PVR_DBG_ERROR, "MMU_Create: invalid parameter");
870                 return NULL;
871         }
872
873         if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
874                    sizeof(struct MMU_HEAP), (void **)&pMMUHeap, NULL)
875                         != PVRSRV_OK) {
876                 PVR_DPF(PVR_DBG_ERROR,
877                          "MMU_Create: ERROR call to OSAllocMem failed");
878                 return NULL;
879         }
880
881         pMMUHeap->psMMUContext = psMMUContext;
882         pMMUHeap->psDevArena = psDevArena;
883
884         bRes = _AllocPageTables(pMMUHeap);
885         if (!bRes) {
886                 PVR_DPF(PVR_DBG_ERROR,
887                          "MMU_Create: ERROR call to _AllocPageTables failed");
888                 OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct MMU_HEAP),
889                           pMMUHeap, NULL);
890                 return NULL;
891         }
892
893         pMMUHeap->psVMArena = RA_Create(psDevArena->pszName,
894                                         psDevArena->BaseDevVAddr.uiAddr,
895                                         psDevArena->ui32Size, NULL,
896                                         SGX_MMU_PAGE_SIZE, NULL, NULL,
897                                         MMU_FreePageTables, pMMUHeap);
898
899         if (pMMUHeap->psVMArena == NULL) {
900                 PVR_DPF(PVR_DBG_ERROR,
901                          "MMU_Create: ERROR call to RA_Create failed");
902                 _DeferredFreePageTables(pMMUHeap);
903                 OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct MMU_HEAP),
904                           pMMUHeap, NULL);
905                 return NULL;
906         }
907
908         *ppsVMArena = pMMUHeap->psVMArena;
909
910         return pMMUHeap;
911 }
912
913 void MMU_Delete(struct MMU_HEAP *pMMUHeap)
914 {
915         if (pMMUHeap != NULL) {
916                 PVR_DPF(PVR_DBG_MESSAGE, "MMU_Delete");
917
918                 if (pMMUHeap->psVMArena)
919                         RA_Delete(pMMUHeap->psVMArena);
920                 _DeferredFreePageTables(pMMUHeap);
921
922                 OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct MMU_HEAP),
923                           pMMUHeap, NULL);
924         }
925 }
926
927 IMG_BOOL MMU_Alloc(struct MMU_HEAP *pMMUHeap, size_t uSize, u32 uFlags,
928                    u32 uDevVAddrAlignment, struct IMG_DEV_VIRTADDR *psDevVAddr)
929 {
930         IMG_BOOL bStatus;
931
932         PVR_DPF(PVR_DBG_MESSAGE,
933                  "MMU_Alloc: uSize=0x%x, flags=0x%x, align=0x%x",
934                  uSize, uFlags, uDevVAddrAlignment);
935
936         if ((uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) == 0) {
937                 bStatus = RA_Alloc(pMMUHeap->psVMArena, uSize, NULL, 0,
938                                    uDevVAddrAlignment, &(psDevVAddr->uiAddr));
939                 if (!bStatus) {
940                         PVR_DPF(PVR_DBG_ERROR,
941                                  "MMU_Alloc: RA_Alloc of VMArena failed");
942                         return bStatus;
943                 }
944         }
945
946         bStatus = _DeferredAllocPagetables(pMMUHeap, *psDevVAddr, uSize);
947
948
949         if (!bStatus) {
950                 PVR_DPF(PVR_DBG_ERROR,
951                          "MMU_Alloc: _DeferredAllocPagetables failed");
952                 if ((uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) == 0)
953                         RA_Free(pMMUHeap->psVMArena, psDevVAddr->uiAddr,
954                                 IMG_FALSE);
955         }
956
957         return bStatus;
958 }
959
960 void MMU_Free(struct MMU_HEAP *pMMUHeap, struct IMG_DEV_VIRTADDR DevVAddr,
961               u32 ui32Size)
962 {
963         PVR_ASSERT(pMMUHeap != NULL);
964
965         if (pMMUHeap == NULL) {
966                 PVR_DPF(PVR_DBG_ERROR, "MMU_Free: invalid parameter");
967                 return;
968         }
969
970         PVR_DPF(PVR_DBG_MESSAGE,
971                  "MMU_Free: mmu=%08X, dev_vaddr=%08X", pMMUHeap,
972                  DevVAddr.uiAddr);
973
974         if ((DevVAddr.uiAddr >= pMMUHeap->psDevArena->BaseDevVAddr.uiAddr) &&
975             (DevVAddr.uiAddr + ui32Size <=
976              pMMUHeap->psDevArena->BaseDevVAddr.uiAddr +
977              pMMUHeap->psDevArena->ui32Size)) {
978                 RA_Free(pMMUHeap->psVMArena, DevVAddr.uiAddr, IMG_TRUE);
979                 return;
980         }
981
982         BUG();
983
984         PVR_DPF(PVR_DBG_ERROR,
985                  "MMU_Free: Couldn't find DevVAddr %08X in a DevArena",
986                  DevVAddr.uiAddr);
987 }
988
/*
 * MMU_Enable - intentionally a no-op in this implementation; kept to
 * satisfy the common MMU interface.  pMMUHeap is unused.
 */
void MMU_Enable(struct MMU_HEAP *pMMUHeap)
{
        PVR_UNREFERENCED_PARAMETER(pMMUHeap);

}
994
/*
 * MMU_Disable - intentionally a no-op in this implementation; kept to
 * satisfy the common MMU interface.  pMMUHeap is unused.
 */
void MMU_Disable(struct MMU_HEAP *pMMUHeap)
{
        PVR_UNREFERENCED_PARAMETER(pMMUHeap);

}
1000
/*
 * MMU_MapPage - write a single PTE mapping DevVAddr to DevPAddr.
 *
 * Translates PVRSRV_MEM_* allocation flags into SGX PTE protection
 * bits, locates the page table through the context's PT info list and
 * writes the entry.  The target PTE must currently be invalid
 * (asserted); the PT's valid-entry count is incremented for later
 * page-table reclaim.
 */
static void MMU_MapPage(struct MMU_HEAP *pMMUHeap,
            struct IMG_DEV_VIRTADDR DevVAddr,
            struct IMG_DEV_PHYADDR DevPAddr, u32 ui32MemFlags)
{
        u32 ui32Index;
        u32 *pui32Tmp;
        u32 ui32MMUFlags = 0;
        struct MMU_PT_INFO **ppsPTInfoList;

        /* Read+write needs no extra protection bits; read-only and
         * write-only each set their dedicated PTE flag. */
        if (((PVRSRV_MEM_READ | PVRSRV_MEM_WRITE) & ui32MemFlags) ==
            (PVRSRV_MEM_READ | PVRSRV_MEM_WRITE))
                ui32MMUFlags = 0;
        else if (PVRSRV_MEM_READ & ui32MemFlags)
                ui32MMUFlags |= SGX_MMU_PTE_READONLY;
        else if (PVRSRV_MEM_WRITE & ui32MemFlags)
                ui32MMUFlags |= SGX_MMU_PTE_WRITEONLY;

        if (PVRSRV_MEM_CACHE_CONSISTENT & ui32MemFlags)
                ui32MMUFlags |= SGX_MMU_PTE_CACHECONSISTENT;

        if (PVRSRV_MEM_EDM_PROTECT & ui32MemFlags)
                ui32MMUFlags |= SGX_MMU_PTE_EDMPROTECT;

        /* ui32Index is first the page-directory index for DevVAddr... */
        ui32Index = DevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);

        ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32Index];

        /* ...then reused as the index within that page table. */
        ui32Index = (DevVAddr.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;

        pui32Tmp = (u32 *) ppsPTInfoList[0]->PTPageCpuVAddr;


        if (pui32Tmp[ui32Index] & SGX_MMU_PTE_VALID)
                PVR_DPF(PVR_DBG_ERROR,
                                "MMU_MapPage: "
                                "Page is already valid for alloc at "
                                "VAddr:0x%08lX PDIdx:%u PTIdx:%u",
                         DevVAddr.uiAddr,
                         DevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT +
                                             SGX_MMU_PT_SHIFT), ui32Index);

        PVR_ASSERT((pui32Tmp[ui32Index] & SGX_MMU_PTE_VALID) == 0);

        /* Track valid entries so empty PTs can be freed on unmap. */
        ppsPTInfoList[0]->ui32ValidPTECount++;

        pui32Tmp[ui32Index] = (DevPAddr.uiAddr & SGX_MMU_PTE_ADDR_MASK)
            | SGX_MMU_PTE_VALID | ui32MMUFlags;
}
1049
1050 void MMU_MapScatter(struct MMU_HEAP *pMMUHeap, struct IMG_DEV_VIRTADDR DevVAddr,
1051                     struct IMG_SYS_PHYADDR *psSysAddr, size_t uSize,
1052                     u32 ui32MemFlags, void *hUniqueTag)
1053 {
1054 #if defined(PDUMP)
1055         struct IMG_DEV_VIRTADDR MapBaseDevVAddr;
1056 #endif
1057         u32 uCount, i;
1058         struct IMG_DEV_PHYADDR DevPAddr;
1059
1060         PVR_ASSERT(pMMUHeap != NULL);
1061
1062 #if defined(PDUMP)
1063         MapBaseDevVAddr = DevVAddr;
1064 #else
1065         PVR_UNREFERENCED_PARAMETER(hUniqueTag);
1066 #endif
1067
1068         for (i = 0, uCount = 0; uCount < uSize;
1069              i++, uCount += SGX_MMU_PAGE_SIZE) {
1070                 struct IMG_SYS_PHYADDR sSysAddr;
1071
1072                 sSysAddr = psSysAddr[i];
1073
1074                 DevPAddr =
1075                     SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysAddr);
1076
1077                 MMU_MapPage(pMMUHeap, DevVAddr, DevPAddr, ui32MemFlags);
1078                 DevVAddr.uiAddr += SGX_MMU_PAGE_SIZE;
1079
1080                 PVR_DPF(PVR_DBG_MESSAGE, "MMU_MapScatter: "
1081                                 "devVAddr=%08X, SysAddr=%08X, size=0x%x/0x%x",
1082                          DevVAddr.uiAddr, sSysAddr.uiAddr, uCount, uSize);
1083         }
1084
1085 #if defined(PDUMP)
1086         MMU_PDumpPageTables(pMMUHeap, MapBaseDevVAddr, uSize, IMG_FALSE,
1087                             hUniqueTag);
1088 #endif
1089 }
1090
1091 void MMU_MapPages(struct MMU_HEAP *pMMUHeap, struct IMG_DEV_VIRTADDR DevVAddr,
1092                   struct IMG_SYS_PHYADDR SysPAddr, size_t uSize,
1093                   u32 ui32MemFlags, void *hUniqueTag)
1094 {
1095         struct IMG_DEV_PHYADDR DevPAddr;
1096 #if defined(PDUMP)
1097         struct IMG_DEV_VIRTADDR MapBaseDevVAddr;
1098 #endif
1099         u32 uCount;
1100         u32 ui32VAdvance = SGX_MMU_PAGE_SIZE;
1101         u32 ui32PAdvance = SGX_MMU_PAGE_SIZE;
1102
1103         PVR_ASSERT(pMMUHeap != NULL);
1104
1105         PVR_DPF(PVR_DBG_MESSAGE, "MMU_MapPages: "
1106                  "mmu=%08X, devVAddr=%08X, SysPAddr=%08X, size=0x%x",
1107                  pMMUHeap, DevVAddr.uiAddr, SysPAddr.uiAddr, uSize);
1108
1109 #if defined(PDUMP)
1110         MapBaseDevVAddr = DevVAddr;
1111 #else
1112         PVR_UNREFERENCED_PARAMETER(hUniqueTag);
1113 #endif
1114
1115         DevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, SysPAddr);
1116
1117         if (ui32MemFlags & PVRSRV_MEM_DUMMY)
1118                 ui32PAdvance = 0;
1119
1120         for (uCount = 0; uCount < uSize; uCount += ui32VAdvance) {
1121                 MMU_MapPage(pMMUHeap, DevVAddr, DevPAddr, ui32MemFlags);
1122                 DevVAddr.uiAddr += ui32VAdvance;
1123                 DevPAddr.uiAddr += ui32PAdvance;
1124         }
1125
1126 #if defined(PDUMP)
1127         MMU_PDumpPageTables(pMMUHeap, MapBaseDevVAddr, uSize, IMG_FALSE,
1128                             hUniqueTag);
1129 #endif
1130 }
1131
/*
 * MMU_MapShadow - map uByteSize bytes of host memory (identified either
 * by kernel linear address CpuVAddr or, when that is NULL, by the OS
 * memory handle hOSMemHandle) into the device virtual range starting at
 * MapBaseDevVAddr.
 *
 * Each page's CPU physical address is looked up, converted to a device
 * physical address and mapped with MMU_MapPage.  On return *pDevVAddr
 * holds the base device virtual address of the mapping.  With
 * PVRSRV_MEM_DUMMY the host-side offset is not advanced, so every
 * device page maps the same backing page.
 */
void MMU_MapShadow(struct MMU_HEAP *pMMUHeap,
              struct IMG_DEV_VIRTADDR MapBaseDevVAddr,
              size_t uByteSize, void *CpuVAddr, void *hOSMemHandle,
              struct IMG_DEV_VIRTADDR *pDevVAddr, u32 ui32MemFlags,
              void *hUniqueTag)
{
        u32 i;
        u32 uOffset = 0;
        struct IMG_DEV_VIRTADDR MapDevVAddr;
        u32 ui32VAdvance = SGX_MMU_PAGE_SIZE;
        u32 ui32PAdvance = SGX_MMU_PAGE_SIZE;

#if !defined(PDUMP)
        PVR_UNREFERENCED_PARAMETER(hUniqueTag);
#endif

        PVR_DPF(PVR_DBG_MESSAGE,
                 "MMU_MapShadow: %08X, 0x%x, %08X",
                 MapBaseDevVAddr.uiAddr, uByteSize, CpuVAddr);

        /* Both the CPU address and the size must be page aligned. */
        PVR_ASSERT(((u32) CpuVAddr & (SGX_MMU_PAGE_SIZE - 1)) == 0);
        PVR_ASSERT(((u32) uByteSize & (SGX_MMU_PAGE_SIZE - 1)) == 0);
        pDevVAddr->uiAddr = MapBaseDevVAddr.uiAddr;

        if (ui32MemFlags & PVRSRV_MEM_DUMMY)
                ui32PAdvance = 0;

        MapDevVAddr = MapBaseDevVAddr;
        for (i = 0; i < uByteSize; i += ui32VAdvance) {
                struct IMG_CPU_PHYADDR CpuPAddr;
                struct IMG_DEV_PHYADDR DevPAddr;

                if (CpuVAddr)
                        CpuPAddr =
                            OSMapLinToCPUPhys((void *)((u32)CpuVAddr +
                                                                    uOffset));
                else
                        CpuPAddr = OSMemHandleToCpuPAddr(hOSMemHandle, uOffset);
                DevPAddr =
                    SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, CpuPAddr);

                PVR_DPF(PVR_DBG_MESSAGE, "0x%x: CpuVAddr=%08X, "
                                "CpuPAddr=%08X, DevVAddr=%08X, DevPAddr=%08X",
                         uOffset, (u32)CpuVAddr + uOffset, CpuPAddr.uiAddr,
                         MapDevVAddr.uiAddr, DevPAddr.uiAddr);

                MMU_MapPage(pMMUHeap, MapDevVAddr, DevPAddr, ui32MemFlags);

                MapDevVAddr.uiAddr += ui32VAdvance;
                uOffset += ui32PAdvance;
        }

#if defined(PDUMP)
        MMU_PDumpPageTables(pMMUHeap, MapBaseDevVAddr, uByteSize, IMG_FALSE,
                            hUniqueTag);
#endif
}
1189
1190 void MMU_UnmapPages(struct MMU_HEAP *psMMUHeap,
1191                    struct IMG_DEV_VIRTADDR sDevVAddr, u32 ui32PageCount,
1192                    void *hUniqueTag)
1193 {
1194         u32 uPageSize = HOST_PAGESIZE();
1195         struct IMG_DEV_VIRTADDR sTmpDevVAddr;
1196         u32 i;
1197         u32 ui32PDIndex;
1198         u32 ui32PTIndex;
1199         u32 *pui32Tmp;
1200
1201 #if !defined(PDUMP)
1202         PVR_UNREFERENCED_PARAMETER(hUniqueTag);
1203 #endif
1204
1205         sTmpDevVAddr = sDevVAddr;
1206
1207         for (i = 0; i < ui32PageCount; i++) {
1208                 struct MMU_PT_INFO **ppsPTInfoList;
1209
1210                 ui32PDIndex = sTmpDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT +
1211                                                       SGX_MMU_PT_SHIFT);
1212
1213                 ppsPTInfoList = &psMMUHeap->psMMUContext->
1214                                                 apsPTInfoList[ui32PDIndex];
1215
1216                 ui32PTIndex = (sTmpDevVAddr.uiAddr & SGX_MMU_PT_MASK) >>
1217                                                         SGX_MMU_PAGE_SHIFT;
1218
1219                 if (!ppsPTInfoList[0]) {
1220                         PVR_DPF(PVR_DBG_ERROR,
1221                                 "MMU_UnmapPages: "
1222                                 "ERROR Invalid PT for alloc at VAddr:0x%08lX "
1223                                 "(VaddrIni:0x%08lX AllocPage:%u) PDIdx:%u "
1224                                 "PTIdx:%u",
1225                                  sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr, i,
1226                                  ui32PDIndex, ui32PTIndex);
1227
1228                         sTmpDevVAddr.uiAddr += uPageSize;
1229
1230                         continue;
1231                 }
1232
1233                 pui32Tmp = (u32 *)ppsPTInfoList[0]->PTPageCpuVAddr;
1234
1235                 if (pui32Tmp[ui32PTIndex] & SGX_MMU_PTE_VALID)
1236                         ppsPTInfoList[0]->ui32ValidPTECount--;
1237                 else
1238                         PVR_DPF(PVR_DBG_ERROR,
1239                                 "MMU_UnmapPages: Page is already invalid "
1240                                 "for alloc at VAddr:0x%08lX "
1241                                 "(VAddrIni:0x%08lX AllocPage:%u) "
1242                                 "PDIdx:%u PTIdx:%u",
1243                                  sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr, i,
1244                                  ui32PDIndex, ui32PTIndex);
1245
1246                 PVR_ASSERT((s32) ppsPTInfoList[0]->ui32ValidPTECount >= 0);
1247
1248                 pui32Tmp[ui32PTIndex] = 0;
1249
1250                 sTmpDevVAddr.uiAddr += uPageSize;
1251         }
1252
1253         MMU_InvalidatePageTableCache(psMMUHeap->psMMUContext->psDevInfo);
1254
1255 #if defined(PDUMP)
1256         MMU_PDumpPageTables(psMMUHeap, sDevVAddr, uPageSize * ui32PageCount,
1257                             IMG_TRUE, hUniqueTag);
1258 #endif
1259 }
1260
1261 struct IMG_DEV_PHYADDR MMU_GetPhysPageAddr(struct MMU_HEAP *pMMUHeap,
1262                                         struct IMG_DEV_VIRTADDR sDevVPageAddr)
1263 {
1264         u32 *pui32PageTable;
1265         u32 ui32Index;
1266         struct IMG_DEV_PHYADDR sDevPAddr;
1267         struct MMU_PT_INFO **ppsPTInfoList;
1268
1269         ui32Index = sDevVPageAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT +
1270                                              SGX_MMU_PT_SHIFT);
1271
1272         ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32Index];
1273         if (!ppsPTInfoList[0]) {
1274                 PVR_DPF(PVR_DBG_ERROR,
1275                          "MMU_GetPhysPageAddr: Not mapped in at 0x%08x",
1276                          sDevVPageAddr.uiAddr);
1277                 sDevPAddr.uiAddr = 0;
1278                 return sDevPAddr;
1279         }
1280
1281         ui32Index =
1282             (sDevVPageAddr.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
1283
1284         pui32PageTable = (u32 *) ppsPTInfoList[0]->PTPageCpuVAddr;
1285
1286         sDevPAddr.uiAddr = pui32PageTable[ui32Index];
1287
1288         sDevPAddr.uiAddr &= SGX_MMU_PTE_ADDR_MASK;
1289
1290         return sDevPAddr;
1291 }
1292
1293 struct IMG_DEV_PHYADDR MMU_GetPDDevPAddr(struct MMU_CONTEXT *pMMUContext)
1294 {
1295         return pMMUContext->sPDDevPAddr;
1296 }
1297
1298 enum PVRSRV_ERROR SGXGetPhysPageAddrKM(void *hDevMemHeap,
1299                                       struct IMG_DEV_VIRTADDR sDevVAddr,
1300                                       struct IMG_DEV_PHYADDR *pDevPAddr,
1301                                       struct IMG_CPU_PHYADDR *pCpuPAddr)
1302 {
1303         struct MMU_HEAP *pMMUHeap;
1304         struct IMG_DEV_PHYADDR DevPAddr;
1305
1306         pMMUHeap = (struct MMU_HEAP *)BM_GetMMUHeap(hDevMemHeap);
1307
1308         DevPAddr = MMU_GetPhysPageAddr(pMMUHeap, sDevVAddr);
1309         pCpuPAddr->uiAddr = DevPAddr.uiAddr;
1310         pDevPAddr->uiAddr = DevPAddr.uiAddr;
1311
1312         return (pDevPAddr->uiAddr != 0) ?
1313                 PVRSRV_OK : PVRSRV_ERROR_INVALID_PARAMS;
1314 }
1315
1316 enum PVRSRV_ERROR SGXGetMMUPDAddrKM(void *hDevCookie,
1317                                void *hDevMemContext,
1318                                struct IMG_DEV_PHYADDR *psPDDevPAddr)
1319 {
1320         if (!hDevCookie || !hDevMemContext || !psPDDevPAddr)
1321                 return PVRSRV_ERROR_INVALID_PARAMS;
1322
1323         *psPDDevPAddr =
1324             ((struct BM_CONTEXT *)hDevMemContext)->psMMUContext->sPDDevPAddr;
1325
1326         return PVRSRV_OK;
1327 }
1328
1329 enum PVRSRV_ERROR MMU_BIFResetPDAlloc(struct PVRSRV_SGXDEV_INFO *psDevInfo)
1330 {
1331         enum PVRSRV_ERROR eError;
1332         struct SYS_DATA *psSysData;
1333         struct RA_ARENA *psLocalDevMemArena;
1334         void *hOSMemHandle = NULL;
1335         u8 *pui8MemBlock = NULL;
1336         struct IMG_SYS_PHYADDR sMemBlockSysPAddr;
1337         struct IMG_CPU_PHYADDR sMemBlockCpuPAddr;
1338
1339         eError = SysAcquireData(&psSysData);
1340         if (eError != PVRSRV_OK) {
1341                 PVR_DPF(PVR_DBG_ERROR,
1342                    "MMU_BIFResetPDAlloc: ERROR call to SysAcquireData failed");
1343                 return eError;
1344         }
1345
1346         psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];
1347
1348         if (psLocalDevMemArena == NULL) {
1349
1350                 eError =
1351                     OSAllocPages(PVRSRV_HAP_WRITECOMBINE |
1352                                  PVRSRV_HAP_KERNEL_ONLY, 3 * SGX_MMU_PAGE_SIZE,
1353                                  SGX_MMU_PAGE_SIZE, (void **)&pui8MemBlock,
1354                                  &hOSMemHandle);
1355                 if (eError != PVRSRV_OK) {
1356                         PVR_DPF(PVR_DBG_ERROR, "MMU_BIFResetPDAlloc: "
1357                                         "ERROR call to OSAllocPages failed");
1358                         return eError;
1359                 }
1360                 sMemBlockCpuPAddr = OSMapLinToCPUPhys(pui8MemBlock);
1361         } else {
1362                 if (RA_Alloc(psLocalDevMemArena, 3 * SGX_MMU_PAGE_SIZE,
1363                              NULL, 0, SGX_MMU_PAGE_SIZE,
1364                              &(sMemBlockSysPAddr.uiAddr)) != IMG_TRUE) {
1365                         PVR_DPF(PVR_DBG_ERROR, "MMU_BIFResetPDAlloc: "
1366                                         "ERROR call to RA_Alloc failed");
1367                         return PVRSRV_ERROR_OUT_OF_MEMORY;
1368                 }
1369
1370                 sMemBlockCpuPAddr = SysSysPAddrToCpuPAddr(sMemBlockSysPAddr);
1371                 pui8MemBlock = (void __force *)OSMapPhysToLin(sMemBlockCpuPAddr,
1372                                               SGX_MMU_PAGE_SIZE * 3,
1373                                               PVRSRV_HAP_WRITECOMBINE |
1374                                               PVRSRV_HAP_KERNEL_ONLY,
1375                                               &hOSMemHandle);
1376                 if (!pui8MemBlock) {
1377                         PVR_DPF(PVR_DBG_ERROR, "MMU_BIFResetPDAlloc: "
1378                                         "ERROR failed to map page tables");
1379                         return PVRSRV_ERROR_BAD_MAPPING;
1380                 }
1381         }
1382
1383         psDevInfo->hBIFResetPDOSMemHandle = hOSMemHandle;
1384         psDevInfo->sBIFResetPDDevPAddr =
1385             SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sMemBlockCpuPAddr);
1386         psDevInfo->sBIFResetPTDevPAddr.uiAddr =
1387             psDevInfo->sBIFResetPDDevPAddr.uiAddr + SGX_MMU_PAGE_SIZE;
1388         psDevInfo->sBIFResetPageDevPAddr.uiAddr =
1389             psDevInfo->sBIFResetPTDevPAddr.uiAddr + SGX_MMU_PAGE_SIZE;
1390         psDevInfo->pui32BIFResetPD = (u32 *) pui8MemBlock;
1391         psDevInfo->pui32BIFResetPT =
1392             (u32 *) (pui8MemBlock + SGX_MMU_PAGE_SIZE);
1393
1394         OSMemSet(psDevInfo->pui32BIFResetPD, 0, SGX_MMU_PAGE_SIZE);
1395         OSMemSet(psDevInfo->pui32BIFResetPT, 0, SGX_MMU_PAGE_SIZE);
1396
1397         OSMemSet(pui8MemBlock + (2 * SGX_MMU_PAGE_SIZE), 0xDB,
1398                  SGX_MMU_PAGE_SIZE);
1399
1400         return PVRSRV_OK;
1401 }
1402
/*
 * MMU_BIFResetPDFree - release the three BIF-reset pages allocated by
 * MMU_BIFResetPDAlloc, using the release path matching whichever
 * allocator provided them (OS pages vs local device memory arena).
 */
void MMU_BIFResetPDFree(struct PVRSRV_SGXDEV_INFO *psDevInfo)
{
        enum PVRSRV_ERROR eError;
        struct SYS_DATA *psSysData;
        struct RA_ARENA *psLocalDevMemArena;
        struct IMG_SYS_PHYADDR sPDSysPAddr;

        eError = SysAcquireData(&psSysData);
        if (eError != PVRSRV_OK) {
                PVR_DPF(PVR_DBG_ERROR, "MMU_BIFResetPDFree: "
                                "ERROR call to SysAcquireData failed");
                return;
        }

        psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];

        /* Must mirror the allocation path in MMU_BIFResetPDAlloc. */
        if (psLocalDevMemArena == NULL) {
                OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
                            3 * SGX_MMU_PAGE_SIZE,
                            psDevInfo->pui32BIFResetPD,
                            psDevInfo->hBIFResetPDOSMemHandle);
        } else {
                /* Unmap the kernel mapping, then return the physical
                 * block to the local arena. */
                OSUnMapPhysToLin((void __force __iomem *)
                                        psDevInfo->pui32BIFResetPD,
                                 3 * SGX_MMU_PAGE_SIZE,
                                 PVRSRV_HAP_WRITECOMBINE |
                                        PVRSRV_HAP_KERNEL_ONLY,
                                 psDevInfo->hBIFResetPDOSMemHandle);

                sPDSysPAddr =
                    SysDevPAddrToSysPAddr(PVRSRV_DEVICE_TYPE_SGX,
                                          psDevInfo->sBIFResetPDDevPAddr);
                RA_Free(psLocalDevMemArena, sPDSysPAddr.uiAddr, IMG_FALSE);
        }
}
1438
1439 u32 mmu_get_page_dir(struct MMU_CONTEXT *psMMUContext)
1440 {
1441         return psMMUContext->sPDDevPAddr.uiAddr;
1442 }
1443
1444
1445 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_PVR_DEBUG)
1446
1447 static int
1448 hwrec_mem_dump_page(u32 dev_p_addr)
1449 {
1450         void __iomem *page;
1451
1452         page = ioremap_nocache(dev_p_addr, SGX_MMU_PAGE_SIZE);
1453         if (!page)
1454                 return -EFAULT;
1455
1456         /* Loop through all the pages and dump them */
1457         hwrec_mem_print("<PAGE PA:0x%08X>\n", dev_p_addr);
1458         hwrec_mem_write((void __force *) page, PAGE_SIZE);
1459         hwrec_mem_print("</PAGE>\n");
1460
1461         iounmap(page);
1462
1463         return 0;
1464 }
1465
1466 static int
1467 hwrec_mem_dump_table(u32 dev_p_addr)
1468 {
1469         void __iomem *pt;
1470         u32 i;
1471
1472         pt = ioremap_nocache(dev_p_addr, SGX_MMU_PAGE_SIZE);
1473         if (!pt)
1474                 return -EFAULT;
1475
1476         /* Loop through all the page tables and dump them */
1477         hwrec_mem_print("<TABLE PA:0x%08X>\n", dev_p_addr);
1478         for (i = 0 ; i < 1024 ; i++)
1479                 hwrec_mem_print("0x%08X\n", readl(pt + 4 * i));
1480         hwrec_mem_print("</TABLE>\n");
1481
1482         for (i = 0; i < 1024; i++) {
1483                 u32 addr = readl(pt + 4 * i);
1484
1485                 if (addr & SGX_MMU_PDE_VALID)
1486                         hwrec_mem_dump_page(addr & SGX_MMU_PDE_ADDR_MASK);
1487         }
1488
1489         iounmap(pt);
1490
1491         return 0;
1492 }
1493
1494 static int
1495 hwrec_mem_dump_dir(struct MMU_CONTEXT *context)
1496 {
1497         void __iomem *pd = (void __force __iomem *) context->pvPDCpuVAddr;
1498
1499         int i;
1500
1501         hwrec_mem_print("<DIR PA:0x%08X>\n", context->sPDDevPAddr);
1502
1503         for (i = 0; i < 1024; i++)
1504                 hwrec_mem_print("0x%08X\n", readl(pd + 4 * i));
1505
1506         hwrec_mem_print("</DIR>\n");
1507
1508         for (i = 0; i < 1024; i++) {
1509                 u32 addr = readl(pd + 4 * i);
1510
1511                 if (addr & SGX_MMU_PDE_VALID)
1512                         hwrec_mem_dump_table(addr & SGX_MMU_PDE_ADDR_MASK);
1513         }
1514
1515         return 0;
1516 }
1517
1518 int
1519 mmu_hwrec_mem_dump(struct PVRSRV_SGXDEV_INFO *psDevInfo)
1520 {
1521         struct MMU_CONTEXT *context = psDevInfo->pvMMUContextList;
1522         u32 page_dir;
1523
1524         page_dir = readl(psDevInfo->pvRegsBaseKM + EUR_CR_BIF_DIR_LIST_BASE0);
1525
1526         while (context) {
1527                 if (context->sPDDevPAddr.uiAddr == page_dir)
1528                         break;
1529
1530                 context = context->psNext;
1531         }
1532
1533         if (!context) {
1534                 pr_err("Unable to find matching context for page directory"
1535                        " 0x%08X\n", page_dir);
1536                 return -EFAULT;
1537         }
1538
1539         return hwrec_mem_dump_dir(context);
1540 }
1541
1542 #endif /* CONFIG_DEBUG_FS && CONFIG_PVR_DEBUG */