gpu: pvr: Check OSAllocMem return value.
[sgx.git] / pvr / mmu.c
1 /**********************************************************************
2  *
3  * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope it will be useful but, except
10  * as otherwise stated in writing, without any warranty; without even the
11  * implied warranty of merchantability or fitness for a particular purpose.
12  * See the GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License along with
15  * this program; if not, write to the Free Software Foundation, Inc.,
16  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17  *
18  * The full GNU General Public License is included in this distribution in
19  * the file called "COPYING".
20  *
21  * Contact Information:
22  * Imagination Technologies Ltd. <gpl-support@imgtec.com>
23  * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
24  *
25  ******************************************************************************/
26
27 #include "sgxdefs.h"
28 #include "sgxmmu.h"
29 #include "services_headers.h"
30 #include "buffer_manager.h"
31 #include "hash.h"
32 #include "ra.h"
33 #include "pdump_km.h"
34 #include "sgxapi_km.h"
35 #include "sgx_bridge_km.h"
36 #include "sgxinfo.h"
37 #include "sgxinfokm.h"
38 #include "mmu.h"
39
40 #define UINT32_MAX_VALUE        0xFFFFFFFFUL
41
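/*
 * Per-page-table bookkeeping: the OS memory handle and kernel mapping of
 * the page-table page, plus a count of valid PTEs so the table can be
 * freed once it no longer maps anything.
 */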
42 struct MMU_PT_INFO {
43         void *hPTPageOSMemHandle;
44         void *PTPageCpuVAddr;
45         u32 ui32ValidPTECount;
46 };
47
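/*
 * One MMU context per device memory context: the page directory (kernel
 * mapping, device physical address and OS handle), one MMU_PT_INFO slot
 * per page directory entry, and a link in the device's context list.
 */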
48 struct MMU_CONTEXT {
49         struct PVRSRV_DEVICE_NODE *psDeviceNode;
50         void *pvPDCpuVAddr;
51         struct IMG_DEV_PHYADDR sPDDevPAddr;
52         void *hPDOSMemHandle;
53         struct MMU_PT_INFO *apsPTInfoList[1024];
54         struct PVRSRV_SGXDEV_INFO *psDevInfo;
55         struct MMU_CONTEXT *psNext;
56 };
57
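/*
 * An MMU heap ties a device arena to an MMU context: it records the range
 * of page-table entries covering the arena and the RA arena from which
 * device virtual addresses are allocated.
 */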
58 struct MMU_HEAP {
59         struct MMU_CONTEXT *psMMUContext;
60
61         u32 ui32PTBaseIndex;
62         u32 ui32PTPageCount;
63         u32 ui32PTEntryCount;
64
65         struct RA_ARENA *psVMArena;
66
67         struct DEV_ARENA_DESCRIPTOR *psDevArena;
68 };
69
70
71 #if defined(PDUMP)
72 static void MMU_PDumpPageTables(struct MMU_HEAP *pMMUHeap,
73                     struct IMG_DEV_VIRTADDR DevVAddr, size_t uSize,
74                     IMG_BOOL bForUnmap, void *hUniqueTag);
75 #endif
76
77 #define PAGE_TEST                                       0
78
79
80 void MMU_InvalidateDirectoryCache(struct PVRSRV_SGXDEV_INFO *psDevInfo)
81 {
82         psDevInfo->ui32CacheControl |= SGX_BIF_INVALIDATE_PDCACHE;
83 }
84
85 static void MMU_InvalidatePageTableCache(struct PVRSRV_SGXDEV_INFO *psDevInfo)
86 {
87         psDevInfo->ui32CacheControl |= SGX_BIF_INVALIDATE_PTCACHE;
88 }
89
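/*
 * Work out the page-table geometry for a heap: the number of PTEs needed
 * to cover the device arena, the first PT index implied by the arena's
 * base device virtual address, and the number of page-table pages.  No
 * memory is allocated here; page tables are created on demand by
 * _DeferredAllocPagetables().
 */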
90 static IMG_BOOL _AllocPageTables(struct MMU_HEAP *pMMUHeap)
91 {
92         PVR_DPF(PVR_DBG_MESSAGE, "_AllocPageTables()");
93
94         PVR_ASSERT(pMMUHeap != NULL);
95         PVR_ASSERT(HOST_PAGESIZE() == SGX_MMU_PAGE_SIZE);
96
97         if (pMMUHeap == NULL) {
98                 PVR_DPF(PVR_DBG_ERROR, "_AllocPageTables: invalid parameter");
99                 return IMG_FALSE;
100         }
101
102         pMMUHeap->ui32PTEntryCount =
103             pMMUHeap->psDevArena->ui32Size >> SGX_MMU_PAGE_SHIFT;
104
105         pMMUHeap->ui32PTBaseIndex =
106             (pMMUHeap->psDevArena->BaseDevVAddr.
107              uiAddr & (SGX_MMU_PD_MASK | SGX_MMU_PT_MASK)) >>
108                                                         SGX_MMU_PAGE_SHIFT;
109
110         pMMUHeap->ui32PTPageCount =
111             (pMMUHeap->ui32PTEntryCount + SGX_MMU_PT_SIZE - 1) >>
112                                                         SGX_MMU_PT_SHIFT;
113
114         return IMG_TRUE;
115 }
116
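/*
 * Free one page table of a heap.  The matching page directory entry is
 * cleared in every MMU context for shared heaps, or only in the owning
 * context for per-context/kernel heaps, then the page-table page is
 * zeroed and returned to the OS or to the local device memory arena and
 * its MMU_PT_INFO record is released.
 */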
117 static void _DeferredFreePageTable(struct MMU_HEAP *pMMUHeap, u32 ui32PTIndex)
118 {
119         u32 *pui32PDEntry;
120         u32 i;
121         u32 ui32PDIndex;
122         struct SYS_DATA *psSysData;
123         struct MMU_PT_INFO **ppsPTInfoList;
124
125         if (SysAcquireData(&psSysData) != PVRSRV_OK) {
126                 PVR_DPF(PVR_DBG_ERROR, "_DeferredFreePageTable: "
127                                         "ERROR call to SysAcquireData failed");
128                 return;
129         }
130
131         ui32PDIndex =
132             pMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT +
133                                                           SGX_MMU_PT_SHIFT);
134
135         ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
136
137         {
138                 PVR_ASSERT(ppsPTInfoList[ui32PTIndex] == NULL ||
139                            ppsPTInfoList[ui32PTIndex]->ui32ValidPTECount ==
140                                                                          0);
141         }
142
143         PDUMPCOMMENT("Free page table (page count == %08X)",
144                      pMMUHeap->ui32PTPageCount);
145         if (ppsPTInfoList[ui32PTIndex]
146             && ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr)
147                 PDUMPFREEPAGETABLE(PVRSRV_DEVICE_TYPE_SGX,
148                                    ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr,
149                                    SGX_MMU_PAGE_SIZE, PDUMP_PT_UNIQUETAG);
150
151         switch (pMMUHeap->psDevArena->DevMemHeapType) {
152         case DEVICE_MEMORY_HEAP_SHARED:
153         case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
154                 {
155                         struct MMU_CONTEXT *psMMUContext =
156                           (struct MMU_CONTEXT *)
157                             pMMUHeap->psMMUContext->psDevInfo->pvMMUContextList;
158
159                         while (psMMUContext) {
160                                 pui32PDEntry =
161                                     (u32 *) psMMUContext->pvPDCpuVAddr;
162                                 pui32PDEntry += ui32PDIndex;
163                                 pui32PDEntry[ui32PTIndex] = 0;
164                                 PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX,
165                                           (void *) &
166                                           pui32PDEntry[ui32PTIndex],
167                                           sizeof(u32), 0, IMG_FALSE,
168                                           PDUMP_PT_UNIQUETAG,
169                                           PDUMP_PT_UNIQUETAG);
170                                 psMMUContext = psMMUContext->psNext;
171                         }
172                         break;
173                 }
174         case DEVICE_MEMORY_HEAP_PERCONTEXT:
175         case DEVICE_MEMORY_HEAP_KERNEL:
176                 {
177
178                         pui32PDEntry =
179                             (u32 *) pMMUHeap->psMMUContext->pvPDCpuVAddr;
180                         pui32PDEntry += ui32PDIndex;
181                         pui32PDEntry[ui32PTIndex] = 0;
182                         PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX,
183                                   (void *) &pui32PDEntry[ui32PTIndex],
184                                   sizeof(u32), 0, IMG_FALSE,
185                                   PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
186                         break;
187                 }
188         default:
189                 {
190                         PVR_DPF(PVR_DBG_ERROR,
191                         "_DeferredFreePageTable: ERROR invalid heap type");
192                         return;
193                 }
194         }
195
196         if (ppsPTInfoList[ui32PTIndex] != NULL) {
197                 if (ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr != NULL) {
198                         u32 *pui32Tmp;
199
200                         pui32Tmp =
201                             (u32 *) ppsPTInfoList[ui32PTIndex]->
202                             PTPageCpuVAddr;
203
204                         for (i = 0;
205                              (i < pMMUHeap->ui32PTEntryCount) && (i < 1024);
206                              i++)
207                                 pui32Tmp[i] = 0;
208
209                         if (pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->
210                             psLocalDevMemArena == NULL) {
211                                 OSFreePages(PVRSRV_HAP_WRITECOMBINE |
212                                             PVRSRV_HAP_KERNEL_ONLY,
213                                             SGX_MMU_PAGE_SIZE,
214                                             ppsPTInfoList[ui32PTIndex]->
215                                                     PTPageCpuVAddr,
216                                             ppsPTInfoList[ui32PTIndex]->
217                                                     hPTPageOSMemHandle);
218                         } else {
219                                 struct IMG_SYS_PHYADDR sSysPAddr;
220                                 struct IMG_CPU_PHYADDR sCpuPAddr;
221
222                                 sCpuPAddr =
223                                     OSMapLinToCPUPhys(ppsPTInfoList
224                                                       [ui32PTIndex]->
225                                                       PTPageCpuVAddr);
226                                 sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);
227
228                                 OSUnMapPhysToLin((void __force __iomem *)
229                                                    ppsPTInfoList[ui32PTIndex]->
230                                                         PTPageCpuVAddr,
231                                                  SGX_MMU_PAGE_SIZE,
232                                                  PVRSRV_HAP_WRITECOMBINE |
233                                                          PVRSRV_HAP_KERNEL_ONLY,
234                                                  ppsPTInfoList[ui32PTIndex]->
235                                                          hPTPageOSMemHandle);
236
237                                 RA_Free(pMMUHeap->psDevArena->
238                                                 psDeviceMemoryHeapInfo->
239                                                         psLocalDevMemArena,
240                                         sSysPAddr.uiAddr, IMG_FALSE);
241                         }
242
243                         pMMUHeap->ui32PTEntryCount -= i;
244                 } else {
245                         pMMUHeap->ui32PTEntryCount -= 1024;
246                 }
247
248                 OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
249                           sizeof(struct MMU_PT_INFO),
250                           ppsPTInfoList[ui32PTIndex], NULL);
251                 ppsPTInfoList[ui32PTIndex] = NULL;
252         } else {
253                 pMMUHeap->ui32PTEntryCount -= 1024;
254         }
255
256         PDUMPCOMMENT("Finished free page table (page count == %08X)",
257                      pMMUHeap->ui32PTPageCount);
258 }
259
260 static void _DeferredFreePageTables(struct MMU_HEAP *pMMUHeap)
261 {
262         u32 i;
263
264         for (i = 0; i < pMMUHeap->ui32PTPageCount; i++)
265                 _DeferredFreePageTable(pMMUHeap, i);
266         MMU_InvalidateDirectoryCache(pMMUHeap->psMMUContext->psDevInfo);
267 }
268
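/*
 * Make sure page tables exist for the range [DevVAddr, DevVAddr + ui32Size).
 * Missing MMU_PT_INFO records and page-table pages are allocated on demand
 * (from OS pages or from the local device memory arena) and the
 * corresponding page directory entries are written, in every context for
 * shared heaps, before the directory cache is invalidated.
 */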
269 static IMG_BOOL _DeferredAllocPagetables(struct MMU_HEAP *pMMUHeap,
270                                 struct IMG_DEV_VIRTADDR DevVAddr, u32 ui32Size)
271 {
272         u32 ui32PTPageCount;
273         u32 ui32PDIndex;
274         u32 i;
275         u32 *pui32PDEntry;
276         struct MMU_PT_INFO **ppsPTInfoList;
277         struct SYS_DATA *psSysData;
278         struct IMG_DEV_VIRTADDR sHighDevVAddr;
279
280         PVR_ASSERT(DevVAddr.uiAddr < (1 << SGX_FEATURE_ADDRESS_SPACE_SIZE));
281
282         if (SysAcquireData(&psSysData) != PVRSRV_OK)
283                 return IMG_FALSE;
284
285         ui32PDIndex =
286             DevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
287
288         if ((UINT32_MAX_VALUE - DevVAddr.uiAddr) <
289             (ui32Size + (1 << (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT)) - 1)) {
290
291                 sHighDevVAddr.uiAddr = UINT32_MAX_VALUE;
292         } else {
293                 sHighDevVAddr.uiAddr = DevVAddr.uiAddr + ui32Size +
294                                         (1 << (SGX_MMU_PAGE_SHIFT +
295                                                SGX_MMU_PT_SHIFT)) - 1;
296         }
297
298         ui32PTPageCount =
299             sHighDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
300
301         ui32PTPageCount -= ui32PDIndex;
302
303         pui32PDEntry = (u32 *) pMMUHeap->psMMUContext->pvPDCpuVAddr;
304         pui32PDEntry += ui32PDIndex;
305
306         ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
307
308         PDUMPCOMMENT("Alloc page table (page count == %08X)", ui32PTPageCount);
309         PDUMPCOMMENT("Page directory mods (page count == %08X)",
310                      ui32PTPageCount);
311
312         for (i = 0; i < ui32PTPageCount; i++) {
313                 if (ppsPTInfoList[i] == NULL) {
314                         OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
315                                    sizeof(struct MMU_PT_INFO),
316                                    (void **) &ppsPTInfoList[i], NULL);
317                         if (ppsPTInfoList[i] == NULL) {
318                                 PVR_DPF(PVR_DBG_ERROR,
319                                         "_DeferredAllocPagetables: "
320                                         "ERROR call to OSAllocMem failed");
321                                 return IMG_FALSE;
322                         }
323                         OSMemSet(ppsPTInfoList[i], 0,
324                                  sizeof(struct MMU_PT_INFO));
325                 }
326
327                 if (ppsPTInfoList[i]->hPTPageOSMemHandle == NULL &&
328                     ppsPTInfoList[i]->PTPageCpuVAddr == NULL) {
329                         struct IMG_CPU_PHYADDR sCpuPAddr;
330                         struct IMG_DEV_PHYADDR sDevPAddr;
331
332                         PVR_ASSERT(pui32PDEntry[i] == 0);
333
334                         if (pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->
335                             psLocalDevMemArena == NULL) {
336                                 if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE |
337                                                      PVRSRV_HAP_KERNEL_ONLY,
338                                              SGX_MMU_PAGE_SIZE,
339                                              SGX_MMU_PAGE_SIZE,
340                                              (void **)&ppsPTInfoList[i]->
341                                                 PTPageCpuVAddr,
342                                              &ppsPTInfoList[i]->
343                                                 hPTPageOSMemHandle) !=
344                                     PVRSRV_OK) {
345                                         PVR_DPF(PVR_DBG_ERROR,
346                                            "_DeferredAllocPagetables: "
347                                            "ERROR call to OSAllocPages failed");
348                                         return IMG_FALSE;
349                                 }
350
351                                 if (ppsPTInfoList[i]->PTPageCpuVAddr) {
352                                         sCpuPAddr =
353                                             OSMapLinToCPUPhys(ppsPTInfoList[i]->
354                                                               PTPageCpuVAddr);
355                                 } else {
356                                         sCpuPAddr =
357                                             OSMemHandleToCpuPAddr(
358                                                 ppsPTInfoList[i]->
359                                                           hPTPageOSMemHandle,
360                                                 0);
361                                 }
362                                 sDevPAddr =
363                                     SysCpuPAddrToDevPAddr
364                                             (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
365                         } else {
366                                 struct IMG_SYS_PHYADDR sSysPAddr;
367
368                                 if (RA_Alloc(pMMUHeap->psDevArena->
369                                      psDeviceMemoryHeapInfo->psLocalDevMemArena,
370                                      SGX_MMU_PAGE_SIZE, NULL, 0,
371                                      SGX_MMU_PAGE_SIZE,
372                                      &(sSysPAddr.uiAddr)) != IMG_TRUE) {
373                                         PVR_DPF(PVR_DBG_ERROR,
374                                                "_DeferredAllocPagetables: "
375                                                "ERROR call to RA_Alloc failed");
376                                         return IMG_FALSE;
377                                 }
378
379                                 sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
380                                 ppsPTInfoList[i]->PTPageCpuVAddr =
381                                     (void __force *)
382                                     OSMapPhysToLin(sCpuPAddr, SGX_MMU_PAGE_SIZE,
383                                                    PVRSRV_HAP_WRITECOMBINE |
384                                                    PVRSRV_HAP_KERNEL_ONLY,
385                                                    &ppsPTInfoList[i]->
386                                                    hPTPageOSMemHandle);
387                                 if (!ppsPTInfoList[i]->PTPageCpuVAddr) {
388                                         PVR_DPF(PVR_DBG_ERROR,
389                                              "_DeferredAllocPagetables: "
390                                              "ERROR failed to map page tables");
391                                         return IMG_FALSE;
392                                 }
393
394                                 sDevPAddr = SysCpuPAddrToDevPAddr
395                                             (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
396
397                         }
398
399
400                         OSMemSet(ppsPTInfoList[i]->PTPageCpuVAddr, 0,
401                                  SGX_MMU_PAGE_SIZE);
402
403                         PDUMPMALLOCPAGETABLE(PVRSRV_DEVICE_TYPE_SGX,
404                                              ppsPTInfoList[i]->PTPageCpuVAddr,
405                                              SGX_MMU_PAGE_SIZE,
406                                              PDUMP_PT_UNIQUETAG);
407
408                         PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX,
409                                   ppsPTInfoList[i]->PTPageCpuVAddr,
410                                   SGX_MMU_PAGE_SIZE, 0, IMG_TRUE,
411                                   PDUMP_PT_UNIQUETAG, PDUMP_PT_UNIQUETAG);
412
413                         switch (pMMUHeap->psDevArena->DevMemHeapType) {
414                         case DEVICE_MEMORY_HEAP_SHARED:
415                         case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
416                                 {
417                                         struct MMU_CONTEXT *psMMUContext =
418                                             (struct MMU_CONTEXT *)pMMUHeap->
419                                                     psMMUContext->psDevInfo->
420                                                             pvMMUContextList;
421
422                                         while (psMMUContext) {
423                                                 pui32PDEntry =
424                                                     (u32 *)psMMUContext->
425                                                                 pvPDCpuVAddr;
426                                                 pui32PDEntry += ui32PDIndex;
427
428                                                 pui32PDEntry[i] =
429                                                     sDevPAddr.uiAddr |
430                                                         SGX_MMU_PDE_VALID;
431
432                                                 PDUMPMEM2
433                                                     (PVRSRV_DEVICE_TYPE_SGX,
434                                                      (void *)&pui32PDEntry[i],
435                                                      sizeof(u32), 0,
436                                                      IMG_FALSE,
437                                                      PDUMP_PD_UNIQUETAG,
438                                                      PDUMP_PT_UNIQUETAG);
439
440                                                 psMMUContext =
441                                                     psMMUContext->psNext;
442                                         }
443                                         break;
444                                 }
445                         case DEVICE_MEMORY_HEAP_PERCONTEXT:
446                         case DEVICE_MEMORY_HEAP_KERNEL:
447                                 {
448                                         pui32PDEntry[i] = sDevPAddr.uiAddr |
449                                                              SGX_MMU_PDE_VALID;
450
451                                         PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX,
452                                                   (void *)&pui32PDEntry[i],
453                                                   sizeof(u32), 0,
454                                                   IMG_FALSE, PDUMP_PD_UNIQUETAG,
455                                                   PDUMP_PT_UNIQUETAG);
456
457                                         break;
458                                 }
459                         default:
460                                 {
461                                         PVR_DPF(PVR_DBG_ERROR,
462                                                 "_DeferredAllocPagetables: "
463                                                 "ERROR invalid heap type");
464                                         return IMG_FALSE;
465                                 }
466                         }
467
468
469                         MMU_InvalidateDirectoryCache(pMMUHeap->psMMUContext->
470                                                      psDevInfo);
471                 } else {
472
473                         PVR_ASSERT(pui32PDEntry[i] != 0);
474                 }
475         }
476
477         return IMG_TRUE;
478 }
479
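/*
 * Create an MMU context for a device: allocate and zero a page directory,
 * record its kernel mapping, device physical address and OS memory handle,
 * and link the new context onto the device's context list.
 */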
480 enum PVRSRV_ERROR MMU_Initialise(struct PVRSRV_DEVICE_NODE *psDeviceNode,
481                             struct MMU_CONTEXT **ppsMMUContext,
482                             struct IMG_DEV_PHYADDR *psPDDevPAddr)
483 {
484         u32 *pui32Tmp;
485         u32 i;
486         void *pvPDCpuVAddr;
487         struct IMG_DEV_PHYADDR sPDDevPAddr;
488         struct IMG_CPU_PHYADDR sCpuPAddr;
489         struct MMU_CONTEXT *psMMUContext;
490         void *hPDOSMemHandle;
491         struct SYS_DATA *psSysData;
492         struct PVRSRV_SGXDEV_INFO *psDevInfo;
493
494         PVR_DPF(PVR_DBG_MESSAGE, "MMU_Initialise");
495
496         if (SysAcquireData(&psSysData) != PVRSRV_OK) {
497                 PVR_DPF(PVR_DBG_ERROR,
498                          "MMU_Initialise: ERROR call to SysAcquireData failed");
499                 return PVRSRV_ERROR_GENERIC;
500         }
501
502         if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
503                    sizeof(struct MMU_CONTEXT), (void **) &psMMUContext, NULL)
504                         != PVRSRV_OK) {
505                 PVR_DPF(PVR_DBG_ERROR,
506                          "MMU_Initialise: ERROR call to OSAllocMem failed");
507                 return PVRSRV_ERROR_GENERIC;
508         }
509         OSMemSet(psMMUContext, 0, sizeof(struct MMU_CONTEXT));
510
511         psDevInfo = (struct PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
512         psMMUContext->psDevInfo = psDevInfo;
513
514         psMMUContext->psDeviceNode = psDeviceNode;
515
516         if (psDeviceNode->psLocalDevMemArena == NULL) {
517                 if (OSAllocPages
518                     (PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
519                      SGX_MMU_PAGE_SIZE, SGX_MMU_PAGE_SIZE, &pvPDCpuVAddr,
520                      &hPDOSMemHandle) != PVRSRV_OK) {
521                         PVR_DPF(PVR_DBG_ERROR, "MMU_Initialise: "
522                                         "ERROR call to OSAllocPages failed");
523                         return PVRSRV_ERROR_GENERIC;
524                 }
525
526                 if (pvPDCpuVAddr)
527                         sCpuPAddr = OSMapLinToCPUPhys(pvPDCpuVAddr);
528                 else
529                         sCpuPAddr = OSMemHandleToCpuPAddr(hPDOSMemHandle, 0);
530                 sPDDevPAddr =
531                     SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
532         } else {
533                 struct IMG_SYS_PHYADDR sSysPAddr;
534
535                 if (RA_Alloc(psDeviceNode->psLocalDevMemArena,
536                              SGX_MMU_PAGE_SIZE, NULL, 0, SGX_MMU_PAGE_SIZE,
537                              &(sSysPAddr.uiAddr)) != IMG_TRUE) {
538                         PVR_DPF(PVR_DBG_ERROR, "MMU_Initialise: "
539                                         "ERROR call to RA_Alloc failed");
540                         return PVRSRV_ERROR_GENERIC;
541                 }
542
543                 sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
544                 sPDDevPAddr =
545                     SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysPAddr);
546                 pvPDCpuVAddr = (void __force *)
547                     OSMapPhysToLin(sCpuPAddr, SGX_MMU_PAGE_SIZE,
548                                    PVRSRV_HAP_WRITECOMBINE |
549                                    PVRSRV_HAP_KERNEL_ONLY, &hPDOSMemHandle);
550                 if (!pvPDCpuVAddr) {
551                         PVR_DPF(PVR_DBG_ERROR, "MMU_Initialise: "
552                                         "ERROR failed to map page tables");
553                         return PVRSRV_ERROR_GENERIC;
554                 }
555         }
556
557         PDUMPCOMMENT("Alloc page directory");
558
559         PDUMPMALLOCPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, pvPDCpuVAddr,
560                              SGX_MMU_PAGE_SIZE, PDUMP_PD_UNIQUETAG);
561
562         if (pvPDCpuVAddr) {
563                 pui32Tmp = (u32 *) pvPDCpuVAddr;
564         } else {
565                 PVR_DPF(PVR_DBG_ERROR,
566                          "MMU_Initialise: pvPDCpuVAddr invalid");
567                 return PVRSRV_ERROR_GENERIC;
568         }
569
570         for (i = 0; i < SGX_MMU_PD_SIZE; i++)
571                 pui32Tmp[i] = 0;
572
573         PDUMPCOMMENT("Page directory contents");
574         PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, pvPDCpuVAddr, SGX_MMU_PAGE_SIZE, 0,
575                   IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
576
577         psMMUContext->pvPDCpuVAddr = pvPDCpuVAddr;
578         psMMUContext->sPDDevPAddr = sPDDevPAddr;
579         psMMUContext->hPDOSMemHandle = hPDOSMemHandle;
580
581         *ppsMMUContext = psMMUContext;
582
583         *psPDDevPAddr = sPDDevPAddr;
584
585         psMMUContext->psNext = (struct MMU_CONTEXT *)
586                                                 psDevInfo->pvMMUContextList;
587         psDevInfo->pvMMUContextList = (void *) psMMUContext;
588
589
590         return PVRSRV_OK;
591 }
592
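/*
 * Tear down an MMU context: zero and free the page directory, unlink the
 * context from the device's context list and release the context structure.
 */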
593 void MMU_Finalise(struct MMU_CONTEXT *psMMUContext)
594 {
595         u32 *pui32Tmp, i;
596         struct SYS_DATA *psSysData;
597         struct MMU_CONTEXT **ppsMMUContext;
598
599         if (SysAcquireData(&psSysData) != PVRSRV_OK) {
600                 PVR_DPF(PVR_DBG_ERROR,
601                          "MMU_Finalise: ERROR call to SysAcquireData failed");
602                 return;
603         }
604
605         PDUMPCOMMENT("Free page directory");
606         PDUMPFREEPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, psMMUContext->pvPDCpuVAddr,
607                            SGX_MMU_PAGE_SIZE, PDUMP_PT_UNIQUETAG);
608
609         pui32Tmp = (u32 *) psMMUContext->pvPDCpuVAddr;
610
611         for (i = 0; i < SGX_MMU_PD_SIZE; i++)
612                 pui32Tmp[i] = 0;
613
614         if (psMMUContext->psDeviceNode->psLocalDevMemArena == NULL) {
615                 OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
616                             SGX_MMU_PAGE_SIZE,
617                             psMMUContext->pvPDCpuVAddr,
618                             psMMUContext->hPDOSMemHandle);
619
620         } else {
621                 struct IMG_SYS_PHYADDR sSysPAddr;
622                 struct IMG_CPU_PHYADDR sCpuPAddr;
623
624                 sCpuPAddr = OSMapLinToCPUPhys(psMMUContext->pvPDCpuVAddr);
625                 sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);
626
627                 OSUnMapPhysToLin((void __iomem __force *)
628                                         psMMUContext->pvPDCpuVAddr,
629                                  SGX_MMU_PAGE_SIZE,
630                                  PVRSRV_HAP_WRITECOMBINE |
631                                                 PVRSRV_HAP_KERNEL_ONLY,
632                                  psMMUContext->hPDOSMemHandle);
633
634                 RA_Free(psMMUContext->psDeviceNode->psLocalDevMemArena,
635                         sSysPAddr.uiAddr, IMG_FALSE);
636
637         }
638
639         PVR_DPF(PVR_DBG_MESSAGE, "MMU_Finalise");
640
641         ppsMMUContext =
642             (struct MMU_CONTEXT **) &psMMUContext->psDevInfo->pvMMUContextList;
643         while (*ppsMMUContext) {
644                 if (*ppsMMUContext == psMMUContext) {
645
646                         *ppsMMUContext = psMMUContext->psNext;
647                         break;
648                 }
649
650                 ppsMMUContext = &((*ppsMMUContext)->psNext);
651         }
652
653         OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct MMU_CONTEXT),
654                   psMMUContext, NULL);
655 }
656
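/*
 * Copy the page directory entries covering a shared heap from the heap's
 * owning context into another context, so that both contexts reference the
 * same page tables.  The directory cache is invalidated if any entry was
 * copied.
 */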
657 void MMU_InsertHeap(struct MMU_CONTEXT *psMMUContext,
658                     struct MMU_HEAP *psMMUHeap)
659 {
660         u32 *pui32PDCpuVAddr = (u32 *)psMMUContext->pvPDCpuVAddr;
661         u32 *pui32KernelPDCpuVAddr = (u32 *)
662                                         psMMUHeap->psMMUContext->pvPDCpuVAddr;
663         u32 ui32PDEntry;
664         IMG_BOOL bInvalidateDirectoryCache = IMG_FALSE;
665
666         pui32PDCpuVAddr +=
667             psMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT +
668                                                            SGX_MMU_PT_SHIFT);
669         pui32KernelPDCpuVAddr +=
670             psMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT +
671                                                            SGX_MMU_PT_SHIFT);
672
673         PDUMPCOMMENT("Page directory shared heap range copy");
674
675         for (ui32PDEntry = 0; ui32PDEntry < psMMUHeap->ui32PTPageCount;
676              ui32PDEntry++) {
677
678                 PVR_ASSERT(pui32PDCpuVAddr[ui32PDEntry] == 0);
679
680                 pui32PDCpuVAddr[ui32PDEntry] =
681                     pui32KernelPDCpuVAddr[ui32PDEntry];
682                 if (pui32PDCpuVAddr[ui32PDEntry]) {
683                         PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX,
684                                   (void *) &pui32PDCpuVAddr[ui32PDEntry],
685                                   sizeof(u32), 0, IMG_FALSE,
686                                   PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
687
688                         bInvalidateDirectoryCache = IMG_TRUE;
689                 }
690         }
691
692         if (bInvalidateDirectoryCache)
693                 MMU_InvalidateDirectoryCache(psMMUContext->psDevInfo);
694 }
695
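/*
 * Invalidate the PTEs for a range of device virtual pages and free any page
 * table whose valid-entry count drops to zero.  MMU_FreePageTables() wraps
 * this routine as the callback handed to RA_Create() in MMU_Create().
 */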
696 static void MMU_UnmapPagesAndFreePTs(struct MMU_HEAP *psMMUHeap,
697                          struct IMG_DEV_VIRTADDR sDevVAddr,
698                          u32 ui32PageCount, void *hUniqueTag)
699 {
700         u32 uPageSize = HOST_PAGESIZE();
701         struct IMG_DEV_VIRTADDR sTmpDevVAddr;
702         u32 i;
703         u32 ui32PDIndex;
704         u32 ui32PTIndex;
705         u32 *pui32Tmp;
706         IMG_BOOL bInvalidateDirectoryCache = IMG_FALSE;
707
708 #if !defined(PDUMP)
709         PVR_UNREFERENCED_PARAMETER(hUniqueTag);
710 #endif
711
712         sTmpDevVAddr = sDevVAddr;
713
714         for (i = 0; i < ui32PageCount; i++) {
715                 struct MMU_PT_INFO **ppsPTInfoList;
716
717                 ui32PDIndex =
718                     sTmpDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT +
719                                             SGX_MMU_PT_SHIFT);
720
721                 ppsPTInfoList =
722                     &psMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
723
724                 {
725                         ui32PTIndex = (sTmpDevVAddr.uiAddr & SGX_MMU_PT_MASK)
726                                                 >> SGX_MMU_PAGE_SHIFT;
727
728                         if (!ppsPTInfoList[0]) {
729                                 PVR_DPF(PVR_DBG_MESSAGE,
730                                         "MMU_UnmapPagesAndFreePTs: "
731                                         "Invalid PT for alloc at VAddr:0x%08lX "
732                                         "(VAddrIni:0x%08lX AllocPage:%u) "
733                                         "PDIdx:%u PTIdx:%u",
734                                          sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr,
735                                          i, ui32PDIndex, ui32PTIndex);
736
737                                 sTmpDevVAddr.uiAddr += uPageSize;
738
739                                 continue;
740                         }
741
742                         pui32Tmp = (u32 *)ppsPTInfoList[0]->PTPageCpuVAddr;
743
744                         if (!pui32Tmp) {
745                                 sTmpDevVAddr.uiAddr += uPageSize;
                                continue;
                        }
746
747                         if (pui32Tmp[ui32PTIndex] & SGX_MMU_PTE_VALID) {
748                                 ppsPTInfoList[0]->ui32ValidPTECount--;
749                         } else {
750                                 PVR_DPF(PVR_DBG_MESSAGE,
751                                          "MMU_UnmapPagesAndFreePTs: "
752                                          "Page is already invalid for alloc at "
753                                          "VAddr:0x%08lX "
754                                          "(VAddrIni:0x%08lX AllocPage:%u) "
755                                          "PDIdx:%u PTIdx:%u",
756                                          sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr,
757                                          i, ui32PDIndex, ui32PTIndex);
758                         }
759
760                         PVR_ASSERT((s32)ppsPTInfoList[0]->ui32ValidPTECount >=
761                                                                         0);
762                         pui32Tmp[ui32PTIndex] = 0;
763                 }
764
765                 if (ppsPTInfoList[0]
766                     && ppsPTInfoList[0]->ui32ValidPTECount == 0) {
767                         _DeferredFreePageTable(psMMUHeap,
768                                                ui32PDIndex - (psMMUHeap->
769                                                    ui32PTBaseIndex >>
770                                                        SGX_MMU_PT_SHIFT));
771                         bInvalidateDirectoryCache = IMG_TRUE;
772                 }
773
774                 sTmpDevVAddr.uiAddr += uPageSize;
775         }
776
777         if (bInvalidateDirectoryCache) {
778                 MMU_InvalidateDirectoryCache(psMMUHeap->psMMUContext->
779                                                              psDevInfo);
780         } else {
781                 MMU_InvalidatePageTableCache(psMMUHeap->psMMUContext->
782                                                              psDevInfo);
783         }
784
785 #if defined(PDUMP)
786         MMU_PDumpPageTables(psMMUHeap, sDevVAddr, uPageSize * ui32PageCount,
787                             IMG_TRUE, hUniqueTag);
788 #endif
789 }
790
791 static void MMU_FreePageTables(void *pvMMUHeap, u32 ui32Start, u32 ui32End,
792                                void *hUniqueTag)
793 {
794         struct MMU_HEAP *pMMUHeap = (struct MMU_HEAP *)pvMMUHeap;
795         struct IMG_DEV_VIRTADDR Start;
796
797         Start.uiAddr = ui32Start;
798
799         MMU_UnmapPagesAndFreePTs(pMMUHeap, Start,
800                                  (ui32End - ui32Start) / SGX_MMU_PAGE_SIZE,
801                                  hUniqueTag);
802 }
803
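/*
 * Create an MMU heap over a device arena: allocate the MMU_HEAP structure,
 * work out its page-table geometry and create the RA arena used for device
 * virtual address allocation.  Returns NULL on failure.
 */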
804 struct MMU_HEAP *MMU_Create(struct MMU_CONTEXT *psMMUContext,
805                             struct DEV_ARENA_DESCRIPTOR *psDevArena,
806                             struct RA_ARENA **ppsVMArena)
807 {
808         struct MMU_HEAP *pMMUHeap;
809         IMG_BOOL bRes;
810
811         PVR_ASSERT(psDevArena != NULL);
812
813         if (psDevArena == NULL) {
814                 PVR_DPF(PVR_DBG_ERROR, "MMU_Create: invalid parameter");
815                 return NULL;
816         }
817
818         if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
819                    sizeof(struct MMU_HEAP), (void **)&pMMUHeap, NULL)
820                         != PVRSRV_OK) {
821                 PVR_DPF(PVR_DBG_ERROR,
822                          "MMU_Create: ERROR call to OSAllocMem failed");
823                 return NULL;
824         }
825
826         pMMUHeap->psMMUContext = psMMUContext;
827         pMMUHeap->psDevArena = psDevArena;
828
829         bRes = _AllocPageTables(pMMUHeap);
830         if (!bRes) {
831                 PVR_DPF(PVR_DBG_ERROR,
832                          "MMU_Create: ERROR call to _AllocPageTables failed");
833                 OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct MMU_HEAP),
834                           pMMUHeap, NULL);
835                 return NULL;
836         }
837
838         pMMUHeap->psVMArena = RA_Create(psDevArena->pszName,
839                                         psDevArena->BaseDevVAddr.uiAddr,
840                                         psDevArena->ui32Size, NULL,
841                                         SGX_MMU_PAGE_SIZE, NULL, NULL,
842                                         MMU_FreePageTables, pMMUHeap);
843
844         if (pMMUHeap->psVMArena == NULL) {
845                 PVR_DPF(PVR_DBG_ERROR,
846                          "MMU_Create: ERROR call to RA_Create failed");
847                 _DeferredFreePageTables(pMMUHeap);
848                 OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct MMU_HEAP),
849                           pMMUHeap, NULL);
850                 return NULL;
851         }
852
853         *ppsVMArena = pMMUHeap->psVMArena;
854
855         return pMMUHeap;
856 }
857
858 void MMU_Delete(struct MMU_HEAP *pMMUHeap)
859 {
860         if (pMMUHeap != NULL) {
861                 PVR_DPF(PVR_DBG_MESSAGE, "MMU_Delete");
862
863                 if (pMMUHeap->psVMArena)
864                         RA_Delete(pMMUHeap->psVMArena);
865                 _DeferredFreePageTables(pMMUHeap);
866
867                 OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct MMU_HEAP),
868                           pMMUHeap, NULL);
869         }
870 }
871
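/*
 * Allocate device virtual address space from the heap (unless the caller
 * supplied the address) and make sure page tables back the range.  If the
 * page tables cannot be allocated, any address taken from the VM arena is
 * returned to it.
 */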
872 IMG_BOOL MMU_Alloc(struct MMU_HEAP *pMMUHeap, size_t uSize, u32 uFlags,
873                    u32 uDevVAddrAlignment, struct IMG_DEV_VIRTADDR *psDevVAddr)
874 {
875         IMG_BOOL bStatus;
876
877         PVR_DPF(PVR_DBG_MESSAGE,
878                  "MMU_Alloc: uSize=0x%x, flags=0x%x, align=0x%x",
879                  uSize, uFlags, uDevVAddrAlignment);
880
881         if ((uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) == 0) {
882                 bStatus = RA_Alloc(pMMUHeap->psVMArena, uSize, NULL, 0,
883                                    uDevVAddrAlignment, &(psDevVAddr->uiAddr));
884                 if (!bStatus) {
885                         PVR_DPF(PVR_DBG_ERROR,
886                                  "MMU_Alloc: RA_Alloc of VMArena failed");
887                         return bStatus;
888                 }
889         }
890
891         bStatus = _DeferredAllocPagetables(pMMUHeap, *psDevVAddr, uSize);
892
893
894         if (!bStatus) {
895                 PVR_DPF(PVR_DBG_ERROR,
896                          "MMU_Alloc: _DeferredAllocPagetables failed");
897                 if ((uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) == 0)
898                         RA_Free(pMMUHeap->psVMArena, psDevVAddr->uiAddr,
899                                 IMG_FALSE);
900         }
901
902         return bStatus;
903 }
904
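/*
 * Return a device virtual range to the heap's VM arena.  The range must lie
 * within the heap's device arena.
 */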
905 void MMU_Free(struct MMU_HEAP *pMMUHeap, struct IMG_DEV_VIRTADDR DevVAddr,
906               u32 ui32Size)
907 {
908         PVR_ASSERT(pMMUHeap != NULL);
909
910         if (pMMUHeap == NULL) {
911                 PVR_DPF(PVR_DBG_ERROR, "MMU_Free: invalid parameter");
912                 return;
913         }
914
915         PVR_DPF(PVR_DBG_MESSAGE,
916                  "MMU_Free: mmu=%08X, dev_vaddr=%08X", pMMUHeap,
917                  DevVAddr.uiAddr);
918
919         if ((DevVAddr.uiAddr >= pMMUHeap->psDevArena->BaseDevVAddr.uiAddr) &&
920             (DevVAddr.uiAddr + ui32Size <=
921              pMMUHeap->psDevArena->BaseDevVAddr.uiAddr +
922              pMMUHeap->psDevArena->ui32Size)) {
923                 RA_Free(pMMUHeap->psVMArena, DevVAddr.uiAddr, IMG_TRUE);
924                 return;
925         }
926
927         PVR_DPF(PVR_DBG_ERROR,
928                  "MMU_Free: Couldn't find DevVAddr %08X in a DevArena",
929                  DevVAddr.uiAddr);
930
931         BUG();
932 }
933
934 void MMU_Enable(struct MMU_HEAP *pMMUHeap)
935 {
936         PVR_UNREFERENCED_PARAMETER(pMMUHeap);
937
938 }
939
940 void MMU_Disable(struct MMU_HEAP *pMMUHeap)
941 {
942         PVR_UNREFERENCED_PARAMETER(pMMUHeap);
943
944 }
945
946 #if defined(PDUMP)
947 static void MMU_PDumpPageTables(struct MMU_HEAP *pMMUHeap,
948                     struct IMG_DEV_VIRTADDR DevVAddr,
949                     size_t uSize, IMG_BOOL bForUnmap, void *hUniqueTag)
950 {
951         u32 ui32NumPTEntries;
952         u32 ui32PTIndex;
953         u32 *pui32PTEntry;
954
955         struct MMU_PT_INFO **ppsPTInfoList;
956         u32 ui32PDIndex;
957         u32 ui32PTDumpCount;
958
959         ui32NumPTEntries =
960             (uSize + SGX_MMU_PAGE_SIZE - 1) >> SGX_MMU_PAGE_SHIFT;
961
962         ui32PDIndex =
963             DevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
964
965         ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
966
967         ui32PTIndex = (DevVAddr.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
968
969         PDUMPCOMMENT("Page table mods (num entries == %08X) %s",
970                      ui32NumPTEntries, bForUnmap ? "(for unmap)" : "");
971
972         while (ui32NumPTEntries > 0) {
973                 struct MMU_PT_INFO *psPTInfo = *ppsPTInfoList++;
974
975                 if (ui32NumPTEntries <= 1024 - ui32PTIndex)
976                         ui32PTDumpCount = ui32NumPTEntries;
977                 else
978                         ui32PTDumpCount = 1024 - ui32PTIndex;
979
980                 if (psPTInfo) {
981                         pui32PTEntry = (u32 *)psPTInfo->PTPageCpuVAddr;
982                         PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX,
983                                   (void *)&pui32PTEntry[ui32PTIndex],
984                                   ui32PTDumpCount * sizeof(u32), 0,
985                                   IMG_FALSE, PDUMP_PT_UNIQUETAG, hUniqueTag);
986                 }
987
988                 ui32NumPTEntries -= ui32PTDumpCount;
989
990                 ui32PTIndex = 0;
991         }
992
993         PDUMPCOMMENT("Finished page table mods %s",
994                      bForUnmap ? "(for unmap)" : "");
995 }
996 #endif
997
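/*
 * Write a single PTE: translate the services memory flags into PTE flags,
 * locate the page table from the PD/PT indices derived from DevVAddr and
 * store the device physical address with the valid bit set.  The page must
 * not already be mapped.
 */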
998 static void MMU_MapPage(struct MMU_HEAP *pMMUHeap,
999             struct IMG_DEV_VIRTADDR DevVAddr,
1000             struct IMG_DEV_PHYADDR DevPAddr, u32 ui32MemFlags)
1001 {
1002         u32 ui32Index;
1003         u32 *pui32Tmp;
1004         u32 ui32MMUFlags = 0;
1005         struct MMU_PT_INFO **ppsPTInfoList;
1006
1007         if (((PVRSRV_MEM_READ | PVRSRV_MEM_WRITE) & ui32MemFlags) ==
1008             (PVRSRV_MEM_READ | PVRSRV_MEM_WRITE))
1009                 ui32MMUFlags = 0;
1010         else if (PVRSRV_MEM_READ & ui32MemFlags)
1011                 ui32MMUFlags |= SGX_MMU_PTE_READONLY;
1012         else if (PVRSRV_MEM_WRITE & ui32MemFlags)
1013                 ui32MMUFlags |= SGX_MMU_PTE_WRITEONLY;
1014
1015         if (PVRSRV_MEM_CACHE_CONSISTENT & ui32MemFlags)
1016                 ui32MMUFlags |= SGX_MMU_PTE_CACHECONSISTENT;
1017
1018         if (PVRSRV_MEM_EDM_PROTECT & ui32MemFlags)
1019                 ui32MMUFlags |= SGX_MMU_PTE_EDMPROTECT;
1020
1021         ui32Index = DevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
1022
1023         ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32Index];
1024
1025         ui32Index = (DevVAddr.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
1026
1027         pui32Tmp = (u32 *) ppsPTInfoList[0]->PTPageCpuVAddr;
1028
1029
1030         if (pui32Tmp[ui32Index] & SGX_MMU_PTE_VALID)
1031                 PVR_DPF(PVR_DBG_ERROR,
1032                                 "MMU_MapPage: "
1033                                 "Page is already valid for alloc at "
1034                                 "VAddr:0x%08lX PDIdx:%u PTIdx:%u",
1035                          DevVAddr.uiAddr,
1036                          DevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT +
1037                                              SGX_MMU_PT_SHIFT), ui32Index);
1038
1039         PVR_ASSERT((pui32Tmp[ui32Index] & SGX_MMU_PTE_VALID) == 0);
1040
1041         ppsPTInfoList[0]->ui32ValidPTECount++;
1042
1043         pui32Tmp[ui32Index] = (DevPAddr.uiAddr & SGX_MMU_PTE_ADDR_MASK)
1044             | SGX_MMU_PTE_VALID | ui32MMUFlags;
1045 }
1046
1047 void MMU_MapScatter(struct MMU_HEAP *pMMUHeap, struct IMG_DEV_VIRTADDR DevVAddr,
1048                     struct IMG_SYS_PHYADDR *psSysAddr, size_t uSize,
1049                     u32 ui32MemFlags, void *hUniqueTag)
1050 {
1051 #if defined(PDUMP)
1052         struct IMG_DEV_VIRTADDR MapBaseDevVAddr;
1053 #endif
1054         u32 uCount, i;
1055         struct IMG_DEV_PHYADDR DevPAddr;
1056
1057         PVR_ASSERT(pMMUHeap != NULL);
1058
1059 #if defined(PDUMP)
1060         MapBaseDevVAddr = DevVAddr;
1061 #else
1062         PVR_UNREFERENCED_PARAMETER(hUniqueTag);
1063 #endif
1064
1065         for (i = 0, uCount = 0; uCount < uSize;
1066              i++, uCount += SGX_MMU_PAGE_SIZE) {
1067                 struct IMG_SYS_PHYADDR sSysAddr;
1068
1069                 sSysAddr = psSysAddr[i];
1070
1071                 DevPAddr =
1072                     SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysAddr);
1073
1074                 MMU_MapPage(pMMUHeap, DevVAddr, DevPAddr, ui32MemFlags);
1075                 DevVAddr.uiAddr += SGX_MMU_PAGE_SIZE;
1076
1077                 PVR_DPF(PVR_DBG_MESSAGE, "MMU_MapScatter: "
1078                                 "devVAddr=%08X, SysAddr=%08X, size=0x%x/0x%x",
1079                          DevVAddr.uiAddr, sSysAddr.uiAddr, uCount, uSize);
1080         }
1081
1082 #if defined(PDUMP)
1083         MMU_PDumpPageTables(pMMUHeap, MapBaseDevVAddr, uSize, IMG_FALSE,
1084                             hUniqueTag);
1085 #endif
1086 }
1087
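/*
 * Map a physically contiguous range of system memory at DevVAddr.  For
 * PVRSRV_MEM_DUMMY allocations the physical address is not advanced, so the
 * same page is mapped at every device virtual page.
 */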
1088 void MMU_MapPages(struct MMU_HEAP *pMMUHeap, struct IMG_DEV_VIRTADDR DevVAddr,
1089                   struct IMG_SYS_PHYADDR SysPAddr, size_t uSize,
1090                   u32 ui32MemFlags, void *hUniqueTag)
1091 {
1092         struct IMG_DEV_PHYADDR DevPAddr;
1093 #if defined(PDUMP)
1094         struct IMG_DEV_VIRTADDR MapBaseDevVAddr;
1095 #endif
1096         u32 uCount;
1097         u32 ui32VAdvance = SGX_MMU_PAGE_SIZE;
1098         u32 ui32PAdvance = SGX_MMU_PAGE_SIZE;
1099
1100         PVR_ASSERT(pMMUHeap != NULL);
1101
1102         PVR_DPF(PVR_DBG_MESSAGE, "MMU_MapPages: "
1103                  "mmu=%08X, devVAddr=%08X, SysPAddr=%08X, size=0x%x",
1104                  pMMUHeap, DevVAddr.uiAddr, SysPAddr.uiAddr, uSize);
1105
1106 #if defined(PDUMP)
1107         MapBaseDevVAddr = DevVAddr;
1108 #else
1109         PVR_UNREFERENCED_PARAMETER(hUniqueTag);
1110 #endif
1111
1112         DevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, SysPAddr);
1113
1114         if (ui32MemFlags & PVRSRV_MEM_DUMMY)
1115                 ui32PAdvance = 0;
1116
1117         for (uCount = 0; uCount < uSize; uCount += ui32VAdvance) {
1118                 MMU_MapPage(pMMUHeap, DevVAddr, DevPAddr, ui32MemFlags);
1119                 DevVAddr.uiAddr += ui32VAdvance;
1120                 DevPAddr.uiAddr += ui32PAdvance;
1121         }
1122
1123 #if defined(PDUMP)
1124         MMU_PDumpPageTables(pMMUHeap, MapBaseDevVAddr, uSize, IMG_FALSE,
1125                             hUniqueTag);
1126 #endif
1127 }
1128
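/*
 * Map an existing CPU allocation, identified either by its linear address
 * or by its OS memory handle, into the device address space page by page,
 * returning the device virtual address of the mapping in pDevVAddr.
 */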
1129 void MMU_MapShadow(struct MMU_HEAP *pMMUHeap,
1130               struct IMG_DEV_VIRTADDR MapBaseDevVAddr,
1131               size_t uByteSize, void *CpuVAddr, void *hOSMemHandle,
1132               struct IMG_DEV_VIRTADDR *pDevVAddr, u32 ui32MemFlags,
1133               void *hUniqueTag)
1134 {
1135         u32 i;
1136         u32 uOffset = 0;
1137         struct IMG_DEV_VIRTADDR MapDevVAddr;
1138         u32 ui32VAdvance = SGX_MMU_PAGE_SIZE;
1139         u32 ui32PAdvance = SGX_MMU_PAGE_SIZE;
1140
1141 #if !defined(PDUMP)
1142         PVR_UNREFERENCED_PARAMETER(hUniqueTag);
1143 #endif
1144
1145         PVR_DPF(PVR_DBG_MESSAGE,
1146                  "MMU_MapShadow: %08X, 0x%x, %08X",
1147                  MapBaseDevVAddr.uiAddr, uByteSize, CpuVAddr);
1148
1149         PVR_ASSERT(((u32) CpuVAddr & (SGX_MMU_PAGE_SIZE - 1)) == 0);
1150         PVR_ASSERT(((u32) uByteSize & (SGX_MMU_PAGE_SIZE - 1)) == 0);
1151         pDevVAddr->uiAddr = MapBaseDevVAddr.uiAddr;
1152
1153         if (ui32MemFlags & PVRSRV_MEM_DUMMY)
1154                 ui32PAdvance = 0;
1155
1156         MapDevVAddr = MapBaseDevVAddr;
1157         for (i = 0; i < uByteSize; i += ui32VAdvance) {
1158                 struct IMG_CPU_PHYADDR CpuPAddr;
1159                 struct IMG_DEV_PHYADDR DevPAddr;
1160
1161                 if (CpuVAddr)
1162                         CpuPAddr =
1163                             OSMapLinToCPUPhys((void *)((u32)CpuVAddr +
1164                                                                     uOffset));
1165                 else
1166                         CpuPAddr = OSMemHandleToCpuPAddr(hOSMemHandle, uOffset);
1167                 DevPAddr =
1168                     SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, CpuPAddr);
1169
1170                 PVR_DPF(PVR_DBG_MESSAGE, "0x%x: CpuVAddr=%08X, "
1171                                 "CpuPAddr=%08X, DevVAddr=%08X, DevPAddr=%08X",
1172                          uOffset, (u32)CpuVAddr + uOffset, CpuPAddr.uiAddr,
1173                          MapDevVAddr.uiAddr, DevPAddr.uiAddr);
1174
1175                 MMU_MapPage(pMMUHeap, MapDevVAddr, DevPAddr, ui32MemFlags);
1176
1177                 MapDevVAddr.uiAddr += ui32VAdvance;
1178                 uOffset += ui32PAdvance;
1179         }
1180
1181 #if defined(PDUMP)
1182         MMU_PDumpPageTables(pMMUHeap, MapBaseDevVAddr, uByteSize, IMG_FALSE,
1183                             hUniqueTag);
1184 #endif
1185 }
1186
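/*
 * Invalidate the PTEs for ui32PageCount pages starting at sDevVAddr and
 * flush the page-table cache.  Unlike MMU_UnmapPagesAndFreePTs(), the page
 * tables themselves are left in place.
 */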
1187 void MMU_UnmapPages(struct MMU_HEAP *psMMUHeap,
1188                    struct IMG_DEV_VIRTADDR sDevVAddr, u32 ui32PageCount,
1189                    void *hUniqueTag)
1190 {
1191         u32 uPageSize = HOST_PAGESIZE();
1192         struct IMG_DEV_VIRTADDR sTmpDevVAddr;
1193         u32 i;
1194         u32 ui32PDIndex;
1195         u32 ui32PTIndex;
1196         u32 *pui32Tmp;
1197
1198 #if !defined(PDUMP)
1199         PVR_UNREFERENCED_PARAMETER(hUniqueTag);
1200 #endif
1201
1202         sTmpDevVAddr = sDevVAddr;
1203
1204         for (i = 0; i < ui32PageCount; i++) {
1205                 struct MMU_PT_INFO **ppsPTInfoList;
1206
1207                 ui32PDIndex = sTmpDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT +
1208                                                       SGX_MMU_PT_SHIFT);
1209
1210                 ppsPTInfoList = &psMMUHeap->psMMUContext->
1211                                                 apsPTInfoList[ui32PDIndex];
1212
1213                 ui32PTIndex = (sTmpDevVAddr.uiAddr & SGX_MMU_PT_MASK) >>
1214                                                         SGX_MMU_PAGE_SHIFT;
1215
1216                 if (!ppsPTInfoList[0]) {
1217                         PVR_DPF(PVR_DBG_ERROR,
1218                                 "MMU_UnmapPages: "
1219                                 "ERROR Invalid PT for alloc at VAddr:0x%08lX "
1220                                 "(VAddrIni:0x%08lX AllocPage:%u) PDIdx:%u "
1221                                 "PTIdx:%u",
1222                                  sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr, i,
1223                                  ui32PDIndex, ui32PTIndex);
1224
1225                         sTmpDevVAddr.uiAddr += uPageSize;
1226
1227                         continue;
1228                 }
1229
1230                 pui32Tmp = (u32 *)ppsPTInfoList[0]->PTPageCpuVAddr;
1231
1232                 if (pui32Tmp[ui32PTIndex] & SGX_MMU_PTE_VALID)
1233                         ppsPTInfoList[0]->ui32ValidPTECount--;
1234                 else
1235                         PVR_DPF(PVR_DBG_ERROR,
1236                                 "MMU_UnmapPages: Page is already invalid "
1237                                 "for alloc at VAddr:0x%08lX "
1238                                 "(VAddrIni:0x%08lX AllocPage:%u) "
1239                                 "PDIdx:%u PTIdx:%u",
1240                                  sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr, i,
1241                                  ui32PDIndex, ui32PTIndex);
1242
1243                 PVR_ASSERT((s32) ppsPTInfoList[0]->ui32ValidPTECount >= 0);
1244
1245                 pui32Tmp[ui32PTIndex] = 0;
1246
1247                 sTmpDevVAddr.uiAddr += uPageSize;
1248         }
1249
1250         MMU_InvalidatePageTableCache(psMMUHeap->psMMUContext->psDevInfo);
1251
1252 #if defined(PDUMP)
1253         MMU_PDumpPageTables(psMMUHeap, sDevVAddr, uPageSize * ui32PageCount,
1254                             IMG_TRUE, hUniqueTag);
1255 #endif
1256 }
1257
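/*
 * Look up the device physical address mapped at a device virtual page
 * address.  Returns a zero address if no page table covers the page.
 */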
1258 struct IMG_DEV_PHYADDR MMU_GetPhysPageAddr(struct MMU_HEAP *pMMUHeap,
1259                                         struct IMG_DEV_VIRTADDR sDevVPageAddr)
1260 {
1261         u32 *pui32PageTable;
1262         u32 ui32Index;
1263         struct IMG_DEV_PHYADDR sDevPAddr;
1264         struct MMU_PT_INFO **ppsPTInfoList;
1265
1266         ui32Index = sDevVPageAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT +
1267                                              SGX_MMU_PT_SHIFT);
1268
1269         ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32Index];
1270         if (!ppsPTInfoList[0]) {
1271                 PVR_DPF(PVR_DBG_ERROR,
1272                          "MMU_GetPhysPageAddr: Not mapped in at 0x%08x",
1273                          sDevVPageAddr.uiAddr);
1274                 sDevPAddr.uiAddr = 0;
1275                 return sDevPAddr;
1276         }
1277
1278         ui32Index =
1279             (sDevVPageAddr.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
1280
1281         pui32PageTable = (u32 *) ppsPTInfoList[0]->PTPageCpuVAddr;
1282
1283         sDevPAddr.uiAddr = pui32PageTable[ui32Index];
1284
1285         sDevPAddr.uiAddr &= SGX_MMU_PTE_ADDR_MASK;
1286
1287         return sDevPAddr;
1288 }
1289
1290 struct IMG_DEV_PHYADDR MMU_GetPDDevPAddr(struct MMU_CONTEXT *pMMUContext)
1291 {
1292         return pMMUContext->sPDDevPAddr;
1293 }
1294
1295 enum PVRSRV_ERROR SGXGetPhysPageAddrKM(void *hDevMemHeap,
1296                                       struct IMG_DEV_VIRTADDR sDevVAddr,
1297                                       struct IMG_DEV_PHYADDR *pDevPAddr,
1298                                       struct IMG_CPU_PHYADDR *pCpuPAddr)
1299 {
1300         struct MMU_HEAP *pMMUHeap;
1301         struct IMG_DEV_PHYADDR DevPAddr;
1302
1303         pMMUHeap = (struct MMU_HEAP *)BM_GetMMUHeap(hDevMemHeap);
1304
1305         DevPAddr = MMU_GetPhysPageAddr(pMMUHeap, sDevVAddr);
1306         pCpuPAddr->uiAddr = DevPAddr.uiAddr;
1307         pDevPAddr->uiAddr = DevPAddr.uiAddr;
1308
1309         return (pDevPAddr->uiAddr != 0) ?
1310                 PVRSRV_OK : PVRSRV_ERROR_INVALID_PARAMS;
1311 }
1312
1313 enum PVRSRV_ERROR SGXGetMMUPDAddrKM(void *hDevCookie,
1314                                void *hDevMemContext,
1315                                struct IMG_DEV_PHYADDR *psPDDevPAddr)
1316 {
1317         if (!hDevCookie || !hDevMemContext || !psPDDevPAddr)
1318                 return PVRSRV_ERROR_INVALID_PARAMS;
1319
1320         *psPDDevPAddr =
1321             ((struct BM_CONTEXT *)hDevMemContext)->psMMUContext->sPDDevPAddr;
1322
1323         return PVRSRV_OK;
1324 }
1325
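/*
 * Allocate the three pages used across a BIF reset (a page directory, a
 * page table and a data page filled with 0xDB) and record their device
 * physical addresses and kernel mappings in psDevInfo.
 */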
1326 enum PVRSRV_ERROR MMU_BIFResetPDAlloc(struct PVRSRV_SGXDEV_INFO *psDevInfo)
1327 {
1328         enum PVRSRV_ERROR eError;
1329         struct SYS_DATA *psSysData;
1330         struct RA_ARENA *psLocalDevMemArena;
1331         void *hOSMemHandle = NULL;
1332         u8 *pui8MemBlock = NULL;
1333         struct IMG_SYS_PHYADDR sMemBlockSysPAddr;
1334         struct IMG_CPU_PHYADDR sMemBlockCpuPAddr;
1335
1336         eError = SysAcquireData(&psSysData);
1337         if (eError != PVRSRV_OK) {
1338                 PVR_DPF(PVR_DBG_ERROR,
1339                    "MMU_BIFResetPDAlloc: ERROR call to SysAcquireData failed");
1340                 return eError;
1341         }
1342
1343         psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];
1344
1345         if (psLocalDevMemArena == NULL) {
1346
1347                 eError =
1348                     OSAllocPages(PVRSRV_HAP_WRITECOMBINE |
1349                                  PVRSRV_HAP_KERNEL_ONLY, 3 * SGX_MMU_PAGE_SIZE,
1350                                  SGX_MMU_PAGE_SIZE, (void **)&pui8MemBlock,
1351                                  &hOSMemHandle);
1352                 if (eError != PVRSRV_OK) {
1353                         PVR_DPF(PVR_DBG_ERROR, "MMU_BIFResetPDAlloc: "
1354                                         "ERROR call to OSAllocPages failed");
1355                         return eError;
1356                 }
1357                 sMemBlockCpuPAddr = OSMapLinToCPUPhys(pui8MemBlock);
1358         } else {
1359                 if (RA_Alloc(psLocalDevMemArena, 3 * SGX_MMU_PAGE_SIZE,
1360                              NULL, 0, SGX_MMU_PAGE_SIZE,
1361                              &(sMemBlockSysPAddr.uiAddr)) != IMG_TRUE) {
1362                         PVR_DPF(PVR_DBG_ERROR, "MMU_BIFResetPDAlloc: "
1363                                         "ERROR call to RA_Alloc failed");
1364                         return PVRSRV_ERROR_OUT_OF_MEMORY;
1365                 }
1366
1367                 sMemBlockCpuPAddr = SysSysPAddrToCpuPAddr(sMemBlockSysPAddr);
1368                 pui8MemBlock = (void __force *)OSMapPhysToLin(sMemBlockCpuPAddr,
1369                                               SGX_MMU_PAGE_SIZE * 3,
1370                                               PVRSRV_HAP_WRITECOMBINE |
1371                                               PVRSRV_HAP_KERNEL_ONLY,
1372                                               &hOSMemHandle);
1373                 if (!pui8MemBlock) {
1374                         PVR_DPF(PVR_DBG_ERROR, "MMU_BIFResetPDAlloc: "
1375                                         "ERROR failed to map page tables");
1376                         return PVRSRV_ERROR_BAD_MAPPING;
1377                 }
1378         }
1379
1380         psDevInfo->hBIFResetPDOSMemHandle = hOSMemHandle;
1381         psDevInfo->sBIFResetPDDevPAddr =
1382             SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sMemBlockCpuPAddr);
1383         psDevInfo->sBIFResetPTDevPAddr.uiAddr =
1384             psDevInfo->sBIFResetPDDevPAddr.uiAddr + SGX_MMU_PAGE_SIZE;
1385         psDevInfo->sBIFResetPageDevPAddr.uiAddr =
1386             psDevInfo->sBIFResetPTDevPAddr.uiAddr + SGX_MMU_PAGE_SIZE;
1387         psDevInfo->pui32BIFResetPD = (u32 *) pui8MemBlock;
1388         psDevInfo->pui32BIFResetPT =
1389             (u32 *) (pui8MemBlock + SGX_MMU_PAGE_SIZE);
1390
1391         OSMemSet(psDevInfo->pui32BIFResetPD, 0, SGX_MMU_PAGE_SIZE);
1392         OSMemSet(psDevInfo->pui32BIFResetPT, 0, SGX_MMU_PAGE_SIZE);
1393
1394         OSMemSet(pui8MemBlock + (2 * SGX_MMU_PAGE_SIZE), 0xDB,
1395                  SGX_MMU_PAGE_SIZE);
1396
1397         return PVRSRV_OK;
1398 }
1399
1400 void MMU_BIFResetPDFree(struct PVRSRV_SGXDEV_INFO *psDevInfo)
1401 {
1402         enum PVRSRV_ERROR eError;
1403         struct SYS_DATA *psSysData;
1404         struct RA_ARENA *psLocalDevMemArena;
1405         struct IMG_SYS_PHYADDR sPDSysPAddr;
1406
1407         eError = SysAcquireData(&psSysData);
1408         if (eError != PVRSRV_OK) {
1409                 PVR_DPF(PVR_DBG_ERROR, "MMU_BIFResetPDFree: "
1410                                 "ERROR call to SysAcquireData failed");
1411                 return;
1412         }
1413
1414         psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];
1415
1416         if (psLocalDevMemArena == NULL) {
1417                 OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
1418                             3 * SGX_MMU_PAGE_SIZE,
1419                             psDevInfo->pui32BIFResetPD,
1420                             psDevInfo->hBIFResetPDOSMemHandle);
1421         } else {
1422                 OSUnMapPhysToLin((void __force __iomem *)
1423                                         psDevInfo->pui32BIFResetPD,
1424                                  3 * SGX_MMU_PAGE_SIZE,
1425                                  PVRSRV_HAP_WRITECOMBINE |
1426                                         PVRSRV_HAP_KERNEL_ONLY,
1427                                  psDevInfo->hBIFResetPDOSMemHandle);
1428
1429                 sPDSysPAddr =
1430                     SysDevPAddrToSysPAddr(PVRSRV_DEVICE_TYPE_SGX,
1431                                           psDevInfo->sBIFResetPDDevPAddr);
1432                 RA_Free(psLocalDevMemArena, sPDSysPAddr.uiAddr, IMG_FALSE);
1433         }
1434 }
1435
1436 u32 mmu_get_page_dir(struct MMU_CONTEXT *psMMUContext)
1437 {
1438         return psMMUContext->sPDDevPAddr.uiAddr;
1439 }