1 /**********************************************************************
3 * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
9 * This program is distributed in the hope it will be useful but, except
10 * as otherwise stated in writing, without any warranty; without even the
11 * implied warranty of merchantability or fitness for a particular purpose.
12 * See the GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 * The full GNU General Public License is included in this distribution in
19 * the file called "COPYING".
21 * Contact Information:
22 * Imagination Technologies Ltd. <gpl-support@imgtec.com>
23 * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
25 ******************************************************************************/
27 #include <linux/kernel.h>
28 #include <linux/version.h>
30 #include <linux/module.h>
31 #include <linux/vmalloc.h>
32 #include <linux/slab.h>
35 #include <asm/shmparam.h>
36 #include <asm/pgtable.h>
37 #include <linux/sched.h>
38 #include <asm/current.h>
41 #include "servicesint.h"
46 #include "pvr_debug.h"
51 #include "env_perproc.h"
52 #include "bridged_support.h"
/* Serialises all access to the global mmap bookkeeping below. */
54 static struct mutex g_sMMapMutex;
/* Slab cache for struct KV_OFFSET_STRUCT allocations. */
56 static struct kmem_cache *g_psMemmapCache;
/* All registered LinuxMemAreas, and all outstanding offset records. */
57 static LIST_HEAD(g_sMMapAreaList);
58 static LIST_HEAD(g_sMMapOffsetStructList);
59 #if defined(DEBUG_LINUX_MMAP_AREAS)
/* Debug counters: number of registered areas and their total byte size.
 * NOTE(review): the matching #endif is not visible in this view - confirm. */
60 static u32 g_ui32RegisteredAreas;
61 static u32 g_ui32TotalByteSize;
/* mmap offsets (vm_pgoff) in [0, LAST_PHYSICAL_PFN] are real CPU page frame
 * numbers; [FIRST_SPECIAL_PFN, LAST_SPECIAL_PFN] are synthetic offsets that
 * encode handles (see MMapOffsetToHandle/HandleToMMapOffset). */
64 #define LAST_PHYSICAL_PFN 0x7ffffffful
65 #define FIRST_SPECIAL_PFN (LAST_PHYSICAL_PFN + 1)
66 #define LAST_SPECIAL_PFN 0xfffffffful
/* Upper bound set on per-process handle values so handles can be encoded
 * as special PFNs without overflow (see LinuxMMapPerProcessHandleOptions). */
68 #define MAX_MMAP_HANDLE 0x7ffffffful
70 static inline IMG_BOOL PFNIsPhysical(u32 pfn)
72 return pfn <= LAST_PHYSICAL_PFN;
75 static inline IMG_BOOL PFNIsSpecial(u32 pfn)
77 return pfn >= FIRST_SPECIAL_PFN && pfn <= LAST_SPECIAL_PFN;
80 static inline void *MMapOffsetToHandle(u32 pfn)
82 if (PFNIsPhysical(pfn)) {
83 PVR_ASSERT(PFNIsPhysical(pfn));
87 return (void *)(pfn - FIRST_SPECIAL_PFN);
90 static inline u32 HandleToMMapOffset(void *hHandle)
92 u32 ulHandle = (u32) hHandle;
94 if (PFNIsSpecial(ulHandle)) {
95 PVR_ASSERT(PFNIsSpecial(ulHandle));
99 return ulHandle + FIRST_SPECIAL_PFN;
102 static inline IMG_BOOL LinuxMemAreaUsesPhysicalMap(
103 struct LinuxMemArea *psLinuxMemArea)
105 return LinuxMemAreaPhysIsContig(psLinuxMemArea);
108 static inline u32 GetCurrentThreadID(void)
111 return (u32) current->pid;
/*
 * CreateOffsetStruct - allocate and initialise a KV_OFFSET_STRUCT that
 * records a (pending) mmap of psLinuxMemArea at a given offset, and link
 * it onto the area's per-area offset list.  Returns the new record, or
 * presumably NULL on allocation failure (the failure return statement is
 * on a line not visible here - TODO confirm).
 * NOTE(review): the parameter list appears truncated in this view; an
 * ui32Offset parameter is clearly used below.
 */
114 static struct KV_OFFSET_STRUCT *CreateOffsetStruct(struct LinuxMemArea
117 u32 ui32RealByteSize)
119 struct KV_OFFSET_STRUCT *psOffsetStruct;
120 #if defined(CONFIG_PVR_DEBUG) || defined(DEBUG_LINUX_MMAP_AREAS)
/* Area-type name, used only by debug output (and the debug pszName field). */
121 const char *pszName =
122 LinuxMemAreaTypeToString(LinuxMemAreaRootType(psLinuxMemArea));
125 PVR_DPF(PVR_DBG_MESSAGE,
126 "%s(%s, psLinuxMemArea: 0x%p, ui32AllocFlags: 0x%8lx)",
127 __func__, pszName, psLinuxMemArea,
128 psLinuxMemArea->ui32AreaFlags);
/* Sub-allocations of sub-allocations are not expected. */
130 PVR_ASSERT(psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC
131 || LinuxMemAreaRoot(psLinuxMemArea)->eAreaType !=
132 LINUX_MEM_AREA_SUB_ALLOC);
134 PVR_ASSERT(psLinuxMemArea->bMMapRegistered);
136 psOffsetStruct = KMemCacheAllocWrapper(g_psMemmapCache, GFP_KERNEL);
137 if (psOffsetStruct == NULL) {
138 PVR_DPF(PVR_DBG_ERROR, "PVRMMapRegisterArea: "
139 "Couldn't alloc another mapping record from cache");
/* Record the mapping identity: offset, area, user size, and the
 * thread/process that requested it.  Map/ref counters start at zero
 * until the mmap actually happens. */
143 psOffsetStruct->ui32MMapOffset = ui32Offset;
144 psOffsetStruct->psLinuxMemArea = psLinuxMemArea;
145 psOffsetStruct->ui32Mapped = 0;
146 psOffsetStruct->ui32RealByteSize = ui32RealByteSize;
147 psOffsetStruct->ui32TID = GetCurrentThreadID();
148 psOffsetStruct->ui32PID = OSGetCurrentProcessIDKM();
149 psOffsetStruct->bOnMMapList = IMG_FALSE;
150 psOffsetStruct->ui32RefCount = 0;
151 psOffsetStruct->ui32UserVAddr = 0;
152 #if defined(DEBUG_LINUX_MMAP_AREAS)
154 psOffsetStruct->pszName = pszName;
157 list_add_tail(&psOffsetStruct->sAreaItem,
158 &psLinuxMemArea->sMMapOffsetStructList);
160 return psOffsetStruct;
/*
 * DestroyOffsetStruct - unlink an offset record from the per-area list
 * (and from the global mmap list if it is still queued there) and free
 * it back to the slab cache.
 * NOTE(review): callers appear to hold g_sMMapMutex - confirm.
 */
163 static void DestroyOffsetStruct(struct KV_OFFSET_STRUCT *psOffsetStruct)
165 list_del(&psOffsetStruct->sAreaItem);
/* Only on the global list while waiting for the mmap(2) call to arrive. */
167 if (psOffsetStruct->bOnMMapList)
168 list_del(&psOffsetStruct->sMMapItem);
170 PVR_DPF(PVR_DBG_MESSAGE, "%s: Table entry: "
171 "psLinuxMemArea=0x%08lX, CpuPAddr=0x%08lX", __func__,
172 psOffsetStruct->psLinuxMemArea,
173 LinuxMemAreaToCpuPAddr(psOffsetStruct->psLinuxMemArea, 0));
175 KMemCacheFreeWrapper(g_psMemmapCache, psOffsetStruct);
/*
 * DetermineUsersSizeAndByteOffset - compute the page-aligned length the
 * user must mmap (*pui32RealByteSize) and the sub-page offset of the
 * area's start within the first mapped page (*pui32ByteOffset).
 * NOTE(review): the "*pui32RealByteSize =" left-hand side of the final
 * assignment appears on a line elided from this view - confirm.
 */
178 static inline void DetermineUsersSizeAndByteOffset(struct LinuxMemArea
180 u32 *pui32RealByteSize,
181 u32 *pui32ByteOffset)
183 u32 ui32PageAlignmentOffset;
184 struct IMG_CPU_PHYADDR CpuPAddr;
186 CpuPAddr = LinuxMemAreaToCpuPAddr(psLinuxMemArea, 0);
/* Offset of the area's CPU physical address within its page. */
187 ui32PageAlignmentOffset = ADDR_TO_PAGE_OFFSET(CpuPAddr.uiAddr);
189 *pui32ByteOffset = ui32PageAlignmentOffset;
192 PAGE_ALIGN(psLinuxMemArea->ui32ByteSize + ui32PageAlignmentOffset);
/*
 * PVRMMapOSMemHandleToMMapData - resolve a per-process memory handle into
 * the mmap offset/size/byte-offset the client must use with mmap(2).
 * If this process already has an offset record for the area it is reused
 * (ref count bumped); otherwise a new record is created and queued on the
 * global list to await the matching mmap(2) call.  Holds g_sMMapMutex.
 * NOTE(review): several lines (trailing parameters, error-path jumps,
 * closing braces) are elided in this view.
 */
195 enum PVRSRV_ERROR PVRMMapOSMemHandleToMMapData(
196 struct PVRSRV_PER_PROCESS_DATA *psPerProc,
197 void *hMHandle, u32 *pui32MMapOffset,
198 u32 *pui32ByteOffset, u32 *pui32RealByteSize,
201 struct LinuxMemArea *psLinuxMemArea;
202 struct KV_OFFSET_STRUCT *psOffsetStruct;
204 enum PVRSRV_ERROR eError = PVRSRV_ERROR_GENERIC;
206 mutex_lock(&g_sMMapMutex);
/* Handles must fit in the special-PFN encoding (see MAX_MMAP_HANDLE). */
208 PVR_ASSERT(PVRSRVGetMaxHandle(psPerProc->psHandleBase) <=
212 PVRSRVLookupOSMemHandle(psPerProc->psHandleBase, &hOSMemHandle,
214 if (eError != PVRSRV_OK) {
215 PVR_DPF(PVR_DBG_ERROR, "%s: Lookup of handle 0x%lx failed",
221 psLinuxMemArea = (struct LinuxMemArea *)hOSMemHandle;
223 DetermineUsersSizeAndByteOffset(psLinuxMemArea,
224 pui32RealByteSize, pui32ByteOffset);
/* Reuse an existing mapping record belonging to this process, if any. */
226 list_for_each_entry(psOffsetStruct,
227 &psLinuxMemArea->sMMapOffsetStructList, sAreaItem) {
228 if (psPerProc->ui32PID == psOffsetStruct->ui32PID) {
229 PVR_ASSERT(*pui32RealByteSize ==
230 psOffsetStruct->ui32RealByteSize);
232 *pui32MMapOffset = psOffsetStruct->ui32MMapOffset;
233 *pui32UserVAddr = psOffsetStruct->ui32UserVAddr;
234 psOffsetStruct->ui32RefCount++;
/* Choose the mmap offset: real PFN for contiguous areas, otherwise a
 * synthetic offset derived from the handle. */
243 if (LinuxMemAreaUsesPhysicalMap(psLinuxMemArea)) {
244 *pui32MMapOffset = LinuxMemAreaToCpuPFN(psLinuxMemArea, 0);
245 PVR_ASSERT(PFNIsPhysical(*pui32MMapOffset));
247 *pui32MMapOffset = HandleToMMapOffset(hMHandle);
248 PVR_ASSERT(PFNIsSpecial(*pui32MMapOffset));
251 psOffsetStruct = CreateOffsetStruct(psLinuxMemArea, *pui32MMapOffset,
253 if (psOffsetStruct == NULL) {
254 eError = PVRSRV_ERROR_OUT_OF_MEMORY;
/* Queue the record so PVRMMap() can find it when mmap(2) arrives. */
258 list_add_tail(&psOffsetStruct->sMMapItem, &g_sMMapOffsetStructList);
259 psOffsetStruct->bOnMMapList = IMG_TRUE;
260 psOffsetStruct->ui32RefCount++;
264 mutex_unlock(&g_sMMapMutex);
/*
 * PVRMMapReleaseMMapData - drop one reference on the mapping record for
 * hMHandle owned by the current process.  When the count hits zero and a
 * user mapping exists, the caller is told (via *pbMUnmap, *pui32UserVAddr,
 * *pui32RealByteSize) to munmap the range.  Holds g_sMMapMutex.
 * NOTE(review): error-path jumps and closing braces are elided in this
 * view.
 */
269 enum PVRSRV_ERROR PVRMMapReleaseMMapData(
270 struct PVRSRV_PER_PROCESS_DATA *psPerProc,
271 void *hMHandle, IMG_BOOL *pbMUnmap,
272 u32 *pui32RealByteSize, u32 *pui32UserVAddr)
274 struct LinuxMemArea *psLinuxMemArea;
275 struct KV_OFFSET_STRUCT *psOffsetStruct;
277 enum PVRSRV_ERROR eError = PVRSRV_ERROR_GENERIC;
278 u32 ui32PID = OSGetCurrentProcessIDKM();
280 mutex_lock(&g_sMMapMutex);
282 PVR_ASSERT(PVRSRVGetMaxHandle(psPerProc->psHandleBase) <=
285 eError = PVRSRVLookupOSMemHandle(psPerProc->psHandleBase, &hOSMemHandle,
287 if (eError != PVRSRV_OK) {
288 PVR_DPF(PVR_DBG_ERROR, "%s: Lookup of handle 0x%lx failed",
294 psLinuxMemArea = (struct LinuxMemArea *)hOSMemHandle;
/* Find this process's record for the area. */
296 list_for_each_entry(psOffsetStruct,
297 &psLinuxMemArea->sMMapOffsetStructList, sAreaItem) {
298 if (psOffsetStruct->ui32PID == ui32PID) {
/* Releasing with no outstanding references is a caller bug. */
299 if (psOffsetStruct->ui32RefCount == 0) {
300 PVR_DPF(PVR_DBG_ERROR, "%s: Attempt to "
301 "release mmap data with zero reference "
302 "count for offset struct 0x%p, "
304 __func__, psOffsetStruct,
306 eError = PVRSRV_ERROR_GENERIC;
310 psOffsetStruct->ui32RefCount--;
/* Ask for an munmap only on the last release of a live mapping. */
312 *pbMUnmap = (psOffsetStruct->ui32RefCount == 0)
313 && (psOffsetStruct->ui32UserVAddr != 0);
316 (*pbMUnmap) ? psOffsetStruct->ui32UserVAddr : 0;
318 (*pbMUnmap) ? psOffsetStruct->ui32RealByteSize : 0;
/* Fell off the list without a match: no mapping data for this handle. */
325 PVR_DPF(PVR_DBG_ERROR, "%s: Mapping data not found for handle "
326 "0x%lx (memory area 0x%p)",
327 __func__, hMHandle, psLinuxMemArea);
329 eError = PVRSRV_ERROR_GENERIC;
332 mutex_unlock(&g_sMMapMutex);
/*
 * FindOffsetStructByOffset - locate the pending offset record matching an
 * incoming mmap(2) request by (offset, size, PID); for non-physical
 * (handle-encoded) offsets a PID match suffices, while physical offsets
 * additionally require the same thread.
 * NOTE(review): the "not found" return (presumably NULL) is on a line
 * elided from this view.  Caller is expected to hold g_sMMapMutex.
 */
337 static inline struct KV_OFFSET_STRUCT *FindOffsetStructByOffset(u32 ui32Offset,
338 u32 ui32RealByteSize)
340 struct KV_OFFSET_STRUCT *psOffsetStruct;
341 u32 ui32TID = GetCurrentThreadID();
342 u32 ui32PID = OSGetCurrentProcessIDKM();
344 list_for_each_entry(psOffsetStruct, &g_sMMapOffsetStructList,
346 if (ui32Offset == psOffsetStruct->ui32MMapOffset &&
347 ui32RealByteSize == psOffsetStruct->ui32RealByteSize &&
348 psOffsetStruct->ui32PID == ui32PID)
/* Physical offsets can collide between threads, so match the TID too. */
349 if (!PFNIsPhysical(ui32Offset) ||
350 psOffsetStruct->ui32TID == ui32TID)
351 return psOffsetStruct;
/*
 * DoMapToUser - build the user-space mapping for ps_vma.  Sub-allocations
 * recurse into their root area with an adjusted byte offset.  Physically
 * contiguous areas try IO_REMAP_PFN_RANGE first; otherwise (or on
 * fallback) each page is validated and inserted with VM_INSERT_PAGE.
 * Returns IMG_TRUE on success - TODO confirm; success/failure returns are
 * on lines elided from this view, as are several braces.
 */
357 static IMG_BOOL DoMapToUser(struct LinuxMemArea *psLinuxMemArea,
358 struct vm_area_struct *ps_vma, u32 ui32ByteOffset)
/* Sub-allocation: map the root area at the combined offset instead. */
362 if (psLinuxMemArea->eAreaType == LINUX_MEM_AREA_SUB_ALLOC)
363 return DoMapToUser(LinuxMemAreaRoot(psLinuxMemArea), ps_vma,
364 psLinuxMemArea->uData.sSubAlloc.
365 ui32ByteOffset + ui32ByteOffset);
367 ui32ByteSize = ps_vma->vm_end - ps_vma->vm_start;
368 PVR_ASSERT(ADDR_TO_PAGE_OFFSET(ui32ByteSize) == 0);
/* Contiguous-physical fast path: a physical vm_pgoff implies the area is
 * contiguous and the offset equals the area's PFN. */
370 if (PFNIsPhysical(ps_vma->vm_pgoff)) {
373 PVR_ASSERT(LinuxMemAreaPhysIsContig(psLinuxMemArea));
374 PVR_ASSERT(LinuxMemAreaToCpuPFN(psLinuxMemArea, ui32ByteOffset)
375 == ps_vma->vm_pgoff);
378 IO_REMAP_PFN_RANGE(ps_vma, ps_vma->vm_start,
379 ps_vma->vm_pgoff, ui32ByteSize,
380 ps_vma->vm_page_prot);
385 PVR_DPF(PVR_DBG_MESSAGE,
386 "%s: Failed to map contiguous physical address "
387 "range (%d), trying non-contiguous path",
/* Page-by-page path: first validate every PFN in the range... */
393 u32 ui32ByteEnd = ui32ByteOffset + ui32ByteSize;
396 for (ui32PA = ui32ByteOffset; ui32PA < ui32ByteEnd;
397 ui32PA += PAGE_SIZE) {
398 u32 pfn = LinuxMemAreaToCpuPFN(psLinuxMemArea, ui32PA);
400 if (!pfn_valid(pfn)) {
401 PVR_DPF(PVR_DBG_ERROR,
402 "%s: Error - PFN invalid: 0x%lx",
/* ...then insert each page into the VMA in turn. */
408 ulVMAPos = ps_vma->vm_start;
409 for (ui32PA = ui32ByteOffset; ui32PA < ui32ByteEnd;
410 ui32PA += PAGE_SIZE) {
415 pfn = LinuxMemAreaToCpuPFN(psLinuxMemArea, ui32PA);
416 PVR_ASSERT(pfn_valid(pfn));
418 psPage = pfn_to_page(pfn);
420 result = VM_INSERT_PAGE(ps_vma, ulVMAPos, psPage);
422 PVR_DPF(PVR_DBG_ERROR,
423 "%s: Error - VM_INSERT_PAGE failed (%d)",
427 ulVMAPos += PAGE_SIZE;
/*
 * CheckSize - verify that a requested mmap length does not exceed the
 * page-aligned size of the memory area (including its sub-page start
 * offset).  Logs and presumably returns IMG_FALSE on overflow, IMG_TRUE
 * otherwise - the return statements are on lines elided from this view.
 * NOTE(review): the "ui32RealByteSize =" left-hand side of the PAGE_ALIGN
 * assignment is also elided - confirm.
 */
434 static IMG_BOOL CheckSize(struct LinuxMemArea *psLinuxMemArea, u32 ui32ByteSize)
436 struct IMG_CPU_PHYADDR CpuPAddr;
437 u32 ui32PageAlignmentOffset;
438 u32 ui32RealByteSize;
439 CpuPAddr = LinuxMemAreaToCpuPAddr(psLinuxMemArea, 0);
440 ui32PageAlignmentOffset = ADDR_TO_PAGE_OFFSET(CpuPAddr.uiAddr);
442 PAGE_ALIGN(psLinuxMemArea->ui32ByteSize + ui32PageAlignmentOffset);
443 if (ui32RealByteSize < ui32ByteSize) {
444 PVR_DPF(PVR_DBG_ERROR, "Cannot mmap %ld bytes from: "
445 "%-8p %-8p %08lx %-8ld %-24s\n",
446 ui32ByteSize, psLinuxMemArea,
447 LinuxMemAreaToCpuVAddr(psLinuxMemArea),
448 LinuxMemAreaToCpuPAddr(psLinuxMemArea, 0).uiAddr,
449 psLinuxMemArea->ui32ByteSize,
450 LinuxMemAreaTypeToString(psLinuxMemArea->eAreaType));
/*
 * MMapVOpenNoLock - vm_operations .open body, called with g_sMMapMutex
 * already held (see MMapVOpen).  Bumps the VMA's offset-struct map count
 * and warns if the mapping is being duplicated (e.g. across fork despite
 * VM_DONTCOPY being expected).
 */
456 static void MMapVOpenNoLock(struct vm_area_struct *ps_vma)
458 struct KV_OFFSET_STRUCT *psOffsetStruct =
459 (struct KV_OFFSET_STRUCT *)ps_vma->vm_private_data;
461 PVR_ASSERT(psOffsetStruct != NULL);
462 psOffsetStruct->ui32Mapped++;
463 PVR_ASSERT(!psOffsetStruct->bOnMMapList);
/* More than one mapping means the VMA was duplicated - unexpected given
 * VM_DONTCOPY is set at mmap time. */
465 if (psOffsetStruct->ui32Mapped > 1) {
466 PVR_DPF(PVR_DBG_WARNING,
467 "%s: Offset structure 0x%p is being shared "
468 "across processes (psOffsetStruct->ui32Mapped: %lu)",
469 __func__, psOffsetStruct, psOffsetStruct->ui32Mapped);
470 PVR_ASSERT((ps_vma->vm_flags & VM_DONTCOPY) == 0);
472 #if defined(DEBUG_LINUX_MMAP_AREAS)
474 PVR_DPF(PVR_DBG_MESSAGE,
475 "%s: psLinuxMemArea 0x%p, KVAddress 0x%p MMapOffset %ld, ui32Mapped %d",
477 psOffsetStruct->psLinuxMemArea,
478 LinuxMemAreaToCpuVAddr(psOffsetStruct->psLinuxMemArea),
479 psOffsetStruct->ui32MMapOffset, psOffsetStruct->ui32Mapped);
484 static void MMapVOpen(struct vm_area_struct *ps_vma)
486 mutex_lock(&g_sMMapMutex);
487 MMapVOpenNoLock(ps_vma);
488 mutex_unlock(&g_sMMapMutex);
/*
 * MMapVCloseNoLock - vm_operations .close body, called with g_sMMapMutex
 * already held (see MMapVClose).  Drops the map count and, on the last
 * unmap, destroys the offset record (warning if references are still
 * outstanding) and clears the VMA's private data.
 */
491 static void MMapVCloseNoLock(struct vm_area_struct *ps_vma)
493 struct KV_OFFSET_STRUCT *psOffsetStruct =
494 (struct KV_OFFSET_STRUCT *)ps_vma->vm_private_data;
496 PVR_ASSERT(psOffsetStruct != NULL);
497 #if defined(DEBUG_LINUX_MMAP_AREAS)
498 PVR_DPF(PVR_DBG_MESSAGE, "%s: psLinuxMemArea "
499 "0x%p, CpuVAddr 0x%p ui32MMapOffset %ld, ui32Mapped %d",
501 psOffsetStruct->psLinuxMemArea,
502 LinuxMemAreaToCpuVAddr(psOffsetStruct->psLinuxMemArea),
503 psOffsetStruct->ui32MMapOffset,
504 psOffsetStruct->ui32Mapped);
507 PVR_ASSERT(!psOffsetStruct->bOnMMapList);
508 psOffsetStruct->ui32Mapped--;
509 if (psOffsetStruct->ui32Mapped == 0) {
/* Last unmap with live references suggests the client forgot to release
 * its mmap data first - log it, then tear the record down anyway. */
510 if (psOffsetStruct->ui32RefCount != 0)
511 PVR_DPF(PVR_DBG_MESSAGE,
512 "%s: psOffsetStruct 0x%p has non-zero "
513 "reference count (ui32RefCount = %lu). "
514 "User mode address of start of mapping: 0x%lx",
515 __func__, psOffsetStruct,
516 psOffsetStruct->ui32RefCount,
517 psOffsetStruct->ui32UserVAddr);
519 DestroyOffsetStruct(psOffsetStruct);
521 ps_vma->vm_private_data = NULL;
524 static void MMapVClose(struct vm_area_struct *ps_vma)
526 mutex_lock(&g_sMMapMutex);
527 MMapVCloseNoLock(ps_vma);
528 mutex_unlock(&g_sMMapMutex);
531 static struct vm_operations_struct MMapIOOps = {
/*
 * PVRMMap - the driver's file_operations .mmap handler.  Matches the
 * incoming (vm_pgoff, length) against a pending offset record, validates
 * the size, configures VMA flags and cache attributes from the area's
 * HAP flags, installs MMapIOOps, maps the pages via DoMapToUser(), and
 * records the resulting user address.  Holds g_sMMapMutex throughout.
 * NOTE(review): iRetVal assignments, several braces and `break`s, and
 * the final return are on lines elided from this view.
 */
536 int PVRMMap(struct file *pFile, struct vm_area_struct *ps_vma)
539 struct KV_OFFSET_STRUCT *psOffsetStruct = NULL;
542 PVR_UNREFERENCED_PARAMETER(pFile);
544 mutex_lock(&g_sMMapMutex);
546 ui32ByteSize = ps_vma->vm_end - ps_vma->vm_start;
548 PVR_DPF(PVR_DBG_MESSAGE,
549 "%s: Received mmap(2) request with ui32MMapOffset 0x%08lx,"
550 " and ui32ByteSize %ld(0x%08lx)", __func__, ps_vma->vm_pgoff,
551 ui32ByteSize, ui32ByteSize);
/* Writable private (copy-on-write) mappings of device memory are refused. */
553 if ((ps_vma->vm_flags & VM_WRITE) && !(ps_vma->vm_flags & VM_SHARED)) {
554 PVR_DPF(PVR_DBG_ERROR,
555 "%s: Cannot mmap non-shareable writable areas",
558 goto unlock_and_return;
/* The offset must have been pre-registered via
 * PVRMMapOSMemHandleToMMapData(). */
562 FindOffsetStructByOffset(ps_vma->vm_pgoff, ui32ByteSize);
563 if (psOffsetStruct == NULL) {
564 PVR_DPF(PVR_DBG_ERROR,
565 "%s: Attempted to mmap unregistered area at vm_pgoff %ld",
566 __func__, ps_vma->vm_pgoff);
568 goto unlock_and_return;
/* Claimed: off the pending list, owned by this VMA from here on. */
570 list_del(&psOffsetStruct->sMMapItem);
571 psOffsetStruct->bOnMMapList = IMG_FALSE;
573 PVR_DPF(PVR_DBG_MESSAGE, "%s: Mapped psLinuxMemArea 0x%p\n",
574 __func__, psOffsetStruct->psLinuxMemArea);
576 if (!CheckSize(psOffsetStruct->psLinuxMemArea, ui32ByteSize)) {
578 goto unlock_and_return;
/* Pin the VMA: no swap-out, no expansion, no inheritance across fork. */
581 ps_vma->vm_flags |= VM_RESERVED;
582 ps_vma->vm_flags |= VM_IO;
584 ps_vma->vm_flags |= VM_DONTEXPAND;
586 ps_vma->vm_flags |= VM_DONTCOPY;
588 ps_vma->vm_private_data = (void *)psOffsetStruct;
/* Select page protection from the area's cache-type HAP flags. */
590 switch (psOffsetStruct->psLinuxMemArea->
591 ui32AreaFlags & PVRSRV_HAP_CACHETYPE_MASK) {
592 case PVRSRV_HAP_CACHED:
595 case PVRSRV_HAP_WRITECOMBINE:
596 ps_vma->vm_page_prot = PGPROT_WC(ps_vma->vm_page_prot);
598 case PVRSRV_HAP_UNCACHED:
599 ps_vma->vm_page_prot = PGPROT_UC(ps_vma->vm_page_prot);
602 PVR_DPF(PVR_DBG_ERROR, "%s: unknown cache type", __func__);
604 goto unlock_and_return;
607 ps_vma->vm_ops = &MMapIOOps;
609 if (!DoMapToUser(psOffsetStruct->psLinuxMemArea, ps_vma, 0)) {
611 goto unlock_and_return;
614 PVR_ASSERT(psOffsetStruct->ui32UserVAddr == 0);
616 psOffsetStruct->ui32UserVAddr = ps_vma->vm_start;
/* Count the initial mapping ourselves; .open is not called for mmap(2). */
618 MMapVOpenNoLock(ps_vma);
620 PVR_DPF(PVR_DBG_MESSAGE, "%s: Mapped area at offset 0x%08lx\n",
621 __func__, ps_vma->vm_pgoff);
/* Failure cleanup: a claimed-but-unmapped offset struct must not leak. */
624 if (iRetVal != 0 && psOffsetStruct != NULL)
625 DestroyOffsetStruct(psOffsetStruct);
627 mutex_unlock(&g_sMMapMutex);
632 #if defined(DEBUG_LINUX_MMAP_AREAS)
/*
 * PrintMMapReg_helper - format one offset record as a row of the
 * /proc "mmap" debug listing (debug builds only).  Returns the
 * printAppend() result - the return statement is on a line elided from
 * this view.
 */
633 static off_t PrintMMapReg_helper(char *buffer, size_t size,
634 const struct KV_OFFSET_STRUCT *psOffsetStruct,
635 struct LinuxMemArea *psLinuxMemArea)
638 u32 ui32RealByteSize;
641 PVR_ASSERT(psOffsetStruct->psLinuxMemArea == psLinuxMemArea);
643 DetermineUsersSizeAndByteOffset(psLinuxMemArea,
647 Ret = printAppend(buffer, size, 0,
648 "%-8p %08x %-8p %08x %08x "
649 "%-8d %-24s %-5u %-8s %08x(%s)\n",
651 psOffsetStruct->ui32UserVAddr + ui32ByteOffset,
652 LinuxMemAreaToCpuVAddr(psLinuxMemArea),
653 LinuxMemAreaToCpuPAddr(psLinuxMemArea, 0).uiAddr,
654 psOffsetStruct->ui32MMapOffset,
655 psLinuxMemArea->ui32ByteSize,
656 LinuxMemAreaTypeToString(psLinuxMemArea->eAreaType),
657 psOffsetStruct->ui32PID,
658 psOffsetStruct->pszName,
659 psLinuxMemArea->ui32AreaFlags,
660 HAPFlagsToString(psLinuxMemArea->ui32AreaFlags));
/*
 * PrintMMapRegistrations - /proc read callback (debug builds): emits a
 * header with the global counters at off==0, then one row per offset
 * record on subsequent calls, walking all registered areas under
 * g_sMMapMutex.  NOTE(review): the off-based row-selection logic is
 * partly elided from this view.
 */
665 static off_t PrintMMapRegistrations(char *buffer, size_t size, off_t off)
667 struct LinuxMemArea *psLinuxMemArea;
670 mutex_lock(&g_sMMapMutex);
/* First call (off == 0): print the table header and summary counters. */
673 Ret = printAppend(buffer, size, 0,
674 "Allocations registered for mmap: %u\n"
675 "In total these areas correspond to %u bytes\n"
676 "psLinuxMemArea UserVAddr KernelVAddr "
677 "CpuPAddr MMapOffset ByteLength "
680 g_ui32RegisteredAreas, g_ui32TotalByteSize);
682 goto unlock_and_return;
687 goto unlock_and_return;
690 PVR_ASSERT(off != 0);
691 list_for_each_entry(psLinuxMemArea, &g_sMMapAreaList, sMMapItem) {
692 struct KV_OFFSET_STRUCT *psOffsetStruct;
694 list_for_each_entry(psOffsetStruct,
695 &psLinuxMemArea->sMMapOffsetStructList,
699 Ret = PrintMMapReg_helper(buffer, size,
700 psOffsetStruct, psLinuxMemArea);
701 goto unlock_and_return;
708 mutex_unlock(&g_sMMapMutex);
/*
 * PVRMMapRegisterArea - make a LinuxMemArea eligible for user mmap by
 * adding it to the global area list (and updating the debug counters).
 * Double registration is rejected with PVRSRV_ERROR_INVALID_PARAMS.
 * Holds g_sMMapMutex.  NOTE(review): the success assignment of eError
 * and some braces/#endifs are on lines elided from this view.
 */
713 enum PVRSRV_ERROR PVRMMapRegisterArea(struct LinuxMemArea *psLinuxMemArea)
715 enum PVRSRV_ERROR eError = PVRSRV_ERROR_GENERIC;
716 #if defined(CONFIG_PVR_DEBUG) || defined(DEBUG_LINUX_MMAP_AREAS)
717 const char *pszName =
718 LinuxMemAreaTypeToString(LinuxMemAreaRootType(psLinuxMemArea));
721 mutex_lock(&g_sMMapMutex);
723 PVR_DPF(PVR_DBG_MESSAGE,
724 "%s(%s, psLinuxMemArea 0x%p, ui32AllocFlags 0x%8lx)",
725 __func__, pszName, psLinuxMemArea,
726 psLinuxMemArea->ui32AreaFlags);
/* Sub-allocations of sub-allocations are not expected. */
728 PVR_ASSERT(psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC
729 || LinuxMemAreaRoot(psLinuxMemArea)->eAreaType !=
730 LINUX_MEM_AREA_SUB_ALLOC);
732 if (psLinuxMemArea->bMMapRegistered) {
733 PVR_DPF(PVR_DBG_ERROR,
734 "%s: psLinuxMemArea 0x%p is already registered",
735 __func__, psLinuxMemArea);
736 eError = PVRSRV_ERROR_INVALID_PARAMS;
740 list_add_tail(&psLinuxMemArea->sMMapItem, &g_sMMapAreaList);
742 psLinuxMemArea->bMMapRegistered = IMG_TRUE;
744 #if defined(DEBUG_LINUX_MMAP_AREAS)
745 g_ui32RegisteredAreas++;
/* Sub-allocations share their root's bytes; don't count them twice. */
747 if (psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC)
748 g_ui32TotalByteSize += psLinuxMemArea->ui32ByteSize;
754 mutex_unlock(&g_sMMapMutex);
/*
 * PVRMMapRemoveRegisteredArea - undo PVRMMapRegisterArea: destroy any
 * never-mapped offset records for the area, then unlink it from the
 * global list and update the debug counters.  Fails (without unlinking)
 * if any record is still mapped into user space.  Holds g_sMMapMutex.
 * NOTE(review): the error-path jump and success assignment of eError are
 * on lines elided from this view.
 */
759 enum PVRSRV_ERROR PVRMMapRemoveRegisteredArea(
760 struct LinuxMemArea *psLinuxMemArea)
762 enum PVRSRV_ERROR eError = PVRSRV_ERROR_GENERIC;
763 struct KV_OFFSET_STRUCT *psOffsetStruct, *psTmpOffsetStruct;
765 mutex_lock(&g_sMMapMutex);
767 PVR_ASSERT(psLinuxMemArea->bMMapRegistered);
769 list_for_each_entry_safe(psOffsetStruct, psTmpOffsetStruct,
770 &psLinuxMemArea->sMMapOffsetStructList,
/* A record still mapped into a live VMA blocks removal. */
772 if (psOffsetStruct->ui32Mapped != 0) {
773 PVR_DPF(PVR_DBG_ERROR, "%s: psOffsetStruct "
774 "0x%p for memory area "
775 "0x0x%p is still mapped; "
776 "psOffsetStruct->ui32Mapped %lu",
777 __func__, psOffsetStruct, psLinuxMemArea,
778 psOffsetStruct->ui32Mapped);
779 eError = PVRSRV_ERROR_GENERIC;
/* Record was handed out but the mmap(2) never arrived. */
783 PVR_DPF(PVR_DBG_WARNING,
784 "%s: psOffsetStruct 0x%p was never mapped",
785 __func__, psOffsetStruct);
788 PVR_ASSERT((psOffsetStruct->ui32Mapped == 0)
789 && psOffsetStruct->bOnMMapList);
791 DestroyOffsetStruct(psOffsetStruct);
794 list_del(&psLinuxMemArea->sMMapItem);
796 psLinuxMemArea->bMMapRegistered = IMG_FALSE;
798 #if defined(DEBUG_LINUX_MMAP_AREAS)
799 g_ui32RegisteredAreas--;
800 if (psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC)
801 g_ui32TotalByteSize -= psLinuxMemArea->ui32ByteSize;
807 mutex_unlock(&g_sMMapMutex);
811 enum PVRSRV_ERROR LinuxMMapPerProcessConnect(struct PVRSRV_ENV_PER_PROCESS_DATA
814 PVR_UNREFERENCED_PARAMETER(psEnvPerProc);
/*
 * LinuxMMapPerProcessDisconnect - per-process teardown: destroy any
 * offset records the exiting process left on the global pending list
 * (handles handed out but never mmap'd), warning once if any existed.
 * NOTE(review): the parameter name, the bWarn emission, and closing
 * braces are on lines elided from this view.
 */
819 void LinuxMMapPerProcessDisconnect(struct PVRSRV_ENV_PER_PROCESS_DATA
822 struct KV_OFFSET_STRUCT *psOffsetStruct, *psTmpOffsetStruct;
823 IMG_BOOL bWarn = IMG_FALSE;
824 u32 ui32PID = OSGetCurrentProcessIDKM();
826 PVR_UNREFERENCED_PARAMETER(psEnvPerProc);
828 mutex_lock(&g_sMMapMutex);
830 list_for_each_entry_safe(psOffsetStruct, psTmpOffsetStruct,
831 &g_sMMapOffsetStructList, sMMapItem) {
832 if (psOffsetStruct->ui32PID == ui32PID) {
834 PVR_DPF(PVR_DBG_WARNING, "%s: process has "
835 "unmapped offset structures. "
/* Only never-mapped, still-queued records should remain at exit. */
840 PVR_ASSERT(psOffsetStruct->ui32Mapped == 0);
841 PVR_ASSERT(psOffsetStruct->bOnMMapList);
843 DestroyOffsetStruct(psOffsetStruct);
847 mutex_unlock(&g_sMMapMutex);
/*
 * LinuxMMapPerProcessHandleOptions - cap the process's handle values at
 * MAX_MMAP_HANDLE so every handle can be encoded as a "special" mmap
 * offset (see HandleToMMapOffset).  NOTE(review): the parameter name,
 * error-path details and return are on lines elided from this view.
 */
850 enum PVRSRV_ERROR LinuxMMapPerProcessHandleOptions(struct PVRSRV_HANDLE_BASE
853 enum PVRSRV_ERROR eError = PVRSRV_OK;
855 eError = PVRSRVSetMaxHandle(psHandleBase, MAX_MMAP_HANDLE);
856 if (eError != PVRSRV_OK) {
857 PVR_DPF(PVR_DBG_ERROR, "%s: failed to set handle limit (%d)",
/*
 * PVRMMapInit - module init for the mmap layer: set up the mutex, create
 * the offset-struct slab cache (logging on failure), and, in debug
 * builds, register the /proc "mmap" listing.
 * NOTE(review): the assignment target of kmem_cache_create and the
 * failure-path handling are on lines elided from this view.
 */
865 void PVRMMapInit(void)
867 mutex_init(&g_sMMapMutex);
870 kmem_cache_create("img-mmap", sizeof(struct KV_OFFSET_STRUCT),
872 if (!g_psMemmapCache) {
873 PVR_DPF(PVR_DBG_ERROR, "%s: failed to allocate kmem_cache",
877 #if defined(DEBUG_LINUX_MMAP_AREAS)
878 CreateProcReadEntry("mmap", PrintMMapRegistrations);
/*
 * PVRMMapCleanup - module exit: force-unregister and deep-free any areas
 * still on the global list (logging each failure), remove the /proc
 * entry, and destroy the slab cache.
 * NOTE(review): the function's closing lines run past the end of this
 * view; only the visible portion is documented here.
 */
888 void PVRMMapCleanup(void)
890 enum PVRSRV_ERROR eError;
/* Areas still registered at module exit indicate leaked client state. */
892 if (!list_empty(&g_sMMapAreaList)) {
893 struct LinuxMemArea *psLinuxMemArea, *psTmpMemArea;
895 PVR_DPF(PVR_DBG_ERROR,
896 "%s: Memory areas are still registered with MMap",
899 PVR_TRACE("%s: Unregistering memory areas", __func__);
900 list_for_each_entry_safe(psLinuxMemArea, psTmpMemArea,
901 &g_sMMapAreaList, sMMapItem) {
902 eError = PVRMMapRemoveRegisteredArea(psLinuxMemArea);
903 if (eError != PVRSRV_OK)
904 PVR_DPF(PVR_DBG_ERROR,
905 "%s: PVRMMapRemoveRegisteredArea failed (%d)",
907 PVR_ASSERT(eError == PVRSRV_OK);
909 LinuxMemAreaDeepFree(psLinuxMemArea);
912 PVR_ASSERT(list_empty((&g_sMMapAreaList)));
914 RemoveProcEntry("mmap");
916 if (g_psMemmapCache) {
917 kmem_cache_destroy(g_psMemmapCache);
918 g_psMemmapCache = NULL;