1 /**********************************************************************
3 * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
9 * This program is distributed in the hope it will be useful but, except
10 * as otherwise stated in writing, without any warranty; without even the
11 * implied warranty of merchantability or fitness for a particular purpose.
12 * See the GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 * The full GNU General Public License is included in this distribution in
19 * the file called "COPYING".
21 * Contact Information:
22 * Imagination Technologies Ltd. <gpl-support@imgtec.com>
23 * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
25 ******************************************************************************/
26 #include "services_headers.h"
28 #include "sysconfig.h"
31 #include "pvr_pdump.h"
/* Return the smaller of a and b.
 * Fully parenthesized so operands with low-precedence operators
 * (e.g. MIN(x & 3, 1)) expand correctly; the original form
 * (a > b ? b : a) mis-parsed such arguments.  Each argument is still
 * evaluated twice, so callers must not pass side-effecting expressions. */
#define MIN(a, b) ((a) > (b) ? (b) : (a))
/* Forward declarations of the file-local helpers defined further below.
 * NOTE(review): this listing appears to have lines elided (the embedded
 * stale line numbers jump 40 -> 43), so the BM_ImportMemory prototype is
 * truncated here — confirm against the full source. */
36 static IMG_BOOL ZeroBuf(struct BM_BUF *pBuf, struct BM_MAPPING *pMapping,
37 u32 ui32Bytes, u32 ui32Flags);
38 static void BM_FreeMemory(void *pH, u32 base, struct BM_MAPPING *psMapping);
39 static IMG_BOOL BM_ImportMemory(void *pH, size_t uSize,
40 size_t *pActualSize, struct BM_MAPPING **ppsMapping, u32 uFlags,
43 static IMG_BOOL DevMemoryAlloc(struct BM_CONTEXT *pBMContext,
44 struct BM_MAPPING *pMapping, u32 uFlags,
45 u32 dev_vaddr_alignment, struct IMG_DEV_VIRTADDR *pDevVAddr);
46 static void DevMemoryFree(struct BM_MAPPING *pMapping);
/*
 * AllocMemory - back a BM_BUF with device memory from a BM heap.
 *
 * Two modes are visible here:
 *  - PVRSRV_MEM_RAM_BACKED_ALLOCATION: space comes from the heap's import
 *    arena (RA_Alloc), which yields an existing BM_MAPPING; a sub-memory
 *    handle is created when the buffer covers only part of that mapping,
 *    and the buffer is optionally zeroed (PVRSRV_MEM_ZERO).
 *  - otherwise: only device-virtual space is reserved via pfnMMUAlloc
 *    (caller-supplied address when PVRSRV_MEM_USER_SUPPLIED_DEVVADDR is
 *    set) and a fresh, CPU-unbacked BM_MAPPING is allocated here.
 *
 * NOTE(review): the embedded stale line numbers jump (e.g. 50->53, 66->70),
 * so else-branches, closing braces and failure returns are elided from this
 * view; do not treat the block as compilable or complete as shown.
 */
48 static IMG_BOOL AllocMemory(struct BM_CONTEXT *pBMContext,
49 struct BM_HEAP *psBMHeap, struct IMG_DEV_VIRTADDR *psDevVAddr,
50 size_t uSize, u32 uFlags, u32 uDevVAddrAlignment,
53 struct BM_MAPPING *pMapping;
55 struct RA_ARENA *pArena = NULL;
57 PVR_DPF(PVR_DBG_MESSAGE, "AllocMemory "
58 "(pBMContext=%08X, uSize=0x%x, uFlags=0x%x, "
59 "align=0x%x, pBuf=%08X)",
60 pBMContext, uSize, uFlags, uDevVAddrAlignment, pBuf);
/* RAM-backed path: DevVAddr management and RAM backing are exclusive. */
62 if (uFlags & PVRSRV_MEM_RAM_BACKED_ALLOCATION) {
63 if (uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) {
64 PVR_DPF(PVR_DBG_ERROR, "AllocMemory: "
65 "combination of DevVAddr management and "
66 "RAM backing mode unsupported");
70 if (psBMHeap->ui32Attribs &
71 (PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG |
72 PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG)) {
73 pArena = psBMHeap->pImportArena;
75 PVR_DPF(PVR_DBG_ERROR, "AllocMemory: "
76 "backing store type doesn't match heap");
/* RA_Alloc returns the owning mapping and the buffer's device address. */
80 if (!RA_Alloc(pArena, uSize, (void *)&pMapping, uFlags,
82 (u32 *)&(pBuf->DevVAddr.uiAddr))) {
83 PVR_DPF(PVR_DBG_ERROR,
84 "AllocMemory: RA_Alloc(0x%x) FAILED", uSize);
/* Derive CPU-side view of the buffer from its offset inside the mapping. */
88 uOffset = pBuf->DevVAddr.uiAddr - pMapping->DevVAddr.uiAddr;
89 if (pMapping->CpuVAddr) {
91 (void *)((u32) pMapping->CpuVAddr + uOffset);
93 pBuf->CpuVAddr = NULL;
/* Whole-mapping buffers reuse the mapping's OS handle; partial ones
 * need a sub-handle. */
96 if (uSize == pMapping->uSize) {
97 pBuf->hOSMemHandle = pMapping->hOSMemHandle;
99 if (OSGetSubMemHandle(pMapping->hOSMemHandle, uOffset,
100 uSize, psBMHeap->ui32Attribs,
101 &pBuf->hOSMemHandle) != PVRSRV_OK) {
102 PVR_DPF(PVR_DBG_ERROR, "AllocMemory: "
103 "OSGetSubMemHandle FAILED");
108 pBuf->CpuPAddr.uiAddr = pMapping->CpuPAddr.uiAddr + uOffset;
110 if (uFlags & PVRSRV_MEM_ZERO)
111 if (!ZeroBuf(pBuf, pMapping, uSize,
112 psBMHeap->ui32Attribs | uFlags))
/* Non-RAM-backed path: reserve device-virtual space only. */
115 if (uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) {
116 PVR_ASSERT(psDevVAddr != NULL);
118 if (psDevVAddr == NULL) {
119 PVR_DPF(PVR_DBG_ERROR, "AllocMemory: "
120 "invalid parameter - psDevVAddr");
124 pBMContext->psDeviceNode->pfnMMUAlloc(
125 psBMHeap->pMMUHeap, uSize,
126 PVRSRV_MEM_USER_SUPPLIED_DEVVADDR,
127 uDevVAddrAlignment, psDevVAddr);
128 pBuf->DevVAddr = *psDevVAddr;
130 pBMContext->psDeviceNode->pfnMMUAlloc(psBMHeap->
/* Fresh mapping records the reservation; no CPU backing yet. */
136 if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
137 sizeof(struct BM_MAPPING),
138 (void **)&pMapping, NULL) != PVRSRV_OK) {
139 PVR_DPF(PVR_DBG_ERROR,
140 "AllocMemory: OSAllocMem(0x%x) FAILED");
144 pBuf->CpuVAddr = NULL;
145 pBuf->hOSMemHandle = NULL;
146 pBuf->CpuPAddr.uiAddr = 0;
148 pMapping->CpuVAddr = NULL;
149 pMapping->CpuPAddr.uiAddr = 0;
150 pMapping->DevVAddr = pBuf->DevVAddr;
151 pMapping->psSysAddr = NULL;
152 pMapping->uSize = uSize;
153 pMapping->hOSMemHandle = NULL;
156 pMapping->pArena = pArena;
158 pMapping->pBMHeap = psBMHeap;
159 pBuf->pMapping = pMapping;
161 PVR_DPF(PVR_DBG_MESSAGE, "AllocMemory: "
162 "pMapping=%08X: DevV=%08X CpuV=%08X CpuP=%08X uSize=0x%x",
163 pMapping, pMapping->DevVAddr.uiAddr, pMapping->CpuVAddr,
164 pMapping->CpuPAddr.uiAddr, pMapping->uSize);
166 PVR_DPF(PVR_DBG_MESSAGE, "AllocMemory: "
167 "pBuf=%08X: DevV=%08X CpuV=%08X CpuP=%08X uSize=0x%x",
168 pBuf, pBuf->DevVAddr.uiAddr, pBuf->CpuVAddr,
169 pBuf->CpuPAddr.uiAddr, uSize);
171 PVR_ASSERT(((pBuf->DevVAddr.uiAddr) & (uDevVAddrAlignment - 1)) == 0);
/*
 * WrapMemory - wrap caller-supplied physical memory into a BM_BUF.
 *
 * Builds a BM_MAPPING around memory the caller already owns, choosing one
 * of four origins based on whether a CPU virtual address was supplied and
 * whether the pages are physically contiguous:
 *   hm_wrapped_virtaddr / hm_wrapped_scatter_virtaddr (register existing VA)
 *   hm_wrapped / hm_wrapped_scatter                   (reserve a new VA)
 * then maps it into device-virtual space via DevMemoryAlloc, fills in the
 * BM_BUF views (honouring ui32BaseOffset with a sub-handle when needed),
 * and optionally zeroes the buffer.  The tail is the unwind path: release
 * sub-handle, un-register/un-reserve per origin, free the mapping.
 *
 * NOTE(review): stale embedded line numbers jump throughout; branch
 * keywords, break statements and returns are elided from this view.
 */
176 static IMG_BOOL WrapMemory(struct BM_HEAP *psBMHeap,
177 size_t uSize, u32 ui32BaseOffset, IMG_BOOL bPhysContig,
178 struct IMG_SYS_PHYADDR *psAddr, void *pvCPUVAddr, u32 uFlags,
181 struct IMG_DEV_VIRTADDR DevVAddr = { 0 };
182 struct BM_MAPPING *pMapping;
184 u32 const ui32PageSize = HOST_PAGESIZE();
186 PVR_DPF(PVR_DBG_MESSAGE,
187 "WrapMemory(psBMHeap=%08X, size=0x%x, offset=0x%x, "
188 "bPhysContig=0x%x, pvCPUVAddr = 0x%x, flags=0x%x, pBuf=%08X)",
189 psBMHeap, uSize, ui32BaseOffset, bPhysContig, pvCPUVAddr,
/* Wrapped memory must be page-aligned on both the phys and virt side. */
192 PVR_ASSERT((psAddr->uiAddr & (ui32PageSize - 1)) == 0);
194 PVR_ASSERT(((u32) pvCPUVAddr & (ui32PageSize - 1)) == 0);
196 uSize += ui32BaseOffset;
197 uSize = HOST_PAGEALIGN(uSize);
199 if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(*pMapping),
200 (void **)&pMapping, NULL) != PVRSRV_OK) {
201 PVR_DPF(PVR_DBG_ERROR, "WrapMemory: OSAllocMem(0x%x) FAILED",
206 OSMemSet(pMapping, 0, sizeof(*pMapping));
208 pMapping->uSize = uSize;
209 pMapping->pBMHeap = psBMHeap;
/* Caller supplied a CPU VA: register it rather than reserving one. */
212 pMapping->CpuVAddr = pvCPUVAddr;
215 pMapping->eCpuMemoryOrigin = hm_wrapped_virtaddr;
216 pMapping->CpuPAddr = SysSysPAddrToCpuPAddr(psAddr[0]);
218 if (OSRegisterMem(pMapping->CpuPAddr,
219 pMapping->CpuVAddr, pMapping->uSize,
220 uFlags, &pMapping->hOSMemHandle) !=
222 PVR_DPF(PVR_DBG_ERROR, "WrapMemory: "
223 "OSRegisterMem Phys=0x%08X, "
224 "CpuVAddr = 0x%08X, Size=%d) failed",
225 pMapping->CpuPAddr, pMapping->CpuVAddr,
230 pMapping->eCpuMemoryOrigin =
231 hm_wrapped_scatter_virtaddr;
232 pMapping->psSysAddr = psAddr;
234 if (OSRegisterDiscontigMem(pMapping->psSysAddr,
238 &pMapping->hOSMemHandle) !=
240 PVR_DPF(PVR_DBG_ERROR, "WrapMemory: "
241 "OSRegisterDiscontigMem CpuVAddr = "
242 "0x%08X, Size=%d) failed",
243 pMapping->CpuVAddr, pMapping->uSize);
/* No CPU VA supplied: reserve a kernel mapping for the pages. */
249 pMapping->eCpuMemoryOrigin = hm_wrapped;
250 pMapping->CpuPAddr = SysSysPAddrToCpuPAddr(psAddr[0]);
252 if (OSReservePhys(pMapping->CpuPAddr, pMapping->uSize,
253 uFlags, &pMapping->CpuVAddr,
254 &pMapping->hOSMemHandle) != PVRSRV_OK) {
255 PVR_DPF(PVR_DBG_ERROR, "WrapMemory: "
256 "OSReservePhys Phys=0x%08X, Size=%d) "
258 pMapping->CpuPAddr, pMapping->uSize);
262 pMapping->eCpuMemoryOrigin = hm_wrapped_scatter;
263 pMapping->psSysAddr = psAddr;
265 if (OSReserveDiscontigPhys(pMapping->psSysAddr,
266 pMapping->uSize, uFlags,
268 &pMapping->hOSMemHandle) !=
270 PVR_DPF(PVR_DBG_ERROR, "WrapMemory: "
271 "OSReserveDiscontigPhys Size=%d) failed",
/* Map the wrapped pages into device-virtual space. */
278 bResult = DevMemoryAlloc(psBMHeap->pBMContext, pMapping,
279 uFlags | PVRSRV_MEM_READ | PVRSRV_MEM_WRITE,
280 ui32PageSize, &DevVAddr);
282 PVR_DPF(PVR_DBG_ERROR,
283 "WrapMemory: DevMemoryAlloc(0x%x) failed",
/* Buffer views are offset from the mapping by ui32BaseOffset. */
288 pBuf->CpuPAddr.uiAddr = pMapping->CpuPAddr.uiAddr + ui32BaseOffset;
290 pBuf->hOSMemHandle = pMapping->hOSMemHandle;
292 if (OSGetSubMemHandle(pMapping->hOSMemHandle,
294 (pMapping->uSize - ui32BaseOffset),
296 &pBuf->hOSMemHandle) != PVRSRV_OK) {
297 PVR_DPF(PVR_DBG_ERROR,
298 "WrapMemory: OSGetSubMemHandle failed");
301 if (pMapping->CpuVAddr)
302 pBuf->CpuVAddr = (void *)((u32) pMapping->CpuVAddr +
304 pBuf->DevVAddr.uiAddr = pMapping->DevVAddr.uiAddr + ui32BaseOffset;
306 if (uFlags & PVRSRV_MEM_ZERO)
307 if (!ZeroBuf(pBuf, pMapping, uSize, uFlags))
310 PVR_DPF(PVR_DBG_MESSAGE, "DevVaddr.uiAddr=%08X", DevVAddr.uiAddr);
311 PVR_DPF(PVR_DBG_MESSAGE, "WrapMemory: pMapping=%08X: DevV=%08X "
312 "CpuV=%08X CpuP=%08X uSize=0x%x",
313 pMapping, pMapping->DevVAddr.uiAddr, pMapping->CpuVAddr,
314 pMapping->CpuPAddr.uiAddr, pMapping->uSize);
315 PVR_DPF(PVR_DBG_MESSAGE, "WrapMemory: pBuf=%08X: DevV=%08X "
316 "CpuV=%08X CpuP=%08X uSize=0x%x",
317 pBuf, pBuf->DevVAddr.uiAddr, pBuf->CpuVAddr,
318 pBuf->CpuPAddr.uiAddr, uSize);
320 pBuf->pMapping = pMapping;
/* --- error unwind: undo whichever registration/reservation succeeded --- */
324 if (ui32BaseOffset && pBuf->hOSMemHandle)
325 OSReleaseSubMemHandle(pBuf->hOSMemHandle, uFlags);
327 if (pMapping->CpuVAddr || pMapping->hOSMemHandle)
328 switch (pMapping->eCpuMemoryOrigin) {
330 OSUnReservePhys(pMapping->CpuVAddr, pMapping->uSize,
331 uFlags, pMapping->hOSMemHandle);
333 case hm_wrapped_virtaddr:
334 OSUnRegisterMem(pMapping->CpuVAddr, pMapping->uSize,
335 uFlags, pMapping->hOSMemHandle);
337 case hm_wrapped_scatter:
338 OSUnReserveDiscontigPhys(pMapping->CpuVAddr,
339 pMapping->uSize, uFlags,
340 pMapping->hOSMemHandle);
342 case hm_wrapped_scatter_virtaddr:
343 OSUnRegisterDiscontigMem(pMapping->CpuVAddr,
344 pMapping->uSize, uFlags,
345 pMapping->hOSMemHandle);
351 OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct BM_MAPPING), pMapping,
/*
 * ZeroBuf - zero-fill ui32Bytes of a buffer's backing memory.
 *
 * Three strategies, in order of preference:
 *   1. buffer already has a kernel CPU VA: memset directly;
 *   2. physically contiguous backing (hm_contiguous / hm_wrapped):
 *      temporarily map the whole range with OSMapPhysToLin;
 *   3. otherwise walk the buffer page by page via OSMemHandleToCpuPAddr,
 *      mapping and clearing at most one page (less if the start address is
 *      mid-page) per iteration.
 * The temporary mappings are kernel-only and inherit the caller's cache
 * type bits (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK).
 *
 * NOTE(review): stale embedded line numbers jump; failure returns and the
 * final success return are elided from this view.
 */
357 static IMG_BOOL ZeroBuf(struct BM_BUF *pBuf, struct BM_MAPPING *pMapping,
358 u32 ui32Bytes, u32 ui32Flags)
362 if (pBuf->CpuVAddr) {
363 OSMemSet(pBuf->CpuVAddr, 0, ui32Bytes);
364 } else if (pMapping->eCpuMemoryOrigin == hm_contiguous ||
365 pMapping->eCpuMemoryOrigin == hm_wrapped) {
366 pvCpuVAddr = (void __force *)OSMapPhysToLin(pBuf->CpuPAddr,
368 PVRSRV_HAP_KERNEL_ONLY |
370 PVRSRV_HAP_CACHETYPE_MASK),
373 PVR_DPF(PVR_DBG_ERROR, "ZeroBuf: "
374 "OSMapPhysToLin for contiguous buffer failed");
377 OSMemSet(pvCpuVAddr, 0, ui32Bytes);
378 OSUnMapPhysToLin((void __force __iomem *)pvCpuVAddr, ui32Bytes,
379 PVRSRV_HAP_KERNEL_ONLY |
380 (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK),
/* Non-contiguous: clear one page (or partial first page) at a time. */
383 u32 ui32BytesRemaining = ui32Bytes;
384 u32 ui32CurrentOffset = 0;
385 struct IMG_CPU_PHYADDR CpuPAddr;
387 PVR_ASSERT(pBuf->hOSMemHandle);
389 while (ui32BytesRemaining > 0) {
391 MIN(ui32BytesRemaining, HOST_PAGESIZE());
393 OSMemHandleToCpuPAddr(pBuf->hOSMemHandle,
/* A mid-page start limits this block to the end of that page. */
396 if (CpuPAddr.uiAddr & (HOST_PAGESIZE() - 1))
398 MIN(ui32BytesRemaining,
399 HOST_PAGEALIGN(CpuPAddr.uiAddr) -
402 pvCpuVAddr = (void __force *)OSMapPhysToLin(CpuPAddr,
404 PVRSRV_HAP_KERNEL_ONLY |
406 PVRSRV_HAP_CACHETYPE_MASK),
409 PVR_DPF(PVR_DBG_ERROR, "ZeroBuf: "
410 "OSMapPhysToLin while "
411 "zeroing non-contiguous memory FAILED");
414 OSMemSet(pvCpuVAddr, 0, ui32BlockBytes);
415 OSUnMapPhysToLin((void __force __iomem *)pvCpuVAddr,
417 PVRSRV_HAP_KERNEL_ONLY |
419 PVRSRV_HAP_CACHETYPE_MASK),
422 ui32BytesRemaining -= ui32BlockBytes;
423 ui32CurrentOffset += ui32BlockBytes;
/*
 * FreeBuf - release a BM_BUF and, at last reference, its mapping.
 *
 * For user-supplied-DevVAddr buffers the mapping struct is simply freed
 * (RAM backing with that mode is unsupported and only logged).  Otherwise
 * a sub-memory handle, if one was created, is released; RAM-backed
 * allocations return their space to the import arena via RA_Free, while
 * wrapped buffers are un-registered/un-reserved per their memory origin,
 * have their device mapping torn down (DevMemoryFree) and the mapping
 * struct freed.  Finally the BM_BUF itself is freed.
 *
 * NOTE(review): stale embedded line numbers jump; case labels, breaks and
 * else-branches are elided from this view.
 */
430 static void FreeBuf(struct BM_BUF *pBuf, u32 ui32Flags)
432 struct BM_MAPPING *pMapping;
434 PVR_DPF(PVR_DBG_MESSAGE,
435 "FreeBuf: pBuf=%08X: DevVAddr=%08X CpuVAddr=%08X CpuPAddr=%08X",
436 pBuf, pBuf->DevVAddr.uiAddr, pBuf->CpuVAddr,
437 pBuf->CpuPAddr.uiAddr);
439 pMapping = pBuf->pMapping;
441 if (ui32Flags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) {
442 if (ui32Flags & PVRSRV_MEM_RAM_BACKED_ALLOCATION)
443 PVR_DPF(PVR_DBG_ERROR, "FreeBuf: "
444 "combination of DevVAddr management "
445 "and RAM backing mode unsupported");
447 OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
448 sizeof(struct BM_MAPPING),
/* A distinct hOSMemHandle means a sub-handle was created in AllocMemory/
 * WrapMemory and must be released separately. */
451 if (pBuf->hOSMemHandle != pMapping->hOSMemHandle)
452 OSReleaseSubMemHandle(pBuf->hOSMemHandle, ui32Flags);
453 if (ui32Flags & PVRSRV_MEM_RAM_BACKED_ALLOCATION) {
454 RA_Free(pBuf->pMapping->pArena, pBuf->DevVAddr.uiAddr,
457 switch (pMapping->eCpuMemoryOrigin) {
459 OSUnReservePhys(pMapping->CpuVAddr,
460 pMapping->uSize, ui32Flags,
461 pMapping->hOSMemHandle);
463 case hm_wrapped_virtaddr:
464 OSUnRegisterMem(pMapping->CpuVAddr,
465 pMapping->uSize, ui32Flags,
466 pMapping->hOSMemHandle);
468 case hm_wrapped_scatter:
469 OSUnReserveDiscontigPhys(pMapping->CpuVAddr,
475 case hm_wrapped_scatter_virtaddr:
476 OSUnRegisterDiscontigMem(pMapping->CpuVAddr,
486 DevMemoryFree(pMapping);
488 OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
489 sizeof(struct BM_MAPPING), pMapping, NULL);
493 OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct BM_BUF), pBuf, NULL);
/*
 * BM_DestroyContext - tear down a buffer-manager context.
 *
 * Sanity-checks every heap with a supported backing store: BUG()s if an
 * import arena still has live imports (RA_TestDelete fails), then hands
 * the actual destruction to the resource manager, which will invoke
 * BM_DestroyContextCallBack.
 */
496 void BM_DestroyContext(void *hBMContext)
498 struct BM_CONTEXT *pBMContext = (struct BM_CONTEXT *)hBMContext;
499 struct BM_HEAP *psBMHeap;
501 PVR_DPF(PVR_DBG_MESSAGE, "BM_DestroyContext");
503 for (psBMHeap = pBMContext->psBMHeap;
504 psBMHeap != NULL; psBMHeap = psBMHeap->psNext)
505 if (psBMHeap->ui32Attribs &
506 (PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG |
507 PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG))
508 if (psBMHeap->pImportArena) {
509 IMG_BOOL bTestDelete =
510 RA_TestDelete(psBMHeap->pImportArena);
/* Live imports at context teardown are fatal. */
511 BUG_ON(!bTestDelete);
514 ResManFreeResByPtr(pBMContext->hResItem);
/*
 * BM_DestroyContextCallBack - resource-manager callback that actually
 * frees a BM context.
 *
 * Walks the context's heaps deleting each import arena and MMU heap and
 * freeing the BM_HEAP structs, finalises the MMU context, deletes the
 * buffer hash table, unlinks the context from the device node's context
 * list (or clears the kernel-context pointer) and frees the context.
 *
 * NOTE(review): stale embedded line numbers jump (528->530, 563->568);
 * the heap-walk loop header and final return are elided from this view.
 */
517 static enum PVRSRV_ERROR BM_DestroyContextCallBack(void *pvParam, u32 ui32Param)
519 struct BM_CONTEXT *pBMContext = pvParam;
520 struct BM_CONTEXT **ppBMContext;
521 struct BM_HEAP *psBMHeap, *psTmpBMHeap;
522 struct PVRSRV_DEVICE_NODE *psDeviceNode;
524 PVR_UNREFERENCED_PARAMETER(ui32Param);
526 psDeviceNode = pBMContext->psDeviceNode;
528 psBMHeap = pBMContext->psBMHeap;
530 if (psBMHeap->ui32Attribs &
531 (PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG |
532 PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG)) {
533 if (psBMHeap->pImportArena)
534 RA_Delete(psBMHeap->pImportArena);
536 PVR_DPF(PVR_DBG_ERROR, "BM_DestroyContext: "
537 "backing store type unsupported");
538 return PVRSRV_ERROR_GENERIC;
541 psDeviceNode->pfnMMUDelete(psBMHeap->pMMUHeap);
543 psTmpBMHeap = psBMHeap;
545 psBMHeap = psBMHeap->psNext;
547 OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct BM_HEAP),
551 if (pBMContext->psMMUContext)
552 psDeviceNode->pfnMMUFinalise(pBMContext->psMMUContext);
554 if (pBMContext->pBufferHash)
555 HASH_Delete(pBMContext->pBufferHash);
/* Kernel context is tracked by pointer, not on the per-process list. */
557 if (pBMContext == psDeviceNode->sDevMemoryInfo.pBMKernelContext) {
558 psDeviceNode->sDevMemoryInfo.pBMKernelContext = NULL;
560 for (ppBMContext = &psDeviceNode->sDevMemoryInfo.pBMContext;
561 *ppBMContext; ppBMContext = &((*ppBMContext)->psNext))
562 if (*ppBMContext == pBMContext) {
563 *ppBMContext = pBMContext->psNext;
568 OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct BM_CONTEXT),
/*
 * BM_CreateContext - create (or re-reference) a buffer-manager context
 * for a device.
 *
 * A NULL psPerProc means the kernel context.  For per-process contexts an
 * existing context owned by the same resman context is re-used with its
 * refcount bumped.  Otherwise a new context is allocated, given a buffer
 * hash table and an MMU context, the kernel context's shared heaps are
 * inserted into the new MMU context, the context is linked onto the
 * device list and registered with the resource manager (destruction goes
 * through BM_DestroyContextCallBack).  *pbCreated reports whether a new
 * context was made.  Returns the context handle or NULL on failure.
 *
 * NOTE(review): stale embedded line numbers jump; several closing braces,
 * a loop header around the shared-heap walk, and error-path labels are
 * elided from this view.
 */
574 void *BM_CreateContext(struct PVRSRV_DEVICE_NODE *psDeviceNode,
575 struct IMG_DEV_PHYADDR *psPDDevPAddr,
576 struct PVRSRV_PER_PROCESS_DATA *psPerProc, IMG_BOOL *pbCreated)
578 struct BM_CONTEXT *pBMContext;
579 struct BM_HEAP *psBMHeap;
580 struct DEVICE_MEMORY_INFO *psDevMemoryInfo;
581 IMG_BOOL bKernelContext;
582 struct RESMAN_CONTEXT *hResManContext;
584 PVR_DPF(PVR_DBG_MESSAGE, "BM_CreateContext");
586 if (psPerProc == NULL) {
587 bKernelContext = IMG_TRUE;
588 hResManContext = psDeviceNode->hResManContext;
590 bKernelContext = IMG_FALSE;
591 hResManContext = psPerProc->hResManContext;
594 if (pbCreated != NULL)
595 *pbCreated = IMG_FALSE;
597 psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
/* Re-use an existing per-process context if this resman context owns one. */
599 if (bKernelContext == IMG_FALSE)
600 for (pBMContext = psDevMemoryInfo->pBMContext;
601 pBMContext != NULL; pBMContext = pBMContext->psNext)
602 if (ResManFindResourceByPtr(hResManContext,
603 pBMContext->hResItem) ==
605 pBMContext->ui32RefCount++;
606 return (void *)pBMContext;
608 if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct BM_CONTEXT),
609 (void **)&pBMContext, NULL) != PVRSRV_OK) {
610 PVR_DPF(PVR_DBG_ERROR, "BM_CreateContext: Alloc failed");
613 OSMemSet(pBMContext, 0, sizeof(struct BM_CONTEXT));
615 pBMContext->psDeviceNode = psDeviceNode;
617 pBMContext->pBufferHash = HASH_Create(32);
618 if (pBMContext->pBufferHash == NULL) {
619 PVR_DPF(PVR_DBG_ERROR,
620 "BM_CreateContext: HASH_Create failed");
624 if (psDeviceNode->pfnMMUInitialise(psDeviceNode,
625 &pBMContext->psMMUContext,
626 psPDDevPAddr) != PVRSRV_OK) {
627 PVR_DPF(PVR_DBG_ERROR,
628 "BM_CreateContext: MMUInitialise failed");
632 if (bKernelContext) {
633 PVR_ASSERT(psDevMemoryInfo->pBMKernelContext == NULL);
634 psDevMemoryInfo->pBMKernelContext = pBMContext;
637 PVR_ASSERT(psDevMemoryInfo->pBMKernelContext);
639 if (psDevMemoryInfo->pBMKernelContext == NULL) {
640 PVR_DPF(PVR_DBG_ERROR, "BM_CreateContext: "
641 "psDevMemoryInfo->pBMKernelContext invalid");
645 PVR_ASSERT(psDevMemoryInfo->pBMKernelContext->psBMHeap);
/* Per-process contexts share the kernel context's heap list; shared
 * heaps are inserted into this context's MMU page tables. */
647 pBMContext->psBMSharedHeap =
648 psDevMemoryInfo->pBMKernelContext->psBMHeap;
650 psBMHeap = pBMContext->psBMSharedHeap;
652 switch (psBMHeap->sDevArena.DevMemHeapType) {
653 case DEVICE_MEMORY_HEAP_SHARED:
654 case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
657 pfnMMUInsertHeap(pBMContext->
664 psBMHeap = psBMHeap->psNext;
666 pBMContext->psNext = psDevMemoryInfo->pBMContext;
667 psDevMemoryInfo->pBMContext = pBMContext;
669 pBMContext->ui32RefCount++;
670 pBMContext->hResItem = ResManRegisterRes(hResManContext,
671 RESMAN_TYPE_DEVICEMEM_CONTEXT,
673 0, BM_DestroyContextCallBack);
674 if (pBMContext->hResItem == NULL) {
675 PVR_DPF(PVR_DBG_ERROR,
676 "BM_CreateContext: ResManRegisterRes failed");
680 if (pbCreated != NULL)
681 *pbCreated = IMG_TRUE;
682 return (void *)pBMContext;
/* Error unwind: reuse the destroy callback to release partial state. */
685 BM_DestroyContextCallBack(pBMContext, 0);
/*
 * BM_CreateHeap - create (or look up) a BM heap in a context from a
 * device-memory heap descriptor.
 *
 * If the context is already referenced, an existing heap with the same
 * heap ID is returned instead of creating a duplicate.  Otherwise a new
 * BM_HEAP is allocated and populated from psDevMemHeapInfo, an MMU heap
 * is created for it, an import arena (backed by BM_ImportMemory /
 * BM_FreeMemory) is created, and for local-contiguous backing the local
 * device-memory arena pointer is validated.  The heap is pushed onto the
 * context's heap list.  The tail is the error unwind (delete MMU heap,
 * finalise MMU context, free the struct).
 *
 * NOTE(review): stale embedded line numbers jump; the loop header of the
 * existing-heap search, some returns and closing braces are elided from
 * this view.
 */
690 void *BM_CreateHeap(void *hBMContext,
691 struct DEVICE_MEMORY_HEAP_INFO *psDevMemHeapInfo)
693 struct BM_CONTEXT *pBMContext = (struct BM_CONTEXT *)hBMContext;
694 struct PVRSRV_DEVICE_NODE *psDeviceNode = pBMContext->psDeviceNode;
695 struct BM_HEAP *psBMHeap;
697 PVR_DPF(PVR_DBG_MESSAGE, "BM_CreateHeap");
/* Referenced context: return the already-created heap with this ID. */
699 if (pBMContext->ui32RefCount > 0) {
700 psBMHeap = pBMContext->psBMHeap;
703 if (psBMHeap->sDevArena.ui32HeapID ==
704 psDevMemHeapInfo->ui32HeapID)
707 psBMHeap = psBMHeap->psNext;
711 if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct BM_HEAP),
712 (void **) &psBMHeap, NULL) != PVRSRV_OK) {
713 PVR_DPF(PVR_DBG_ERROR, "BM_CreateHeap: Alloc failed");
717 OSMemSet(psBMHeap, 0, sizeof(struct BM_HEAP));
719 psBMHeap->sDevArena.ui32HeapID = psDevMemHeapInfo->ui32HeapID;
720 psBMHeap->sDevArena.pszName = psDevMemHeapInfo->pszName;
721 psBMHeap->sDevArena.BaseDevVAddr = psDevMemHeapInfo->sDevVAddrBase;
722 psBMHeap->sDevArena.ui32Size = psDevMemHeapInfo->ui32HeapSize;
723 psBMHeap->sDevArena.DevMemHeapType = psDevMemHeapInfo->DevMemHeapType;
724 psBMHeap->sDevArena.ui32DataPageSize =
725 psDevMemHeapInfo->ui32DataPageSize;
726 psBMHeap->sDevArena.psDeviceMemoryHeapInfo = psDevMemHeapInfo;
727 psBMHeap->ui32Attribs = psDevMemHeapInfo->ui32Attribs;
729 psBMHeap->pBMContext = pBMContext;
732 psDeviceNode->pfnMMUCreate(pBMContext->psMMUContext,
733 &psBMHeap->sDevArena,
734 &psBMHeap->pVMArena);
735 if (!psBMHeap->pMMUHeap) {
736 PVR_DPF(PVR_DBG_ERROR, "BM_CreateHeap: MMUCreate failed");
/* Import arena: sourced on demand from BM_ImportMemory. */
740 psBMHeap->pImportArena = RA_Create(psDevMemHeapInfo->pszBSName,
742 psBMHeap->sDevArena.ui32DataPageSize,
744 BM_FreeMemory, NULL, psBMHeap);
745 if (psBMHeap->pImportArena == NULL) {
746 PVR_DPF(PVR_DBG_ERROR, "BM_CreateHeap: RA_Create failed");
750 if (psBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG) {
752 psBMHeap->pLocalDevMemArena =
753 psDevMemHeapInfo->psLocalDevMemArena;
754 if (psBMHeap->pLocalDevMemArena == NULL) {
755 PVR_DPF(PVR_DBG_ERROR,
756 "BM_CreateHeap: LocalDevMemArena null");
761 psBMHeap->psNext = pBMContext->psBMHeap;
762 pBMContext->psBMHeap = psBMHeap;
764 return (void *)psBMHeap;
/* --- error unwind --- */
768 if (psBMHeap->pMMUHeap != NULL) {
769 psDeviceNode->pfnMMUDelete(psBMHeap->pMMUHeap);
770 psDeviceNode->pfnMMUFinalise(pBMContext->psMMUContext);
773 OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct BM_HEAP),
/*
 * BM_DestroyHeap - destroy a BM heap and unlink it from its context.
 *
 * Deletes the import arena (for supported backing-store types) and the
 * MMU heap, then walks the context's heap list to unlink and free the
 * BM_HEAP struct.
 *
 * NOTE(review): stale embedded line numbers jump; the list-walk loop
 * header and closing braces are elided from this view.
 */
779 void BM_DestroyHeap(void *hDevMemHeap)
781 struct BM_HEAP *psBMHeap = (struct BM_HEAP *)hDevMemHeap;
782 struct PVRSRV_DEVICE_NODE *psDeviceNode =
783 psBMHeap->pBMContext->psDeviceNode;
784 struct BM_HEAP **ppsBMHeap;
786 PVR_DPF(PVR_DBG_MESSAGE, "BM_DestroyHeap");
788 if (psBMHeap->ui32Attribs &
789 (PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG |
790 PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG)) {
791 if (psBMHeap->pImportArena)
792 RA_Delete(psBMHeap->pImportArena);
794 PVR_DPF(PVR_DBG_ERROR,
795 "BM_DestroyHeap: backing store type unsupported");
799 psDeviceNode->pfnMMUDelete(psBMHeap->pMMUHeap);
801 ppsBMHeap = &psBMHeap->pBMContext->psBMHeap;
803 if (*ppsBMHeap == psBMHeap) {
804 *ppsBMHeap = psBMHeap->psNext;
805 OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
806 sizeof(struct BM_HEAP), psBMHeap,
810 ppsBMHeap = &((*ppsBMHeap)->psNext);
/*
 * BM_Reinitialise - no-op reinitialisation hook; logs and ignores the
 * device node.  (Return statement not visible in this listing.)
 */
814 IMG_BOOL BM_Reinitialise(struct PVRSRV_DEVICE_NODE *psDeviceNode)
816 PVR_DPF(PVR_DBG_MESSAGE, "BM_Reinitialise");
817 PVR_UNREFERENCED_PARAMETER(psDeviceNode);
/*
 * BM_Alloc - public entry point: allocate a device-memory buffer from a
 * BM heap.
 *
 * Validates pui32Flags, allocates and zeroes a BM_BUF, delegates the real
 * work to AllocMemory, then returns the buffer handle via *phBuf and the
 * effective flags (caller flags OR heap attributes) via *pui32Flags.
 * Takes a reference on the BM context.  A zero alignment is promoted
 * to 1.
 *
 * NOTE(review): stale embedded line numbers jump; failure returns and the
 * final success return are elided from this view.
 */
822 IMG_BOOL BM_Alloc(void *hDevMemHeap, struct IMG_DEV_VIRTADDR *psDevVAddr,
823 size_t uSize, u32 *pui32Flags, u32 uDevVAddrAlignment,
827 struct BM_CONTEXT *pBMContext;
828 struct BM_HEAP *psBMHeap;
829 struct SYS_DATA *psSysData;
832 if (pui32Flags == NULL) {
833 PVR_DPF(PVR_DBG_ERROR, "BM_Alloc: invalid parameter");
838 uFlags = *pui32Flags;
840 PVR_DPF(PVR_DBG_MESSAGE,
841 "BM_Alloc (uSize=0x%x, uFlags=0x%x, uDevVAddrAlignment=0x%x)",
842 uSize, uFlags, uDevVAddrAlignment);
844 if (SysAcquireData(&psSysData) != PVRSRV_OK)
847 psBMHeap = (struct BM_HEAP *)hDevMemHeap;
848 pBMContext = psBMHeap->pBMContext;
850 if (uDevVAddrAlignment == 0)
851 uDevVAddrAlignment = 1;
853 if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct BM_BUF),
854 (void **)&pBuf, NULL) != PVRSRV_OK) {
855 PVR_DPF(PVR_DBG_ERROR, "BM_Alloc: BM_Buf alloc FAILED");
858 OSMemSet(pBuf, 0, sizeof(struct BM_BUF));
860 if (AllocMemory(pBMContext, psBMHeap, psDevVAddr, uSize, uFlags,
861 uDevVAddrAlignment, pBuf) != IMG_TRUE) {
862 OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct BM_BUF), pBuf,
864 PVR_DPF(PVR_DBG_ERROR, "BM_Alloc: AllocMemory FAILED");
868 PVR_DPF(PVR_DBG_MESSAGE, "BM_Alloc (uSize=0x%x, uFlags=0x%x)=%08X",
869 uSize, uFlags, pBuf);
871 pBuf->ui32RefCount = 1;
872 pvr_get_ctx(pBMContext);
873 *phBuf = (void *) pBuf;
874 *pui32Flags = uFlags | psBMHeap->ui32Attribs;
/*
 * BM_Wrap - public entry point: wrap caller-owned physical memory into a
 * device-mapped buffer.
 *
 * The first system physical address (plus ui32Offset) is used as a hash
 * key so that wrapping the same contiguous memory twice returns the same
 * BM_BUF with its refcount bumped.  On a miss, a new BM_BUF is allocated
 * and WrapMemory does the real work; contiguous-origin buffers are then
 * inserted into the context's buffer hash.  Returns the handle via *phBuf
 * and rewrites *pui32Flags forcing PVRSRV_HAP_MULTI_PROCESS as the map
 * type.  Takes a reference on the BM context.
 *
 * NOTE(review): stale embedded line numbers jump; returns, closing braces
 * and the `if (pBuf)` guard around the hash hit are elided from this view.
 */
879 IMG_BOOL BM_Wrap(void *hDevMemHeap, u32 ui32Size, u32 ui32Offset,
880 IMG_BOOL bPhysContig, struct IMG_SYS_PHYADDR *psSysAddr,
881 void *pvCPUVAddr, u32 *pui32Flags, void **phBuf)
884 struct BM_CONTEXT *psBMContext;
885 struct BM_HEAP *psBMHeap;
886 struct SYS_DATA *psSysData;
887 struct IMG_SYS_PHYADDR sHashAddress;
890 psBMHeap = (struct BM_HEAP *)hDevMemHeap;
891 psBMContext = psBMHeap->pBMContext;
/* Effective flags: heap cache/map attributes plus caller's flags. */
893 uFlags = psBMHeap->ui32Attribs &
894 (PVRSRV_HAP_CACHETYPE_MASK | PVRSRV_HAP_MAPTYPE_MASK);
897 uFlags |= *pui32Flags;
899 PVR_DPF(PVR_DBG_MESSAGE, "BM_Wrap (uSize=0x%x, uOffset=0x%x, "
900 "bPhysContig=0x%x, pvCPUVAddr=0x%x, uFlags=0x%x)",
901 ui32Size, ui32Offset, bPhysContig, pvCPUVAddr, uFlags);
903 if (SysAcquireData(&psSysData) != PVRSRV_OK)
/* Hash on first physical page + offset to detect duplicate wraps. */
906 sHashAddress = psSysAddr[0];
908 sHashAddress.uiAddr += ui32Offset;
910 pBuf = (struct BM_BUF *)HASH_Retrieve(psBMContext->pBufferHash,
911 (u32) sHashAddress.uiAddr);
914 u32 ui32MappingSize =
915 HOST_PAGEALIGN(ui32Size + ui32Offset);
917 if (pBuf->pMapping->uSize == ui32MappingSize &&
918 (pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped ||
919 pBuf->pMapping->eCpuMemoryOrigin ==
920 hm_wrapped_virtaddr)) {
921 PVR_DPF(PVR_DBG_MESSAGE, "BM_Wrap "
922 "(Matched previous Wrap! uSize=0x%x, "
923 "uOffset=0x%x, SysAddr=%08X)",
924 ui32Size, ui32Offset, sHashAddress.uiAddr);
926 pBuf->ui32RefCount++;
927 *phBuf = (void *)pBuf;
929 *pui32Flags = uFlags;
935 if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct BM_BUF),
936 (void **)&pBuf, NULL) != PVRSRV_OK) {
937 PVR_DPF(PVR_DBG_ERROR, "BM_Wrap: BM_Buf alloc FAILED");
940 OSMemSet(pBuf, 0, sizeof(struct BM_BUF));
942 if (WrapMemory(psBMHeap, ui32Size, ui32Offset, bPhysContig, psSysAddr,
943 pvCPUVAddr, uFlags, pBuf) != IMG_TRUE) {
944 PVR_DPF(PVR_DBG_ERROR, "BM_Wrap: WrapMemory FAILED");
945 OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct BM_BUF), pBuf,
/* Only contiguous-origin wraps are hashed for future de-duplication. */
950 if (pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped ||
951 pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped_virtaddr) {
953 PVR_ASSERT(SysSysPAddrToCpuPAddr(sHashAddress).uiAddr ==
954 pBuf->CpuPAddr.uiAddr);
956 if (!HASH_Insert(psBMContext->pBufferHash,
957 (u32)sHashAddress.uiAddr, (u32) pBuf)) {
958 FreeBuf(pBuf, uFlags);
959 PVR_DPF(PVR_DBG_ERROR, "BM_Wrap: HASH_Insert FAILED");
964 PVR_DPF(PVR_DBG_MESSAGE,
965 "BM_Wrap (uSize=0x%x, uFlags=0x%x)=%08X(devVAddr=%08X)",
966 ui32Size, uFlags, pBuf, pBuf->DevVAddr.uiAddr);
968 pBuf->ui32RefCount = 1;
969 pvr_get_ctx(psBMContext);
970 *phBuf = (void *) pBuf;
972 *pui32Flags = (uFlags & ~PVRSRV_HAP_MAPTYPE_MASK) |
973 PVRSRV_HAP_MULTI_PROCESS;
/*
 * BM_Free - drop a reference on a buffer; free it at refcount zero.
 *
 * Wrapped contiguous buffers are removed from the context's buffer hash
 * (keyed on their system physical address) before FreeBuf releases the
 * buffer and, if unshared, its mapping.
 */
978 void BM_Free(void *hBuf, u32 ui32Flags)
980 struct BM_BUF *pBuf = (struct BM_BUF *)hBuf;
981 struct SYS_DATA *psSysData;
982 struct IMG_SYS_PHYADDR sHashAddr;
984 PVR_DPF(PVR_DBG_MESSAGE, "BM_Free (h=%08X)", hBuf);
985 PVR_ASSERT(pBuf != NULL);
988 PVR_DPF(PVR_DBG_ERROR, "BM_Free: invalid parameter");
992 if (SysAcquireData(&psSysData) != PVRSRV_OK)
995 pBuf->ui32RefCount--;
997 if (pBuf->ui32RefCount == 0) {
998 struct BM_MAPPING *map = pBuf->pMapping;
999 struct BM_CONTEXT *ctx = map->pBMHeap->pBMContext;
1001 if (map->eCpuMemoryOrigin == hm_wrapped ||
1002 map->eCpuMemoryOrigin == hm_wrapped_virtaddr) {
1003 sHashAddr = SysCpuPAddrToSysPAddr(pBuf->CpuPAddr);
1005 HASH_Remove(ctx->pBufferHash, (u32)sHashAddr.uiAddr);
1007 FreeBuf(pBuf, ui32Flags);
/*
 * BM_HandleToCpuVaddr - return the CPU virtual address of a buffer
 * handle (may be NULL for unbacked buffers).
 */
1012 void *BM_HandleToCpuVaddr(void *hBuf)
1014 struct BM_BUF *pBuf = (struct BM_BUF *)hBuf;
1016 PVR_ASSERT(pBuf != NULL);
1018 PVR_DPF(PVR_DBG_ERROR,
1019 "BM_HandleToCpuVaddr: invalid parameter");
1023 PVR_DPF(PVR_DBG_MESSAGE,
1024 "BM_HandleToCpuVaddr(h=%08X)=%08X", hBuf, pBuf->CpuVAddr);
1025 return pBuf->CpuVAddr;
/*
 * BM_HandleToDevVaddr - return the device virtual address of a buffer
 * handle; a zeroed address is returned for a NULL handle.
 */
1028 struct IMG_DEV_VIRTADDR BM_HandleToDevVaddr(void *hBuf)
1030 struct BM_BUF *pBuf = (struct BM_BUF *)hBuf;
1032 PVR_ASSERT(pBuf != NULL);
1034 struct IMG_DEV_VIRTADDR DevVAddr = { 0 };
1035 PVR_DPF(PVR_DBG_ERROR,
1036 "BM_HandleToDevVaddr: invalid parameter");
1040 PVR_DPF(PVR_DBG_MESSAGE, "BM_HandleToDevVaddr(h=%08X)=%08X", hBuf,
1042 return pBuf->DevVAddr;
/*
 * BM_HandleToSysPaddr - return the system physical address of a buffer
 * handle (converted from its CPU physical address); a zeroed address is
 * returned for a NULL handle.
 */
1045 struct IMG_SYS_PHYADDR BM_HandleToSysPaddr(void *hBuf)
1047 struct BM_BUF *pBuf = (struct BM_BUF *)hBuf;
1049 PVR_ASSERT(pBuf != NULL);
1052 struct IMG_SYS_PHYADDR PhysAddr = { 0 };
1053 PVR_DPF(PVR_DBG_ERROR,
1054 "BM_HandleToSysPaddr: invalid parameter");
1058 PVR_DPF(PVR_DBG_MESSAGE, "BM_HandleToSysPaddr(h=%08X)=%08X", hBuf,
1059 pBuf->CpuPAddr.uiAddr);
1060 return SysCpuPAddrToSysPAddr(pBuf->CpuPAddr);
/*
 * BM_HandleToOSMemHandle - return the OS memory handle underlying a
 * buffer handle.
 */
1063 void *BM_HandleToOSMemHandle(void *hBuf)
1065 struct BM_BUF *pBuf = (struct BM_BUF *)hBuf;
1067 PVR_ASSERT(pBuf != NULL);
1070 PVR_DPF(PVR_DBG_ERROR,
1071 "BM_HandleToOSMemHandle: invalid parameter");
1075 PVR_DPF(PVR_DBG_MESSAGE,
1076 "BM_HandleToOSMemHandle(h=%08X)=%08X",
1077 hBuf, pBuf->hOSMemHandle);
1078 return pBuf->hOSMemHandle;
/*
 * BM_ContiguousStatistics - stub; the visible body only tests its
 * parameters (return not visible in this listing).
 */
1081 IMG_BOOL BM_ContiguousStatistics(u32 uFlags, u32 *pTotalBytes,
1082 u32 *pAvailableBytes)
1084 if (pAvailableBytes || pTotalBytes || uFlags)
/*
 * DevMemoryAlloc - map a BM_MAPPING into device-virtual space.
 *
 * Doubles the mapping size for PVRSRV_MEM_INTERLEAVED, reserves device
 * virtual space with pfnMMUAlloc, emits a pdump allocation record, then
 * programs the page tables according to the mapping's CPU-memory origin:
 * contiguous/wrapped use pfnMMUMapPages, env-allocated pages use
 * pfnMMUMapShadow (via the OS mem handle), scattered pages use
 * pfnMMUMapScatter.  The resulting device address is returned through
 * pDevVAddr.
 *
 * NOTE(review): stale embedded line numbers jump; case labels, breaks,
 * returns and closing braces are elided from this view.
 */
1089 static IMG_BOOL DevMemoryAlloc(struct BM_CONTEXT *pBMContext,
1090 struct BM_MAPPING *pMapping, u32 uFlags, u32 dev_vaddr_alignment,
1091 struct IMG_DEV_VIRTADDR *pDevVAddr)
1093 struct PVRSRV_DEVICE_NODE *psDeviceNode = pBMContext->psDeviceNode;
1095 if (uFlags & PVRSRV_MEM_INTERLEAVED)
1096 pMapping->uSize *= 2;
1098 if (!psDeviceNode->pfnMMUAlloc(pMapping->pBMHeap->pMMUHeap,
1099 pMapping->uSize, 0, dev_vaddr_alignment,
1100 &(pMapping->DevVAddr))) {
1101 PVR_DPF(PVR_DBG_ERROR, "DevMemoryAlloc ERROR MMU_Alloc");
/* Dummy allocations pdump only one data page. */
1107 u32 ui32PDumpSize = pMapping->uSize;
1109 if (uFlags & PVRSRV_MEM_DUMMY)
1111 pMapping->pBMHeap->sDevArena.ui32DataPageSize;
1113 PDUMPMALLOCPAGES(psDeviceNode->sDevId.eDeviceType,
1114 pMapping->DevVAddr.uiAddr, pMapping->CpuVAddr,
1115 pMapping->hOSMemHandle, ui32PDumpSize,
1120 switch (pMapping->eCpuMemoryOrigin) {
1122 case hm_wrapped_virtaddr:
1125 psDeviceNode->pfnMMUMapPages(pMapping->pBMHeap->
1128 SysCpuPAddrToSysPAddr
1129 (pMapping->CpuPAddr),
1130 pMapping->uSize, uFlags,
1133 *pDevVAddr = pMapping->DevVAddr;
1138 psDeviceNode->pfnMMUMapShadow(pMapping->pBMHeap->
1143 pMapping->hOSMemHandle,
1148 case hm_wrapped_scatter:
1149 case hm_wrapped_scatter_virtaddr:
1151 psDeviceNode->pfnMMUMapScatter(pMapping->pBMHeap->
1154 pMapping->psSysAddr,
1155 pMapping->uSize, uFlags,
1158 *pDevVAddr = pMapping->DevVAddr;
1162 PVR_DPF(PVR_DBG_ERROR,
1163 "Illegal value %d for pMapping->eCpuMemoryOrigin",
1164 pMapping->eCpuMemoryOrigin);
/*
 * DevMemoryFree - tear down a mapping's device-virtual range.
 *
 * Emits a pdump free record (dummy allocations report one data page) and
 * releases the device virtual space via pfnMMUFree.
 */
1172 static void DevMemoryFree(struct BM_MAPPING *pMapping)
1174 struct PVRSRV_DEVICE_NODE *psDeviceNode;
1178 if (pMapping->ui32Flags & PVRSRV_MEM_DUMMY)
1179 ui32PSize = pMapping->pBMHeap->sDevArena.ui32DataPageSize;
1181 ui32PSize = pMapping->uSize;
1183 PDUMPFREEPAGES(pMapping->pBMHeap, pMapping->DevVAddr, ui32PSize,
1184 (void *)pMapping, (IMG_BOOL)(pMapping->
1185 ui32Flags & PVRSRV_MEM_INTERLEAVED));
1188 psDeviceNode = pMapping->pBMHeap->pBMContext->psDeviceNode;
1190 psDeviceNode->pfnMMUFree(pMapping->pBMHeap->pMMUHeap,
1191 pMapping->DevVAddr, pMapping->uSize);
/*
 * BM_ImportMemory - RA import callback: bring new backing memory into a
 * heap's import arena.
 *
 * Page-aligns the request, allocates a BM_MAPPING, then obtains physical
 * backing according to the heap's backing-store attribute:
 *   - SYSMEM_NONCONTIG: OSAllocPages (origin hm_env);
 *   - LOCALMEM_CONTIG: RA_Alloc from the local device-memory arena then
 *     OSReservePhys for a CPU mapping (origin hm_contiguous).
 * PVRSRV_MEM_DUMMY requests back only a single data page.  The mapping
 * is then mapped into device-virtual space (DevMemoryAlloc) and the base
 * device address plus mapping are returned to the arena.  The tail is the
 * error unwind, which frees pages / un-reserves and returns local memory
 * before freeing the mapping struct.
 *
 * NOTE(review): stale embedded line numbers jump; returns, goto labels
 * and closing braces are elided from this view.
 */
1194 static IMG_BOOL BM_ImportMemory(void *pH, size_t uRequestSize,
1195 size_t *pActualSize, struct BM_MAPPING **ppsMapping,
1196 u32 uFlags, u32 *pBase)
1198 struct BM_MAPPING *pMapping;
1199 struct BM_HEAP *pBMHeap = pH;
1200 struct BM_CONTEXT *pBMContext = pBMHeap->pBMContext;
1204 u32 uDevVAddrAlignment = 0;
1206 PVR_DPF(PVR_DBG_MESSAGE,
1207 "BM_ImportMemory (pBMContext=%08X, uRequestSize=0x%x, "
1208 "uFlags=0x%x, uAlign=0x%x)",
1209 pBMContext, uRequestSize, uFlags, uDevVAddrAlignment);
1211 PVR_ASSERT(ppsMapping != NULL);
1212 PVR_ASSERT(pBMContext != NULL);
1214 if (ppsMapping == NULL) {
1215 PVR_DPF(PVR_DBG_ERROR, "BM_ImportMemory: invalid parameter");
1219 uSize = HOST_PAGEALIGN(uRequestSize);
1220 PVR_ASSERT(uSize >= uRequestSize);
1222 if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct BM_MAPPING),
1223 (void **)&pMapping, NULL) != PVRSRV_OK) {
1224 PVR_DPF(PVR_DBG_ERROR,
1225 "BM_ImportMemory: failed struct BM_MAPPING alloc");
1229 pMapping->hOSMemHandle = NULL;
1230 pMapping->CpuVAddr = NULL;
1231 pMapping->DevVAddr.uiAddr = 0;
1232 pMapping->CpuPAddr.uiAddr = 0;
1233 pMapping->uSize = uSize;
1234 pMapping->pBMHeap = pBMHeap;
1235 pMapping->ui32Flags = uFlags;
1238 *pActualSize = uSize;
/* Dummy mappings are backed by a single data page. */
1240 if (pMapping->ui32Flags & PVRSRV_MEM_DUMMY)
1241 uPSize = pBMHeap->sDevArena.ui32DataPageSize;
1243 uPSize = pMapping->uSize;
1245 if (pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG) {
1246 if (OSAllocPages(pBMHeap->ui32Attribs, uPSize,
1247 pBMHeap->sDevArena.ui32DataPageSize,
1248 (void **)&pMapping->CpuVAddr,
1249 &pMapping->hOSMemHandle) != PVRSRV_OK) {
1250 PVR_DPF(PVR_DBG_ERROR,
1251 "BM_ImportMemory: OSAllocPages(0x%x) failed",
1253 goto fail_mapping_alloc;
1256 pMapping->eCpuMemoryOrigin = hm_env;
1257 } else if (pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG) {
1258 struct IMG_SYS_PHYADDR sSysPAddr;
1260 PVR_ASSERT(pBMHeap->pLocalDevMemArena != NULL);
1262 if (!RA_Alloc(pBMHeap->pLocalDevMemArena, uPSize, NULL, 0,
1263 pBMHeap->sDevArena.ui32DataPageSize,
1264 (u32 *)&sSysPAddr.uiAddr)) {
1265 PVR_DPF(PVR_DBG_ERROR,
1266 "BM_ImportMemory: RA_Alloc(0x%x) FAILED",
1268 goto fail_mapping_alloc;
1271 pMapping->CpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
1272 if (OSReservePhys(pMapping->CpuPAddr, uPSize,
1273 pBMHeap->ui32Attribs, &pMapping->CpuVAddr,
1274 &pMapping->hOSMemHandle) != PVRSRV_OK) {
1275 PVR_DPF(PVR_DBG_ERROR,
1276 "BM_ImportMemory: OSReservePhys failed");
1277 goto fail_dev_mem_alloc;
1280 pMapping->eCpuMemoryOrigin = hm_contiguous;
1282 PVR_DPF(PVR_DBG_ERROR,
1283 "BM_ImportMemory: Invalid backing store type");
1284 goto fail_mapping_alloc;
1287 bResult = DevMemoryAlloc(pBMContext, pMapping, uFlags,
1288 uDevVAddrAlignment, &pMapping->DevVAddr);
1290 PVR_DPF(PVR_DBG_ERROR,
1291 "BM_ImportMemory: DevMemoryAlloc(0x%x) failed",
1293 goto fail_dev_mem_alloc;
1296 PVR_ASSERT(uDevVAddrAlignment > 1 ?
1297 (pMapping->DevVAddr.uiAddr % uDevVAddrAlignment) == 0 : 1);
1299 *pBase = pMapping->DevVAddr.uiAddr;
1300 *ppsMapping = pMapping;
1302 PVR_DPF(PVR_DBG_MESSAGE, "BM_ImportMemory: IMG_TRUE");
/* --- error unwind: release whichever backing was acquired --- */
1306 if (pMapping->CpuVAddr || pMapping->hOSMemHandle) {
1307 if (pMapping->ui32Flags & PVRSRV_MEM_INTERLEAVED)
1308 pMapping->uSize /= 2;
1310 if (pMapping->ui32Flags & PVRSRV_MEM_DUMMY)
1311 uPSize = pBMHeap->sDevArena.ui32DataPageSize;
1313 uPSize = pMapping->uSize;
1315 if (pBMHeap->ui32Attribs &
1316 PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG) {
1317 OSFreePages(pBMHeap->ui32Attribs, uPSize,
1318 (void *)pMapping->CpuVAddr,
1319 pMapping->hOSMemHandle);
1321 struct IMG_SYS_PHYADDR sSysPAddr;
1323 if (pMapping->CpuVAddr)
1324 OSUnReservePhys(pMapping->CpuVAddr, uPSize,
1325 pBMHeap->ui32Attribs,
1326 pMapping->hOSMemHandle);
1327 sSysPAddr = SysCpuPAddrToSysPAddr(pMapping->CpuPAddr);
1328 RA_Free(pBMHeap->pLocalDevMemArena, sSysPAddr.uiAddr,
1333 OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct BM_MAPPING), pMapping,
1339 static void BM_FreeMemory(void *h, u32 _base, struct BM_MAPPING *psMapping)
1341 struct BM_HEAP *pBMHeap = h;
1344 PVR_UNREFERENCED_PARAMETER(_base);
1346 PVR_DPF(PVR_DBG_MESSAGE,
1347 "BM_FreeMemory (h=%08X, base=0x%x, psMapping=0x%x)", h, _base,
1350 PVR_ASSERT(psMapping != NULL);
1352 if (psMapping == NULL) {
1353 PVR_DPF(PVR_DBG_ERROR, "BM_FreeMemory: invalid parameter");
1357 DevMemoryFree(psMapping);
1359 if ((psMapping->ui32Flags & PVRSRV_MEM_INTERLEAVED) != 0)
1360 psMapping->uSize /= 2;
1362 if (psMapping->ui32Flags & PVRSRV_MEM_DUMMY)
1363 uPSize = psMapping->pBMHeap->sDevArena.ui32DataPageSize;
1365 uPSize = psMapping->uSize;
1367 if (pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG) {
1368 OSFreePages(pBMHeap->ui32Attribs, uPSize,
1369 (void *)psMapping->CpuVAddr,
1370 psMapping->hOSMemHandle);
1371 } else if (pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG) {
1372 struct IMG_SYS_PHYADDR sSysPAddr;
1374 OSUnReservePhys(psMapping->CpuVAddr, uPSize,
1375 pBMHeap->ui32Attribs, psMapping->hOSMemHandle);
1377 sSysPAddr = SysCpuPAddrToSysPAddr(psMapping->CpuPAddr);
1379 RA_Free(pBMHeap->pLocalDevMemArena, sSysPAddr.uiAddr,
1382 PVR_DPF(PVR_DBG_ERROR,
1383 "BM_FreeMemory: Invalid backing store type");
1386 OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct BM_MAPPING), psMapping,
1389 PVR_DPF(PVR_DBG_MESSAGE,
1390 "..BM_FreeMemory (h=%08X, base=0x%x, psMapping=0x%x)",
1391 h, _base, psMapping);
1394 enum PVRSRV_ERROR BM_GetPhysPageAddr(struct PVRSRV_KERNEL_MEM_INFO *psMemInfo,
1395 struct IMG_DEV_VIRTADDR sDevVPageAddr,
1396 struct IMG_DEV_PHYADDR *psDevPAddr)
1398 struct PVRSRV_DEVICE_NODE *psDeviceNode;
1400 PVR_DPF(PVR_DBG_MESSAGE, "BM_GetPhysPageAddr");
1402 if (!psMemInfo || !psDevPAddr) {
1403 PVR_DPF(PVR_DBG_ERROR, "BM_GetPhysPageAddr: Invalid params");
1404 return PVRSRV_ERROR_INVALID_PARAMS;
1408 ((struct BM_BUF *)psMemInfo->sMemBlk.hBuffer)->pMapping->pBMHeap->
1409 pBMContext->psDeviceNode;
1411 *psDevPAddr = psDeviceNode->pfnMMUGetPhysPageAddr(((struct BM_BUF *)
1412 psMemInfo->sMemBlk.hBuffer)->
1413 pMapping->pBMHeap->pMMUHeap, sDevVPageAddr);
1418 enum PVRSRV_ERROR BM_GetHeapInfo(void *hDevMemHeap,
1419 struct PVRSRV_HEAP_INFO *psHeapInfo)
1421 struct BM_HEAP *psBMHeap = (struct BM_HEAP *)hDevMemHeap;
1423 PVR_DPF(PVR_DBG_VERBOSE, "BM_GetHeapInfo");
1425 psHeapInfo->hDevMemHeap = hDevMemHeap;
1426 psHeapInfo->sDevVAddrBase = psBMHeap->sDevArena.BaseDevVAddr;
1427 psHeapInfo->ui32HeapByteSize = psBMHeap->sDevArena.ui32Size;
1428 psHeapInfo->ui32Attribs = psBMHeap->ui32Attribs;
1433 struct MMU_CONTEXT *BM_GetMMUContext(void *hDevMemHeap)
1435 struct BM_HEAP *pBMHeap = (struct BM_HEAP *)hDevMemHeap;
1437 PVR_DPF(PVR_DBG_VERBOSE, "BM_GetMMUContext");
1439 return pBMHeap->pBMContext->psMMUContext;
1442 struct MMU_CONTEXT *BM_GetMMUContextFromMemContext(void *hDevMemContext)
1444 struct BM_CONTEXT *pBMContext = (struct BM_CONTEXT *)hDevMemContext;
1446 PVR_DPF(PVR_DBG_VERBOSE, "BM_GetMMUContextFromMemContext");
1448 return pBMContext->psMMUContext;
1451 void *BM_GetMMUHeap(void *hDevMemHeap)
1453 PVR_DPF(PVR_DBG_VERBOSE, "BM_GetMMUHeap");
1455 return (void *)((struct BM_HEAP *)hDevMemHeap)->pMMUHeap;
1458 struct PVRSRV_DEVICE_NODE *BM_GetDeviceNode(void *hDevMemContext)
1460 PVR_DPF(PVR_DBG_VERBOSE, "BM_GetDeviceNode");
1462 return ((struct BM_CONTEXT *)hDevMemContext)->psDeviceNode;
1465 void *BM_GetMappingHandle(struct PVRSRV_KERNEL_MEM_INFO *psMemInfo)
1467 PVR_DPF(PVR_DBG_VERBOSE, "BM_GetMappingHandle");
1469 return ((struct BM_BUF *)
1470 psMemInfo->sMemBlk.hBuffer)->pMapping->hOSMemHandle;
1473 struct BM_CONTEXT *bm_find_context(struct BM_CONTEXT *head_context,
1476 struct BM_CONTEXT *context = head_context;
1478 /* Walk all the contexts until we find the right one */
1480 if (mmu_get_page_dir(context->psMMUContext) == page_dir)
1482 context = context->psNext;