"DRM_PVR2D_CFLUSH_FROM_GPU 0x%08x, length 0x%08x\n",
ui32Virt, ui32Length);
#ifdef CONFIG_ARM
- dmac_inv_range((const void *)ui32Virt,
- (const void *)(ui32Virt + ui32Length));
+ dmac_map_area((const void *)ui32Virt, ui32Length, DMA_FROM_DEVICE);
#endif
return 0;
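
Note: on ARM, dmac_map_area() dispatches on its direction argument
(DMA_FROM_DEVICE invalidates the range, DMA_TO_DEVICE cleans it), so this
hunk keeps the semantics of the removed dmac_inv_range().
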
case DRM_PVR2D_CFLUSH_TO_GPU:
"DRM_PVR2D_CFLUSH_TO_GPU 0x%08x, length 0x%08x\n",
ui32Virt, ui32Length);
#ifdef CONFIG_ARM
- dmac_clean_range((const void *)ui32Virt,
- (const void *)(ui32Virt + ui32Length));
+ dmac_map_area((const void *)ui32Virt, ui32Length, DMA_TO_DEVICE);
#endif
return 0;
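
The TO_GPU hunk is likewise equivalent to the old dmac_clean_range(). A
minimal compatibility sketch under that assumption; the wrapper names
pvr2d_cache_inv()/pvr2d_cache_clean() are hypothetical, not part of this
patch:

#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>

/* Hypothetical shims bridging the removed range-based helpers onto
 * dmac_map_area(); the direction argument selects the cache operation. */
static inline void pvr2d_cache_inv(const void *start, size_t len)
{
	dmac_map_area(start, len, DMA_FROM_DEVICE);	/* invalidate */
}

static inline void pvr2d_cache_clean(const void *start, size_t len)
{
	dmac_map_area(start, len, DMA_TO_DEVICE);	/* clean/writeback */
}
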
default:
u32 pg_ofs;
u32 vaddr, vaddr_end;
+ /* ARM-internal helper: device-to-CPU (unmap-side) cache maintenance
+  * for one contiguous buffer */
+ extern void ___dma_single_dev_to_cpu(const void *, size_t,
+ enum dma_data_direction);
+
vaddr = (u32)mem_area->uData.sVmalloc.pvVmallocAddress;
vaddr_end = vaddr + mem_area->ui32ByteSize;
pg_cnt = (PAGE_ALIGN(vaddr_end) - (vaddr & PAGE_MASK)) / PAGE_SIZE;
pg_ofs = vaddr & ~PAGE_MASK;
kaddr += pg_ofs;
chunk = min_t(ssize_t, vaddr_end - vaddr, PAGE_SIZE - pg_ofs);
- dma_cache_maint(kaddr, chunk, DMA_FROM_DEVICE);
+ ___dma_single_dev_to_cpu(kaddr, chunk, DMA_FROM_DEVICE);
vaddr += chunk;
}
}
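
dma_cache_maint() no longer exists in the ARM tree;
___dma_single_dev_to_cpu() is the internal helper that now performs the
CPU-side (device-to-CPU) maintenance for this direction. A sketch of the
whole per-page walk the hunk sits in, reconstructed from the fragment
above; the function name pvr_flush_vmalloc_range() and the
vmalloc_to_page() lookup are assumptions:

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/dma-mapping.h>

/* ARM-private helper, forward-declared the same way the patch does */
extern void ___dma_single_dev_to_cpu(const void *, size_t,
				     enum dma_data_direction);

/* vmalloc memory is only virtually contiguous, so each page must be
 * translated to its linear-map address before cache maintenance. */
static void pvr_flush_vmalloc_range(void *vstart, size_t size)
{
	u32 vaddr = (u32)vstart;
	u32 vaddr_end = vaddr + size;

	while (vaddr < vaddr_end) {
		u32 pg_ofs = vaddr & ~PAGE_MASK;
		char *kaddr = page_address(vmalloc_to_page((void *)vaddr));
		size_t chunk = min_t(size_t, vaddr_end - vaddr,
				     PAGE_SIZE - pg_ofs);

		___dma_single_dev_to_cpu(kaddr + pg_ofs, chunk,
					 DMA_FROM_DEVICE);
		vaddr += chunk;
	}
}
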
u32 pg_cnt;
struct page **pg_list;
+ /* same ARM-internal cache maintenance helper as in the vmalloc path */
+ extern void ___dma_single_dev_to_cpu(const void *, size_t,
+ enum dma_data_direction);
+
pg_cnt = RANGE_TO_PAGES(mem_area->ui32ByteSize);
pg_list = mem_area->uData.sPageList.pvPageList;
while (pg_cnt--)
- dma_cache_maint(page_address(*pg_list++), PAGE_SIZE,
+ ___dma_single_dev_to_cpu(page_address(*pg_list++), PAGE_SIZE,
DMA_FROM_DEVICE);
}
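
In the page-list case every entry is a whole, separately allocated page,
so the maintenance runs on fixed PAGE_SIZE chunks via page_address() and
needs no offset or tail handling.
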
#include "services_headers.h"
#include "resman.h"
-static DECLARE_MUTEX(lock);
+static DEFINE_SEMAPHORE(lock);
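
DECLARE_MUTEX() is gone from current kernels; despite its name it defined
a binary semaphore, and the one-argument DEFINE_SEMAPHORE() of this kernel
era produces the same object, so existing down()/up() callers are
unaffected. A short usage sketch (the example() caller is hypothetical):

#include <linux/semaphore.h>

static DEFINE_SEMAPHORE(lock);	/* binary semaphore, count starts at 1 */

static void example(void)
{
	down(&lock);	/* may sleep, hence the in_interrupt() check below */
	/* critical section */
	up(&lock);
}
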
#define ACQUIRE_SYNC_OBJ do { \
if (in_interrupt()) { \