ENTRY(v3_flush_kern_dcache_area)
/* FALLTHROUGH */
-/*
- * dma_inv_range(start, end)
- *
- * Invalidate (discard) the specified virtual address range.
- * May not write back any entries. If 'start' or 'end'
- * are not cache line aligned, those lines must be written
- * back.
- *
- * - start - virtual start address
- * - end - virtual end address
- */
-ENTRY(v3_dma_inv_range)
- /* FALLTHROUGH */
-
/*
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range.
 *
 * - start - virtual start address
 * - end - virtual end address
 */
ENTRY(v3_dma_flush_range)
mov r0, #0
mcr p15, 0, r0, c7, c0, 0 @ flush ID cache
- /* FALLTHROUGH */
-
-/*
- * dma_clean_range(start, end)
- *
- * Clean (write back) the specified virtual address range.
- *
- * - start - virtual start address
- * - end - virtual end address
- */
-ENTRY(v3_dma_clean_range)
mov pc, lr
/*
 * dma_unmap_area(start, size, dir)
 *
 * - start - kernel virtual start address
 * - size - size of region
 * - dir - DMA direction
 */
ENTRY(v3_dma_unmap_area)
teq r2, #DMA_TO_DEVICE
- bne v3_dma_inv_range
+ bne v3_dma_flush_range
/* FALLTHROUGH */
/*
 * v3 cache function table (see struct cpu_cache_fns) — NOTE(review):
 * leading entries of this table are outside this hunk; verify order
 * against the full file.
 */
.long v3_flush_kern_dcache_area
.long v3_dma_map_area
.long v3_dma_unmap_area
- .long v3_dma_inv_range
- .long v3_dma_clean_range
.long v3_dma_flush_range
.size v3_cache_fns, . - v3_cache_fns