return 0;
}
+/*
+ * dma_map_single() cannot fail with the current implementation, so
+ * this always reports success.
+ */
+static inline int dma_mapping_error(dma_addr_t addr)
+{
+ return 0;
+}
+
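A driver-side sketch (hypothetical helper, not part of this patch) of the
pattern the stub above exists to support: portable callers must check the
mapping even on architectures where it cannot fail.

/*
 * Hypothetical caller: the stub always returns 0 here, but the check
 * is required for drivers shared with architectures where mapping
 * can fail.
 */
static int example_map_buffer(struct device *dev, void *buf, size_t len,
			      dma_addr_t *handle)
{
	*handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(*handle))
		return -ENOMEM;	/* never taken on this architecture today */
	return 0;
}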
/**
* dma_alloc_coherent - allocate consistent memory for DMA
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
dma_map_single(struct device *dev, void *cpu_addr, size_t size,
enum dma_data_direction direction)
{
- dma_cache_sync(cpu_addr, size, direction);
+ dma_cache_sync(dev, cpu_addr, size, direction);
return virt_to_bus(cpu_addr);
}
sg[i].dma_address = page_to_bus(sg[i].page) + sg[i].offset;
virt = page_address(sg[i].page) + sg[i].offset;
- dma_cache_sync(virt, sg[i].length, direction);
+ dma_cache_sync(dev, virt, sg[i].length, direction);
}
return nents;
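For the scatterlist path, a caller in this era's API fills the entries by
hand before mapping. A minimal sketch (the helper name, the page array, the
segment count of 4, and the PAGE_SIZE segments are all assumptions for
illustration):

/*
 * Sketch: build a 4-entry scatterlist of whole pages and map it.
 * Returns the number of mapped entries, as dma_map_sg() does.
 */
static int example_map_pages(struct device *dev, struct page **pages)
{
	struct scatterlist sg[4];
	int i;

	memset(sg, 0, sizeof(sg));
	for (i = 0; i < 4; i++) {
		sg[i].page   = pages[i];
		sg[i].offset = 0;
		sg[i].length = PAGE_SIZE;
	}
	return dma_map_sg(dev, sg, 4, DMA_TO_DEVICE);
}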
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction direction)
{
- dma_cache_sync(bus_to_virt(dma_handle), size, direction);
+ dma_cache_sync(dev, bus_to_virt(dma_handle), size, direction);
}
static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction direction)
{
- dma_cache_sync(bus_to_virt(dma_handle), size, direction);
+ dma_cache_sync(dev, bus_to_virt(dma_handle), size, direction);
}
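A sketch of the streaming round trip these two helpers implement (the
helper name is hypothetical; handle and len are assumed to come from a
prior dma_map_single() as above):

/*
 * Sketch: reclaim a device-written buffer for the CPU, read it, then
 * hand ownership back to the device.
 */
static void example_poll_rx(struct device *dev, dma_addr_t handle,
			    size_t len)
{
	/* Give the CPU a coherent view of what the device wrote ... */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
	/* ... inspect the buffer here ... */
	/* ... then transfer ownership back to the device. */
	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
}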
/**
int i;
for (i = 0; i < nents; i++) {
- dma_cache_sync(page_address(sg[i].page) + sg[i].offset,
+ dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset,
sg[i].length, direction);
}
}
int i;
for (i = 0; i < nents; i++) {
- dma_cache_sync(page_address(sg[i].page) + sg[i].offset,
+ dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset,
sg[i].length, direction);
}
}
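The same ownership handoff applies per scatterlist, shown in a final sketch
(hypothetical helper; sg and nents must match what was passed to
dma_map_sg(), not its return value):

/*
 * Sketch: sync a mapped scatterlist for CPU access, then back to the
 * device.
 */
static void example_sync_sg(struct device *dev, struct scatterlist *sg,
			    int nents)
{
	dma_sync_sg_for_cpu(dev, sg, nents, DMA_FROM_DEVICE);
	/* ... CPU reads the segments here ... */
	dma_sync_sg_for_device(dev, sg, nents, DMA_FROM_DEVICE);
}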