@@ ... @@
 static int
 check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
 {
-	if (hwdev && bus + size > *hwdev->dma_mask) {
+	if (hwdev && !is_buffer_dma_capable(*hwdev->dma_mask, bus, size)) {
 		if (*hwdev->dma_mask >= DMA_32BIT_MASK)
 			printk(KERN_ERR
 			    "nommu_%s: overflow %Lx+%zu of device mask %Lx\n",
@@ ... @@ static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
 	return nents;
 }
 
-static void *
-nommu_alloc_coherent(struct device *hwdev, size_t size,
-		     dma_addr_t *dma_addr, gfp_t gfp)
-{
-	unsigned long dma_mask;
-	int node;
-	struct page *page;
-
-	if (hwdev->dma_mask == NULL)
-		return NULL;
-
-	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
-	gfp |= __GFP_ZERO;
-
-	dma_mask = hwdev->coherent_dma_mask;
-	if (!dma_mask)
-		dma_mask = *(hwdev->dma_mask);
-
-	if (dma_mask < DMA_24BIT_MASK)
-		return NULL;
-
-	node = dev_to_node(hwdev);
-
-#ifdef CONFIG_X86_64
-	if (dma_mask <= DMA_32BIT_MASK)
-		gfp |= GFP_DMA32;
-#endif
-
-	/* No alloc-free penalty for ISA devices */
-	if (dma_mask == DMA_24BIT_MASK)
-		gfp |= GFP_DMA;
-
-again:
-	page = alloc_pages_node(node, gfp, get_order(size));
-	if (!page)
-		return NULL;
-
-	if ((page_to_phys(page) + size > dma_mask) && !(gfp & GFP_DMA)) {
-		free_pages((unsigned long)page_address(page), get_order(size));
-		gfp |= GFP_DMA;
-		goto again;
-	}
-
-	*dma_addr = page_to_phys(page);
-	if (check_addr("alloc_coherent", hwdev, *dma_addr, size)) {
-		flush_write_buffers();
-		return page_address(page);
-	}
-
-	free_pages((unsigned long)page_address(page), get_order(size));
-
-	return NULL;
-}
-
 static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr,
 				dma_addr_t dma_addr)
 {
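
The removed allocator is superseded by the shared dma_generic_alloc_coherent() in arch/x86/kernel/pci-dma.c, which the ops table below now points at. A condensed sketch of its shape in this kernel era (the coherent-mask and GFP-zone derivation is simplified here, so treat the details as approximate):

void *dma_generic_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t flag)
{
	struct page *page;
	dma_addr_t addr;
	/* Simplified: fall back to the streaming mask when no coherent
	 * mask is set (the real code uses a helper for this). */
	unsigned long dma_mask = dev->coherent_dma_mask ?
				 dev->coherent_dma_mask : *dev->dma_mask;

	flag |= __GFP_ZERO;
again:
	page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
	if (!page)
		return NULL;

	addr = page_to_phys(page);
	if (!is_buffer_dma_capable(dma_mask, addr, size)) {
		__free_pages(page, get_order(size));
		/* One retry from ZONE_DMA before giving up. */
		if (dma_mask < DMA_32BIT_MASK && !(flag & GFP_DMA)) {
			flag = (flag & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}
		return NULL;
	}

	*dma_addr = addr;
	return page_address(page);
}

The point of the change is deduplication: the retry-into-ZONE_DMA allocation loop that each dma_mapping_ops implementation had been carrying collapses into this one copy.
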
@@ ... @@ static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr,
 }
 
 struct dma_mapping_ops nommu_dma_ops = {
-	.alloc_coherent = nommu_alloc_coherent,
+	.alloc_coherent = dma_generic_alloc_coherent,
 	.free_coherent = nommu_free_coherent,
 	.map_single = nommu_map_single,
 	.map_sg = nommu_map_sg,