#include <linux/dma-mapping.h>
#include <linux/dmar.h>
#include <linux/bootmem.h>
#include <linux/pci.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/gart.h>
#include <asm/calgary.h>
int forbid_dac __read_mostly;
EXPORT_SYMBOL(forbid_dac);

const struct dma_mapping_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

static int iommu_sac_force __read_mostly;

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

/* This tells the BIO block layer to assume merging. Default to off
   because we cannot guarantee merging later. */
int iommu_bio_merge __read_mostly = 0;
EXPORT_SYMBOL(iommu_bio_merge);

dma_addr_t bad_dma_address __read_mostly = 0;
EXPORT_SYMBOL(bad_dma_address);
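
/*
 * Illustrative sketch (not part of the original file): drivers of this
 * era detect a failed streaming mapping by comparing the returned handle
 * against bad_dma_address, e.g.
 *
 *	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (addr == bad_dma_address)
 *		goto unwind;
 */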

/* Dummy device used for NULL arguments (normally ISA). Better would
   be probably a smaller DMA mask, but this is bug-to-bug compatible
   to older i386. */
struct device fallback_dev = {
        .bus_id = "fallback device",
        .coherent_dma_mask = DMA_32BIT_MASK,
        .dma_mask = &fallback_dev.coherent_dma_mask,
};

int dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        *dev->dma_mask = mask;

        return 0;
}
EXPORT_SYMBOL(dma_set_mask);
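
/*
 * Illustrative usage sketch (not part of the original file): a PCI
 * driver's probe path typically asks for the widest mask first and falls
 * back when it is refused; "mydev" is a hypothetical pci_dev pointer:
 *
 *	if (dma_set_mask(&mydev->dev, DMA_64BIT_MASK) &&
 *	    dma_set_mask(&mydev->dev, DMA_32BIT_MASK))
 *		return -EIO;	(no usable DMA addressing at all)
 */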

#ifdef CONFIG_X86_64
static __initdata void *dma32_bootmem_ptr;
static unsigned long dma32_bootmem_size __initdata = (128ULL<<20);

static int __init parse_dma32_size_opt(char *p)
{
        if (!p)
                return -EINVAL;
        dma32_bootmem_size = memparse(p, &p);
        return 0;
}
early_param("dma32_size", parse_dma32_size_opt);
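
/*
 * Illustrative example: the size accepts the usual memparse() suffixes
 * on the kernel command line, so booting with
 *
 *	dma32_size=256M
 *
 * reserves 256MB below 4G instead of the default 128MB.
 */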

void __init dma32_reserve_bootmem(void)
{
        unsigned long size, align;
        if (end_pfn <= MAX_DMA32_PFN)
                return;

        align = 64ULL<<20;
        size = round_up(dma32_bootmem_size, align);
        dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
                                 __pa(MAX_DMA_ADDRESS));
        if (dma32_bootmem_ptr)
                dma32_bootmem_size = size;
        else
                dma32_bootmem_size = 0;
}
static void __init dma32_free_bootmem(void)
{
        int node;

        if (end_pfn <= MAX_DMA32_PFN)
                return;

        if (!dma32_bootmem_ptr)
                return;

        for_each_online_node(node)
                free_bootmem_node(NODE_DATA(node), __pa(dma32_bootmem_ptr),
                                  dma32_bootmem_size);

        dma32_bootmem_ptr = NULL;
        dma32_bootmem_size = 0;
}
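
/*
 * Illustrative call-order sketch (assumed call sites, not part of the
 * original file): the pair above only works if the reservation happens
 * early in boot and the release happens just before the IOMMU needs
 * memory below 4G, roughly:
 *
 *	setup_arch()
 *	    dma32_reserve_bootmem();	(grab a chunk below 4G early)
 *	...
 *	pci_iommu_alloc()
 *	    dma32_free_bootmem();	(return it for the IOMMU's use)
 */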

void __init pci_iommu_alloc(void)
{
        /* free the range so iommu could get some range less than 4G */
        dma32_free_bootmem();
        /*
         * The order of these functions is important for
         * fall-back/fail-over reasons
         */
#ifdef CONFIG_GART_IOMMU
        gart_iommu_hole_init();
#endif

#ifdef CONFIG_CALGARY_IOMMU
        detect_calgary();
#endif

        detect_intel_iommu();

#ifdef CONFIG_SWIOTLB
        pci_swiotlb_init();
#endif
}
#endif /* CONFIG_X86_64 */
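
/*
 * Illustrative note (an assumption based on the detect/init routines
 * called above, not a statement from the original file): each routine
 * only claims the machine when its hardware is actually present, so the
 * sequence behaves as a priority list: GART, then Calgary, then Intel
 * VT-d, with swiotlb initialized last as the software fallback when no
 * hardware IOMMU was detected.
 */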

/*
 * See <Documentation/x86_64/boot-options.txt> for the iommu kernel parameter
 * documentation.
 */
static __init int iommu_setup(char *p)
{
        iommu_merge = 1;

        if (!p)
                return -EINVAL;

        while (*p) {
                if (!strncmp(p, "off", 3))
                        no_iommu = 1;
                /* gart_parse_options has more force support */
                if (!strncmp(p, "force", 5))
                        force_iommu = 1;
                if (!strncmp(p, "noforce", 7)) {
                        iommu_merge = 0;
                        force_iommu = 0;
                }

                if (!strncmp(p, "biomerge", 8)) {
                        iommu_bio_merge = 4096;
                        iommu_merge = 1;
                        force_iommu = 1;
                }
                if (!strncmp(p, "panic", 5))
                        panic_on_overflow = 1;
                if (!strncmp(p, "nopanic", 7))
                        panic_on_overflow = 0;
                if (!strncmp(p, "merge", 5)) {
                        iommu_merge = 1;
                        force_iommu = 1;
                }
                if (!strncmp(p, "nomerge", 7))
                        iommu_merge = 0;
                if (!strncmp(p, "forcesac", 8))
                        iommu_sac_force = 1;
                if (!strncmp(p, "allowdac", 8))
                        forbid_dac = 0;
                if (!strncmp(p, "nodac", 5))
                        forbid_dac = 1;
                if (!strncmp(p, "usedac", 6)) {
                        forbid_dac = -1;
                        return 1;
                }
#ifdef CONFIG_SWIOTLB
                if (!strncmp(p, "soft", 4))
                        swiotlb = 1;
#endif

#ifdef CONFIG_GART_IOMMU
                gart_parse_options(p);
#endif

#ifdef CONFIG_CALGARY_IOMMU
                if (!strncmp(p, "calgary", 7))
                        use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

                p += strcspn(p, ",");
                if (*p == ',')
                        ++p;
        }
        return 0;
}
early_param("iommu", iommu_setup);
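
/*
 * Illustrative example: the options above combine as a comma-separated
 * list on the kernel command line, e.g.
 *
 *	iommu=force,nomerge,nopanic
 *
 * forces IOMMU remapping for all DMA while disabling scatter-gather
 * merging and disabling the panic on IOMMU overflow.
 */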

int dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PCI
        if (mask > 0xffffffff && forbid_dac > 0) {
                printk(KERN_INFO "PCI: Disallowing DAC for device %s\n",
                                 dev->bus_id);
                return 0;
        }
#endif

        if (dma_ops->dma_supported)
                return dma_ops->dma_supported(dev, mask);

        /* Copied from i386. Doesn't make much sense, because it will
           only work for pci_alloc_coherent.
           The caller just has to use GFP_DMA in this case. */
        if (mask < DMA_24BIT_MASK)
                return 0;

        /* Tell the device to use SAC when IOMMU force is on. This
           allows the driver to use cheaper accesses in some cases.

           Problem with this is that if we overflow the IOMMU area and
           return DAC as fallback address the device may not handle it
           correctly.

           As a special case some controllers have a 39bit address
           mode that is as efficient as 32bit (aic79xx). Don't force
           SAC for these. Assume all masks <= 40 bits are of this
           type. Normally this doesn't make any difference, but gives
           more gentle handling of IOMMU overflow. */
        if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) {
                printk(KERN_INFO "%s: Force SAC with mask %Lx\n",
                                 dev->bus_id, mask);
                return 0;
        }

        return 1;
}
EXPORT_SYMBOL(dma_supported);
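
/*
 * Illustrative consequence (follows from the check above, not a
 * statement from the original file): booting with "iommu=forcesac"
 * makes a 64-bit capable device's request for DMA_64BIT_MASK fail here,
 * so a driver that falls back (as in the dma_set_mask() sketch earlier)
 * ends up using 32-bit single address cycles instead of DAC.
 */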

/* Allocate DMA memory on node near device */
noinline struct page *
dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
{
        int node;

        node = dev_to_node(dev);

        return alloc_pages_node(node, gfp, order);
}

/*
 * Allocate memory for a coherent mapping.
 */
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
                   gfp_t gfp)
{
        void *memory = NULL;
        struct page *page;
        unsigned long dma_mask = 0;
        dma_addr_t bus;
        int noretry = 0;

        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

        if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
                return memory;

        if (!dev) {
                dev = &fallback_dev;
                gfp |= GFP_DMA;
        }
        dma_mask = dev->coherent_dma_mask;
        if (dma_mask == 0)
                dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;

        /* Device not DMA able */
        if (dev->dma_mask == NULL)
                return NULL;

        /* Don't invoke OOM killer or retry in lower 16MB DMA zone */
        if (gfp & __GFP_DMA)
                noretry = 1;

#ifdef CONFIG_X86_64
        /* Why <=? Even when the mask is smaller than 4GB it is often
           larger than 16MB and in this case we have a chance of
           finding fitting memory in the next higher zone first. If
           not retry with true GFP_DMA. -AK */
        if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
                gfp |= GFP_DMA32;
                if (dma_mask < DMA_32BIT_MASK)
                        noretry = 1;
        }
#endif

 again:
        page = dma_alloc_pages(dev,
                noretry ? gfp | __GFP_NORETRY : gfp, get_order(size));
        if (page == NULL)
                return NULL;

        {
                int high, mmu;
                bus = page_to_phys(page);
                memory = page_address(page);
                high = (bus + size) >= dma_mask;
                mmu = high;
                if (force_iommu && !(gfp & GFP_DMA))
                        mmu = 1;
                else if (high) {
                        free_pages((unsigned long)memory,
                                   get_order(size));

                        /* Don't use the 16MB ZONE_DMA unless absolutely
                           needed. It's better to use remapping first. */
                        if (dma_mask < DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
                                gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
                                goto again;
                        }

                        /* Let low level make its own zone decisions */
                        gfp &= ~(GFP_DMA32|GFP_DMA);

                        if (dma_ops->alloc_coherent)
                                return dma_ops->alloc_coherent(dev, size,
                                                        dma_handle, gfp);
                        return NULL;
                }

                memset(memory, 0, size);
                if (!mmu) {
                        *dma_handle = bus;
                        return memory;
                }
        }

        if (dma_ops->alloc_coherent) {
                free_pages((unsigned long)memory, get_order(size));
                gfp &= ~(GFP_DMA|GFP_DMA32);
                return dma_ops->alloc_coherent(dev, size, dma_handle, gfp);
        }

        if (dma_ops->map_simple) {
                *dma_handle = dma_ops->map_simple(dev, virt_to_phys(memory),
                                                  size,
                                                  PCI_DMA_BIDIRECTIONAL);
                if (*dma_handle != bad_dma_address)
                        return memory;
        }

        if (panic_on_overflow)
                panic("dma_alloc_coherent: IOMMU overflow by %lu bytes\n",
                      (unsigned long)size);
        free_pages((unsigned long)memory, get_order(size));
        return NULL;
}
EXPORT_SYMBOL(dma_alloc_coherent);
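
/*
 * Illustrative usage sketch (not part of the original file): a driver
 * allocating a small descriptor ring; "pdev", "ring" and "ring_dma" are
 * hypothetical names:
 *
 *	void *ring;
 *	dma_addr_t ring_dma;
 *
 *	ring = dma_alloc_coherent(&pdev->dev, 4096, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(&pdev->dev, 4096, ring, ring_dma);
 */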

/*
 * Unmap coherent memory.
 * The caller must ensure that the device has finished accessing the mapping.
 */
void dma_free_coherent(struct device *dev, size_t size,
                         void *vaddr, dma_addr_t bus)
{
        int order = get_order(size);
        WARN_ON(irqs_disabled());       /* for portability */
        if (dma_release_from_coherent(dev, order, vaddr))
                return;
        if (dma_ops->unmap_single)
                dma_ops->unmap_single(dev, bus, size, 0);
        free_pages((unsigned long)vaddr, order);
}
EXPORT_SYMBOL(dma_free_coherent);

static int __init pci_iommu_init(void)
{
#ifdef CONFIG_CALGARY_IOMMU
        calgary_iommu_init();
#endif

        intel_iommu_init();

#ifdef CONFIG_GART_IOMMU
        gart_iommu_init();
#endif

        no_iommu_init();
        return 0;
}

void pci_iommu_shutdown(void)
{
        gart_iommu_shutdown();
}
/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);

#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here */

static __devinit void via_no_dac(struct pci_dev *dev)
{
        if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
                printk(KERN_INFO "PCI: VIA PCI bridge detected. "
                                 "Disabling DAC.\n");
                forbid_dac = 1;
        }
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
#endif