#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/dmar.h>
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/kmemleak.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/calgary.h>
#include <asm/amd_iommu.h>

static int forbid_dac __read_mostly;

struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

static int iommu_sac_force __read_mostly;

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

int iommu_pass_through;

dma_addr_t bad_dma_address __read_mostly = 0;
EXPORT_SYMBOL(bad_dma_address);

/* Dummy device used for NULL arguments (normally ISA).  A smaller DMA
   mask would probably be better, but this is bug-to-bug compatible
   with older i386. */
struct device x86_dma_fallback_dev = {
        .init_name = "fallback device",
        .coherent_dma_mask = DMA_BIT_MASK(32),
        .dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
};
EXPORT_SYMBOL(x86_dma_fallback_dev);

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES	32768

int dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        *dev->dma_mask = mask;

        return 0;
}
EXPORT_SYMBOL(dma_set_mask);

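/*
 * Typical caller pattern (an illustrative sketch, not part of this file):
 * a driver asks for 64-bit DMA addressing and falls back to 32-bit if
 * that is refused.  "pdev" is a hypothetical struct pci_dev seen in a
 * probe routine.
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;	(no usable DMA addressing)
 */
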
#ifdef CONFIG_X86_64
static __initdata void *dma32_bootmem_ptr;
static unsigned long dma32_bootmem_size __initdata = (128ULL<<20);

static int __init parse_dma32_size_opt(char *p)
{
        if (!p)
                return -EINVAL;
        dma32_bootmem_size = memparse(p, &p);
        return 0;
}
early_param("dma32_size", parse_dma32_size_opt);

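/*
 * Example (illustrative): booting with "dma32_size=64M" shrinks the
 * reservation made below.  memparse() accepts the usual K/M/G suffixes,
 * and the value is rounded up to the 64M alignment used by
 * dma32_reserve_bootmem().
 */
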
void __init dma32_reserve_bootmem(void)
{
        unsigned long size, align;

        if (max_pfn <= MAX_DMA32_PFN)
                return;

        /*
         * See aperture_64.c allocate_aperture() for the reason the
         * allocation goal is 512M.
         */
        align = 64ULL<<20;
        size = roundup(dma32_bootmem_size, align);
        dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
                                 512ULL<<20);
        /*
         * Kmemleak should not scan this block as it may not be mapped via the
         * kernel direct mapping.
         */
        kmemleak_ignore(dma32_bootmem_ptr);
        if (dma32_bootmem_ptr)
                dma32_bootmem_size = size;
        else
                dma32_bootmem_size = 0;
}
static void __init dma32_free_bootmem(void)
{
        if (max_pfn <= MAX_DMA32_PFN)
                return;

        if (!dma32_bootmem_ptr)
                return;

        free_bootmem(__pa(dma32_bootmem_ptr), dma32_bootmem_size);

        dma32_bootmem_ptr = NULL;
        dma32_bootmem_size = 0;
}
#endif

void __init pci_iommu_alloc(void)
{
#ifdef CONFIG_X86_64
        /* free the range so the IOMMU can get some range below 4G */
        dma32_free_bootmem();
#endif

        /*
         * The order of these functions is important for
         * fall-back/fail-over reasons.
         */
        gart_iommu_hole_init();

        detect_calgary();

        detect_intel_iommu();

        amd_iommu_detect();

        pci_swiotlb_init();
}

void *dma_generic_alloc_coherent(struct device *dev, size_t size,
                                 dma_addr_t *dma_addr, gfp_t flag)
{
        unsigned long dma_mask;
        struct page *page;
        dma_addr_t addr;

        dma_mask = dma_alloc_coherent_mask(dev, flag);

        flag |= __GFP_ZERO;
again:
        page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
        if (!page)
                return NULL;

        addr = page_to_phys(page);
        if (!is_buffer_dma_capable(dma_mask, addr, size)) {
                __free_pages(page, get_order(size));

                if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) {
                        flag = (flag & ~GFP_DMA32) | GFP_DMA;
                        goto again;
                }

                return NULL;
        }

        *dma_addr = addr;
        return page_address(page);
}

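/*
 * Note the retry above: if the first allocation lands outside the
 * device's mask, the GFP zone is narrowed from DMA32 to DMA and the
 * allocation is retried once from the low zone.  Illustrative use via
 * the generic DMA API ("pdev" is hypothetical):
 *
 *	dma_addr_t bus_addr;
 *	void *cpu_addr = dma_alloc_coherent(&pdev->dev, 4096,
 *					    &bus_addr, GFP_KERNEL);
 */
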
/*
 * See <Documentation/x86_64/boot-options.txt> for the iommu kernel
 * parameter documentation.
 */
static __init int iommu_setup(char *p)
{
        iommu_merge = 1;

        if (!p)
                return -EINVAL;

        while (*p) {
                if (!strncmp(p, "off", 3))
                        no_iommu = 1;
                /* gart_parse_options has more force support */
                if (!strncmp(p, "force", 5))
                        force_iommu = 1;
                if (!strncmp(p, "noforce", 7)) {
                        iommu_merge = 0;
                        force_iommu = 0;
                }

                if (!strncmp(p, "biomerge", 8)) {
                        iommu_merge = 1;
                        force_iommu = 1;
                }
                if (!strncmp(p, "panic", 5))
                        panic_on_overflow = 1;
                if (!strncmp(p, "nopanic", 7))
                        panic_on_overflow = 0;
                if (!strncmp(p, "merge", 5)) {
                        iommu_merge = 1;
                        force_iommu = 1;
                }
                if (!strncmp(p, "nomerge", 7))
                        iommu_merge = 0;
                if (!strncmp(p, "forcesac", 8))
                        iommu_sac_force = 1;
                if (!strncmp(p, "allowdac", 8))
                        forbid_dac = 0;
                if (!strncmp(p, "nodac", 5))
                        forbid_dac = 1;
                if (!strncmp(p, "usedac", 6)) {
                        forbid_dac = -1;
                        return 1;
                }
#ifdef CONFIG_SWIOTLB
                if (!strncmp(p, "soft", 4))
                        swiotlb = 1;
#endif
                if (!strncmp(p, "pt", 2)) {
                        iommu_pass_through = 1;
                        return 1;
                }

                gart_parse_options(p);

#ifdef CONFIG_CALGARY_IOMMU
                if (!strncmp(p, "calgary", 7))
                        use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

                p += strcspn(p, ",");
                if (*p == ',')
                        ++p;
        }
        return 0;
}
early_param("iommu", iommu_setup);

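/*
 * Examples (illustrative): "iommu=off" disables IOMMU use entirely,
 * "iommu=force,panic" forces the IOMMU on and panics on overflow, and
 * "iommu=soft" selects swiotlb when CONFIG_SWIOTLB is set.  Options
 * are comma-separated and handled in a single pass by the loop above.
 */
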
int dma_supported(struct device *dev, u64 mask)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

#ifdef CONFIG_PCI
        if (mask > 0xffffffff && forbid_dac > 0) {
                dev_info(dev, "PCI: Disallowing DAC for device\n");
                return 0;
        }
#endif

        if (ops->dma_supported)
                return ops->dma_supported(dev, mask);

        /* Copied from i386. Doesn't make much sense, because it will
           only work for pci_alloc_coherent.
           The caller just has to use GFP_DMA in this case. */
        if (mask < DMA_BIT_MASK(24))
                return 0;

        /* Tell the device to use SAC when IOMMU force is on.  This
           allows the driver to use cheaper accesses in some cases.

           The problem with this is that if we overflow the IOMMU area
           and return DAC as a fallback address, the device may not
           handle it correctly.

           As a special case some controllers have a 39bit address
           mode that is as efficient as 32bit (aic79xx). Don't force
           SAC for these.  Assume all masks <= 40 bits are of this
           type. Normally this doesn't make any difference, but gives
           more gentle handling of IOMMU overflow. */
        if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) {
                dev_info(dev, "Force SAC with mask %Lx\n", mask);
                return 0;
        }

        return 1;
}
EXPORT_SYMBOL(dma_supported);

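/*
 * Background for the checks above: a single-address-cycle (SAC) PCI
 * transaction carries a 32-bit address, a dual-address-cycle (DAC)
 * transaction a 64-bit one.  Returning 0 here for a 64-bit mask steers
 * a driver into retrying dma_set_mask() with DMA_BIT_MASK(32), as in
 * the sketch after dma_set_mask() above.
 */
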
static int __init pci_iommu_init(void)
{
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

#ifdef CONFIG_PCI
        dma_debug_add_bus(&pci_bus_type);
#endif

        calgary_iommu_init();

        intel_iommu_init();

        amd_iommu_init();

        gart_iommu_init();

        no_iommu_init();
        return 0;
}

void pci_iommu_shutdown(void)
{
        gart_iommu_shutdown();

        amd_iommu_shutdown();
}
/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);

#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here */

static __devinit void via_no_dac(struct pci_dev *dev)
{
        if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
                dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n");
                forbid_dac = 1;
        }
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
#endif
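
/*
 * DECLARE_PCI_FIXUP_FINAL arranges for via_no_dac() to run for every
 * matching VIA device at the final PCI fixup stage, which happens
 * before normal driver probing, so forbid_dac is already raised when
 * drivers start negotiating masks through dma_supported().
 */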