/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows using PCI devices that only support 32-bit addresses on
 * systems with more than 4GB.
 *
 * See Documentation/PCI/PCI-DMA-mapping.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU General Public License v2 only.
 */
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitmap.h>
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <linux/sysdev.h>
#include <linux/io.h>
#include <linux/gfp.h>
#include <asm/atomic.h>
#include <asm/mtrr.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/cacheflush.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
#include <asm/amd_nb.h>
#include <asm/x86_init.h>
static unsigned long iommu_bus_base;    /* GART remapping area (physical) */
static unsigned long iommu_size;        /* size of remapping area in bytes */
static unsigned long iommu_pages;       /* .. and in pages */

static u32 *iommu_gatt_base;            /* Remapping table */

static dma_addr_t bad_dma_addr;
/*
 * If this is disabled the IOMMU will use an optimized flushing strategy
 * of only flushing when a mapping is reused. When it is true, the GART
 * is flushed for every mapping. The problem is that the lazy flush seems
 * to trigger bugs with some popular PCI cards, in particular 3ware (but
 * it has also been seen with Qlogic at least).
 */
static int iommu_fullflush = 1;
/* Allocation bitmap for the remapping area: */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
/* Guarded by iommu_bitmap_lock: */
static unsigned long *iommu_gart_bitmap;

static u32 gart_unmapped_entry;
#define GPTE_VALID    1
#define GPTE_COHERENT 2
#define GPTE_ENCODE(x) \
        (((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))
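
/*
 * Worked example (illustrative only, not used by the code): for the
 * 40-bit physical address 0x123456000, GPTE_ENCODE() keeps the low
 * bits masked to 0x23456000, moves address bits [39:32] (here 0x1)
 * into GPTE bits [7:4], and ORs in GPTE_VALID | GPTE_COHERENT,
 * yielding the 32-bit GPTE 0x23456013. GPTE_DECODE() reverses it:
 * (0x23456013 & 0xfffff000) | ((0x23456013 & 0xff0) << 28)
 * = 0x23456000 | 0x100000000 = 0x123456000.
 */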
#define EMERGENCY_PAGES 32 /* = 128KB */

#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif

/* backdoor interface to the AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;
static unsigned long next_bit;  /* protected by iommu_bitmap_lock */
static bool need_flush;         /* global flush state; set on each GART wrap */
static unsigned long alloc_iommu(struct device *dev, int size,
                                 unsigned long align_mask)
{
        unsigned long offset, flags;
        unsigned long boundary_size;
        unsigned long base_index;

        base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
                           PAGE_SIZE) >> PAGE_SHIFT;
        boundary_size = ALIGN((u64)dma_get_seg_boundary(dev) + 1,
                              PAGE_SIZE) >> PAGE_SHIFT;

        spin_lock_irqsave(&iommu_bitmap_lock, flags);
        offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
                                  size, base_index, boundary_size, align_mask);
        if (offset == -1) {
                need_flush = true;
                offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
                                          size, base_index, boundary_size,
                                          align_mask);
        }
        if (offset != -1) {
                next_bit = offset+size;
                if (next_bit >= iommu_pages) {
                        next_bit = 0;
                        need_flush = true;
                }
        }
        if (iommu_fullflush)
                need_flush = true;
        spin_unlock_irqrestore(&iommu_bitmap_lock, flags);

        return offset;
}
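
/*
 * Note on alloc_iommu() above: the allocation is a next-fit search
 * starting at next_bit; when the tail of the bitmap is exhausted it
 * retries from 0 and sets need_flush, because freed GART entries may
 * still be cached in the GART TLB and must not be handed out again
 * before a flush. With iommu_fullflush set, every allocation forces
 * a flush instead (see flush_gart() below).
 */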
static void free_iommu(unsigned long offset, int size)
{
        unsigned long flags;

        spin_lock_irqsave(&iommu_bitmap_lock, flags);
        bitmap_clear(iommu_gart_bitmap, offset, size);
        if (offset >= next_bit)
                next_bit = offset + size;
        spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}
/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(void)
{
        unsigned long flags;

        spin_lock_irqsave(&iommu_bitmap_lock, flags);
        if (need_flush) {
                k8_flush_garts();
                need_flush = false;
        }
        spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}
#ifdef CONFIG_IOMMU_LEAK
/* Debugging aid for drivers that don't free their IOMMU tables */
static int leak_trace;
static int iommu_leak_pages = 20;

static void dump_leak(void)
{
        static int dump;

        if (dump)
                return;
        dump = 1;

        show_stack(NULL, NULL);
        debug_dma_dump_mappings(NULL);
}
#endif
static void iommu_full(struct device *dev, size_t size, int dir)
{
        /*
         * Ran out of IOMMU space for this operation. This is very bad.
         * Unfortunately the drivers cannot handle this operation properly.
         * Return some non-mapped, pre-reserved space in the aperture and
         * let the Northbridge deal with it. This will result in garbage
         * in the IO operation. When the size exceeds the pre-reserved space,
         * memory corruption will occur or random memory will be DMAed
         * out. Hopefully no network devices use single mappings that big.
         */

        dev_err(dev, "PCI-DMA: Out of IOMMU space for %lu bytes\n", size);

        if (size > PAGE_SIZE*EMERGENCY_PAGES) {
                if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
                        panic("PCI-DMA: Memory would be corrupted\n");
                if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
                        panic(KERN_ERR
                                "PCI-DMA: Random memory would be DMAed\n");
        }
#ifdef CONFIG_IOMMU_LEAK
        dump_leak();
#endif
}
static inline int
need_iommu(struct device *dev, unsigned long addr, size_t size)
{
        return force_iommu || !dma_capable(dev, addr, size);
}

static inline int
nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
        return !dma_capable(dev, addr, size);
}
/*
 * Map a single contiguous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
                               size_t size, int dir, unsigned long align_mask)
{
        unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE);
        unsigned long iommu_page = alloc_iommu(dev, npages, align_mask);
        int i;

        if (iommu_page == -1) {
                if (!nonforced_iommu(dev, phys_mem, size))
                        return phys_mem;
                if (panic_on_overflow)
                        panic("dma_map_area overflow %lu bytes\n", size);
                iommu_full(dev, size, dir);
                return bad_dma_addr;
        }

        for (i = 0; i < npages; i++) {
                iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
                phys_mem += PAGE_SIZE;
        }
        return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}
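
/*
 * Note on the return value above: phys_mem has been advanced in
 * whole-page steps by the fill loop, so (phys_mem & ~PAGE_MASK) is
 * still the caller's original sub-page offset; the returned bus
 * address therefore points at the same byte within the remapped page.
 */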
/* Map a single area into the IOMMU */
static dma_addr_t gart_map_page(struct device *dev, struct page *page,
                                unsigned long offset, size_t size,
                                enum dma_data_direction dir,
                                struct dma_attrs *attrs)
{
        unsigned long bus;
        phys_addr_t paddr = page_to_phys(page) + offset;

        if (!dev)
                dev = &x86_dma_fallback_dev;

        if (!need_iommu(dev, paddr, size))
                return paddr;

        bus = dma_map_area(dev, paddr, size, dir, 0);
        flush_gart();

        return bus;
}
/*
 * Free a DMA mapping.
 */
static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr,
                            size_t size, enum dma_data_direction dir,
                            struct dma_attrs *attrs)
{
        unsigned long iommu_page;
        int npages;
        int i;

        if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
            dma_addr >= iommu_bus_base + iommu_size)
                return;

        iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
        npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
        for (i = 0; i < npages; i++) {
                iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
        }
        free_iommu(iommu_page, npages);
}
/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
                          enum dma_data_direction dir, struct dma_attrs *attrs)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nents, i) {
                if (!s->dma_length || !s->length)
                        break;
                gart_unmap_page(dev, s->dma_address, s->dma_length, dir, NULL);
        }
}
/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
                               int nents, int dir)
{
        struct scatterlist *s;
        int i;

#ifdef CONFIG_IOMMU_DEBUG
        pr_debug("dma_map_sg overflow\n");
#endif

        for_each_sg(sg, s, nents, i) {
                unsigned long addr = sg_phys(s);

                if (nonforced_iommu(dev, addr, s->length)) {
                        addr = dma_map_area(dev, addr, s->length, dir, 0);
                        if (addr == bad_dma_addr) {
                                if (i > 0)
                                        gart_unmap_sg(dev, sg, i, dir, NULL);
                                nents = 0;
                                sg[0].dma_length = 0;
                                break;
                        }
                }
                s->dma_address = addr;
                s->dma_length = s->length;
        }
        flush_gart();

        return nents;
}
/* Map multiple scatterlist entries contiguously into the first. */
static int __dma_map_cont(struct device *dev, struct scatterlist *start,
                          int nelems, struct scatterlist *sout,
                          unsigned long pages)
{
        unsigned long iommu_start = alloc_iommu(dev, pages, 0);
        unsigned long iommu_page = iommu_start;
        struct scatterlist *s;
        int i;

        if (iommu_start == -1)
                return -1;

        for_each_sg(start, s, nelems, i) {
                unsigned long pages, addr;
                unsigned long phys_addr = s->dma_address;

                BUG_ON(s != start && s->offset);
                if (s == start) {
                        sout->dma_address = iommu_bus_base;
                        sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
                        sout->dma_length = s->length;
                } else {
                        sout->dma_length += s->length;
                }

                addr = phys_addr;
                pages = iommu_num_pages(s->offset, s->length, PAGE_SIZE);
                while (pages--) {
                        iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
                        addr += PAGE_SIZE;
                        iommu_page++;
                }
        }
        BUG_ON(iommu_page - iommu_start != pages);

        return 0;
}
static inline int
dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
             struct scatterlist *sout, unsigned long pages, int need)
{
        if (!need) {
                BUG_ON(nelems != 1);
                sout->dma_address = start->dma_address;
                sout->dma_length = start->length;
                return 0;
        }
        return __dma_map_cont(dev, start, nelems, sout, pages);
}
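
/*
 * Worked example (illustrative): two 4KB scatterlist entries, where
 * the first ends exactly on a page boundary and the second starts at
 * offset 0, pass the merge test in gart_map_sg() below. dma_map_cont()
 * then programs two consecutive GART entries and reports a single 8KB
 * DMA segment in sout->dma_length instead of two 4KB segments.
 */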
/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page-aligned sizes into a contiguous mapping.
 */
static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
                       enum dma_data_direction dir, struct dma_attrs *attrs)
{
        struct scatterlist *s, *ps, *start_sg, *sgmap;
        int need = 0, nextneed, i, out, start;
        unsigned long pages = 0;
        unsigned int seg_size;
        unsigned int max_seg_size;

        if (nents == 0)
                return 0;

        if (!dev)
                dev = &x86_dma_fallback_dev;

        out          = 0;
        start        = 0;
        start_sg     = sg;
        sgmap        = sg;
        seg_size     = 0;
        max_seg_size = dma_get_max_seg_size(dev);
        ps           = NULL; /* shut up gcc */

        for_each_sg(sg, s, nents, i) {
                dma_addr_t addr = sg_phys(s);

                s->dma_address = addr;
                BUG_ON(s->length == 0);

                nextneed = need_iommu(dev, addr, s->length);

                /* Handle the previous not-yet-processed entries */
                if (i > start) {
                        /*
                         * Can only merge when the last chunk ends on a
                         * page boundary and the new one doesn't have an
                         * offset.
                         */
                        if (!iommu_merge || !nextneed || !need || s->offset ||
                            (s->length + seg_size > max_seg_size) ||
                            (ps->offset + ps->length) % PAGE_SIZE) {
                                if (dma_map_cont(dev, start_sg, i - start,
                                                 sgmap, pages, need) < 0)
                                        goto error;
                                out++;

                                seg_size = 0;
                                sgmap    = sg_next(sgmap);
                                pages    = 0;
                                start    = i;
                                start_sg = s;
                        }
                }

                seg_size += s->length;
                need = nextneed;
                pages += iommu_num_pages(s->offset, s->length, PAGE_SIZE);
                ps = s;
        }
        if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
                goto error;
        out++;
        flush_gart();
        if (out < nents) {
                sgmap = sg_next(sgmap);
                sgmap->dma_length = 0;
        }
        return out;

error:
        flush_gart();
        gart_unmap_sg(dev, sg, out, dir, NULL);

        /* When it was forced or merged, try again in a dumb way */
        if (force_iommu || iommu_merge) {
                out = dma_map_sg_nonforce(dev, sg, nents, dir);
                if (out > 0)
                        return out;
        }
        if (panic_on_overflow)
                panic("dma_map_sg: overflow on %lu pages\n", pages);

        iommu_full(dev, pages << PAGE_SHIFT, dir);
        for_each_sg(sg, s, nents, i)
                s->dma_address = bad_dma_addr;
        return 0;
}
/* allocate and map a coherent mapping */
static void *
gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
                    gfp_t flag)
{
        dma_addr_t paddr;
        unsigned long align_mask;
        struct page *page;

        if (force_iommu && !(flag & GFP_DMA)) {
                flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
                page = alloc_pages(flag | __GFP_ZERO, get_order(size));
                if (!page)
                        return NULL;

                align_mask = (1UL << get_order(size)) - 1;
                paddr = dma_map_area(dev, page_to_phys(page), size,
                                     DMA_BIDIRECTIONAL, align_mask);

                flush_gart();
                if (paddr != bad_dma_addr) {
                        *dma_addr = paddr;
                        return page_address(page);
                }
                __free_pages(page, get_order(size));
        } else
                return dma_generic_alloc_coherent(dev, size, dma_addr, flag);

        return NULL;
}
/* free a coherent mapping */
static void
gart_free_coherent(struct device *dev, size_t size, void *vaddr,
                   dma_addr_t dma_addr)
{
        gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, NULL);
        free_pages((unsigned long)vaddr, get_order(size));
}

static int gart_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return (dma_addr == bad_dma_addr);
}
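
/*
 * Minimal usage sketch from a driver's point of view ("mydev" is a
 * hypothetical name): the bad_dma_addr cookie returned on failure is
 * what dma_mapping_error() ends up comparing against here.
 *
 *      dma_addr_t addr = dma_map_page(&mydev->pdev->dev, page, 0,
 *                                     size, DMA_TO_DEVICE);
 *      if (dma_mapping_error(&mydev->pdev->dev, addr))
 *              return -ENOMEM;         (mapping failed, do not start DMA)
 */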
static int no_agp;

static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
        unsigned long a;

        if (!iommu_size) {
                iommu_size = aper_size;
                if (!no_agp)
                        iommu_size /= 2;
        }

        a = aper + iommu_size;
        iommu_size -= round_up(a, PMD_PAGE_SIZE) - a;

        if (iommu_size < 64*1024*1024) {
                pr_warning(
                        "PCI-DMA: Warning: Small IOMMU %luMB."
                        " Consider increasing the AGP aperture in BIOS\n",
                                iommu_size >> 20);
        }

        return iommu_size;
}
static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
        unsigned aper_size = 0, aper_base_32, aper_order;
        u64 aper_base;

        pci_read_config_dword(dev, AMD64_GARTAPERTUREBASE, &aper_base_32);
        pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &aper_order);
        aper_order = (aper_order >> 1) & 7;

        aper_base = aper_base_32 & 0x7fff;
        aper_base <<= 25;

        aper_size = (32 * 1024 * 1024) << aper_order;
        if (aper_base + aper_size > 0x100000000UL || !aper_size)
                aper_base = 0;

        *size = aper_size;
        return aper_base;
}
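
/*
 * Example (illustrative): if AMD64_GARTAPERTURECTL reads back with
 * bits [3:1] equal to 3, the aperture is (32MB << 3) = 256MB; if the
 * base register holds 0x40, the aperture starts at 0x40 << 25 = 2GB.
 * Apertures of zero size or reaching beyond 4GB are rejected above,
 * since the aperture must sit in the low 32-bit address space.
 */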
static void enable_gart_translations(void)
{
        int i;

        if (!k8_northbridges.gart_supported)
                return;

        for (i = 0; i < k8_northbridges.num; i++) {
                struct pci_dev *dev = k8_northbridges.nb_misc[i];

                enable_gart_translation(dev, __pa(agp_gatt_table));
        }

        /* Flush the GART-TLB to remove stale entries */
        k8_flush_garts();
}
/*
 * If fix_up_north_bridges is set, the north bridges have to be fixed up on
 * resume in the same way as they are handled in gart_iommu_hole_init().
 */
static bool fix_up_north_bridges;
static u32 aperture_order;
static u32 aperture_alloc;

void set_up_gart_resume(u32 aper_order, u32 aper_alloc)
{
        fix_up_north_bridges = true;
        aperture_order = aper_order;
        aperture_alloc = aper_alloc;
}
static void gart_fixup_northbridges(struct sys_device *dev)
{
        int i;

        if (!fix_up_north_bridges)
                return;

        if (!k8_northbridges.gart_supported)
                return;

        pr_info("PCI-DMA: Restoring GART aperture settings\n");

        for (i = 0; i < k8_northbridges.num; i++) {
                struct pci_dev *dev = k8_northbridges.nb_misc[i];

                /*
                 * Don't enable translations just yet. That is the next
                 * step. Restore the pre-suspend aperture settings.
                 */
                gart_set_size_and_enable(dev, aperture_order);
                pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE, aperture_alloc >> 25);
        }
}
static int gart_resume(struct sys_device *dev)
{
        pr_info("PCI-DMA: Resuming GART IOMMU\n");

        gart_fixup_northbridges(dev);

        enable_gart_translations();

        return 0;
}

static int gart_suspend(struct sys_device *dev, pm_message_t state)
{
        return 0;
}

static struct sysdev_class gart_sysdev_class = {
        .name    = "gart",
        .suspend = gart_suspend,
        .resume  = gart_resume,
};

static struct sys_device device_gart = {
        .cls = &gart_sysdev_class,
};
/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_k8_gatt(struct agp_kern_info *info)
{
        unsigned aper_size, gatt_size, new_aper_size;
        unsigned aper_base, new_aper_base;
        struct pci_dev *dev;
        void *gatt;
        int i, error;

        pr_info("PCI-DMA: Disabling AGP.\n");

        aper_size = aper_base = info->aper_size = 0;
        dev = NULL;
        for (i = 0; i < k8_northbridges.num; i++) {
                dev = k8_northbridges.nb_misc[i];
                new_aper_base = read_aperture(dev, &new_aper_size);
                if (!new_aper_base)
                        goto nommu;

                if (!aper_base) {
                        aper_size = new_aper_size;
                        aper_base = new_aper_base;
                }
                if (aper_size != new_aper_size || aper_base != new_aper_base)
                        goto nommu;
        }
        if (!aper_base)
                goto nommu;

        info->aper_base = aper_base;
        info->aper_size = aper_size >> 20;

        gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
        gatt = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                        get_order(gatt_size));
        if (!gatt)
                panic("Cannot allocate GATT table");
        if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
                panic("Could not set GART PTEs to uncacheable pages");

        agp_gatt_table = gatt;

        error = sysdev_class_register(&gart_sysdev_class);
        if (!error)
                error = sysdev_register(&device_gart);
        if (error)
                panic("Could not register gart_sysdev -- "
                      "would corrupt data on next suspend");

        flush_gart();

        pr_info("PCI-DMA: aperture base @ %x size %u KB\n",
                aper_base, aper_size>>10);

        return 0;

 nommu:
        /* Should not happen anymore */
        pr_warning("PCI-DMA: More than 4GB of RAM and no IOMMU -\n"
                   "falling back to iommu=soft.\n");
        return -1;
}
static struct dma_map_ops gart_dma_ops = {
        .map_sg         = gart_map_sg,
        .unmap_sg       = gart_unmap_sg,
        .map_page       = gart_map_page,
        .unmap_page     = gart_unmap_page,
        .alloc_coherent = gart_alloc_coherent,
        .free_coherent  = gart_free_coherent,
        .mapping_error  = gart_mapping_error,
};
static void gart_iommu_shutdown(void)
{
        struct pci_dev *dev;
        int i;

        /* don't shut it down if AGP is installed */
        if (!no_agp)
                return;

        if (!k8_northbridges.gart_supported)
                return;

        for (i = 0; i < k8_northbridges.num; i++) {
                u32 ctl;

                dev = k8_northbridges.nb_misc[i];
                pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);

                ctl &= ~GARTEN;

                pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
        }
}
int __init gart_iommu_init(void)
{
        struct agp_kern_info info;
        unsigned long iommu_start;
        unsigned long aper_base, aper_size;
        unsigned long start_pfn, end_pfn;
        unsigned long scratch;
        long i;

        if (!k8_northbridges.gart_supported)
                return 0;

#ifndef CONFIG_AGP_AMD64
        no_agp = 1;
#else
        /* Makefile puts PCI initialization via subsys_initcall first. */
        /* Add other K8 AGP bridge drivers here */
        no_agp = no_agp ||
                 (agp_amd64_init() < 0) ||
                 (agp_copy_info(agp_bridge, &info) < 0);
#endif

        if (no_iommu ||
            (!force_iommu && max_pfn <= MAX_DMA32_PFN) ||
            !gart_iommu_aperture ||
            (no_agp && init_k8_gatt(&info) < 0)) {
                if (max_pfn > MAX_DMA32_PFN) {
                        pr_warning("More than 4GB of memory but GART IOMMU not available.\n");
                        pr_warning("falling back to iommu=soft.\n");
                }
                return 0;
        }

        /* need to map that range */
        aper_size = info.aper_size << 20;
        aper_base = info.aper_base;
        end_pfn   = (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);

        if (end_pfn > max_low_pfn_mapped) {
                start_pfn = (aper_base>>PAGE_SHIFT);
                init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
        }

        pr_info("PCI-DMA: using GART IOMMU.\n");
        iommu_size = check_iommu_size(info.aper_base, aper_size);
        iommu_pages = iommu_size >> PAGE_SHIFT;

        iommu_gart_bitmap = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                                     get_order(iommu_pages/8));
        if (!iommu_gart_bitmap)
                panic("Cannot allocate iommu bitmap\n");

#ifdef CONFIG_IOMMU_LEAK
        if (leak_trace) {
                int ret;

                ret = dma_debug_resize_entries(iommu_pages);
                if (ret)
                        pr_debug("PCI-DMA: Cannot trace all the entries\n");
        }
#endif

        /*
         * Out of IOMMU space handling.
         * Reserve some invalid pages at the beginning of the GART.
         */
        bitmap_set(iommu_gart_bitmap, 0, EMERGENCY_PAGES);

        pr_info("PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
                iommu_size >> 20);

        agp_memory_reserved = iommu_size;
        iommu_start         = aper_size - iommu_size;
        iommu_bus_base      = info.aper_base + iommu_start;
        bad_dma_addr        = iommu_bus_base;
        iommu_gatt_base     = agp_gatt_table + (iommu_start>>PAGE_SHIFT);

        /*
         * Unmap the IOMMU part of the GART. The alias of the page is
         * always mapped with cache enabled and there is no full cache
         * coherency across the GART remapping. The unmapping avoids
         * automatic prefetches from the CPU allocating cache lines in
         * there. All CPU accesses are done via the direct mapping to
         * the backing memory. The GART address is only used by PCI
         * devices.
         */
        set_memory_np((unsigned long)__va(iommu_bus_base),
                      iommu_size >> PAGE_SHIFT);

        /*
         * Tricky. The GART table remaps the physical memory range,
         * so the CPU won't notice potential aliases and if the memory
         * is remapped to UC later on, we might surprise the PCI devices
         * with a stray writeout of a cacheline. So play it safe and
         * do an explicit, full-scale wbinvd() _after_ having marked all
         * the pages as Not-Present:
         */
        wbinvd();

        /*
         * Now all caches are flushed and we can safely enable
         * GART hardware. Doing it early leaves the possibility
         * of stale cache entries that can lead to GART PTE
         * errors.
         */
        enable_gart_translations();

        /*
         * Try to work around a bug (thanks to BenH):
         * Set unmapped entries to a scratch page instead of 0.
         * Any prefetches that hit unmapped entries won't get a bus abort
         * then. (P2P bridge may be prefetching on DMA reads).
         */
        scratch = get_zeroed_page(GFP_KERNEL);
        if (!scratch)
                panic("Cannot allocate iommu scratch page");
        gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
        for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
                iommu_gatt_base[i] = gart_unmapped_entry;

        flush_gart();
        dma_ops = &gart_dma_ops;
        x86_platform.iommu_shutdown = gart_iommu_shutdown;

        return 0;
}
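
/*
 * Parse the GART-specific suffixes of the "iommu=" boot option. For
 * example (matching the strncmp() checks below): "iommu=fullflush"
 * forces a GART TLB flush on every mapping, "iommu=nofullflush"
 * re-enables the lazy flushing strategy, and "iommu=memaper=2"
 * forces a fallback aperture of the given order.
 */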
void __init gart_parse_options(char *p)
{
        int arg;

#ifdef CONFIG_IOMMU_LEAK
        if (!strncmp(p, "leak", 4)) {
                leak_trace = 1;
                p += 4;
                if (*p == '=')
                        ++p;
                if (isdigit(*p) && get_option(&p, &arg))
                        iommu_leak_pages = arg;
        }
#endif
        if (isdigit(*p) && get_option(&p, &arg))
                iommu_size = arg;
        if (!strncmp(p, "fullflush", 9))
                iommu_fullflush = 1;
        if (!strncmp(p, "nofullflush", 11))
                iommu_fullflush = 0;
        if (!strncmp(p, "noagp", 5))
                no_agp = 1;
        if (!strncmp(p, "noaperture", 10))
                fix_aperture = 0;
        /* duplicated from pci-dma.c */
        if (!strncmp(p, "force", 5))
                gart_iommu_aperture_allowed = 1;
        if (!strncmp(p, "allowed", 7))
                gart_iommu_aperture_allowed = 1;
        if (!strncmp(p, "memaper", 7)) {
                fallback_aper_force = 1;
                p += 7;
                if (*p == '=')
                        ++p;
                if (get_option(&p, &arg))
                        fallback_aper_order = arg;
        }
}