Merge branch 'x86/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip...
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 0f5a911..c9d8ff2 100644
 #include <asm/pci-direct.h>
 #include <asm/amd_iommu_types.h>
 #include <asm/amd_iommu.h>
-#include <asm/gart.h>
+#include <asm/iommu.h>
 
 /*
  * definitions for the ACPI scanning code
  */
-#define DEVID(bus, devfn) (((bus) << 8) | (devfn))
 #define PCI_BUS(x) (((x) >> 8) & 0xff)
 #define IVRS_HEADER_LENGTH 48
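The DEVID() macro removed above is superseded by calc_devid(), which the
hunks below switch to. A minimal sketch of an equivalent helper, assuming it
is provided by <asm/amd_iommu_types.h> alongside PCI_BUS():

static inline u16 calc_devid(u8 bus, u8 devfn)
{
        /* 16-bit device id: bus in the high byte, devfn in the low byte,
         * the same computation the old DEVID() macro performed */
        return ((u16)bus << 8) | devfn;
}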
 
@@ -118,12 +117,12 @@ static int __initdata amd_iommu_detected;
 
 u16 amd_iommu_last_bdf;                        /* largest PCI device id we have
                                           to handle */
-struct list_head amd_iommu_unity_map;  /* a list of required unity mappings
+LIST_HEAD(amd_iommu_unity_map);                /* a list of required unity mappings
                                           we find in ACPI */
 unsigned amd_iommu_aperture_order = 26; /* size of aperture in power of 2 */
 int amd_iommu_isolate;                 /* if 1, device isolation is enabled */
 
-struct list_head amd_iommu_list;       /* list of all AMD IOMMUs in the
+LIST_HEAD(amd_iommu_list);             /* list of all AMD IOMMUs in the
                                           system */
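LIST_HEAD() from <linux/list.h> declares the list head and initializes it at
compile time, so the runtime INIT_LIST_HEAD() calls removed further down are
no longer needed. Roughly, the declaration above expands to:

/* .next and .prev point back at the head, i.e. an empty list */
struct list_head amd_iommu_list = { &amd_iommu_list, &amd_iommu_list };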
 
 /*
@@ -295,7 +294,7 @@ static int __init find_last_devid_on_pci(int bus, int dev, int fn, int cap_ptr)
        u32 cap;
 
        cap = read_pci_config(bus, dev, fn, cap_ptr+MMIO_RANGE_OFFSET);
-       update_last_devid(DEVID(MMIO_GET_BUS(cap), MMIO_GET_LD(cap)));
+       update_last_devid(calc_devid(MMIO_GET_BUS(cap), MMIO_GET_LD(cap)));
 
        return 0;
 }
@@ -394,17 +393,15 @@ static int __init find_last_devid_acpi(struct acpi_table_header *table)
  */
 static u8 * __init alloc_command_buffer(struct amd_iommu *iommu)
 {
-       u8 *cmd_buf = (u8 *)__get_free_pages(GFP_KERNEL,
+       u8 *cmd_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                        get_order(CMD_BUFFER_SIZE));
-       u64 entry = 0;
+       u64 entry;
 
        if (cmd_buf == NULL)
                return NULL;
 
        iommu->cmd_buf_size = CMD_BUFFER_SIZE;
 
-       memset(cmd_buf, 0, CMD_BUFFER_SIZE);
-
        entry = (u64)virt_to_phys(cmd_buf);
        entry |= MMIO_CMD_SIZE_512;
        memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
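__GFP_ZERO asks the page allocator for pages that are already zeroed, which
is what makes the explicit memset() removable here. The pattern, in sketch
form:

        /* before: allocate, then clear by hand */
        cmd_buf = (u8 *)__get_free_pages(GFP_KERNEL,
                        get_order(CMD_BUFFER_SIZE));
        memset(cmd_buf, 0, CMD_BUFFER_SIZE);

        /* after: the allocator hands back zeroed pages */
        cmd_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                        get_order(CMD_BUFFER_SIZE));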
@@ -429,11 +426,18 @@ static void set_dev_entry_bit(u16 devid, u8 bit)
        amd_iommu_dev_table[devid].data[i] |= (1 << _bit);
 }
 
+/* Writes the specific IOMMU for a device into the rlookup table */
+static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
+{
+       amd_iommu_rlookup_table[devid] = iommu;
+}
+
 /*
  * This function takes the device specific flags read from the ACPI
  * table and sets up the device table entry with that information
  */
-static void __init set_dev_entry_from_acpi(u16 devid, u32 flags, u32 ext_flags)
+static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
+                                          u16 devid, u32 flags, u32 ext_flags)
 {
        if (flags & ACPI_DEVFLAG_INITPASS)
                set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
@@ -449,12 +453,8 @@ static void __init set_dev_entry_from_acpi(u16 devid, u32 flags, u32 ext_flags)
                set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
        if (flags & ACPI_DEVFLAG_LINT1)
                set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);
-}
 
-/* Writes the specific IOMMU for a device into the rlookup table */
-static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
-{
-       amd_iommu_rlookup_table[devid] = iommu;
+       set_iommu_for_device(iommu, devid);
 }
 
 /*
@@ -496,8 +496,10 @@ static void __init init_iommu_from_pci(struct amd_iommu *iommu)
        iommu->cap = read_pci_config(bus, dev, fn, cap_ptr+MMIO_CAP_HDR_OFFSET);
 
        range = read_pci_config(bus, dev, fn, cap_ptr+MMIO_RANGE_OFFSET);
-       iommu->first_device = DEVID(MMIO_GET_BUS(range), MMIO_GET_FD(range));
-       iommu->last_device = DEVID(MMIO_GET_BUS(range), MMIO_GET_LD(range));
+       iommu->first_device = calc_devid(MMIO_GET_BUS(range),
+                                        MMIO_GET_FD(range));
+       iommu->last_device = calc_devid(MMIO_GET_BUS(range),
+                                       MMIO_GET_LD(range));
 }
 
 /*
@@ -511,7 +513,7 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
        u8 *end = p, flags = 0;
        u16 dev_i, devid = 0, devid_start = 0, devid_to = 0;
        u32 ext_flags = 0;
-       bool alias = 0;
+       bool alias = false;
        struct ivhd_entry *e;
 
        /*
@@ -551,22 +553,23 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
                case IVHD_DEV_ALL:
                        for (dev_i = iommu->first_device;
                                        dev_i <= iommu->last_device; ++dev_i)
-                               set_dev_entry_from_acpi(dev_i, e->flags, 0);
+                               set_dev_entry_from_acpi(iommu, dev_i,
+                                                       e->flags, 0);
                        break;
                case IVHD_DEV_SELECT:
                        devid = e->devid;
-                       set_dev_entry_from_acpi(devid, e->flags, 0);
+                       set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
                        break;
                case IVHD_DEV_SELECT_RANGE_START:
                        devid_start = e->devid;
                        flags = e->flags;
                        ext_flags = 0;
-                       alias = 0;
+                       alias = false;
                        break;
                case IVHD_DEV_ALIAS:
                        devid = e->devid;
                        devid_to = e->ext >> 8;
-                       set_dev_entry_from_acpi(devid, e->flags, 0);
+                       set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
                        amd_iommu_alias_table[devid] = devid_to;
                        break;
                case IVHD_DEV_ALIAS_RANGE:
@@ -574,24 +577,25 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
                        flags = e->flags;
                        devid_to = e->ext >> 8;
                        ext_flags = 0;
-                       alias = 1;
+                       alias = true;
                        break;
                case IVHD_DEV_EXT_SELECT:
                        devid = e->devid;
-                       set_dev_entry_from_acpi(devid, e->flags, e->ext);
+                       set_dev_entry_from_acpi(iommu, devid, e->flags,
+                                               e->ext);
                        break;
                case IVHD_DEV_EXT_SELECT_RANGE:
                        devid_start = e->devid;
                        flags = e->flags;
                        ext_flags = e->ext;
-                       alias = 0;
+                       alias = false;
                        break;
                case IVHD_DEV_RANGE_END:
                        devid = e->devid;
                        for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
                                if (alias)
                                        amd_iommu_alias_table[dev_i] = devid_to;
-                               set_dev_entry_from_acpi(
+                               set_dev_entry_from_acpi(iommu,
                                                amd_iommu_alias_table[dev_i],
                                                flags, ext_flags);
                        }
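For the range cases, the *_RANGE_START entries above only record devid_start,
flags, ext_flags and whether an alias target applies; nothing is written
until the matching IVHD_DEV_RANGE_END arrives. An illustrative sequence with
made-up device ids:

        /*
         * IVHD_DEV_ALIAS_RANGE  devid_start=0x0800, devid_to=0x0810, flags=f
         * IVHD_DEV_RANGE_END    devid=0x08ff
         *
         * -> amd_iommu_alias_table[0x0800..0x08ff] = 0x0810, and the flags f
         *    land in the aliased entry, since the loop writes through
         *    amd_iommu_alias_table[dev_i].
         */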
@@ -675,8 +679,6 @@ static int __init init_iommu_all(struct acpi_table_header *table)
        struct amd_iommu *iommu;
        int ret;
 
-       INIT_LIST_HEAD(&amd_iommu_list);
-
        end += table->length;
        p += IVRS_HEADER_LENGTH;
 
@@ -782,8 +784,6 @@ static int __init init_memory_definitions(struct acpi_table_header *table)
        u8 *p = (u8 *)table, *end = (u8 *)table;
        struct ivmd_header *m;
 
-       INIT_LIST_HEAD(&amd_iommu_unity_map);
-
        end += table->length;
        p += IVRS_HEADER_LENGTH;
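Both INIT_LIST_HEAD() calls become redundant once amd_iommu_list and
amd_iommu_unity_map are declared with LIST_HEAD() further up; the removed
runtime initialization did essentially this:

static inline void INIT_LIST_HEAD(struct list_head *list)
{
        list->next = list;
        list->prev = list;
}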
 
@@ -896,7 +896,7 @@ int __init amd_iommu_init(void)
        ret = -ENOMEM;
 
        /* Device table - directly used by all IOMMUs */
-       amd_iommu_dev_table = (void *)__get_free_pages(GFP_KERNEL,
+       amd_iommu_dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                      get_order(dev_table_size));
        if (amd_iommu_dev_table == NULL)
                goto out;
@@ -920,27 +920,23 @@ int __init amd_iommu_init(void)
         * Protection Domain table - maps devices to protection domains
         * This table has the same size as the rlookup_table
         */
-       amd_iommu_pd_table = (void *)__get_free_pages(GFP_KERNEL,
+       amd_iommu_pd_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                     get_order(rlookup_table_size));
        if (amd_iommu_pd_table == NULL)
                goto free;
 
-       amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(GFP_KERNEL,
+       amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
+                                           GFP_KERNEL | __GFP_ZERO,
                                            get_order(MAX_DOMAIN_ID/8));
        if (amd_iommu_pd_alloc_bitmap == NULL)
                goto free;
 
        /*
-        * memory is allocated now; initialize the device table with all zeroes
-        * and let all alias entries point to itself
+        * let all alias entries point to themselves
         */
-       memset(amd_iommu_dev_table, 0, dev_table_size);
        for (i = 0; i < amd_iommu_last_bdf; ++i)
                amd_iommu_alias_table[i] = i;
 
-       memset(amd_iommu_pd_table, 0, rlookup_table_size);
-       memset(amd_iommu_pd_alloc_bitmap, 0, MAX_DOMAIN_ID / 8);
-
        /*
         * never allocate domain 0 because it's used as the non-allocated and
         * error value placeholder
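The reservation itself sits outside this hunk; a plausible sketch of what
follows the comment, marking domain id 0 as permanently taken in the
allocation bitmap:

        amd_iommu_pd_alloc_bitmap[0] = 1;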
@@ -1052,20 +1048,10 @@ static int __init parse_amd_iommu_options(char *str)
 
 static int __init parse_amd_iommu_size_options(char *str)
 {
-       for (; *str; ++str) {
-               if (strcmp(str, "32M") == 0)
-                       amd_iommu_aperture_order = 25;
-               if (strcmp(str, "64M") == 0)
-                       amd_iommu_aperture_order = 26;
-               if (strcmp(str, "128M") == 0)
-                       amd_iommu_aperture_order = 27;
-               if (strcmp(str, "256M") == 0)
-                       amd_iommu_aperture_order = 28;
-               if (strcmp(str, "512M") == 0)
-                       amd_iommu_aperture_order = 29;
-               if (strcmp(str, "1G") == 0)
-                       amd_iommu_aperture_order = 30;
-       }
+       unsigned order = PAGE_SHIFT + get_order(memparse(str, &str));
+
+       if ((order > 24) && (order < 31))
+               amd_iommu_aperture_order = order;
 
        return 1;
 }
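The rewritten parser accepts any size with the usual K/M/G suffixes instead
of six hard-coded strings. A worked example of the arithmetic, assuming
PAGE_SHIFT is 12:

        /*
         * amd_iommu_size=512M:
         *   memparse("512M", &str) -> 0x20000000 (2^29 bytes)
         *   get_order(0x20000000)  -> 17 (2^17 pages of 2^12 bytes)
         *   order = 12 + 17 = 29, and 24 < 29 < 31, so
         *   amd_iommu_aperture_order = 29, the same result the old
         *   "512M" branch produced.
         */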