Merge branch 'core-debug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
[pandora-kernel.git] drivers/net/mlx4/eq.c
index ce064e3..bffb799 100644
@@ -31,7 +31,6 @@
  * SOFTWARE.
  */
 
-#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
 #include "mlx4.h"
 #include "fw.h"
 
+enum {
+       MLX4_IRQNAME_SIZE       = 64
+};
+
 enum {
        MLX4_NUM_ASYNC_EQE      = 0x100,
        MLX4_NUM_SPARE_EQE      = 0x80,
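
The new MLX4_IRQNAME_SIZE constant sizes the per-vector slots in the irq_names buffer that the later hunks fill in. As a rough, standalone check (not part of the patch) that 64 bytes comfortably holds the longest name format used below, here is a userspace snippet; the vector index 127 and the PCI name "0000:0b:00.0" are example values standing in for a large completion vector number and a typical pci_name() result:

/* Standalone illustration, not driver code: verify the longest generated
 * IRQ name fits in a 64-byte slot. */
#include <stdio.h>

#define MLX4_IRQNAME_SIZE 64

int main(void)
{
	char name[MLX4_IRQNAME_SIZE];
	int n;

	/* worst case among the formats used below: "mlx4-comp-%d@pci:%s" */
	n = snprintf(name, sizeof(name), "mlx4-comp-%d@pci:%s",
		     127, "0000:0b:00.0");
	printf("\"%s\" uses %d of %d bytes\n", name, n + 1, MLX4_IRQNAME_SIZE);
	return 0;
}
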
@@ -526,48 +529,6 @@ static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
        iounmap(priv->clr_base);
 }
 
-int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       int ret;
-
-       /*
-        * We assume that mapping one page is enough for the whole EQ
-        * context table.  This is fine with all current HCAs, because
-        * we only use 32 EQs and each EQ uses 64 bytes of context
-        * memory, or 1 KB total.
-        */
-       priv->eq_table.icm_virt = icm_virt;
-       priv->eq_table.icm_page = alloc_page(GFP_HIGHUSER);
-       if (!priv->eq_table.icm_page)
-               return -ENOMEM;
-       priv->eq_table.icm_dma  = pci_map_page(dev->pdev, priv->eq_table.icm_page, 0,
-                                              PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-       if (pci_dma_mapping_error(dev->pdev, priv->eq_table.icm_dma)) {
-               __free_page(priv->eq_table.icm_page);
-               return -ENOMEM;
-       }
-
-       ret = mlx4_MAP_ICM_page(dev, priv->eq_table.icm_dma, icm_virt);
-       if (ret) {
-               pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE,
-                              PCI_DMA_BIDIRECTIONAL);
-               __free_page(priv->eq_table.icm_page);
-       }
-
-       return ret;
-}
-
-void mlx4_unmap_eq_icm(struct mlx4_dev *dev)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-
-       mlx4_UNMAP_ICM(dev, priv->eq_table.icm_virt, 1);
-       pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE,
-                      PCI_DMA_BIDIRECTIONAL);
-       __free_page(priv->eq_table.icm_page);
-}
-
 int mlx4_alloc_eq_table(struct mlx4_dev *dev)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
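
A side note on the comment in the removed mlx4_map_eq_icm(): its arithmetic is off by a factor of two (32 EQs at 64 bytes of context each is 2048 bytes, i.e. 2 KB, not 1 KB), but either figure fits easily in a single page, since PAGE_SIZE on Linux is never smaller than 4 KB, so the one-page assumption it describes still held. With these helpers gone, the EQ context ICM is presumably mapped along with the rest of the ICM tables elsewhere in the driver; that is an inference, not something shown in this excerpt.
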
@@ -615,7 +576,9 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
        priv->eq_table.clr_int  = priv->clr_base +
                (priv->eq_table.inta_pin < 32 ? 4 : 0);
 
-       priv->eq_table.irq_names = kmalloc(16 * dev->caps.num_comp_vectors, GFP_KERNEL);
+       priv->eq_table.irq_names =
+               kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1),
+                       GFP_KERNEL);
        if (!priv->eq_table.irq_names) {
                err = -ENOMEM;
                goto err_out_bitmap;
@@ -625,8 +588,10 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
                err = mlx4_create_eq(dev, dev->caps.num_cqs + MLX4_NUM_SPARE_EQE,
                                     (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
                                     &priv->eq_table.eq[i]);
-               if (err)
+               if (err) {
+                       --i;
                        goto err_out_unmap;
+               }
        }
 
        err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
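
The added "--i" before the goto only makes sense if the err_out_unmap label tears down eq[i] downward to eq[0]: when mlx4_create_eq() fails, eq[i] was never created and must be skipped by the unwind. A minimal standalone sketch of that pattern, assuming such a cleanup loop; create() and destroy() are stand-ins, not driver functions:

/* Standalone sketch, not kernel code: unwind only the slots that were
 * actually created. */
#include <stdio.h>

#define N 4

static int create(int i)   { return i == 2 ? -1 : 0; }	/* fail at slot 2 */
static void destroy(int i) { printf("destroy %d\n", i); }

int main(void)
{
	int i, err = 0;

	for (i = 0; i < N; ++i) {
		err = create(i);
		if (err) {
			--i;		/* slot i was never created */
			goto unwind;
		}
	}
	return 0;

unwind:
	while (i >= 0)
		destroy(i--);		/* frees slots i..0, all of which exist */
	return err;
}
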
@@ -636,17 +601,25 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
                goto err_out_comp;
 
        if (dev->flags & MLX4_FLAG_MSI_X) {
-               static const char async_eq_name[] = "mlx4-async";
                const char *eq_name;
 
                for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
                        if (i < dev->caps.num_comp_vectors) {
-                               snprintf(priv->eq_table.irq_names + i * 16, 16,
-                                        "mlx4-comp-%d", i);
-                               eq_name = priv->eq_table.irq_names + i * 16;
-                       } else
-                               eq_name = async_eq_name;
+                               snprintf(priv->eq_table.irq_names +
+                                        i * MLX4_IRQNAME_SIZE,
+                                        MLX4_IRQNAME_SIZE,
+                                        "mlx4-comp-%d@pci:%s", i,
+                                        pci_name(dev->pdev));
+                       } else {
+                               snprintf(priv->eq_table.irq_names +
+                                        i * MLX4_IRQNAME_SIZE,
+                                        MLX4_IRQNAME_SIZE,
+                                        "mlx4-async@pci:%s",
+                                        pci_name(dev->pdev));
+                       }
 
+                       eq_name = priv->eq_table.irq_names +
+                                 i * MLX4_IRQNAME_SIZE;
                        err = request_irq(priv->eq_table.eq[i].irq,
                                          mlx4_msi_x_interrupt, 0, eq_name,
                                          priv->eq_table.eq + i);
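
The naming code above writes each vector's IRQ name into its own MLX4_IRQNAME_SIZE slot of the irq_names buffer allocated earlier (num_comp_vectors slots for the completion EQs plus one for the async EQ). request_irq() stores the name pointer it is given rather than copying the string, which is presumably why the names are formatted into that long-lived kmalloc'ed table instead of a temporary buffer. A standalone userspace sketch of the same slot layout follows; "0000:0b:00.0" stands in for pci_name() output and printf() stands in for request_irq():

/* Standalone sketch, not driver code: one flat buffer with a 64-byte name
 * slot per completion vector, plus one extra slot for the async EQ. */
#include <stdio.h>
#include <stdlib.h>

#define MLX4_IRQNAME_SIZE 64

int main(void)
{
	int num_comp_vectors = 4;		/* example value */
	const char *pci = "0000:0b:00.0";	/* example pci_name() output */
	char *irq_names;
	int i;

	irq_names = malloc(MLX4_IRQNAME_SIZE * (num_comp_vectors + 1));
	if (!irq_names)
		return 1;

	for (i = 0; i < num_comp_vectors + 1; ++i) {
		char *slot = irq_names + i * MLX4_IRQNAME_SIZE;

		if (i < num_comp_vectors)
			snprintf(slot, MLX4_IRQNAME_SIZE,
				 "mlx4-comp-%d@pci:%s", i, pci);
		else
			snprintf(slot, MLX4_IRQNAME_SIZE,
				 "mlx4-async@pci:%s", pci);

		printf("vector %d -> \"%s\"\n", i, slot);	/* request_irq() stand-in */
	}

	free(irq_names);
	return 0;
}
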
@@ -656,8 +629,12 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
                        priv->eq_table.eq[i].have_irq = 1;
                }
        } else {
+               snprintf(priv->eq_table.irq_names,
+                        MLX4_IRQNAME_SIZE,
+                        DRV_NAME "@pci:%s",
+                        pci_name(dev->pdev));
                err = request_irq(dev->pdev->irq, mlx4_interrupt,
-                                 IRQF_SHARED, DRV_NAME, dev);
+                                 IRQF_SHARED, priv->eq_table.irq_names, dev);
                if (err)
                        goto err_out_async;