Merge branch 'master' into upstream
[pandora-kernel.git] / drivers / net / qla3xxx.c
old mode 100644 (file)
new mode 100755 (executable)
index 8844c20..a142cdf
@@ -22,6 +22,7 @@
 #include <linux/errno.h>
 #include <linux/ioport.h>
 #include <linux/ip.h>
+#include <linux/in.h>
 #include <linux/if_arp.h>
 #include <linux/if_ether.h>
 #include <linux/netdevice.h>
@@ -63,6 +64,7 @@ MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts.");
 
 static struct pci_device_id ql3xxx_pci_tbl[] __devinitdata = {
        {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
+       {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)},
        /* required last entry */
        {0,}
 };
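
The new table entry presumes a QL3032_DEVICE_ID constant added next to the existing QL3022 one in qla3xxx.h, which this diff does not show. Presumably it looks something like the sketch below; the 0x3032 value follows QLogic's part-number convention for the 3022 and is an assumption, not confirmed by this hunk:

    #define QL3022_DEVICE_ID 0x3022	/* existing */
    #define QL3032_DEVICE_ID 0x3032	/* new for the 3032 (assumed value) */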
@@ -1475,6 +1477,10 @@ static int ql_mii_setup(struct ql3_adapter *qdev)
                         2) << 7))
                return -1;
 
+       if (qdev->device_id == QL3032_DEVICE_ID)
+               ql_write_page0_reg(qdev,
+                       &port_regs->macMIIMgmtControlReg, 0x0f00000);
+
        /* Divide 125MHz clock by 28 to meet PHY timing requirements */
        reg = MAC_MII_CONTROL_CLK_SEL_DIV28;
 
@@ -1706,18 +1712,42 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
                                   struct ob_mac_iocb_rsp *mac_rsp)
 {
        struct ql_tx_buf_cb *tx_cb;
+       int i;
 
        tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];
        pci_unmap_single(qdev->pdev,
-                        pci_unmap_addr(tx_cb, mapaddr),
-                        pci_unmap_len(tx_cb, maplen), PCI_DMA_TODEVICE);
-       dev_kfree_skb_irq(tx_cb->skb);
+                        pci_unmap_addr(&tx_cb->map[0], mapaddr),
+                        pci_unmap_len(&tx_cb->map[0], maplen),
+                        PCI_DMA_TODEVICE);
+       tx_cb->seg_count--;
+       if (tx_cb->seg_count) {
+               for (i = 1; i < tx_cb->seg_count; i++) {
+                       pci_unmap_page(qdev->pdev,
+                                      pci_unmap_addr(&tx_cb->map[i],
+                                                     mapaddr),
+                                      pci_unmap_len(&tx_cb->map[i], maplen),
+                                      PCI_DMA_TODEVICE);
+               }
+       }
        qdev->stats.tx_packets++;
        qdev->stats.tx_bytes += tx_cb->skb->len;
+       dev_kfree_skb_irq(tx_cb->skb);
        tx_cb->skb = NULL;
        atomic_inc(&qdev->tx_count);
 }
 
+/*
+ * The difference between 3022 and 3032 for inbound completions:
+ * 3022 uses two buffers per completion.  The first buffer contains
+ * (some) header info, the second the remainder of the headers plus
+ * the data.  For this chip we reserve some space at the top of the
+ * receive buffer so that the header info in buffer one can be
+ * prepended to buffer two.  Buffer two is then sent up while
+ * buffer one is returned to the hardware to be reused.
+ * 3032 receives all of its data and headers in one buffer for a
+ * simpler process.  3032 also supports checksum verification as
+ * can be seen in ql_process_macip_rx_intr().
+ */
 static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
                                   struct ib_mac_iocb_rsp *ib_mac_rsp_ptr)
 {
@@ -1740,14 +1770,17 @@ static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
        qdev->last_rsp_offset = qdev->small_buf_phy_addr_low + offset;
        qdev->small_buf_release_cnt++;
 
-       /* start of first buffer */
-       lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
-       lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index];
-       qdev->lrg_buf_release_cnt++;
-       if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
-               qdev->lrg_buf_index = 0;
-       curr_ial_ptr++;         /* 64-bit pointers require two incs. */
-       curr_ial_ptr++;
+       if (qdev->device_id == QL3022_DEVICE_ID) {
+               /* start of first buffer (3022 only) */
+               lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
+               lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index];
+               qdev->lrg_buf_release_cnt++;
+               if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS) {
+                       qdev->lrg_buf_index = 0;
+               }
+               curr_ial_ptr++; /* 64-bit pointers require two incs. */
+               curr_ial_ptr++;
+       }
 
        /* start of second buffer */
        lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
@@ -1778,7 +1811,8 @@ static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
        qdev->ndev->last_rx = jiffies;
        lrg_buf_cb2->skb = NULL;
 
-       ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
+       if (qdev->device_id == QL3022_DEVICE_ID)
+               ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
        ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
 }
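
The same two-buffer scheme drives the IP completion path in the next hunk, where the driver peeks at buffer one to size the header prepend. That test reduces to the following standalone sketch (not driver code), using the patch's 0xFFFF no-VLAN sentinel:

    /* Header bytes to copy from buffer one in front of buffer two:
     * the first u16 of buffer one is 0xFFFF when no VLAN tag is
     * present (plain Ethernet header); anything else means a VLAN
     * header follows (per the existing 3022 logic).
     */
    static u16 ql3022_rx_hdr_size(const struct sk_buff *skb1)
    {
    	return (*(const u16 *)skb1->data != 0xFFFF) ?
    		VLAN_ETH_HLEN : ETH_HLEN;
    }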
 
@@ -1790,7 +1824,7 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
        struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
        struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
        u32 *curr_ial_ptr;
-       struct sk_buff *skb1, *skb2;
+       struct sk_buff *skb1 = NULL, *skb2;
        struct net_device *ndev = qdev->ndev;
        u16 length = le16_to_cpu(ib_ip_rsp_ptr->length);
        u16 size = 0;
@@ -1806,16 +1840,20 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
        qdev->last_rsp_offset = qdev->small_buf_phy_addr_low + offset;
        qdev->small_buf_release_cnt++;
 
-       /* start of first buffer */
-       lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
-       lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index];
-
-       qdev->lrg_buf_release_cnt++;
-       if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
-               qdev->lrg_buf_index = 0;
-       skb1 = lrg_buf_cb1->skb;
-       curr_ial_ptr++;         /* 64-bit pointers require two incs. */
-       curr_ial_ptr++;
+       if (qdev->device_id == QL3022_DEVICE_ID) {
+               /* start of first buffer on 3022 */
+               lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
+               lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index];
+               qdev->lrg_buf_release_cnt++;
+               if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
+                       qdev->lrg_buf_index = 0;
+               skb1 = lrg_buf_cb1->skb;
+               curr_ial_ptr++; /* 64-bit pointers require two incs. */
+               curr_ial_ptr++;
+               size = ETH_HLEN;
+               if (*((u16 *) skb1->data) != 0xFFFF)
+                       size += VLAN_ETH_HLEN - ETH_HLEN;
+       }
 
        /* start of second buffer */
        lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
@@ -1825,18 +1863,6 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
        if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
                qdev->lrg_buf_index = 0;
 
-       qdev->stats.rx_packets++;
-       qdev->stats.rx_bytes += length;
-
-       /*
-        * Copy the ethhdr from first buffer to second. This
-        * is necessary for IP completions.
-        */
-       if (*((u16 *) skb1->data) != 0xFFFF)
-               size = VLAN_ETH_HLEN;
-       else
-               size = ETH_HLEN;
-
        skb_put(skb2, length);  /* Just the second buffer length here. */
        pci_unmap_single(qdev->pdev,
                         pci_unmap_addr(lrg_buf_cb2, mapaddr),
@@ -1844,16 +1870,40 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
                         PCI_DMA_FROMDEVICE);
        prefetch(skb2->data);
 
-       memcpy(skb_push(skb2, size), skb1->data + VLAN_ID_LEN, size);
-       skb2->dev = qdev->ndev;
        skb2->ip_summed = CHECKSUM_NONE;
+       if (qdev->device_id == QL3022_DEVICE_ID) {
+               /*
+                * Copy the ethhdr from first buffer to second. This
+                * is necessary for 3022 IP completions.
+                */
+               memcpy(skb_push(skb2, size), skb1->data + VLAN_ID_LEN, size);
+       } else {
+               u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum);
+               if (checksum &
+                   (IB_IP_IOCB_RSP_3032_ICE |
+                    IB_IP_IOCB_RSP_3032_CE |
+                    IB_IP_IOCB_RSP_3032_NUC)) {
+                       printk(KERN_ERR
+                              "%s: Bad checksum for this %s packet, checksum = %x.\n",
+                              __func__,
+                              ((checksum & IB_IP_IOCB_RSP_3032_TCP) ?
+                               "TCP" : "UDP"), checksum);
+               } else if (checksum & IB_IP_IOCB_RSP_3032_TCP) {
+                       skb2->ip_summed = CHECKSUM_UNNECESSARY;
+               }
+       }
+       skb2->dev = qdev->ndev;
        skb2->protocol = eth_type_trans(skb2, qdev->ndev);
 
        netif_receive_skb(skb2);
+       qdev->stats.rx_packets++;
+       qdev->stats.rx_bytes += length;
        ndev->last_rx = jiffies;
        lrg_buf_cb2->skb = NULL;
 
-       ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
+       if (qdev->device_id == QL3022_DEVICE_ID)
+               ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
        ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
 }
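
A standalone sketch of the 3032 receive-checksum classification used above. The IB_IP_IOCB_RSP_3032_* masks come from qla3xxx.h, which this diff does not show, so the bit values here are placeholders rather than the real defines:

    /* Placeholder bit values -- the real defines live in qla3xxx.h. */
    #define IB_IP_IOCB_RSP_3032_ICE 0x01	/* IP checksum error (assumed) */
    #define IB_IP_IOCB_RSP_3032_CE  0x02	/* TCP/UDP checksum error (assumed) */
    #define IB_IP_IOCB_RSP_3032_NUC 0x04	/* not UDP or TCP (assumed) */
    #define IB_IP_IOCB_RSP_3032_TCP 0x10	/* frame is TCP (assumed) */

    /* Nonzero when hardware verified the checksum; the patch only
     * promotes TCP frames to CHECKSUM_UNNECESSARY. */
    static int ql3032_csum_ok(u16 checksum)
    {
    	if (checksum & (IB_IP_IOCB_RSP_3032_ICE |
    			IB_IP_IOCB_RSP_3032_CE |
    			IB_IP_IOCB_RSP_3032_NUC))
    		return 0;	/* error bits set: leave CHECKSUM_NONE */
    	return checksum & IB_IP_IOCB_RSP_3032_TCP;
    }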
 
@@ -1880,12 +1930,14 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
                        break;
 
                case OPCODE_IB_MAC_IOCB:
+               case OPCODE_IB_3032_MAC_IOCB:
                        ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *)
                                               net_rsp);
                        (*rx_cleaned)++;
                        break;
 
                case OPCODE_IB_IP_IOCB:
+               case OPCODE_IB_3032_IP_IOCB:
                        ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *)
                                                 net_rsp);
                        (*rx_cleaned)++;
@@ -2032,13 +2084,96 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
        return IRQ_RETVAL(handled);
 }
 
+/*
+ * Get the total number of segments needed for the
+ * given number of fragments.  This is necessary because
+ * outbound address lists (OAL) will be used when more than
+ * two frags are given.  Each address list has 5 addr/len
+ * pairs.  The 5th pair in each OAL is used to point to
+ * the next OAL if more frags are coming.
+ * That is why the frags:segment count ratio is not linear.
+ */
+static int ql_get_seg_count(unsigned short frags)
+{
+       switch(frags) {
+       case 0: return 1;       /* just the skb->data seg */
+       case 1: return 2;       /* skb->data + 1 frag */
+       case 2: return 3;       /* skb->data + 2 frags */
+       case 3: return 5;       /* skb->data + 1 frag + 1 OAL containing 2 frags */
+       case 4: return 6;
+       case 5: return 7;
+       case 6: return 8;
+       case 7: return 10;
+       case 8: return 11;
+       case 9: return 12;
+       case 10: return 13;
+       case 11: return 15;
+       case 12: return 16;
+       case 13: return 17;
+       case 14: return 18;
+       case 15: return 20;
+       case 16: return 21;
+       case 17: return 22;
+       case 18: return 23;
+       }
+       return -1;
+}
+
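
The switch above follows a closed form: one segment for skb->data, one per fragment, and one continuation entry per OAL, where fragments beyond the first two (which ride in the IOCB's own address/length pairs) spill into OALs of four fragments each. A hedged equivalent, shown only to explain the ratio; this helper is not part of the patch:

    /* Sketch: closed form for the table in ql_get_seg_count(). */
    static int ql_seg_count_closed_form(unsigned short frags)
    {
    	if (frags > 18)
    		return -1;		/* same failure case as the switch */
    	if (frags <= 2)
    		return 1 + frags;	/* skb->data + frags fit in the IOCB */
    	/* one extra entry per OAL: 4 data pairs + 1 continuation each */
    	return 1 + frags + (frags - 2 + 3) / 4;
    }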
+static void ql_hw_csum_setup(struct sk_buff *skb,
+                            struct ob_mac_iocb_req *mac_iocb_ptr)
+{
+       struct ethhdr *eth;
+       struct iphdr *ip = NULL;
+       u8 offset = ETH_HLEN;
+
+       eth = (struct ethhdr *)(skb->data);
+
+       if (eth->h_proto == __constant_htons(ETH_P_IP)) {
+               ip = (struct iphdr *)&skb->data[ETH_HLEN];
+       } else if (eth->h_proto == htons(ETH_P_8021Q) &&
+                  ((struct vlan_ethhdr *)skb->data)->
+                  h_vlan_encapsulated_proto == __constant_htons(ETH_P_IP)) {
+               ip = (struct iphdr *)&skb->data[VLAN_ETH_HLEN];
+               offset = VLAN_ETH_HLEN;
+       }
+
+       if (ip) {
+               if (ip->protocol == IPPROTO_TCP) {
+                       mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC;
+                       mac_iocb_ptr->ip_hdr_off = offset;
+                       mac_iocb_ptr->ip_hdr_len = ip->ihl;
+               } else if (ip->protocol == IPPROTO_UDP) {
+                       mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC;
+                       mac_iocb_ptr->ip_hdr_off = offset;
+                       mac_iocb_ptr->ip_hdr_len = ip->ihl;
+               }
+       }
+}
+
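
ql3xxx_send() below leans on tx_cb->map[], tx_cb->oal, struct oal_entry, and the OAL_* flags, all declared in qla3xxx.h rather than in this file. A sketch of the assumed layout follows; the field names match their usage below, but the flag values and the padding word are assumptions:

    struct oal_entry {			/* one address/length pair (ALP) */
    	__le32 dma_lo;
    	__le32 dma_hi;
    	__le32 len;			/* ORed with the flags below */
    #define OAL_LAST_ENTRY 0x80000000	/* last buffer in chain (assumed value) */
    #define OAL_CONT_ENTRY 0x40000000	/* points to next OAL (assumed value) */
    	u32 reserved;			/* pad to 16 bytes (assumed) */
    };

    struct oal {				/* one outbound address list */
    	struct oal_entry oal_entry[5];	/* 4 data pairs + 1 continuation */
    };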
+/*
+ * The difference between 3022 and 3032 sends:
+ * 3022 only supports a simple single-segment transmission.
+ * 3032 supports checksumming and scatter/gather lists (fragments).
+ * The 3032 supports sglists by using the 3 addr/len pairs (ALP)
+ * in the IOCB plus a chain of outbound address lists (OAL) that
+ * each contain 5 ALPs.  The last ALP of the IOCB (3rd) or OAL (5th)
+ * will be used to point to an OAL when more ALP entries are required.
+ * The IOCB is always the top of the chain, followed by one or more
+ * OALs (when necessary).
+ */
 static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
 {
        struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
        struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
        struct ql_tx_buf_cb *tx_cb;
+       u32 tot_len = skb->len;
+       struct oal *oal;
+       struct oal_entry *oal_entry;
+       int len;
        struct ob_mac_iocb_req *mac_iocb_ptr;
        u64 map;
+       int seg_cnt, seg = 0;
+       int frag_cnt = (int)skb_shinfo(skb)->nr_frags;
 
        if (unlikely(atomic_read(&qdev->tx_count) < 2)) {
                if (!netif_queue_stopped(ndev))
@@ -2046,21 +2181,79 @@ static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
                return NETDEV_TX_BUSY;
        }
        tx_cb = &qdev->tx_buf[qdev->req_producer_index] ;
+       seg_cnt = tx_cb->seg_count = ql_get_seg_count(skb_shinfo(skb)->nr_frags);
+       if (seg_cnt == -1) {
+               printk(KERN_ERR PFX "%s: invalid segment count!\n", __func__);
+               return NETDEV_TX_OK;
+       }
        mac_iocb_ptr = tx_cb->queue_entry;
        memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req));
        mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
        mac_iocb_ptr->flags |= qdev->mb_bit_mask;
        mac_iocb_ptr->transaction_id = qdev->req_producer_index;
-       mac_iocb_ptr->data_len = cpu_to_le16((u16) skb->len);
+       mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len);
        tx_cb->skb = skb;
-       map = pci_map_single(qdev->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
-       mac_iocb_ptr->buf_addr0_low = cpu_to_le32(LS_64BITS(map));
-       mac_iocb_ptr->buf_addr0_high = cpu_to_le32(MS_64BITS(map));
-       mac_iocb_ptr->buf_0_len = cpu_to_le32(skb->len | OB_MAC_IOCB_REQ_E);
-       pci_unmap_addr_set(tx_cb, mapaddr, map);
-       pci_unmap_len_set(tx_cb, maplen, skb->len);
-       atomic_dec(&qdev->tx_count);
+       if (skb->ip_summed == CHECKSUM_PARTIAL)
+               ql_hw_csum_setup(skb, mac_iocb_ptr);
+       len = skb_headlen(skb);
+       map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
+       oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
+       oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
+       oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
+       oal_entry->len = cpu_to_le32(len);
+       pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
+       pci_unmap_len_set(&tx_cb->map[seg], maplen, len);
+       seg++;
+
+       if (!skb_shinfo(skb)->nr_frags) {
+               /* Terminate the last segment. */
+               oal_entry->len =
+                   cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY);
+       } else {
+               int i;
+               oal = tx_cb->oal;
+               for (i = 0; i < frag_cnt; i++, seg++) {
+                       skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+                       oal_entry++;
+                       if ((seg == 2 && seg_cnt > 3) ||        /* Check for continuation */
+                           (seg == 7 && seg_cnt > 8) ||        /* requirements. It's strange */
+                           (seg == 12 && seg_cnt > 13) ||      /* but necessary. */
+                           (seg == 17 && seg_cnt > 18)) {
+                               /* Continuation entry points to outbound address list. */
+                               map = pci_map_single(qdev->pdev, oal,
+                                                    sizeof(struct oal),
+                                                    PCI_DMA_TODEVICE);
+                               oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
+                               oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
+                               oal_entry->len =
+                                   cpu_to_le32(sizeof(struct oal) |
+                                               OAL_CONT_ENTRY);
+                               pci_unmap_addr_set(&tx_cb->map[seg], mapaddr,
+                                                  map);
+                               /* the OAL mapping's own length, not the frag's */
+                               pci_unmap_len_set(&tx_cb->map[seg], maplen,
+                                                 sizeof(struct oal));
+                               oal_entry = (struct oal_entry *)oal;
+                               oal++;
+                               seg++;
+                       }
 
+                       map =
+                           pci_map_page(qdev->pdev, frag->page,
+                                        frag->page_offset, frag->size,
+                                        PCI_DMA_TODEVICE);
+                       oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
+                       oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
+                       oal_entry->len = cpu_to_le32(frag->size);
+                       pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
+                       pci_unmap_len_set(&tx_cb->map[seg], maplen,
+                                         frag->size);
+               }
+               /* Terminate the last segment. */
+               oal_entry->len =
+                   cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY);
+       }
+       wmb();
        qdev->req_producer_index++;
        if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
                qdev->req_producer_index = 0;
@@ -2074,8 +2267,10 @@ static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
                printk(KERN_DEBUG PFX "%s: tx queued, slot %d, len %d\n",
                       ndev->name, qdev->req_producer_index, skb->len);
 
+       atomic_dec(&qdev->tx_count);
        return NETDEV_TX_OK;
 }
+
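
For concreteness, the map[]/OAL bookkeeping above for a 3-fragment skb (seg_cnt = 5 from ql_get_seg_count) works out as follows; this is a trace of the loop, not driver code:

    /*
     * seg  mapping                   lands in
     *  0   skb->data (headlen)       IOCB pair 0
     *  1   frag 0                    IOCB pair 1
     *  2   the OAL itself            IOCB pair 2, flagged OAL_CONT_ENTRY
     *  3   frag 1                    OAL entry 0
     *  4   frag 2                    OAL entry 1, flagged OAL_LAST_ENTRY
     */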
 static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
 {
        qdev->req_q_size =
@@ -2359,7 +2554,22 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
        return 0;
 }
 
-static void ql_create_send_free_list(struct ql3_adapter *qdev)
+static void ql_free_send_free_list(struct ql3_adapter *qdev)
+{
+       struct ql_tx_buf_cb *tx_cb;
+       int i;
+
+       tx_cb = &qdev->tx_buf[0];
+       for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
+               if (tx_cb->oal) {
+                       kfree(tx_cb->oal);
+                       tx_cb->oal = NULL;
+               }
+               tx_cb++;
+       }
+}
+
+static int ql_create_send_free_list(struct ql3_adapter *qdev)
 {
        struct ql_tx_buf_cb *tx_cb;
        int i;
@@ -2368,11 +2578,16 @@ static void ql_create_send_free_list(struct ql3_adapter *qdev)
 
        /* Create free list of transmit buffers */
        for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
                tx_cb = &qdev->tx_buf[i];
                tx_cb->skb = NULL;
                tx_cb->queue_entry = req_q_curr;
                req_q_curr++;
+               tx_cb->oal = kmalloc(512, GFP_KERNEL);
+               if (tx_cb->oal == NULL)
+                       return -1;
        }
+       return 0;
 }
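
The 512-byte kmalloc above is presumably sized for the worst case: with the 16-byte oal_entry sketched earlier, sizeof(struct oal) is 80 bytes, and the 18-fragment maximum from ql_get_seg_count() needs at most four chained OALs (320 bytes). A compile-time guard in that spirit, e.g. dropped into ql_create_send_free_list(), would capture the relationship (a sketch, not in the patch):

    /* 18 frags -> 4 OALs; keep the per-tx_cb allocation large enough. */
    BUILD_BUG_ON(4 * sizeof(struct oal) > 512);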
 
 static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
@@ -2447,12 +2662,14 @@ static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
 
        /* Initialize the large buffer queue. */
        ql_init_large_buffers(qdev);
-       ql_create_send_free_list(qdev);
+       if (ql_create_send_free_list(qdev))
+               goto err_free_list;
 
        qdev->rsp_current = qdev->rsp_q_virt_addr;
 
        return 0;
-
+err_free_list:
+       ql_free_send_free_list(qdev);
 err_small_buffers:
        ql_free_buffer_queues(qdev);
 err_buffer_queues:
@@ -2468,6 +2685,7 @@ err_req_rsp:
 
 static void ql_free_mem_resources(struct ql3_adapter *qdev)
 {
+       ql_free_send_free_list(qdev);
        ql_free_large_buffers(qdev);
        ql_free_small_buffers(qdev);
        ql_free_buffer_queues(qdev);
@@ -2766,11 +2984,20 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
        }
 
        /* Enable Ethernet Function */
-       value =
-           (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI |
-            PORT_CONTROL_HH);
-       ql_write_page0_reg(qdev, &port_regs->portControl,
-                          ((value << 16) | value));
+       if (qdev->device_id == QL3032_DEVICE_ID) {
+               value =
+                   (QL3032_PORT_CONTROL_EF | QL3032_PORT_CONTROL_KIE |
+                    QL3032_PORT_CONTROL_EIv6 | QL3032_PORT_CONTROL_EIv4);
+               ql_write_page0_reg(qdev, &port_regs->functionControl,
+                                  ((value << 16) | value));
+       } else {
+               value =
+                   (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI |
+                    PORT_CONTROL_HH);
+               ql_write_page0_reg(qdev, &port_regs->portControl,
+                                  ((value << 16) | value));
+       }
 
 out:
        return status;
@@ -2917,8 +3144,10 @@ static void ql_display_dev_info(struct net_device *ndev)
        struct pci_dev *pdev = qdev->pdev;
 
        printk(KERN_INFO PFX
-              "\n%s Adapter %d RevisionID %d found on PCI slot %d.\n",
-              DRV_NAME, qdev->index, qdev->chip_rev_id, qdev->pci_slot);
+              "\n%s Adapter %d RevisionID %d found %s on PCI slot %d.\n",
+              DRV_NAME, qdev->index, qdev->chip_rev_id,
+              (qdev->device_id == QL3032_DEVICE_ID) ? "QLA3032" : "QLA3022",
+              qdev->pci_slot);
        printk(KERN_INFO PFX
               "%s Interface.\n",
               test_bit(QL_LINK_OPTICAL,&qdev->flags) ? "OPTICAL" : "COPPER");
@@ -2999,7 +3228,7 @@ static int ql_adapter_up(struct ql3_adapter *qdev)
 {
        struct net_device *ndev = qdev->ndev;
        int err;
-       unsigned long irq_flags = SA_SAMPLE_RANDOM | SA_SHIRQ;
+       unsigned long irq_flags = IRQF_SAMPLE_RANDOM | IRQF_SHARED;
        unsigned long hw_flags;
 
        if (ql_alloc_mem_resources(qdev)) {
@@ -3018,7 +3247,7 @@ static int ql_adapter_up(struct ql3_adapter *qdev)
                } else {
                        printk(KERN_INFO PFX "%s: MSI Enabled...\n", qdev->ndev->name);
                        set_bit(QL_MSI_ENABLED,&qdev->flags);
-                       irq_flags &= ~SA_SHIRQ;
+                       irq_flags &= ~IRQF_SHARED;
                }
        }
 
@@ -3212,15 +3441,22 @@ static void ql_reset_work(struct work_struct *work)
                 * Loop through the active list and return the skb.
                 */
                for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
+                       int j;
                        tx_cb = &qdev->tx_buf[i];
                        if (tx_cb->skb) {
-
                                printk(KERN_DEBUG PFX
                                       "%s: Freeing lost SKB.\n",
                                       qdev->ndev->name);
                                pci_unmap_single(qdev->pdev,
-                                       pci_unmap_addr(tx_cb, mapaddr),
-                                       pci_unmap_len(tx_cb, maplen), PCI_DMA_TODEVICE);
+                                        pci_unmap_addr(&tx_cb->map[0], mapaddr),
+                                        pci_unmap_len(&tx_cb->map[0], maplen),
+                                        PCI_DMA_TODEVICE);
+                       for (j = 1; j < tx_cb->seg_count; j++) {
+                               pci_unmap_page(qdev->pdev,
+                                       pci_unmap_addr(&tx_cb->map[j], mapaddr),
+                                       pci_unmap_len(&tx_cb->map[j], maplen),
+                                       PCI_DMA_TODEVICE);
+                       }
                                dev_kfree_skb(tx_cb->skb);
                                tx_cb->skb = NULL;
                        }
@@ -3379,21 +3615,24 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
        SET_MODULE_OWNER(ndev);
        SET_NETDEV_DEV(ndev, &pdev->dev);
 
-       if (pci_using_dac)
-               ndev->features |= NETIF_F_HIGHDMA;
-
        pci_set_drvdata(pdev, ndev);
 
        qdev = netdev_priv(ndev);
        qdev->index = cards_found;
        qdev->ndev = ndev;
        qdev->pdev = pdev;
+       qdev->device_id = pci_entry->device;
        qdev->port_link_state = LS_DOWN;
        if (msi)
                qdev->msi = 1;
 
        qdev->msg_enable = netif_msg_init(debug, default_msg);
 
+       if (pci_using_dac)
+               ndev->features |= NETIF_F_HIGHDMA;
+       if (qdev->device_id == QL3032_DEVICE_ID)
+               ndev->features |= (NETIF_F_HW_CSUM | NETIF_F_SG);
+
        qdev->mem_map_registers =
            ioremap_nocache(pci_resource_start(pdev, 1),
                            pci_resource_len(qdev->pdev, 1));